diff --git a/.github/actions/testing-setup/action.yml b/.github/actions/testing-setup/action.yml new file mode 100644 index 0000000000..1ab96aa3df --- /dev/null +++ b/.github/actions/testing-setup/action.yml @@ -0,0 +1,78 @@ +name: 'Build-.testing-prerequisites' +description: 'Build pre-requisites for .testing including FMS and a symmetric MOM6 executable' +inputs: + build_symmetric: + description: 'If true, will build the symmetric MOM6 executable' + required: false + default: 'true' + install_python: + description: 'If true, will install the local python env needed for .testing' + required: false + default: 'true' +runs: + using: 'composite' + steps: + - name: Git info + shell: bash + run: | + echo "::group::Git commit info" + echo "git log:" + git log | head -60 + echo "::endgroup::" + + - name: Env + shell: bash + run: | + echo "::group::Environment" + env + echo "::endgroup::" + + - name: Install needed packages for compiling + shell: bash + run: | + echo "::group::Install linux packages" + sudo apt-get update + sudo apt-get install netcdf-bin libnetcdf-dev libnetcdff-dev mpich libmpich-dev + sudo apt-get install linux-tools-common + echo "::endgroup::" + + - name: Compile FMS library + shell: bash + run: | + echo "::group::Compile FMS library" + cd .testing + make deps/lib/libFMS.a -s -j + echo "::endgroup::" + + - name: Store compiler flags used in Makefile + shell: bash + run: | + echo "::group::config.mk" + cd .testing + echo "FCFLAGS_DEBUG=-g -O0 -Wextra -Wno-compare-reals -fbacktrace -ffpe-trap=invalid,zero,overflow -fcheck=bounds" >> config.mk + echo "FCFLAGS_REPRO=-g -O2 -fbacktrace" >> config.mk + echo "FCFLAGS_INIT=-finit-real=snan -finit-integer=2147483647 -finit-derived" >> config.mk + echo "FCFLAGS_COVERAGE=--coverage" >> config.mk + cat config.mk + echo "::endgroup::" + + - name: Compile MOM6 in symmetric memory mode + shell: bash + run: | + echo "::group::Compile MOM6 in symmetric memory mode" + cd .testing + test ${{ inputs.build_symmetric }} == true && make build/symmetric/MOM6 -j + echo "::endgroup::" + + - name: Install local python venv for generating input data + shell: bash + run: | + echo "::group::Create local python env for input data generation" + cd .testing + test ${{ inputs.install_python }} == true && make work/local-env + echo "::endgroup::" + + - name: Set flags + shell: bash + run: | + echo "TIMEFORMAT=... 
completed in %lR (user: %lU, sys: %lS)" >> $GITHUB_ENV diff --git a/.github/workflows/coupled-api.yml b/.github/workflows/coupled-api.yml new file mode 100644 index 0000000000..86d7262548 --- /dev/null +++ b/.github/workflows/coupled-api.yml @@ -0,0 +1,33 @@ +name: API for coupled drivers + +on: [push, pull_request] + +jobs: + test-top-api: + + runs-on: ubuntu-latest + defaults: + run: + working-directory: .testing + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: ./.github/actions/testing-setup + with: + build_symmetric: 'false' + install_python: 'false' + + - name: Compile MOM6 for the GFDL coupled driver + shell: bash + run: make check_mom6_api_coupled -j + + - name: Compile MOM6 for the NUOPC driver + shell: bash + run: make check_mom6_api_nuopc -j + + - name: Compile MOM6 for the MCT driver + shell: bash + run: make check_mom6_api_mct -j diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 0000000000..60b85e412b --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,24 @@ +name: Code coverage + +on: [push, pull_request] + +jobs: + build-test-nans: + + runs-on: ubuntu-latest + defaults: + run: + working-directory: .testing + + env: + REPORT_COVERAGE: true + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: ./.github/actions/testing-setup + + - name: Run and post coverage + run: make run.symmetric -k -s diff --git a/.github/workflows/documentation-and-style.yml b/.github/workflows/documentation-and-style.yml new file mode 100644 index 0000000000..c171c538d5 --- /dev/null +++ b/.github/workflows/documentation-and-style.yml @@ -0,0 +1,39 @@ +name: Doxygen and style + +on: [push, pull_request] + +jobs: + doxygen: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Check white space (non-blocking) + run: | + ./.testing/trailer.py -e TEOS10 -l 120 src config_src 2>&1 | tee style_errors + continue-on-error: true + + - name: Install packages used when generating documentation + run: | + sudo apt-get update + sudo apt-get install python3-sphinx python3-lxml perl + sudo apt-get install texlive-binaries texlive-base bibtool tex-common texlive-bibtex-extra + sudo apt-get install graphviz + + - name: Build doxygen HTML + run: | + cd docs + perl -e 'print "perl version $^V" . "\n"' + mkdir _build && make nortd DOXYGEN_RELEASE=Release_1_8_13 UPDATEHTMLEQS=Y + cat _build/doxygen_warn_nortd_log.txt + + - name: Report doxygen or style errors + run: | + grep "warning:" docs/_build/doxygen_warn_nortd_log.txt | grep -v "as part of a" | tee doxy_errors + cat style_errors doxy_errors > all_errors + cat all_errors + test ! 
-s all_errors diff --git a/.github/workflows/expression.yml b/.github/workflows/expression.yml new file mode 100644 index 0000000000..020d656aee --- /dev/null +++ b/.github/workflows/expression.yml @@ -0,0 +1,27 @@ +name: Expression verification + +on: [push, pull_request] + +jobs: + test-repro-and-dims: + + runs-on: ubuntu-latest + defaults: + run: + working-directory: .testing + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: ./.github/actions/testing-setup + + - name: Compile MOM6 using repro optimization + run: make build/repro/MOM6 -j + + - name: Create validation data + run: make run.symmetric -k -s + + - name: Run tests + run: make test.repro test.dim -k -s diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml new file mode 100644 index 0000000000..34239b0b7c --- /dev/null +++ b/.github/workflows/other.yml @@ -0,0 +1,27 @@ +name: OpenMP and Restart verification + +on: [push, pull_request] + +jobs: + test-openmp-nan-restarts: + + runs-on: ubuntu-latest + defaults: + run: + working-directory: .testing + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: ./.github/actions/testing-setup + + - name: Compile with openMP + run: make build/openmp/MOM6 -j + + - name: Create validation data + run: make run.symmetric -k -s + + - name: Run tests + run: make test.openmp test.nan test.restart -k -s diff --git a/.github/workflows/perfmon.yml b/.github/workflows/perfmon.yml new file mode 100644 index 0000000000..00e645c4fd --- /dev/null +++ b/.github/workflows/perfmon.yml @@ -0,0 +1,36 @@ +name: Performance Monitor + +on: [pull_request] + +jobs: + build-test-perfmon: + + runs-on: ubuntu-latest + defaults: + run: + working-directory: .testing + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: ./.github/actions/testing-setup + + - name: Compile optimized models + run: >- + make -j build.prof + MOM_TARGET_SLUG=$GITHUB_REPOSITORY + MOM_TARGET_LOCAL_BRANCH=$GITHUB_BASE_REF + DO_REGRESSION_TESTS=true + + - name: Generate profile data + run: >- + pip install f90nml && + make profile + DO_REGRESSION_TESTS=true + + - name: Generate perf data + run: | + sudo sysctl -w kernel.perf_event_paranoid=2 + make perf DO_REGRESSION_TESTS=true diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml new file mode 100644 index 0000000000..acc42e4720 --- /dev/null +++ b/.github/workflows/regression.yml @@ -0,0 +1,27 @@ +name: Regression + +on: [pull_request] + +jobs: + build-test-regression: + + runs-on: ubuntu-latest + defaults: + run: + working-directory: .testing + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: ./.github/actions/testing-setup + + - name: Compile reference model + run: make build.regressions MOM_TARGET_SLUG=$GITHUB_REPOSITORY MOM_TARGET_LOCAL_BRANCH=$GITHUB_BASE_REF DO_REGRESSION_TESTS=true -j + + - name: Create validation data + run: make run.symmetric -k -s + + - name: Regression test + run: make test.regression DO_REGRESSION_TESTS=true -k -s diff --git a/.github/workflows/stencil.yml b/.github/workflows/stencil.yml new file mode 100644 index 0000000000..51a0611fc4 --- /dev/null +++ b/.github/workflows/stencil.yml @@ -0,0 +1,27 @@ +name: Stencil related verification + +on: [push, pull_request] + +jobs: + test-symmetric-layout-rotation: + + runs-on: ubuntu-latest + defaults: + run: + working-directory: .testing + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: ./.github/actions/testing-setup 
+ + - name: Compile MOM6 in asymmetric memory mode + run: make build/asymmetric/MOM6 -j + + - name: Create validation data + run: make run.symmetric -k -s + + - name: Run tests + run: make test.grid test.layout test.rotate -k -s diff --git a/.gitignore b/.gitignore index ccaecbbead..25f7524d1c 100644 --- a/.gitignore +++ b/.gitignore @@ -4,13 +4,7 @@ html -# Build output -*.o -*.mod -MOM6 - - -# Autoconf +# Autoconf output aclocal.m4 autom4te.cache/ config.log diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1622ae9886..5e1da1c1f9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,109 +1,78 @@ stages: - - merge+setup - builds - run - tests - cleanup +variables: + CACHE_DIR: "/lustre/f2/scratch/oar.gfdl.ogrp-account/runner/cache/" + + # Merges MOM6 with dev/gfdl. Changes directory to test directory, if it exists. +# - set cache location +# - get MOM6-examples/tools/MRS scripts by cloning Gaea-stats and then MOM6-examples +# - set working directory to MOM6-examples +# - pull down latest of dev/gfdl (MOM6-examples might be ahead of Gaea-stats) before_script: - - MOM6_SRC=$CI_PROJECT_DIR - - echo Cache directory set to ${CACHE_DIR:=/lustre/f2/scratch/oar.gfdl.ogrp-account/runner/cache/} - - git pull --no-edit https://github.com/NOAA-GFDL/MOM6.git dev/gfdl && git submodule init && git submodule update - - pwd ; ls + - echo Cache directory set to $CACHE_DIR + - echo -e "\e[0Ksection_start:`date +%s`:before[collapsed=true]\r\e[0KPre-script" + - git clone https://gitlab.gfdl.noaa.gov/ogrp/Gaea-stats-MOM6-examples.git tests + - cd tests && git submodule init && git submodule update + - cd MOM6-examples && git checkout dev/gfdl && git pull + - echo -e "\e[0Ksection_end:`date +%s`:before\r\e[0K" # Tests that merge with dev/gfdl works. merge: - stage: merge+setup + stage: builds tags: - ncrc4 script: - - pwd ; ls + - cd $CI_PROJECT_DIR - git pull --no-edit https://github.com/NOAA-GFDL/MOM6.git dev/gfdl -# Clones regression repo, if necessary, pulls latest of everything, and sets up working space -setup: - stage: merge+setup - tags: - - ncrc4 - script: - - pwd ; ls - # Clone regressions directory - - git clone --recursive http://gitlab.gfdl.noaa.gov/ogrp/Gaea-stats-MOM6-examples.git tests && cd tests - # Install / update testing scripts - - git clone https://github.com/adcroft/MRS.git MRS - # Update MOM6-examples and submodules - - (cd MOM6-examples && git checkout . && git checkout dev/gfdl && git pull && git submodule init && git submodule update) - - (cd MOM6-examples/src/MOM6 && git submodule update) - - test -d MOM6-examples/src/LM3 || make -f MRS/Makefile.clone clone_gfdl -s - - make -f MRS/Makefile.clone MOM6-examples/.datasets -s - - env > gitlab_session.log - # Cache everything under tests to unpack for each subsequent stage - - cd ../ ; time tar zcf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz tests - # Compiles gnu:repro: stage: builds tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time make -f MRS/Makefile.build MOM6_SRC=../ build_gnu -s -j - - time make -f MRS/Makefile.build MOM6_SRC=../ static_gnu -s -j - - time tar zvcf $CACHE_DIR/build-gnu-repro-$CI_PIPELINE_ID.tgz `find build/gnu -name MOM6` + - time make -f tools/MRS/Makefile MOM6_SRC=../.. pipeline-build-repro-gnu -s -j + - time make -f tools/MRS/Makefile MOM6_SRC=../.. 
pipeline-build-static-gnu -s -j gnu:ocean-only-nolibs: stage: builds tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - make -f MRS/Makefile.build build/gnu/env && cd build/gnu - # mkdir -p build/gnu/repro/symmetric_dynamic/ocean_only && cd build/gnu/repro/symmetric_dynamic/ocean_only - - ../../MOM6-examples/src/mkmf/bin/list_paths -l ../../../config_src/{solo_driver,dynamic_symmetric,ext*} ../../../src ../../MOM6-examples/src/FMS - - sed -i '/FMS\/.*\/test_/d' path_names - - ../../MOM6-examples/src/mkmf/bin/mkmf -t ../../MOM6-examples/src/mkmf/templates/ncrc-gnu.mk -p MOM6 -c"-Duse_libMPI -Duse_netCDF" path_names - - time (source ./env ; make NETCDF=3 REPRO=1 MOM6 -s -j) + - make -f tools/MRS/Makefile pipeline-build-gnu-oceanonly-nolibs gnu:ice-ocean-nolibs: stage: builds tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - make -f MRS/Makefile.build build/gnu/env && cd build/gnu - # mkdir -p build/gnu/repro/symmetric_dynamic/ocean_only && cd build/gnu/repro/symmetric_dynamic/ocean_only - - ../../MOM6-examples/src/mkmf/bin/list_paths -l ../../../config_src/{coupled_driver,dynamic,ext*} ../../../src ../../MOM6-examples/src/{FMS,coupler,SIS2,icebergs,ice_param,land_null,atmos_null} - - sed -i '/FMS\/.*\/test_/d' path_names - - ../../MOM6-examples/src/mkmf/bin/mkmf -t ../../MOM6-examples/src/mkmf/templates/ncrc-gnu.mk -p MOM6 -c"-Duse_libMPI -Duse_netCDF -D_USE_LEGACY_LAND_ -Duse_AM3_physics" path_names - - time (source ./env ; make NETCDF=3 REPRO=1 MOM6 -s -j) + - make -f tools/MRS/Makefile pipeline-build-gnu-iceocean-nolibs intel:repro: stage: builds tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - make -f MRS/Makefile.build MOM6_SRC=../ build_intel -s -j - - time tar zvcf $CACHE_DIR/build-intel-repro-$CI_PIPELINE_ID.tgz `find build/intel -name MOM6` + - time make -f tools/MRS/Makefile MOM6_SRC=../.. pipeline-build-repro-intel -s -j pgi:repro: stage: builds tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - make -f MRS/Makefile.build MOM6_SRC=../ build_pgi -s -j - - time tar zvcf $CACHE_DIR/build-pgi-repro-$CI_PIPELINE_ID.tgz `find build/pgi -name MOM6` + - time make -f tools/MRS/Makefile MOM6_SRC=../.. pipeline-build-repro-pgi -s -j gnu:debug: stage: builds tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - make -f MRS/Makefile.build MOM6_SRC=../ debug_gnu -s -j - - time tar zvcf $CACHE_DIR/build-gnu-debug-$CI_PIPELINE_ID.tgz `find build/gnu -name MOM6` + - time make -f tools/MRS/Makefile MOM6_SRC=../.. 
pipeline-build-debug-gnu -s -j # Runs run: @@ -111,16 +80,43 @@ run: tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/build-gnu-repro-$CI_PIPELINE_ID.tgz - - time tar zxf $CACHE_DIR/build-intel-repro-$CI_PIPELINE_ID.tgz - - time tar zxf $CACHE_DIR/build-pgi-repro-$CI_PIPELINE_ID.tgz - # time tar zxf $CACHE_DIR/build-gnu-debug-$CI_PIPELINE_ID.tgz - - (echo '#!/bin/tcsh';echo 'make -f MRS/Makefile.tests all') > job.sh - - sbatch --clusters=c3,c4 --nodes=29 --time=0:34:00 --account=gfdl_o --qos=debug --job-name=mom6_regressions --output=log.$CI_PIPELINE_ID --wait job.sh - - cat log.$CI_PIPELINE_ID - - test -f restart_results_gnu.tar.gz - - time tar zvcf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz *.tar.gz + - make -f tools/MRS/Makefile mom6-pipeline-run + +gnu.testing: + stage: run + tags: + - ncrc4 + before_script: + - echo -e "\e[0Ksection_start:`date +%s`:submodules[collapsed=true]\r\e[0KCloning submodules" + - git submodule init ; git submodule update + - echo -e "\e[0Ksection_end:`date +%s`:submodules\r\e[0K" + script: + - echo -e "\e[0Ksection_start:`date +%s`:compile[collapsed=true]\r\e[0KCompiling executables" + - cd .testing + - module unload PrgEnv-pgi PrgEnv-intel PrgEnv-gnu darshan ; module load PrgEnv-gnu ; module unload netcdf gcc ; module load gcc/7.3.0 cray-hdf5 cray-netcdf + - make work/local-env + - make -s -j + - echo -e "\e[0Ksection_end:`date +%s`:compile\r\e[0K" + - (echo '#!/bin/bash';echo '. ./work/local-env/bin/activate';echo 'make MPIRUN="srun -mblock --exclusive" test -s -j') > job.sh + - sbatch --clusters=c3,c4 --nodes=5 --time=0:05:00 --account=gfdl_o --qos=debug --job-name=MOM6.gnu.testing --output=log.$CI_PIPELINE_ID --wait job.sh && make test || cat log.$CI_PIPELINE_ID + +intel.testing: + stage: run + tags: + - ncrc4 + before_script: + - echo -e "\e[0Ksection_start:`date +%s`:submodules[collapsed=true]\r\e[0KCloning submodules" + - git submodule init ; git submodule update + - echo -e "\e[0Ksection_end:`date +%s`:submodules\r\e[0K" + script: + - echo -e "\e[0Ksection_start:`date +%s`:compile[collapsed=true]\r\e[0KCompiling executables" + - cd .testing + - module unload PrgEnv-pgi PrgEnv-intel PrgEnv-gnu darshan; module load PrgEnv-intel; module unload netcdf intel; module load intel/18.0.6.288 cray-hdf5 cray-netcdf + - make work/local-env + - make -s -j + - echo -e "\e[0Ksection_end:`date +%s`:compile\r\e[0K" + - (echo '#!/bin/bash';echo '. 
./work/local-env/bin/activate';echo 'make MPIRUN="srun -mblock --exclusive" test -s -j') > job.sh + - sbatch --clusters=c3,c4 --nodes=5 --time=0:05:00 --account=gfdl_o --qos=debug --job-name=MOM6.gnu.testing --output=log.$CI_PIPELINE_ID --wait job.sh && make test || cat log.$CI_PIPELINE_ID # Tests gnu:non-symmetric: @@ -128,113 +124,91 @@ gnu:non-symmetric: tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests gnu_non_symmetric + - make -f tools/MRS/Makefile mom6-pipeline-test-gnu_non_symmetric -intel:non-symmetric: +gnu:symmetric: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests intel_non_symmetric + - make -f tools/MRS/Makefile mom6-pipeline-test-gnu_symmetric -pgi:non-symmetric: +gnu:memory: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests pgi_non_symmetric + - make -f tools/MRS/Makefile mom6-pipeline-test-gnu_memory -gnu:symmetric: +gnu:static: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests gnu_symmetric + - make -f tools/MRS/Makefile mom6-pipeline-test-gnu_static -intel:symmetric: +gnu:restart: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests intel_symmetric + - make -f tools/MRS/Makefile mom6-pipeline-test-gnu_restarts -pgi:symmetric: +gnu:params: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests pgi_symmetric + - make -f tools/MRS/Makefile mom6-pipeline-test-params_gnu_symmetric + allow_failure: true -gnu:layout: +intel:symmetric: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests gnu_layout + - make -f tools/MRS/Makefile mom6-pipeline-test-intel_symmetric -intel:layout: +intel:non-symmetric: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests intel_layout + - make -f tools/MRS/Makefile mom6-pipeline-test-intel_non_symmetric -pgi:layout: +intel:memory: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests pgi_layout + - make -f tools/MRS/Makefile mom6-pipeline-test-intel_memory -gnu:static: +pgi:symmetric: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests gnu_static + - make -f tools/MRS/Makefile mom6-pipeline-test-pgi_symmetric -gnu:restart: +pgi:non-symmetric: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests 
gnu_check_restarts + - make -f tools/MRS/Makefile mom6-pipeline-test-pgi_non_symmetric -gnu:params: +pgi:memory: stage: tests tags: - ncrc4 script: - - time tar zxf $CACHE_DIR/tests_$CI_PIPELINE_ID.tgz && cd tests - - time tar zxf $CACHE_DIR/results-$CI_PIPELINE_ID.tgz - - make -f MRS/Makefile.tests params_gnu_symmetric - allow_failure: true + - make -f tools/MRS/Makefile mom6-pipeline-test-pgi_memory cleanup: stage: cleanup tags: - ncrc4 + before_script: + - echo Skipping submodule update script: - rm $CACHE_DIR/*$CI_PIPELINE_ID.tgz diff --git a/.readthedocs.yml b/.readthedocs.yml index b95a9b901f..f7ad4421b4 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,11 +1,16 @@ -# don't build extra formats (like HTML zip) -formats: - - none +version: 2 -# path to pip requirements file to bring in -# doxygen extensions -requirements_file: docs/requirements.txt +# Extra formats +# PDF generation is failing for now; disabled on 2020-12-02 +#formats: +# - pdf + +# Build documentation +sphinx: + configuration: docs/conf.py python: # make sure we're using Python 3 - version: 3.5 + version: 3 + install: + - requirements: docs/requirements.txt diff --git a/.testing/Makefile b/.testing/Makefile index 4b3dfdefb8..bd0cbc4c0a 100644 --- a/.testing/Makefile +++ b/.testing/Makefile @@ -1,21 +1,91 @@ +# MOM6 Test suite Makefile +# +# Usage: +# make -j +# Build the FMS library and test executables +# +# make -j test +# Run the test suite, defined in the `tc` directores. +# +# make clean +# Wipe the MOM6 test executables +# (NOTE: This does not delete FMS in the `deps`) +# +# +# Configuration: +# These settings can be provided as either command-line flags, or saved in a +# `config.mk` file. +# +# Experiment Configuration: +# BUILDS Executables to be built by `make` or `make all` +# CONFIGS Model configurations to test (default: `tc*`) +# TESTS Tests to run +# DIMS Dimensional scaling tests +# (NOTE: Each test will build its required executables, regardless of BUILDS) +# +# General test configuration: +# FRAMEWORK Model framework (fms1 or fms2) +# MPIRUN MPI job launcher (mpirun, srun, etc) +# DO_REPRO_TESTS Enable production ("repro") testing equivalence +# DO_REGRESSION_TESTS Enable regression tests (usually dev/gfdl) +# REPORT_COVERAGE Enable code coverage and report to codecov +# +# Compiler configuration: +# CC C compiler +# MPICC MPI C compiler +# FC Fortran compiler +# MPIFC MPI Fortran compiler +# (NOTE: These are environment variables and may be inherited from a shell.) +# +# Build configuration: +# FCFLAGS_DEBUG Testing ("debug") compiler flags +# FCFLAGS_REPRO Production ("repro") compiler flags +# FCFLAGS_OPT Aggressive optimization compiler flags +# FCFLAGS_INIT Variable initialization flags +# FCFLAGS_COVERAGE Code coverage flags +# +# Regression repository ("target") configuration: +# MOM_TARGET_SLUG URL slug (minus domain) of the target repo +# MOM_TARGET_URL Full URL of the target repo +# MOM_TARGET_LOCAL_BRANCH Target branch name +# (NOTE: These would typically be configured by a CI.) 
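
For local use, a minimal setup sketch follows (hypothetical values: it assumes a GNU/gfortran toolchain and an `mpirun` launcher similar to what the testing-setup action above configures, and should be adjusted for other compilers or MPI stacks):

    cd .testing
    # config.mk is the optional user-provided configuration read by this Makefile
    cat > config.mk <<'EOF'
    MPIRUN = mpirun
    DO_REPRO_TESTS = true
    FCFLAGS_DEBUG = -g -O0 -fbacktrace -fcheck=bounds
    FCFLAGS_REPRO = -g -O2 -fbacktrace
    FCFLAGS_INIT = -finit-real=snan -finit-integer=2147483647
    EOF
    make -j        # build FMS and the MOM6 test executables
    make -j test   # run the test suite defined by the tc* configurations
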
+# +#---- + +# TODO: POSIX shell compatibility SHELL = bash + +# No implicit rules .SUFFIXES: +# No implicit variables +MAKEFLAGS += -R + # User-defined configuration -include config.mk +# Set the infra framework +FRAMEWORK ?= fms1 + # Set the MPI launcher here +# TODO: This needs more automated configuration MPIRUN ?= mpirun -# Default target compiler flags +# Generic compiler variables are pass through to the builds +export CC +export MPICC +export FC +export MPIFC + +# Builds are distinguished by FCFLAGS # NOTE: FMS will be built using FCFLAGS_DEBUG FCFLAGS_DEBUG ?= -g -O0 FCFLAGS_REPRO ?= -g -O2 +FCFLAGS_OPT ?= -g -O3 -mavx -fno-omit-frame-pointer FCFLAGS_INIT ?= FCFLAGS_COVERAGE ?= # Additional notes: -# -# - The default values are simple, minimalist flags, supported by nearly all +# - These default values are simple, minimalist flags, supported by nearly all # compilers which are comparable to GFDL's canonical DEBUG and REPRO builds. # # - These flags should be configured outside of the Makefile, either with @@ -24,11 +94,25 @@ FCFLAGS_COVERAGE ?= # - FMS cannot be built with the same aggressive initialization flags as MOM6, # so FCFLAGS_INIT is used to provide additional MOM6 configuration. +# User-defined LDFLAGS (applied to all builds and FMS) +LDFLAGS_USER ?= + # Set to `true` to require identical results from DEBUG and REPRO builds +# NOTE: Many compilers (Intel, GCC on ARM64) do not yet produce identical +# results across DEBUG and REPRO builds (as defined below), so we disable on +# default. DO_REPRO_TESTS ?= -# Many compilers (Intel, GCC on ARM64) do not yet produce identical results -# across DEBUG and REPRO builds (as defined below), so we disable on default. +# Time measurement (configurable by the CI) +TIME ?= time + + +# Experiment configuration +BUILDS ?= symmetric asymmetric openmp +CONFIGS ?= $(wildcard tc*) +TESTS ?= grid layout rotate restart openmp nan $(foreach d,$(DIMS),dim.$(d)) +DIMS ?= t l h z q r + #--- # Dependencies @@ -42,32 +126,30 @@ MKMF := $(DEPS)/bin/mkmf #--- # Test configuration -# Executables -BUILDS = symmetric asymmetric repro openmp -CONFIGS := $(wildcard tc*) -TESTS = grids layouts restarts nans dims openmps rotations -DIMS = t l h z q r - # REPRO tests enable reproducibility with optimization, and often do not match # the DEBUG results in older GCCs and vendor compilers, so we can optionally # disable them. 
ifeq ($(DO_REPRO_TESTS), true) BUILDS += repro - TESTS += repros + TESTS += repro +endif + +# Profiling +ifeq ($(DO_PROFILE), false) + BUILDS += opt opt_target endif # The following variables are configured by Travis: # DO_REGRESSION_TESTS: true if $(TRAVIS_PULL_REQUEST) is a PR number # MOM_TARGET_SLUG: TRAVIS_REPO_SLUG # MOM_TARGET_LOCAL_BRANCH: TRAVIS_BRANCH - # These are set to true by our Travis configuration if testing a pull request DO_REGRESSION_TESTS ?= REPORT_COVERAGE ?= ifeq ($(DO_REGRESSION_TESTS), true) BUILDS += target - TESTS += regressions + TESTS += regression MOM_TARGET_SLUG ?= NOAA-GFDL/MOM6 MOM_TARGET_URL ?= https://github.com/$(MOM_TARGET_SLUG) @@ -91,10 +173,12 @@ SOURCE = \ $(foreach ext,F90 inc c h,$(wildcard $(1)/*/*.$(ext) $(1)/*/*/*.$(ext))) MOM_SOURCE = $(call SOURCE,../src) \ - $(wildcard ../config_src/solo_driver/*.F90) \ + $(wildcard ../config_src/infra/FMS1/*.F90) \ + $(wildcard ../config_src/drivers/solo_driver/*.F90) \ $(wildcard ../config_src/ext*/*/*.F90) TARGET_SOURCE = $(call SOURCE,build/target_codebase/src) \ - $(wildcard build/target_codebase/config_src/solo_driver/*.F90) \ + $(wildcard build/target_codebase/config_src/infra/FMS1/*.F90) \ + $(wildcard build/target_codebase/config_src/drivers/solo_driver/*.F90) \ $(wildcard build/target_codebase/config_src/ext*/*.F90) FMS_SOURCE = $(call SOURCE,$(DEPS)/fms/src) @@ -125,9 +209,10 @@ endif #--- # Rules -.PHONY: all build.regressions -all: $(foreach b,$(BUILDS),build/$(b)/MOM6) +.PHONY: all build.regressions build.prof +all: $(foreach b,$(BUILDS),build/$(b)/MOM6) $(VENV_PATH) build.regressions: $(foreach b,symmetric target,build/$(b)/MOM6) +build.prof: $(foreach b,opt opt_target,build/$(b)/MOM6) # Executable BUILD_TARGETS = MOM6 Makefile path_names @@ -150,11 +235,12 @@ PATH_FMS = PATH="${PATH}:../../$(DEPS)/bin" SYMMETRIC_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(COVERAGE) $(FCFLAGS_FMS)" ASYMMETRIC_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_FMS)" REPRO_FCFLAGS := FCFLAGS="$(FCFLAGS_REPRO) $(FCFLAGS_FMS)" +OPT_FCFLAGS := FCFLAGS="$(FCFLAGS_OPT) $(FCFLAGS_FMS)" OPENMP_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_FMS)" TARGET_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_FMS)" -MOM_LDFLAGS := LDFLAGS="$(LDFLAGS_FMS)" -SYMMETRIC_LDFLAGS := LDFLAGS="$(COVERAGE) $(LDFLAGS_FMS)" +MOM_LDFLAGS := LDFLAGS="$(LDFLAGS_FMS) $(LDFLAGS_USER)" +SYMMETRIC_LDFLAGS := LDFLAGS="$(COVERAGE) $(LDFLAGS_FMS) $(LDFLAGS_USER)" # Environment variable configuration @@ -163,7 +249,11 @@ build/asymmetric/Makefile: MOM_ENV=$(PATH_FMS) $(ASYMMETRIC_FCFLAGS) $(MOM_LDFLA build/repro/Makefile: MOM_ENV=$(PATH_FMS) $(REPRO_FCFLAGS) $(MOM_LDFLAGS) build/openmp/Makefile: MOM_ENV=$(PATH_FMS) $(OPENMP_FCFLAGS) $(MOM_LDFLAGS) build/target/Makefile: MOM_ENV=$(PATH_FMS) $(TARGET_FCFLAGS) $(MOM_LDFLAGS) - +build/opt/Makefile: MOM_ENV=$(PATH_FMS) $(OPT_FCFLAGS) $(MOM_LDFLAGS) +build/opt_target/Makefile: MOM_ENV=$(PATH_FMS) $(OPT_FCFLAGS) $(MOM_LDFLAGS) +build/coupled/Makefile: MOM_ENV=$(PATH_FMS) $(SYMMETRIC_FCFLAGS) $(SYMMETRIC_LDFLAGS) +build/nuopc/Makefile: MOM_ENV=$(PATH_FMS) $(SYMMETRIC_FCFLAGS) $(SYMMETRIC_LDFLAGS) +build/mct/Makefile: MOM_ENV=$(PATH_FMS) $(SYMMETRIC_FCFLAGS) $(SYMMETRIC_LDFLAGS) # Configure script flags build/symmetric/Makefile: MOM_ACFLAGS= @@ -171,10 +261,15 @@ build/asymmetric/Makefile: MOM_ACFLAGS=--enable-asymmetric build/repro/Makefile: MOM_ACFLAGS= build/openmp/Makefile: MOM_ACFLAGS=--enable-openmp build/target/Makefile: MOM_ACFLAGS= - 
+build/opt/Makefile: MOM_ACFLAGS= +build/opt_target/Makefile: MOM_ACFLAGS= +build/coupled/Makefile: MOM_ACFLAGS=--with-driver=coupled_driver +build/nuopc/Makefile: MOM_ACFLAGS=--with-driver=nuopc_driver +build/mct/Makefile: MOM_ACFLAGS=--with-driver=mct_driver # Fetch regression target source code build/target/Makefile: | $(TARGET_CODEBASE) +build/opt_target/Makefile: | $(TARGET_CODEBASE) # Define source code dependencies @@ -182,13 +277,13 @@ build/target/Makefile: | $(TARGET_CODEBASE) # Ideally we would want to re-run both Makefile and mkmf, but our mkmf call # is inside ./configure, so we must re-run ./configure as well. $(foreach b,$(filter-out target,$(BUILDS)),build/$(b)/Makefile): $(MOM_SOURCE) -build/target/configure: $(TARGET_SOURCE) +build/target_codebase/configure: $(TARGET_SOURCE) # Build MOM6 .PRECIOUS: $(foreach b,$(BUILDS),build/$(b)/MOM6) build/%/MOM6: build/%/Makefile - cd $(@D) && time $(MAKE) -j + cd $(@D) && $(TIME) $(MAKE) -j # Use autoconf to construct the Makefile for each target @@ -196,7 +291,7 @@ build/%/MOM6: build/%/Makefile build/%/Makefile: ../ac/configure ../ac/Makefile.in $(DEPS)/lib/libFMS.a $(MKMF) $(LIST_PATHS) mkdir -p $(@D) cd $(@D) \ - && $(MOM_ENV) ../../../ac/configure $(MOM_ACFLAGS) \ + && $(MOM_ENV) ../../../ac/configure $(MOM_ACFLAGS) --with-framework=$(FRAMEWORK) \ || (cat config.log && false) @@ -205,7 +300,8 @@ build/%/Makefile: ../ac/configure ../ac/Makefile.in $(DEPS)/lib/libFMS.a $(MKMF) # Fetch the regression target codebase -build/target/Makefile: $(TARGET_CODEBASE)/ac/configure $(DEPS)/lib/libFMS.a $(MKMF) $(LIST_PATHS) +build/target/Makefile build/opt_target/Makefile: \ + $(TARGET_CODEBASE)/ac/configure $(DEPS)/lib/libFMS.a $(MKMF) $(LIST_PATHS) mkdir -p $(@D) cd $(@D) \ && $(MOM_ENV) ../../$(TARGET_CODEBASE)/ac/configure $(MOM_ACFLAGS) \ @@ -218,15 +314,15 @@ $(TARGET_CODEBASE)/ac/configure: $(TARGET_CODEBASE) $(TARGET_CODEBASE): git clone --recursive $(MOM_TARGET_URL) $@ - cd $@ && git checkout $(MOM_TARGET_BRANCH) - # Copy modern autoconf files to target? - mkdir -p $(TARGET_CODEBASE)/ac - cp -r ../ac/{configure.ac,Makefile.in,m4} $(TARGET_CODEBASE)/ac + cd $@ && git checkout --recurse-submodules $(MOM_TARGET_BRANCH) #--- # FMS +# Set up the FMS build environment variables +FMS_ENV = PATH="${PATH}:../../bin" FCFLAGS="$(FCFLAGS_DEBUG)" + # TODO: *.mod dependencies? $(DEPS)/lib/libFMS.a: $(DEPS)/fms/build/libFMS.a $(MAKE) -C $(DEPS) lib/libFMS.a @@ -235,7 +331,8 @@ $(DEPS)/fms/build/libFMS.a: $(DEPS)/fms/build/Makefile $(MAKE) -C $(DEPS) fms/build/libFMS.a $(DEPS)/fms/build/Makefile: $(DEPS)/fms/src/configure $(DEPS)/Makefile.fms.in $(MKMF) $(LIST_PATHS) - PATH_ENV="${PATH}:../../bin" FCFLAGS_ENV="$(FCFLAGS_DEBUG)" $(MAKE) -C $(DEPS) fms/build/Makefile + $(FMS_ENV) $(MAKE) -C $(DEPS) fms/build/Makefile + $(MAKE) -C $(DEPS) fms/build/Makefile $(DEPS)/Makefile.fms.in: ../ac/deps/Makefile.fms.in $(DEPS)/Makefile cp $< $(DEPS) @@ -261,18 +358,43 @@ $(DEPS)/Makefile: ../ac/deps/Makefile mkdir -p $(@D) cp $< $@ + +#--- +# The following block does a non-library build of a coupled driver interface to MOM, along with everything below it. +# This simply checks that we have not broken the ability to compile. This is not a means to build a complete coupled executable. 
+# Todo: +# - avoid re-building FMS and MOM6 src by re-using existing object/mod files +# - use autoconf rather than mkmf templates +MK_TEMPLATE ?= ../../$(DEPS)/mkmf/templates/ncrc-gnu.mk +# NUOPC driver +build/nuopc/mom_ocean_model_nuopc.o: build/nuopc/Makefile + cd $(@D) && make $(@F) +check_mom6_api_nuopc: build/nuopc/mom_ocean_model_nuopc.o +# GFDL coupled driver +build/coupled/ocean_model_MOM.o: build/coupled/Makefile + cd $(@D) && make $(@F) +check_mom6_api_coupled: build/coupled/ocean_model_MOM.o +# MCT driver +build/mct/mom_ocean_model_mct.o: build/mct/Makefile + cd $(@D) && make $(@F) +check_mom6_api_mct: build/mct/mom_ocean_model_mct.o + + #--- # Python preprocessing + # NOTE: Some less mature environments (e.g. Arm64 Ubuntu) require explicit # installation of numpy before netCDF4, as well as wheel and cython support. work/local-env: python3 -m venv $@ . $@/bin/activate \ + && python3 -m pip install --upgrade pip \ && pip3 install wheel \ && pip3 install cython \ && pip3 install numpy \ && pip3 install netCDF4 + #--- # Testing @@ -287,74 +409,98 @@ test: $(foreach t,$(TESTS),test.$(t)) # TODO: restart checksum comparison is not yet implemented .PHONY: $(foreach t,$(TESTS),test.$(t)) -test.grids: $(foreach c,$(filter-out tc3,$(CONFIGS)),$(c).grid $(c).grid.diag) -test.layouts: $(foreach c,$(CONFIGS),$(c).layout $(c).layout.diag) -test.rotations: $(foreach c,$(CONFIGS),$(c).rotate) -test.restarts: $(foreach c,$(CONFIGS),$(c).restart) -test.repros: $(foreach c,$(CONFIGS),$(c).repro $(c).repro.diag) -test.openmps: $(foreach c,$(CONFIGS),$(c).openmp $(c).openmp.diag) -test.nans: $(foreach c,$(CONFIGS),$(c).nan $(c).nan.diag) -test.dims: $(foreach c,$(CONFIGS),$(foreach d,$(DIMS),$(c).dim.$(d) $(c).dim.$(d).diag)) -test.regressions: $(foreach c,$(CONFIGS),$(c).regression $(c).regression.diag) +test.grid: $(foreach c,$(filter-out tc3,$(CONFIGS)),$(c).grid $(c).grid.diag) +test.layout: $(foreach c,$(CONFIGS),$(c).layout $(c).layout.diag) +test.rotate: $(foreach c,$(CONFIGS),$(c).rotate) +test.restart: $(foreach c,$(CONFIGS),$(c).restart) +test.repro: $(foreach c,$(CONFIGS),$(c).repro $(c).repro.diag) +test.openmp: $(foreach c,$(CONFIGS),$(c).openmp $(c).openmp.diag) +test.nan: $(foreach c,$(CONFIGS),$(c).nan $(c).nan.diag) +test.regression: $(foreach c,$(CONFIGS),$(c).regression $(c).regression.diag) +test.dim: $(foreach d,$(DIMS),test.dim.$(d)) +define TEST_DIM_RULE +test.dim.$(1): $(foreach c,$(CONFIGS),$(c).dim.$(1) $(c).dim.$(1).diag) +endef +$(foreach d,$(DIMS),$(eval $(call TEST_DIM_RULE,$(d)))) .PHONY: run.symmetric run.asymmetric run.nans run.openmp run.symmetric: $(foreach c,$(CONFIGS),work/$(c)/symmetric/ocean.stats) run.asymmetric: $(foreach c,$(filter-out tc3,$(CONFIGS)),$(CONFIGS),work/$(c)/asymmetric/ocean.stats) -run.nans: $(foreach c,$(CONFIGS),work/$(c)/nan/ocean.stats) +run.nan: $(foreach c,$(CONFIGS),work/$(c)/nan/ocean.stats) run.openmp: $(foreach c,$(CONFIGS),work/$(c)/openmp/ocean.stats) +# Configuration test rules +# $(1): Configuration name (tc1, tc2, &c.) 
+# $(2): Excluded tests +.PRECIOUS: $(foreach c,$(CONFIGS),$(c)) +define CONFIG_RULE +$(1): \ + $(foreach t,$(filter-out $(2),$(TESTS)),$(1).$(t)) \ + $(foreach t,$(filter-out $(2) rotate restart,$(TESTS)),$(1).$(t).diag) +endef +$(foreach c,$(filter-out tc3,$(CONFIGS)),$(eval $(call CONFIG_RULE,$(c),))) +# NOTE: tc3 uses OBCs and does not support asymmetric grid +$(eval $(call CONFIG_RULE,tc3,grid)) + # Color highlights for test results -RED=\033[0;31m -YELLOW=\033[0;33m -GREEN=\033[0;32m -MAGENTA=\033[0;35m -RESET=\033[0m +RED = \033[0;31m +YELLOW = \033[0;33m +GREEN = \033[0;32m +MAGENTA = \033[0;35m +RESET = \033[0m -DONE=${GREEN}DONE${RESET} -PASS=${GREEN}PASS${RESET} -WARN=${YELLOW}WARN${RESET} -FAIL=${RED}FAIL${RESET} +DONE = ${GREEN}DONE${RESET} +PASS = ${GREEN}PASS${RESET} +WARN = ${YELLOW}WARN${RESET} +FAIL = ${RED}FAIL${RESET} # Comparison rules -# $(1): Test type (grid, layout, &c.) -# $(2): Comparison targets (symmetric asymmetric, symmetric layout, &c.) +# $(1): Configuration (tc1, tc2, &c.) +# $(2): Test type (grid, layout, &c.) +# $(3): Comparison targets (symmetric asymmetric, symmetric layout, &c.) define CMP_RULE -.PRECIOUS: $(foreach b,$(2),work/%/$(b)/ocean.stats) -%.$(1): $(foreach b,$(2),work/%/$(b)/ocean.stats) +.PRECIOUS: $(foreach b,$(3),work/$(1)/$(b)/ocean.stats) +$(1).$(2): $(foreach b,$(3),work/$(1)/$(b)/ocean.stats) + @test "$$(shell ls -A results/$(1) 2>/dev/null)" || rm -rf results/$(1) @cmp $$^ || !( \ - mkdir -p results/$$*; \ - (diff $$^ | tee results/$$*/ocean.stats.$(1).diff | head) ; \ - echo -e "$(FAIL): Solutions $$*.$(1) have changed." \ + mkdir -p results/$(1); \ + (diff $$^ | tee results/$(1)/ocean.stats.$(2).diff | head -n 20) ; \ + echo -e "$(FAIL): Solutions $(1).$(2) have changed." \ ) - @echo -e "$(PASS): Solutions $$*.$(1) agree." + @echo -e "$(PASS): Solutions $(1).$(2) agree." -.PRECIOUS: $(foreach b,$(2),work/%/$(b)/chksum_diag) -%.$(1).diag: $(foreach b,$(2),work/%/$(b)/chksum_diag) +.PRECIOUS: $(foreach b,$(3),work/$(1)/$(b)/chksum_diag) +$(1).$(2).diag: $(foreach b,$(3),work/$(1)/$(b)/chksum_diag) @cmp $$^ || !( \ - mkdir -p results/$$*; \ - (diff $$^ | tee results/$$*/chksum_diag.$(1).diff | head) ; \ - echo -e "$(FAIL): Diagnostics $$*.$(1).diag have changed." \ + mkdir -p results/$(1); \ + (diff $$^ | tee results/$(1)/chksum_diag.$(2).diff | head -n 20) ; \ + echo -e "$(FAIL): Diagnostics $(1).$(2).diag have changed." \ ) - @echo -e "$(PASS): Diagnostics $$*.$(1).diag agree." + @echo -e "$(PASS): Diagnostics $(1).$(2).diag agree." 
endef -$(eval $(call CMP_RULE,grid,symmetric asymmetric)) -$(eval $(call CMP_RULE,layout,symmetric layout)) -$(eval $(call CMP_RULE,rotate,symmetric rotate)) -$(eval $(call CMP_RULE,repro,symmetric repro)) -$(eval $(call CMP_RULE,openmp,symmetric openmp)) -$(eval $(call CMP_RULE,nan,symmetric nan)) -$(foreach d,$(DIMS),$(eval $(call CMP_RULE,dim.$(d),symmetric dim.$(d)))) +$(foreach c,$(CONFIGS),$(eval $(call CMP_RULE,$(c),grid,symmetric asymmetric))) +$(foreach c,$(CONFIGS),$(eval $(call CMP_RULE,$(c),layout,symmetric layout))) +$(foreach c,$(CONFIGS),$(eval $(call CMP_RULE,$(c),rotate,symmetric rotate))) +$(foreach c,$(CONFIGS),$(eval $(call CMP_RULE,$(c),repro,symmetric repro))) +$(foreach c,$(CONFIGS),$(eval $(call CMP_RULE,$(c),openmp,symmetric openmp))) +$(foreach c,$(CONFIGS),$(eval $(call CMP_RULE,$(c),nan,symmetric nan))) +define CONFIG_DIM_RULE +$(1).dim: $(foreach d,$(DIMS),$(1).dim.$(d)) +$(foreach d,$(DIMS),$(eval $(call CMP_RULE,$(1),dim.$(d),symmetric dim.$(d)))) +endef +$(foreach c,$(CONFIGS),$(eval $(call CONFIG_DIM_RULE,$(c)))) # Custom comparison rules # Restart tests only compare the final stat record .PRECIOUS: $(foreach b,symmetric restart target,work/%/$(b)/ocean.stats) %.restart: $(foreach b,symmetric restart,work/%/$(b)/ocean.stats) + @test "$(shell ls -A results/$* 2>/dev/null)" || rm -rf results/$* @cmp $(foreach f,$^,<(tr -s ' ' < $(f) | cut -d ' ' -f3- | tail -n 1)) \ || !( \ mkdir -p results/$*; \ - (diff $^ | tee results/$*/chksum_diag.restart.diff | head) ; \ + (diff $^ | tee results/$*/chksum_diag.restart.diff | head -n 20) ; \ echo -e "$(FAIL): Solutions $*.restart have changed." \ ) @echo -e "$(PASS): Solutions $*.restart agree." @@ -363,22 +509,26 @@ $(foreach d,$(DIMS),$(eval $(call CMP_RULE,dim.$(d),symmetric dim.$(d)))) # stats rule is unchanged, but we cannot use CMP_RULE to generate it. %.regression: $(foreach b,symmetric target,work/%/$(b)/ocean.stats) + @test "$(shell ls -A results/$* 2>/dev/null)" || rm -rf results/$* @cmp $^ || !( \ mkdir -p results/$*; \ - (diff $^ | tee results/$*/ocean.stats.regression.diff | head) ; \ + (diff $^ | tee results/$*/ocean.stats.regression.diff | head -n 20) ; \ echo -e "$(FAIL): Solutions $*.regression have changed." \ ) @echo -e "$(PASS): Solutions $*.regression agree." # Regression testing only checks for changes in existing diagnostics %.regression.diag: $(foreach b,symmetric target,work/%/$(b)/chksum_diag) - @! diff $^ | grep "^[<>]" | grep "^>" \ + @! diff $^ | grep "^[<>]" | grep "^>" > /dev/null \ || ! (\ mkdir -p results/$*; \ - (diff $^ | tee results/$*/chksum_diag.regression.diff | head) ; \ + (diff $^ | tee results/$*/chksum_diag.regression.diff | head -n 20) ; \ echo -e "$(FAIL): Diagnostics $*.regression.diag have changed." \ ) - @diff $^ || echo -e "$(WARN): New diagnostics in $<" + @cmp $^ || ( \ + diff $^ | head -n 20; \ + echo -e "$(WARN): New diagnostics in $<" \ + ) @echo -e "$(PASS): Diagnostics $*.regression.diag agree." 
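
With these per-configuration rules in place, individual comparisons can be invoked directly; a sketch (tc1 stands in for any of the tc* configurations):

    cd .testing
    make tc1.grid tc1.layout -k -s   # symmetric vs. asymmetric and layout comparisons for tc1
    make test.dim.t -k -s            # time-rescaling (dim.t) comparison across all configurations
    make tc1.restart                 # compare the final stats record after a split restart run
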
@@ -407,21 +557,23 @@ work/%/$(1)/ocean.stats work/%/$(1)/chksum_diag: build/$(2)/MOM6 $(VENV_PATH) fi mkdir -p $$(@D)/RESTART echo -e "$(4)" > $$(@D)/MOM_override + rm -f results/$$*/std.$(1).{out,err} cd $$(@D) \ - && time $(5) $(MPIRUN) -n $(6) ../../../$$< 2> std.err > std.out \ + && $(TIME) $(5) $(MPIRUN) -n $(6) ../../../$$< 2> std.err > std.out \ || !( \ mkdir -p ../../../results/$$*/ ; \ - cat std.out | tee ../../../results/$$*/std.$(1).out | tail -20 ; \ - cat std.err | tee ../../../results/$$*/std.$(1).err | tail -20 ; \ + cat std.out | tee ../../../results/$$*/std.$(1).out | tail -n 20 ; \ + cat std.err | tee ../../../results/$$*/std.$(1).err | tail -n 20 ; \ rm ocean.stats chksum_diag ; \ echo -e "$(FAIL): $$*.$(1) failed at runtime." \ ) @echo -e "$(DONE): $$*.$(1); no runtime errors." if [ $(3) ]; then \ mkdir -p results/$$* ; \ - bash <(curl -s https://codecov.io/bash) -n $$@ \ - > work/$$*/codecov.$(1).out \ - 2> work/$$*/codecov.$(1).err \ + cd build/symmetric \ + && bash <(curl -s https://codecov.io/bash) -Z -n $$@ \ + > codecov.$$*.$(1).out \ + 2> codecov.$$*.$(1).err \ && echo -e "${MAGENTA}Report uploaded to codecov.${RESET}"; \ fi endef @@ -437,7 +589,7 @@ $(eval $(call STAT_RULE,repro,repro,,,,1)) $(eval $(call STAT_RULE,openmp,openmp,,,GOMP_CPU_AFFINITY=0,1)) $(eval $(call STAT_RULE,layout,symmetric,,LAYOUT=2$(,)1,,2)) $(eval $(call STAT_RULE,rotate,symmetric,,ROTATE_INDEX=True\nINDEX_TURNS=1,,1)) -$(eval $(call STAT_RULE,nan,symmetric,,,MALLOC_PERTURB_=256,1)) +$(eval $(call STAT_RULE,nan,symmetric,,,MALLOC_PERTURB_=1,1)) $(eval $(call STAT_RULE,dim.t,symmetric,,T_RESCALE_POWER=11,,1)) $(eval $(call STAT_RULE,dim.l,symmetric,,L_RESCALE_POWER=11,,1)) $(eval $(call STAT_RULE,dim.h,symmetric,,H_RESCALE_POWER=11,,1)) @@ -446,7 +598,11 @@ $(eval $(call STAT_RULE,dim.q,symmetric,,Q_RESCALE_POWER=11,,1)) $(eval $(call STAT_RULE,dim.r,symmetric,,R_RESCALE_POWER=11,,1)) -# Restart tests require significant preprocessing, and are handled separately. +# Generate the half-period input namelist as follows: +# 1. Fetch DAYMAX and TIMEUNIT from MOM_input +# 2. Convert DAYMAX from TIMEUNIT to seconds +# 3. Apply seconds to `ocean_solo_nml` inside input.nml. +# NOTE: Assumes that runtime set by DAYMAX, will fail if set by input.nml work/%/restart/ocean.stats: build/symmetric/MOM6 $(VENV_PATH) rm -rf $(@D) mkdir -p $(@D) @@ -459,20 +615,21 @@ work/%/restart/ocean.stats: build/symmetric/MOM6 $(VENV_PATH) cd work/$*/restart; \ fi mkdir -p $(@D)/RESTART - # Generate the half-period input namelist - # TODO: Assumes that runtime set by DAYMAX, will fail if set by input.nml + # Set the half-period cd $(@D) \ && daymax=$$(grep DAYMAX MOM_input | cut -d '!' -f 1 | cut -d '=' -f 2 | xargs) \ && timeunit=$$(grep TIMEUNIT MOM_input | cut -d '!' 
-f 1 | cut -d '=' -f 2 | xargs) \ && if [ -z "$${timeunit}" ]; then timeunit="8.64e4"; fi \ && printf -v timeunit_int "%.f" "$${timeunit}" \ - && halfperiod=$$(printf "%.f" $$(bc <<< "scale=10; 0.5 * $${daymax} * $${timeunit_int}")) \ + && halfperiod=$$(awk -v t=$${daymax} -v dt=$${timeunit} 'BEGIN {printf "%.f", 0.5*t*dt}') \ && printf "\n&ocean_solo_nml\n seconds = $${halfperiod}\n/\n" >> input.nml + # Remove any previous archived output + rm -f results/$*/std.restart{1,2}.{out,err} # Run the first half-period - cd $(@D) && time $(MPIRUN) -n 1 ../../../$< 2> std1.err > std1.out \ + cd $(@D) && $(TIME) $(MPIRUN) -n 1 ../../../$< 2> std1.err > std1.out \ || !( \ - cat std1.out | tee ../../../results/$*/std.restart1.out | tail ; \ - cat std1.err | tee ../../../results/$*/std.restart1.err | tail ; \ + cat std1.out | tee ../../../results/$*/std.restart1.out | tail -n 20 ; \ + cat std1.err | tee ../../../results/$*/std.restart1.err | tail -n 20 ; \ echo -e "$(FAIL): $*.restart failed at runtime." \ ) # Setup the next inputs @@ -480,10 +637,10 @@ work/%/restart/ocean.stats: build/symmetric/MOM6 $(VENV_PATH) mkdir $(@D)/RESTART cd $(@D) && sed -i -e "s/input_filename *= *'n'/input_filename = 'r'/g" input.nml # Run the second half-period - cd $(@D) && time $(MPIRUN) -n 1 ../../../$< 2> std2.err > std2.out \ + cd $(@D) && $(TIME) $(MPIRUN) -n 1 ../../../$< 2> std2.err > std2.out \ || !( \ - cat std2.out | tee ../../../results/$*/std.restart2.out | tail ; \ - cat std2.err | tee ../../../results/$*/std.restart2.err | tail ; \ + cat std2.out | tee ../../../results/$*/std.restart2.out | tail -n 20 ; \ + cat std2.err | tee ../../../results/$*/std.restart2.err | tail -n 20 ; \ echo -e "$(FAIL): $*.restart failed at runtime." \ ) @@ -516,11 +673,75 @@ test.summary: fi +#--- +# Profiling +# XXX: This is experimental work to track, log, and report changes in runtime +PCONFIGS = p0 + +.PHONY: profile +profile: $(foreach p,$(PCONFIGS), prof.$(p)) + +.PHONY: prof.p0 +prof.p0: work/p0/opt/clocks.json work/p0/opt_target/clocks.json + python tools/compare_clocks.py $^ + +work/p0/%/clocks.json: work/p0/%/std.out + python tools/parse_fms_clocks.py -d $(@D) $^ > $@ + +work/p0/opt/std.out: build/opt/MOM6 +work/p0/opt_target/std.out: build/opt_target/MOM6 + +work/p0/%/std.out: + mkdir -p $(@D) + cp -RL p0/* $(@D) + mkdir -p $(@D)/RESTART + echo -e "" > $(@D)/MOM_override + cd $(@D) \ + && $(MPIRUN) -n 1 ../../../$< 2> std.err > std.out + +#--- +# Same but with perf + +# TODO: This expects the -e flag, can I handle it in the command? +PERF_EVENTS ?= + +.PHONY: perf +perf: $(foreach p,$(PCONFIGS), perf.$(p)) + +.PHONY: prof.p0 +perf.p0: work/p0/opt/profile.json work/p0/opt_target/profile.json + python tools/compare_perf.py $^ + +work/p0/%/profile.json: work/p0/%/perf.data + python tools/parse_perf.py -f $< > $@ + +work/p0/opt/perf.data: build/opt/MOM6 +work/p0/opt_target/perf.data: build/opt_target/MOM6 + +work/p0/%/perf.data: + mkdir -p $(@D) + cp -RL p0/* $(@D) + mkdir -p $(@D)/RESTART + echo -e "" > $(@D)/MOM_override + cd $(@D) \ + && perf record \ + -F 3999 \ + ${PERF_EVENTS} \ + ../../../$< 2> std.perf.err > std.perf.out \ + || cat std.perf.err + + #---- # NOTE: These tests assert that we are in the .testing directory. 
.PHONY: clean -clean: clean.stats +clean: clean.build clean.stats + @[ $$(basename $$(pwd)) = .testing ] + rm -rf deps + + +.PHONY: clean.build +clean.build: @[ $$(basename $$(pwd)) = .testing ] rm -rf build diff --git a/.testing/README.md b/.testing/README.md index adc56e56cd..ef02bcfa09 100644 --- a/.testing/README.md +++ b/.testing/README.md @@ -229,6 +229,7 @@ configurations in the MOM6-examples repository. - `tc2`: An ALE configuration based on tc1 with tides - `tc2.a`: Use sigma, PPM_H4 and no tides - `tc3`: An open-boundary condition (OBC) test based on `circle_obcs` +- `tc4`: Sponges and initialization using I/O ## Code coverage diff --git a/.testing/p0/MOM_input b/.testing/p0/MOM_input new file mode 100644 index 0000000000..8f751d7bf1 --- /dev/null +++ b/.testing/p0/MOM_input @@ -0,0 +1,505 @@ +! This input file provides the adjustable run-time parameters for version 6 of the Modular Ocean Model (MOM6). +! Where appropriate, parameters use usually given in MKS units. + +! This particular file is for the example in benchmark. + +! This MOM_input file typically contains only the non-default values that are needed to reproduce this example. +! A full list of parameters for this example can be found in the corresponding MOM_parameter_doc.all file +! which is generated by the model at run-time. + +! === module MOM_domains === +NIGLOBAL = 32 ! + ! The total number of thickness grid points in the x-direction in the physical + ! domain. With STATIC_MEMORY_ this is set in MOM_memory.h at compile time. +NJGLOBAL = 32 ! + ! The total number of thickness grid points in the y-direction in the physical + ! domain. With STATIC_MEMORY_ this is set in MOM_memory.h at compile time. +LAYOUT = 1, 1 + ! The processor layout that was actually used. + +! === module MOM === +THICKNESSDIFFUSE = True ! [Boolean] default = False + ! If true, interface heights are diffused with a coefficient of KHTH. +THICKNESSDIFFUSE_FIRST = True ! [Boolean] default = False + ! If true, do thickness diffusion before dynamics. This is only used if + ! THICKNESSDIFFUSE is true. +DT = 900.0 ! [s] + ! The (baroclinic) dynamics time step. The time-step that is actually used will + ! be an integer fraction of the forcing time-step (DT_FORCING in ocean-only mode + ! or the coupling timestep in coupled mode.) +DT_THERM = 3600.0 ! [s] default = 900.0 + ! The thermodynamic and tracer advection time step. Ideally DT_THERM should be + ! an integer multiple of DT and less than the forcing or coupling time-step, + ! unless THERMO_SPANS_COUPLING is true, in which case DT_THERM can be an integer + ! multiple of the coupling timestep. By default DT_THERM is set to DT. +DTBT_RESET_PERIOD = 0.0 ! [s] default = 3600.0 + ! The period between recalculations of DTBT (if DTBT <= 0). If DTBT_RESET_PERIOD + ! is negative, DTBT is set based only on information available at + ! initialization. If 0, DTBT will be set every dynamics time step. The default + ! is set by DT_THERM. This is only used if SPLIT is true. +FRAZIL = True ! [Boolean] default = False + ! If true, water freezes if it gets too cold, and the accumulated heat deficit + ! is returned in the surface state. FRAZIL is only used if + ! ENABLE_THERMODYNAMICS is true. +C_P = 3925.0 ! [J kg-1 K-1] default = 3991.86795711963 + ! The heat capacity of sea water, approximated as a constant. This is only used + ! if ENABLE_THERMODYNAMICS is true. The default value is from the TEOS-10 + ! definition of conservative temperature. +SAVE_INITIAL_CONDS = True ! [Boolean] default = False + ! 
If true, write the initial conditions to a file given by IC_OUTPUT_FILE. +IC_OUTPUT_FILE = "GOLD_IC" ! default = "MOM_IC" + ! The file into which to write the initial conditions. + +! === module MOM_fixed_initialization === +INPUTDIR = "INPUT" ! default = "." + ! The directory in which input files are found. + +! === module MOM_grid_init === +GRID_CONFIG = "cartesian" ! + ! A character string that determines the method for defining the horizontal + ! grid. Current options are: + ! mosaic - read the grid from a mosaic (supergrid) + ! file set by GRID_FILE. + ! cartesian - use a (flat) Cartesian grid. + ! spherical - use a simple spherical grid. + ! mercator - use a Mercator spherical grid. +SOUTHLAT = -41.0 ! [degrees] + ! The southern latitude of the domain. +LENLAT = 41.0 ! [degrees] + ! The latitudinal length of the domain. +LENLON = 90.0 ! [degrees] + ! The longitudinal length of the domain. +ISOTROPIC = True ! [Boolean] default = False + ! If true, an isotropic grid on a sphere (also known as a Mercator grid) is + ! used. With an isotropic grid, the meridional extent of the domain (LENLAT), + ! the zonal extent (LENLON), and the number of grid points in each direction are + ! _not_ independent. In MOM the meridional extent is determined to fit the zonal + ! extent and the number of grid points, while grid is perfectly isotropic. +TOPO_CONFIG = "benchmark" ! + ! This specifies how bathymetry is specified: + ! file - read bathymetric information from the file + ! specified by (TOPO_FILE). + ! flat - flat bottom set to MAXIMUM_DEPTH. + ! bowl - an analytically specified bowl-shaped basin + ! ranging between MAXIMUM_DEPTH and MINIMUM_DEPTH. + ! spoon - a similar shape to 'bowl', but with an vertical + ! wall at the southern face. + ! halfpipe - a zonally uniform channel with a half-sine + ! profile in the meridional direction. + ! bbuilder - build topography from list of functions. + ! benchmark - use the benchmark test case topography. + ! Neverworld - use the Neverworld test case topography. + ! DOME - use a slope and channel configuration for the + ! DOME sill-overflow test case. + ! ISOMIP - use a slope and channel configuration for the + ! ISOMIP test case. + ! DOME2D - use a shelf and slope configuration for the + ! DOME2D gravity current/overflow test case. + ! Kelvin - flat but with rotated land mask. + ! seamount - Gaussian bump for spontaneous motion test case. + ! dumbbell - Sloshing channel with reservoirs on both ends. + ! shelfwave - exponential slope for shelfwave test case. + ! Phillips - ACC-like idealized topography used in the Phillips config. + ! dense - Denmark Strait-like dense water formation and overflow. + ! USER - call a user modified routine. + +! === module benchmark_initialize_topography === +MINIMUM_DEPTH = 1.0 ! [m] default = 0.0 + ! The minimum depth of the ocean. +MAXIMUM_DEPTH = 5500.0 ! [m] + ! The maximum depth of the ocean. + +! === module MOM_verticalGrid === +! Parameters providing information about the vertical grid. +NK = 75 ! [nondim] + ! The number of model layers. + +! === module MOM_EOS === + +! === module MOM_tracer_flow_control === +USE_IDEAL_AGE_TRACER = True ! [Boolean] default = False + ! If true, use the ideal_age_example tracer package. + +! === module ideal_age_example === + +! === module MOM_coord_initialization === +COORD_CONFIG = "ts_range" ! default = "none" + ! This specifies how layers are to be defined: + ! ALE or none - used to avoid defining layers in ALE mode + ! file - read coordinate information from the file + ! 
specified by (COORD_FILE). + ! BFB - Custom coords for buoyancy-forced basin case + ! based on SST_S, T_BOT and DRHO_DT. + ! linear - linear based on interfaces not layers + ! layer_ref - linear based on layer densities + ! ts_ref - use reference temperature and salinity + ! ts_range - use range of temperature and salinity + ! (T_REF and S_REF) to determine surface density + ! and GINT calculate internal densities. + ! gprime - use reference density (RHO_0) for surface + ! density and GINT calculate internal densities. + ! ts_profile - use temperature and salinity profiles + ! (read from COORD_FILE) to set layer densities. + ! USER - call a user modified routine. +TS_RANGE_T_LIGHT = 25.0 ! [degC] default = 10.0 + ! The initial temperature of the lightest layer when COORD_CONFIG is set to + ! ts_range. +TS_RANGE_T_DENSE = 3.0 ! [degC] default = 10.0 + ! The initial temperature of the densest layer when COORD_CONFIG is set to + ! ts_range. +TS_RANGE_RESOLN_RATIO = 5.0 ! [nondim] default = 1.0 + ! The ratio of density space resolution in the densest part of the range to that + ! in the lightest part of the range when COORD_CONFIG is set to ts_range. Values + ! greater than 1 increase the resolution of the denser water. + +! === module MOM_state_initialization === +THICKNESS_CONFIG = "benchmark" ! default = "uniform" + ! A string that determines how the initial layer thicknesses are specified for a + ! new run: + ! file - read interface heights from the file specified + ! thickness_file - read thicknesses from the file specified + ! by (THICKNESS_FILE). + ! coord - determined by ALE coordinate. + ! uniform - uniform thickness layers evenly distributed + ! between the surface and MAXIMUM_DEPTH. + ! list - read a list of positive interface depths. + ! DOME - use a slope and channel configuration for the + ! DOME sill-overflow test case. + ! ISOMIP - use a configuration for the + ! ISOMIP test case. + ! benchmark - use the benchmark test case thicknesses. + ! Neverworld - use the Neverworld test case thicknesses. + ! search - search a density profile for the interface + ! densities. This is not yet implemented. + ! circle_obcs - the circle_obcs test case is used. + ! DOME2D - 2D version of DOME initialization. + ! adjustment2d - 2D lock exchange thickness ICs. + ! sloshing - sloshing gravity thickness ICs. + ! seamount - no motion test with seamount ICs. + ! dumbbell - sloshing channel ICs. + ! soliton - Equatorial Rossby soliton. + ! rossby_front - a mixed layer front in thermal wind balance. + ! USER - call a user modified routine. + +! === module benchmark_initialize_thickness === +TS_CONFIG = "benchmark" ! + ! A string that determines how the initial tempertures and salinities are + ! specified for a new run: + ! file - read velocities from the file specified + ! by (TS_FILE). + ! fit - find the temperatures that are consistent with + ! the layer densities and salinity S_REF. + ! TS_profile - use temperature and salinity profiles + ! (read from TS_FILE) to set layer densities. + ! benchmark - use the benchmark test case T & S. + ! linear - linear in logical layer space. + ! DOME2D - 2D DOME initialization. + ! ISOMIP - ISOMIP initialization. + ! adjustment2d - 2d lock exchange T/S ICs. + ! sloshing - sloshing mode T/S ICs. + ! seamount - no motion test with seamount ICs. + ! dumbbell - sloshing channel ICs. + ! rossby_front - a mixed layer front in thermal wind balance. + ! SCM_CVMix_tests - used in the SCM CVMix tests. + ! USER - call a user modified routine. + +! 
=== module MOM_diag_mediator === + +! === module MOM_lateral_mixing_coeffs === +USE_VARIABLE_MIXING = True ! [Boolean] default = False + ! If true, the variable mixing code will be called. This allows diagnostics to + ! be created even if the scheme is not used. If KHTR_SLOPE_CFF>0 or + ! KhTh_Slope_Cff>0, this is set to true regardless of what is in the parameter + ! file. +USE_VISBECK = True ! [Boolean] default = False + ! If true, use the Visbeck et al. (1997) formulation for + ! thickness diffusivity. +RESOLN_SCALED_KH = True ! [Boolean] default = False + ! If true, the Laplacian lateral viscosity is scaled away when the first + ! baroclinic deformation radius is well resolved. +RESOLN_SCALED_KHTH = True ! [Boolean] default = False + ! If true, the interface depth diffusivity is scaled away when the first + ! baroclinic deformation radius is well resolved. +RESOLN_SCALED_KHTR = True ! [Boolean] default = False + ! If true, the epipycnal tracer diffusivity is scaled away when the first + ! baroclinic deformation radius is well resolved. +KHTH_SLOPE_CFF = 0.1 ! [nondim] default = 0.0 + ! The nondimensional coefficient in the Visbeck formula for the interface depth + ! diffusivity +KHTR_SLOPE_CFF = 0.1 ! [nondim] default = 0.0 + ! The nondimensional coefficient in the Visbeck formula for the epipycnal tracer + ! diffusivity +VARMIX_KTOP = 6 ! [nondim] default = 2 + ! The layer number at which to start vertical integration of S*N for purposes of + ! finding the Eady growth rate. +VISBECK_L_SCALE = 3.0E+04 ! [m] default = 0.0 + ! The fixed length scale in the Visbeck formula. + +! === module MOM_set_visc === +PRANDTL_TURB = 0.0 ! [nondim] default = 1.0 + ! The turbulent Prandtl number applied to shear instability. +DYNAMIC_VISCOUS_ML = True ! [Boolean] default = False + ! If true, use a bulk Richardson number criterion to determine the mixed layer + ! thickness for viscosity. +ML_OMEGA_FRAC = 1.0 ! [nondim] default = 0.0 + ! When setting the decay scale for turbulence, use this fraction of the absolute + ! rotation rate blended with the local value of f, as sqrt((1-of)*f^2 + + ! of*4*omega^2). +HBBL = 10.0 ! [m] + ! The thickness of a bottom boundary layer with a viscosity of KVBBL if + ! BOTTOMDRAGLAW is not defined, or the thickness over which near-bottom + ! velocities are averaged for the drag law if BOTTOMDRAGLAW is defined but + ! LINEAR_DRAG is not. +DRAG_BG_VEL = 0.1 ! [m s-1] default = 0.0 + ! DRAG_BG_VEL is either the assumed bottom velocity (with LINEAR_DRAG) or an + ! unresolved velocity that is combined with the resolved velocity to estimate + ! the velocity magnitude. DRAG_BG_VEL is only used when BOTTOMDRAGLAW is + ! defined. +BBL_THICK_MIN = 0.1 ! [m] default = 0.0 + ! The minimum bottom boundary layer thickness that can be used with + ! BOTTOMDRAGLAW. This might be Kv/(cdrag*drag_bg_vel) to give Kv as the minimum + ! near-bottom viscosity. +KV = 1.0E-04 ! [m2 s-1] + ! The background kinematic viscosity in the interior. The molecular value, ~1e-6 + ! m2 s-1, may be used. + +! === module MOM_thickness_diffuse === +KHTH = 1.0 ! [m2 s-1] default = 0.0 + ! The background horizontal thickness diffusivity. +KHTH_MAX = 900.0 ! [m2 s-1] default = 0.0 + ! The maximum horizontal thickness diffusivity. + +! === module MOM_dynamics_split_RK2 === + +! === module MOM_continuity === + +! === module MOM_continuity_PPM === +ETA_TOLERANCE = 1.0E-06 ! [m] default = 1.1E-09 + ! The tolerance for the differences between the barotropic and baroclinic + ! 
estimates of the sea surface height due to the fluxes through each face. The + ! total tolerance for SSH is 4 times this value. The default is + ! 0.5*NK*ANGSTROM, and this should not be set less than about + ! 10^-15*MAXIMUM_DEPTH. +VELOCITY_TOLERANCE = 0.001 ! [m s-1] default = 3.0E+08 + ! The tolerance for barotropic velocity discrepancies between the barotropic + ! solution and the sum of the layer thicknesses. + +! === module MOM_CoriolisAdv === +BOUND_CORIOLIS = True ! [Boolean] default = False + ! If true, the Coriolis terms at u-points are bounded by the four estimates of + ! (f+rv)v from the four neighboring v-points, and similarly at v-points. This + ! option would have no effect on the SADOURNY Coriolis scheme if it were + ! possible to use centered difference thickness fluxes. + +! === module MOM_PressureForce === + +! === module MOM_PressureForce_AFV === + +! === module MOM_hor_visc === +AH_VEL_SCALE = 0.05 ! [m s-1] default = 0.0 + ! The velocity scale which is multiplied by the cube of the grid spacing to + ! calculate the biharmonic viscosity. The final viscosity is the largest of this + ! scaled viscosity, the Smagorinsky and Leith viscosities, and AH. +SMAGORINSKY_AH = True ! [Boolean] default = False + ! If true, use a biharmonic Smagorinsky nonlinear eddy viscosity. +SMAG_BI_CONST = 0.06 ! [nondim] default = 0.0 + ! The nondimensional biharmonic Smagorinsky constant, typically 0.015 - 0.06. + +! === module MOM_vert_friction === +MAXVEL = 10.0 ! [m s-1] default = 3.0E+08 + ! The maximum velocity allowed before the velocity components are truncated. + +! === module MOM_barotropic === +BOUND_BT_CORRECTION = True ! [Boolean] default = False + ! If true, the corrective pseudo mass-fluxes into the barotropic solver are + ! limited to values that require less than maxCFL_BT_cont to be accommodated. +NONLINEAR_BT_CONTINUITY = True ! [Boolean] default = False + ! If true, use nonlinear transports in the barotropic continuity equation. This + ! does not apply if USE_BT_CONT_TYPE is true. +BT_PROJECT_VELOCITY = True ! [Boolean] default = False + ! If true, step the barotropic velocity first and project out the velocity + ! tendency by 1+BEBT when calculating the transport. The default (false) is to + ! use a predictor continuity step to find the pressure field, and then to do a + ! corrector continuity step using a weighted average of the old and new + ! velocities, with weights of (1-BEBT) and BEBT. +BEBT = 0.2 ! [nondim] default = 0.1 + ! BEBT determines whether the barotropic time stepping uses the forward-backward + ! time-stepping scheme or a backward Euler scheme. BEBT is valid in the range + ! from 0 (for a forward-backward treatment of nonrotating gravity waves) to 1 + ! (for a backward Euler treatment). In practice, BEBT must be greater than about + ! 0.05. +DTBT = -0.95 ! [s or nondim] default = -0.98 + ! The barotropic time step, in s. DTBT is only used with the split explicit time + ! stepping. To set the time step automatically based the maximum stable value + ! use 0, or a negative value gives the fraction of the stable value. Setting + ! DTBT to 0 is the same as setting it to -0.98. The value of DTBT that will + ! actually be used is an integer fraction of DT, rounding down. + +! === module MOM_mixed_layer_restrat === +MIXEDLAYER_RESTRAT = True ! [Boolean] default = False + ! If true, a density-gradient dependent re-stratifying flow is imposed in the + ! mixed layer. Can be used in ALE mode without restriction but in layer mode can + ! 
only be used if BULKMIXEDLAYER is true. +FOX_KEMPER_ML_RESTRAT_COEF = 5.0 ! [nondim] default = 0.0 + ! A nondimensional coefficient that is proportional to the ratio of the + ! deformation radius to the dominant lengthscale of the submesoscale mixed layer + ! instabilities, times the minimum of the ratio of the mesoscale eddy kinetic + ! energy to the large-scale geostrophic kinetic energy or 1 plus the square of + ! the grid spacing over the deformation radius, as detailed by Fox-Kemper et al. + ! (2010) + +! === module MOM_diagnostics === + +! === module MOM_diabatic_driver === +! The following parameters are used for diabatic processes. + +! === module MOM_entrain_diffusive === +MAX_ENT_IT = 20 ! default = 5 + ! The maximum number of iterations that may be used to calculate the interior + ! diapycnal entrainment. +TOLERANCE_ENT = 1.0E-05 ! [m] default = 1.341640786499874E-05 + ! The tolerance with which to solve for entrainment values. + +! === module MOM_set_diffusivity === + +! === module MOM_bkgnd_mixing === +! Adding static vertical background mixing coefficients +KD = 2.0E-05 ! [m2 s-1] default = 0.0 + ! The background diapycnal diffusivity of density in the interior. Zero or the + ! molecular value, ~1e-7 m2 s-1, may be used. + +! === module MOM_kappa_shear === +! Parameterization of shear-driven turbulence following Jackson, Hallberg and Legg, JPO 2008 +USE_JACKSON_PARAM = True ! [Boolean] default = False + ! If true, use the Jackson-Hallberg-Legg (JPO 2008) shear mixing + ! parameterization. +MAX_RINO_IT = 25 ! [nondim] default = 50 + ! The maximum number of iterations that may be used to estimate the Richardson + ! number driven mixing. + +! === module MOM_diabatic_aux === +! The following parameters are used for auxiliary diabatic processes. +RECLAIM_FRAZIL = False ! [Boolean] default = True + ! If true, try to use any frazil heat deficit to cool any overlying layers down + ! to the freezing point, thereby avoiding the creation of thin ice when the SST + ! is above the freezing point. + +! === module MOM_mixed_layer === +MSTAR = 0.3 ! [nondim] default = 1.2 + ! The ratio of the friction velocity cubed to the TKE input to the mixed layer. +BULK_RI_ML = 0.05 ! [nondim] + ! The efficiency with which mean kinetic energy released by mechanically forced + ! entrainment of the mixed layer is converted to turbulent kinetic energy. +ABSORB_ALL_SW = True ! [Boolean] default = False + ! If true, all shortwave radiation is absorbed by the ocean, instead of passing + ! through to the bottom mud. +TKE_DECAY = 10.0 ! [nondim] default = 2.5 + ! TKE_DECAY relates the vertical rate of decay of the TKE available for + ! mechanical entrainment to the natural Ekman depth. +HMIX_MIN = 2.0 ! [m] default = 0.0 + ! The minimum mixed layer depth if the mixed layer depth is determined + ! dynamically. +LIMIT_BUFFER_DETRAIN = True ! [Boolean] default = False + ! If true, limit the detrainment from the buffer layers to not be too different + ! from the neighbors. +DEPTH_LIMIT_FLUXES = 0.1 ! [m] default = 0.2 + ! The surface fluxes are scaled away when the total ocean depth is less than + ! DEPTH_LIMIT_FLUXES. +CORRECT_ABSORPTION_DEPTH = True ! [Boolean] default = False + ! If true, the average depth at which penetrating shortwave radiation is + ! absorbed is adjusted to match the average heating depth of an exponential + ! profile by moving some of the heating upward in the water column. + +! === module MOM_opacity === +PEN_SW_SCALE = 15.0 ! [m] default = 0.0 + ! 
The vertical absorption e-folding depth of the penetrating shortwave + ! radiation. +PEN_SW_FRAC = 0.42 ! [nondim] default = 0.0 + ! The fraction of the shortwave radiation that penetrates below the surface. + +! === module MOM_tracer_advect === + +! === module MOM_tracer_hor_diff === +KHTR = 1.0 ! [m2 s-1] default = 0.0 + ! The background along-isopycnal tracer diffusivity. +KHTR_MAX = 900.0 ! [m2 s-1] default = 0.0 + ! The maximum along-isopycnal tracer diffusivity. +DIFFUSE_ML_TO_INTERIOR = True ! [Boolean] default = False + ! If true, enable epipycnal mixing between the surface boundary layer and the + ! interior. +ML_KHTR_SCALE = 0.0 ! [nondim] default = 1.0 + ! With Diffuse_ML_interior, the ratio of the truly horizontal diffusivity in the + ! mixed layer to the epipycnal diffusivity. The valid range is 0 to 1. + +! === module MOM_sum_output === +MAXTRUNC = 5000 ! [truncations save_interval-1] default = 0 + ! The run will be stopped, and the day set to a very large value if the velocity + ! is truncated more than MAXTRUNC times between energy saves. Set MAXTRUNC to 0 + ! to stop if there is any truncation of velocities. +ENERGYSAVEDAYS = 0.25 ! [days] default = 1.0 + ! The interval in units of TIMEUNIT between saves of the energies of the run and + ! other globally summed diagnostics. + +! === module MOM_surface_forcing === +BUOY_CONFIG = "linear" ! default = "zero" + ! The character string that indicates how buoyancy forcing is specified. Valid + ! options include (file), (zero), (linear), (USER), (BFB) and (NONE). +WIND_CONFIG = "gyres" ! default = "zero" + ! The character string that indicates how wind forcing is specified. Valid + ! options include (file), (2gyre), (1gyre), (gyres), (zero), and (USER). +TAUX_SIN_AMP = 0.1 ! [Pa] default = 0.0 + ! With the gyres wind_config, the sine amplitude in the zonal wind stress + ! profile: B in taux = A + B*sin(n*pi*y/L) + C*cos(n*pi*y/L). +TAUX_N_PIS = 1.0 ! [nondim] default = 0.0 + ! With the gyres wind_config, the number of gyres in the zonal wind stress + ! profile: n in taux = A + B*sin(n*pi*y/L) + C*cos(n*pi*y/L). +RESTOREBUOY = True ! [Boolean] default = False + ! If true, the buoyancy fluxes drive the model back toward some specified + ! surface state with a rate given by FLUXCONST. +FLUXCONST = 0.5 ! [m day-1] default = 0.0 + ! The constant that relates the restoring surface fluxes to the relative surface + ! anomalies (akin to a piston velocity). Note the non-MKS units. +SST_NORTH = 27.0 ! [deg C] default = 0.0 + ! With buoy_config linear, the sea surface temperature at the northern end of + ! the domain toward which to to restore. +SST_SOUTH = 3.0 ! [deg C] default = 0.0 + ! With buoy_config linear, the sea surface temperature at the southern end of + ! the domain toward which to to restore. +GUST_CONST = 0.02 ! [Pa] default = 0.0 + ! The background gustiness in the winds. + +! === module MOM_main (MOM_driver) === +DT_FORCING = 3600.0 ! [s] default = 900.0 + ! The time step for changing forcing, coupling with other components, or + ! potentially writing certain diagnostics. The default value is given by DT. +DAYMAX = 3.0 ! [days] + ! The final time of the whole simulation, in units of TIMEUNIT seconds. This + ! also sets the potential end time of the present run segment if the end time is + ! not set via ocean_solo_nml in input.nml. +RESTART_CONTROL = 3 ! default = 1 + ! An integer whose bits encode which restart files are written. Add 2 (bit 1) + ! for a time-stamped file, and odd (bit 0) for a non-time-stamped file. A + ! 
non-time-stamped restart file is saved at the end of the run segment for any + ! non-negative value. +RESTINT = 365.0 ! [days] default = 0.0 + ! The interval between saves of the restart file in units of TIMEUNIT. Use 0 + ! (the default) to not save incremental restart files at all. + +! === module MOM_write_cputime === +MAXCPU = 2.88E+04 ! [wall-clock seconds] default = -1.0 + ! The maximum amount of cpu time per processor for which MOM should run before + ! saving a restart file and quitting with a return value that indicates that a + ! further run is required to complete the simulation. If automatic restarts are + ! not desired, use a negative value for MAXCPU. MAXCPU has units of wall-clock + ! seconds, so the actual CPU time used is larger by a factor of the number of + ! processors used. + +! Debugging parameters set to non-default values +U_TRUNC_FILE = "U_velocity_truncations" ! default = "" + ! The absolute path to a file into which the accelerations leading to zonal + ! velocity truncations are written. Undefine this for efficiency if this + ! diagnostic is not needed. +V_TRUNC_FILE = "V_velocity_truncations" ! default = "" + ! The absolute path to a file into which the accelerations leading to meridional + ! velocity truncations are written. Undefine this for efficiency if this + ! diagnostic is not needed. diff --git a/.testing/p0/MOM_override b/.testing/p0/MOM_override new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.testing/p0/diag_table b/.testing/p0/diag_table new file mode 100644 index 0000000000..68c71dd2c4 --- /dev/null +++ b/.testing/p0/diag_table @@ -0,0 +1,91 @@ +"MOM benchmark Experiment" +1 1 1 0 0 0 +"prog", 1,"days",1,"days","time", +#"ave_prog", 5,"days",1,"days","Time",365,"days" +#"cont", 5,"days",1,"days","Time",365,"days" + +#This is the field section of the diag_table. 
+ +# Prognostic Ocean fields: +#========================= + +"ocean_model","u","u","prog","all",.false.,"none",2 +"ocean_model","v","v","prog","all",.false.,"none",2 +"ocean_model","h","h","prog","all",.false.,"none",1 +"ocean_model","e","e","prog","all",.false.,"none",2 +"ocean_model","temp","temp","prog","all",.false.,"none",2 +#"ocean_model","salt","salt","prog","all",.false.,"none",2 + +#"ocean_model","u","u","ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","v","v","ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","h","h","ave_prog_%4yr_%3dy","all",.true.,"none",1 +#"ocean_model","e","e","ave_prog_%4yr_%3dy","all",.true.,"none",2 + +# Auxiliary Tracers: +#================== +#"ocean_model","vintage","vintage","prog_%4yr_%3dy","all",.false.,"none",2 +#"ocean_model","age","age","prog_%4yr_%3dy","all",.false.,"none",2 + +# Continuity Equation Terms: +#=========================== +#"ocean_model","dhdt","dhdt","cont_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","wd","wd","cont_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","uh","uh","cont_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","vh","vh","cont_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","h_rho","h_rho","cont_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","uh_rho","uh_rho","cont_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","vh_rho","vh_rho","cont_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","uhGM_rho","uhGM_rho","cont_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","vhGM_rho","vhGM_rho","cont_%4yr_%3dy","all",.true.,"none",2 + +# +# Tracer Fluxes: +#================== +#"ocean_model","T_adx", "T_adx", "ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","T_ady", "T_ady", "ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","T_diffx","T_diffx","ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","T_diffy","T_diffy","ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","S_adx", "S_adx", "ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","S_ady", "S_ady", "ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","S_diffx","S_diffx","ave_prog_%4yr_%3dy","all",.true.,"none",2 +#"ocean_model","S_diffy","S_diffy","ave_prog_%4yr_%3dy","all",.true.,"none",2 + +# testing +# ======= +#"ocean_model","Kv_u","Kv_u","prog","all",.false.,"none",2 +#"ocean_model","Kv_v","Kv_v","prog","all",.false.,"none",2 + +#============================================================================================= +# +#===- This file can be used with diag_manager/v2.0a (or higher) ==== +# +# +# FORMATS FOR FILE ENTRIES (not all input values are used) +# ------------------------ +# +#"file_name", output_freq, "output_units", format, "time_units", "time_long_name", ... +# (opt) new_file_frequency, (opt) "new_file_freq_units", "new_file_start_date" +# +# +#output_freq: > 0 output frequency in "output_units" +# = 0 output frequency every time step +# =-1 output frequency at end of run +# +#output_units = units used for output frequency +# (years, months, days, minutes, hours, seconds) +# +#time_units = units used to label the time axis +# (days, minutes, hours, seconds) +# +# +# FORMAT FOR FIELD ENTRIES (not all input values are used) +# ------------------------ +# +#"module_name", "field_name", "output_name", "file_name" "time_sampling", time_avg, "other_opts", packing +# +#time_avg = .true. or .false. +# +#packing = 1 double precision +# = 2 float +# = 4 packed 16-bit integers +# = 8 packed 1-byte (not tested?)
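For reference, the field-entry format described above maps onto one of the active entries near the top of this diag_table as follows (an illustrative reading of an existing entry, not an additional diagnostic request):
#"ocean_model","h","h","prog","all",.false.,"none",1
# i.e. module_name="ocean_model", field_name="h", output_name="h", file_name="prog",
# time_sampling="all", time_avg=.false., other_opts="none", packing=1 (double precision)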
diff --git a/.testing/p0/input.nml b/.testing/p0/input.nml new file mode 100644 index 0000000000..41555b8822 --- /dev/null +++ b/.testing/p0/input.nml @@ -0,0 +1,22 @@ + &MOM_input_nml + output_directory = './', + input_filename = 'n' + restart_input_dir = 'INPUT/', + restart_output_dir = 'RESTART/', + parameter_filename = 'MOM_input', + 'MOM_override' / + + &diag_manager_nml + / + + &fms_nml + clock_grain='ROUTINE' + clock_flags='SYNC' + !domains_stack_size = 955296 + domains_stack_size = 14256000 + stack_size =0 / + +!&ocean_solo_nml +! hours = 1 +! !days = 1 +!/ diff --git a/.testing/tc0/MOM_input b/.testing/tc0/MOM_input index ff64c55803..e4d1694e72 100644 --- a/.testing/tc0/MOM_input +++ b/.testing/tc0/MOM_input @@ -230,7 +230,6 @@ ENERGYSAVEDAYS = 1.0 DIAG_AS_CHKSUM = True DEBUG = True -DEFAULT_2018_ANSWERS = True ! [Boolean] default = True GRID_ROTATION_ANGLE_BUGS = True ! [Boolean] default = True USE_GM_WORK_BUG = True ! [Boolean] default = True FIX_UNSPLIT_DT_VISC_BUG = False ! [Boolean] default = False diff --git a/.testing/tc1/MOM_input b/.testing/tc1/MOM_input index 68674f7a86..151c093ff9 100644 --- a/.testing/tc1/MOM_input +++ b/.testing/tc1/MOM_input @@ -575,7 +575,6 @@ ENERGYSAVEDAYS = 0.125 ! [days] default = 3600.0 DIAG_AS_CHKSUM = True DEBUG = True USE_PSURF_IN_EOS = False ! [Boolean] default = False -DEFAULT_2018_ANSWERS = True ! [Boolean] default = True GRID_ROTATION_ANGLE_BUGS = True ! [Boolean] default = True INTERPOLATE_RES_FN = True ! [Boolean] default = True GILL_EQUATORIAL_LD = False ! [Boolean] default = False diff --git a/.testing/tc2/MOM_input b/.testing/tc2/MOM_input index 1818390192..ca84d1c382 100644 --- a/.testing/tc2/MOM_input +++ b/.testing/tc2/MOM_input @@ -610,7 +610,6 @@ DIAG_AS_CHKSUM = True DEBUG = True USE_GM_WORK_BUG = False USE_PSURF_IN_EOS = False ! [Boolean] default = False -DEFAULT_2018_ANSWERS = True ! [Boolean] default = True GRID_ROTATION_ANGLE_BUGS = True ! [Boolean] default = True REMAP_UV_USING_OLD_ALG = True ! [Boolean] default = True USE_LAND_MASK_FOR_HVISC = False ! [Boolean] default = False diff --git a/.testing/tc3/MOM_input b/.testing/tc3/MOM_input index 9112898b4c..a034960d1e 100644 --- a/.testing/tc3/MOM_input +++ b/.testing/tc3/MOM_input @@ -469,7 +469,6 @@ ENERGYSAVEDAYS = 3.0 ! [hours] default = 1.44E+04 ! energies of the run and other globally summed diagnostics. DIAG_AS_CHKSUM = True DEBUG = True -DEFAULT_2018_ANSWERS = True ! [Boolean] default = True OBC_RADIATION_MAX = 10.0 ! [nondim] default = 10.0 GRID_ROTATION_ANGLE_BUGS = True ! [Boolean] default = True USE_GM_WORK_BUG = True ! [Boolean] default = True diff --git a/.testing/tc4/MOM_input b/.testing/tc4/MOM_input index 04598a9dc9..e33bf40bf6 100644 --- a/.testing/tc4/MOM_input +++ b/.testing/tc4/MOM_input @@ -409,7 +409,6 @@ DIAG_AS_CHKSUM = True DEBUG = True USE_PSURF_IN_EOS = False ! [Boolean] default = False -DEFAULT_2018_ANSWERS = True ! [Boolean] default = True GRID_ROTATION_ANGLE_BUGS = True ! [Boolean] default = True INTERPOLATE_RES_FN = True ! [Boolean] default = True GILL_EQUATORIAL_LD = False ! 
[Boolean] default = False diff --git a/.testing/tools/compare_clocks.py b/.testing/tools/compare_clocks.py new file mode 100755 index 0000000000..77198fda6a --- /dev/null +++ b/.testing/tools/compare_clocks.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python +import argparse +import json + +# Ignore timers below this threshold (in seconds) +DEFAULT_THRESHOLD = 0.05 + +# Thresholds for reporting +DT_WARN = 0.10 # Slowdown warning +DT_FAIL = 0.25 # Slowdown abort + +ANSI_RED = '\033[31m' +ANSI_GREEN = '\033[32m' +ANSI_YELLOW = '\033[33m' +ANSI_RESET = '\033[0m' + + +def main(): + desc = ( + 'Compare two FMS clock output files and report any differences within ' + 'a defined threshold.' + ) + + parser = argparse.ArgumentParser(description=desc) + parser.add_argument('expt') + parser.add_argument('ref') + parser.add_argument('--threshold') + parser.add_argument('--verbose', action='store_true') + args = parser.parse_args() + + threshold = float(args.threshold) if args.threshold else DEFAULT_THRESHOLD + verbose = args.verbose + + clock_cmp = {} + + print('{:35s}{:8s} {:8s}'.format('', 'Profile', 'Reference')) + print() + + with open(args.expt) as log_expt, open(args.ref) as log_ref: + clocks_expt = json.load(log_expt)['clocks'] + clocks_ref = json.load(log_ref)['clocks'] + + # Gather timers which appear in both clocks + clock_tags = [clk for clk in clocks_expt if clk in clocks_ref] + + for clk in clock_tags: + clock_cmp[clk] = {} + + # For now, we only compare tavg, the rank-averaged timing + rec = 'tavg' + + t_expt = clocks_expt[clk][rec] + t_ref = clocks_ref[clk][rec] + + # Compare the relative runtimes + if all(t > threshold for t in (t_expt, t_ref)): + dclk = (t_expt - t_ref) / t_ref + else: + dclk = 0. + clock_cmp[clk][rec] = dclk + + # Skip trivially low clocks + if all(t < threshold for t in (t_expt, t_ref)) and not verbose: + continue + + # Report the time differences + ansi_color = ANSI_RESET + + if abs(t_expt - t_ref) > threshold: + if dclk > DT_FAIL: + ansi_color = ANSI_RED + elif dclk > DT_WARN: + ansi_color = ANSI_YELLOW + elif dclk < -DT_WARN: + ansi_color = ANSI_GREEN + + print('{}{}: {:7.3f}s, {:7.3f}s ({:.1f}%){}'.format( + ansi_color, + ' ' * (32 - len(clk)) + clk, + t_expt, + t_ref, + 100. * dclk, + ANSI_RESET, + )) + + +if __name__ == '__main__': + main() diff --git a/.testing/tools/compare_perf.py b/.testing/tools/compare_perf.py new file mode 100755 index 0000000000..e4a651c709 --- /dev/null +++ b/.testing/tools/compare_perf.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python +import argparse +import json + +# Ignore timers below this threshold (in seconds) +DEFAULT_THRESHOLD = 0.05 + +# Thresholds for reporting +DT_WARN = 0.10 # Slowdown warning +DT_FAIL = 0.25 # Slowdown abort + +ANSI_RED = '\033[31m' +ANSI_GREEN = '\033[32m' +ANSI_YELLOW = '\033[33m' +ANSI_RESET = '\033[0m' + + +def main(): + desc = ( + 'Compare two perf profiles and report any differences within ' + 'a defined threshold.'
+ ) + + parser = argparse.ArgumentParser(description=desc) + parser.add_argument('expt') + parser.add_argument('ref') + parser.add_argument('--threshold') + parser.add_argument('--verbose', action='store_true') + args = parser.parse_args() + + threshold = float(args.threshold) if args.threshold else DEFAULT_THRESHOLD + verbose = args.verbose + + clock_cmp = {} + + print('{:35s}{:8s} {:8s}'.format('', 'Profile', 'Reference')) + print() + + with open(args.expt) as profile_expt, open(args.ref) as profile_ref: + perf_expt = json.load(profile_expt) + perf_ref = json.load(profile_ref) + + events = [ev for ev in perf_expt if ev in perf_ref] + + for event in events: + # For now, only report the times + if event not in ('task-clock', 'cpu-clock'): + continue + + count_expt = perf_expt[event]['count'] + count_ref = perf_ref[event]['count'] + + symbols_expt = perf_expt[event]['symbol'] + symbols_ref = perf_ref[event]['symbol'] + + symbols = [ + s for s in symbols_expt + if s in symbols_ref + and not s.startswith('0x') + ] + + for symbol in symbols: + t_expt = float(symbols_expt[symbol]) / 1e9 + t_ref = float(symbols_ref[symbol]) / 1e9 + + # Compare the relative runtimes + if all(t > threshold for t in (t_expt, t_ref)): + dclk = (t_expt - t_ref) / t_ref + else: + dclk = 0. + + # Skip trivially low clocks + if all(t < threshold for t in (t_expt, t_ref)) and not verbose: + continue + + # Report the time differences + ansi_color = ANSI_RESET + + if abs(t_expt - t_ref) > threshold: + if dclk > DT_FAIL: + ansi_color = ANSI_RED + elif dclk > DT_WARN: + ansi_color = ANSI_YELLOW + elif dclk < -DT_WARN: + ansi_color = ANSI_GREEN + + # Remove module name + sname = symbol.split('_MOD_', 1)[-1] + + # Strip version from glibc calls + sname = sname.split('@')[0] + + # Remove GCC optimization renaming + sname = sname.replace('.constprop.0', '') + + if len(sname) > 32: + sname = sname[:29] + '...' + + print('{}{}: {:7.3f}s, {:7.3f}s ({:.1f}%){}'.format( + ansi_color, + ' ' * (32 - len(sname)) + sname, + t_expt, + t_ref, + 100. * dclk, + ANSI_RESET, + )) + + +if __name__ == '__main__': + main() diff --git a/.testing/tools/parse_fms_clocks.py b/.testing/tools/parse_fms_clocks.py new file mode 100755 index 0000000000..b57fc481ab --- /dev/null +++ b/.testing/tools/parse_fms_clocks.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python +import argparse +import collections +import json +import os +import sys + +import f90nml + +record_type = collections.defaultdict(lambda: float) +for rec in ('grain', 'pemin', 'pemax',): + record_type[rec] = int + + +def main(): + desc = 'Parse MOM6 model stdout and return clock data in JSON format.' 
+ + parser = argparse.ArgumentParser(description=desc) + parser.add_argument('--format', '-f', action='store_true') + parser.add_argument('--dir', '-d') + parser.add_argument('log') + args = parser.parse_args() + + config = {} + + if args.dir: + # Gather model configuration + input_nml = os.path.join(args.dir, 'input.nml') + nml = f90nml.read(input_nml) + config['input.nml'] = nml.todict() + + parameter_filenames = [ + ('params', 'MOM_parameter_doc.all'), + ('layout', 'MOM_parameter_doc.layout'), + ('debug', 'MOM_parameter_doc.debugging'), + ] + for key, fname in parameter_filenames: + config[key] = {} + with open(os.path.join(args.dir, fname)) as param_file: + params = parse_mom6_param(param_file) + config[key].update(params) + + # Get log path + if os.path.isfile(args.log): + log_path = args.log + elif os.path.isfile(os.path.join(args.dir, args.log)): + log_path = os.path.join(args.dir, args.log) + else: + sys.exit('stdout log not found.') + + # Parse timings + with open(log_path) as log: + clocks = parse_clocks(log) + + config['clocks'] = clocks + + if args.format: + print(json.dumps(config, indent=4)) + else: + print(json.dumps(config)) + + +def parse_mom6_param(param_file): + params = {} + for line in param_file: + param_stmt = line.split('!')[0].strip() + if param_stmt: + key, val = [s.strip() for s in param_stmt.split('=')] + + # TODO: Convert to equivalent Python types + if val in ('True', 'False'): + # bool('False') is True, so compare against the literal string + params[key] = (val == 'True') + else: + params[key] = val + + return params + + +def parse_clocks(log): + clock_start_msg = 'Tabulating mpp_clock statistics across' + clock_end_msg = 'MPP_STACK high water mark=' + + fields = [] + for line in log: + if line.startswith(clock_start_msg): + npes = line.lstrip(clock_start_msg).split()[0] + + # Get records + fields = [] + line = next(log) + + # Skip blank lines + while line.isspace(): + line = next(log) + + fields = line.split() + + # Exit this loop, begin clock parsing + break + + clocks = {} + for line in log: + # Treat MPP_STACK usage as end of clock reports + if line.lstrip().startswith(clock_end_msg): + break + + record = line.split()[-len(fields):] + + clk = line.split(record[0])[0].strip() + clocks[clk] = {} + + for fld, rec in zip(fields, record): + rtype = record_type[fld] + clocks[clk][fld] = rtype(rec) + + return clocks + + +if __name__ == '__main__': + main() diff --git a/.testing/tools/parse_perf.py b/.testing/tools/parse_perf.py new file mode 100755 index 0000000000..b86b1cc106 --- /dev/null +++ b/.testing/tools/parse_perf.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +import argparse +import collections +import json +import os +import shlex +import subprocess +import sys + + +def main(): + desc = 'Parse perf.data and return in JSON format.'
+ + parser = argparse.ArgumentParser(description=desc) + parser.add_argument('--format', '-f', action='store_true') + parser.add_argument('data') + args = parser.parse_args() + + profile = parse_perf_report(args.data) + + if args.format: + print(json.dumps(profile, indent=4)) + else: + print(json.dumps(profile)) + + +def parse_perf_report(perf_data_path): + profile = {} + + cmd = shlex.split( + 'perf report -s symbol,period -i {}'.format(perf_data_path) + ) + with subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) as proc: + event_name = None + for line in proc.stdout: + # Skip blank lines: + if not line or line.isspace(): + continue + + # Set the current event + if line.startswith('# Samples: '): + event_name = line.split()[-1].strip("'") + + # Remove perf modifiers for now + event_name = event_name.rsplit(':', 1)[0] + + profile[event_name] = {} + profile[event_name]['symbol'] = {} + + # Get total count + elif line.startswith('# Event count '): + event_count = int(line.split()[-1]) + profile[event_name]['count'] = event_count + + # skip all other 'comment' lines + elif line.startswith('#'): + continue + + # get per-symbol count + else: + tokens = line.split() + symbol = tokens[2] + period = int(tokens[3]) + + profile[event_name]['symbol'][symbol] = period + + return profile + + +if __name__ == '__main__': + main() diff --git a/.testing/trailer.py b/.testing/trailer.py index a483bf9995..64f016275f 100755 --- a/.testing/trailer.py +++ b/.testing/trailer.py @@ -1,100 +1,137 @@ #!/usr/bin/env python +"""Subroutines for validating the whitespace of the source code.""" import argparse import os import re import sys + def parseCommandLine(): - """ - Parse the command line positional and optional arguments. - This is the highest level procedure invoked from the very end of the script. - """ + """Parse the command line positional and optional arguments. + + This is the highest level procedure invoked from the very end of the + script. + """ + # Arguments + parser = argparse.ArgumentParser( + description='trailer.py checks Fortran files for trailing white ' + 'space.', + epilog='Written by A.Adcroft, 2017.' + ) + parser.add_argument( + 'files_or_dirs', type=str, nargs='+', + metavar='FILE|DIR', + help='Fortran files or directories in which to search for Fortran files ' + '(with .f, .f90, .F90 suffixes).' + ) + parser.add_argument( + '-e', '--exclude_dir', type=str, action='append', + metavar='DIR', + help='''Exclude directories from search that end in DIR.''' + ) + parser.add_argument( + '-l', '--line_length', type=int, default=512, + help='''Maximum allowed length of a line.''' + ) + parser.add_argument( + '-s', '--source_line_length', type=int, default=132, + help='''Maximum allowed length of a source line excluding comments.''' + ) + parser.add_argument( + '-d', '--debug', action='store_true', + help='turn on debugging information.'
+ ) + args = parser.parse_args() - # Arguments - parser = argparse.ArgumentParser(description='''trailer.py checks Fortran files for trailing white space.''', - epilog='Written by A.Adcroft, 2017.') - parser.add_argument('files_or_dirs', type=str, nargs='+', - metavar='FILE|DIR', - help='''Fortran files or director in which to search for Fortran files (with .f, .f90, .F90 suffixes).''') - parser.add_argument('-e','--exclude_dir', type=str, action='append', - metavar='DIR', - help='''Exclude directories from search that end in DIR.''') - parser.add_argument('-l','--line_length', type=int, default=512, - help='''Maximum allowed length of a line.''') - parser.add_argument('-s','--source_line_length', type=int, default=132, - help='''Maximum allowed length of a source line excluding comments.''') - parser.add_argument('-d','--debug', action='store_true', - help='turn on debugging information.') - args = parser.parse_args() + global debug + debug = args.debug - global debug - debug = args.debug + main(args) - main(args) def main(args): - ''' - Does the actual work - ''' - if (debug): print(args) + """Do the actual work.""" + if (debug): + print(args) - # Process files_or_dirs argument into list of files - all_files = [] - for a in args.files_or_dirs: - if os.path.isfile(a): all_files.append(a) - elif os.path.isdir(a): - for d,s,files in os.walk(a): - ignore = False - if args.exclude_dir is not None: - for e in args.exclude_dir: - if e+'/' in d+'/': ignore = True - if not ignore: - for f in files: - _,ext = os.path.splitext(f) - if ext in ('.f','.F','.f90','.F90'): all_files.append( os.path.join(d,f) ) - else: raise Exception('Argument '+a+' is not a file or directory! Stopping.') - if (debug): print('Found: ',all_files) + # Process files_or_dirs argument into list of files + all_files = [] + for a in args.files_or_dirs: + if os.path.isfile(a): + all_files.append(a) + elif os.path.isdir(a): + for d, s, files in os.walk(a): + ignore = False + if args.exclude_dir is not None: + for e in args.exclude_dir: + if e+'/' in d+'/': + ignore = True + if not ignore: + for f in files: + _, ext = os.path.splitext(f) + if ext in ('.f', '.F', '.f90', '.F90'): + all_files.append(os.path.join(d, f)) + else: + raise Exception('Argument '+a+' is not a file or directory! 
' + 'Stopping.') + if (debug): + print('Found: ', all_files) + + # For each file, check for trailing white space + fail = False + for filename in all_files: + this = scan_file(filename, line_length=args.line_length, + source_line_length=args.source_line_length) + fail = fail or this + if fail: + sys.exit(1) - # For each file, check for trailing white space - fail = False - for filename in all_files: - this = scan_file(filename, line_length=args.line_length, source_line_length=args.source_line_length) - fail = fail or this - if fail: sys.exit(1) def scan_file(filename, line_length=512, source_line_length=132): - '''Scans file for trailing white space''' - def msg(filename,lineno,mesg,line=None): - if line is None: print('%s, line %i: %s'%(filename,lineno,mesg)) - else: print('%s, line %i: %s "%s"'%(filename,lineno,mesg,line)) - white_space_detected = False - tabs_space_detected = False - long_line_detected = False - with open(filename) as file: - trailing_space = re.compile(r'.* +$') - tabs = re.compile(r'.*\t.*') - lineno = 0 - for line in file.readlines(): - lineno += 1 - line = line.replace('\n','') - srcline = line.split('!', 1)[0] # Discard comments - if trailing_space.match(line) is not None: - if debug: print(filename,lineno,line,trailing_space.match(line)) - if len(line.strip())>0: msg(filename,lineno,'Trailing space detected',line) - else: msg(filename,lineno,'Blank line contains spaces') - white_space_detected = True - if tabs.match(line) is not None: - if len(line.strip())>0: msg(filename,lineno,'Tab detected',line) - else: msg(filename,lineno,'Blank line contains tabs') - tabs_space_detected = True - if len(line)>line_length: - if len(line.strip())>0: msg(filename,lineno,'Line length exceeded',line) - else: msg(filename,lineno,'Blank line exceeds line length limit') - long_line_detected = True - if len(srcline)>source_line_length: - msg(filename,lineno,'Non-comment line length exceeded',line) - return white_space_detected or tabs_space_detected or long_line_detected + """Scan file for trailing white space.""" + def msg(filename, lineno, mesg, line=None): + if line is None: + print('%s, line %i: %s' % (filename, lineno, mesg)) + else: + print('%s, line %i: %s "%s"' % (filename, lineno, mesg, line)) + white_space_detected = False + tabs_space_detected = False + long_line_detected = False + with open(filename) as file: + trailing_space = re.compile(r'.* +$') + tabs = re.compile(r'.*\t.*') + lineno = 0 + for line in file.readlines(): + lineno += 1 + line = line.replace('\n', '') + srcline = line.split('!', 1)[0] # Discard comments + if trailing_space.match(line) is not None: + if debug: + print(filename, lineno, line, trailing_space.match(line)) + if len(line.strip()) > 0: + msg(filename, lineno, 'Trailing space detected', line) + else: + msg(filename, lineno, 'Blank line contains spaces') + white_space_detected = True + if tabs.match(line) is not None: + if len(line.strip()) > 0: + msg(filename, lineno, 'Tab detected', line) + else: + msg(filename, lineno, 'Blank line contains tabs') + tabs_space_detected = True + if len(line) > line_length: + if len(line.strip()) > 0: + msg(filename, lineno, 'Line length exceeded', line) + else: + msg(filename, lineno, + 'Blank line exceeds line length limit') + long_line_detected = True + if len(srcline) > source_line_length: + msg(filename, lineno, 'Non-comment line length exceeded', line) + return white_space_detected or tabs_space_detected or long_line_detected + # Invoke parseCommandLine(), the top-level procedure -if __name__ == '__main__': 
parseCommandLine() +if __name__ == '__main__': + parseCommandLine() diff --git a/.travis.yml index 22c497f916..c34089ddf6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,10 +12,12 @@ addons: packages: - tcsh pkg-config netcdf-bin libnetcdf-dev libnetcdff-dev gfortran - mpich libmpich-dev - - doxygen graphviz flex bison cmake + - graphviz flex bison cmake - python-numpy python-netcdf4 - - python3 python3-dev python3-venv python3-pip + - python3 python3-dev python3-venv python3-pip python3-sphinx python3-lxml - bc + - perl + - texlive-binaries texlive-base bibtool tex-common texlive-bibtex-extra # Environment variables env: @@ -34,8 +36,12 @@ jobs: # Whitespace - ./.testing/trailer.py -e TEOS10 -l 120 src config_src # API Documentation - - cd docs && doxygen Doxyfile_nortd - - grep -v "config_src/solo_driver/coupler_types.F90" doxygen.log | tee doxy_errors + - perl -e 'print "perl version $^V" . "\n"' + - cd docs && mkdir _build && make nortd DOXYGEN_RELEASE=Release_1_8_13 UPDATEHTMLEQS=Y + # We can tighten up the warnings here. Math in image captions should only generate + # \f warnings. All other latex math should be double escaped (\\) like (\\Phi) for + # html image captions. + - grep "warning:" _build/doxygen_warn_nortd_log.txt | grep -v 'Illegal command f as part of a \\image' | tee doxy_errors - test ! -s doxy_errors - env: @@ -56,7 +62,6 @@ jobs: env: - JOB="x86 Regression testing" - DO_REGRESSION_TESTS=true - - REPORT_COVERAGE=true - MOM_TARGET_SLUG=${TRAVIS_REPO_SLUG} - MOM_TARGET_LOCAL_BRANCH=${TRAVIS_BRANCH} script: @@ -67,18 +72,6 @@ jobs: - time make -k -s test.regressions - make test.summary - - if: NOT type = pull_request - env: - - JOB="Coverage upload" - - REPORT_COVERAGE=true - - DO_REGRESSION_TESTS=false - script: - - cd .testing - - echo 'Build executables...' && echo -en 'travis_fold:start:script.1\\r' - - make build/symmetric/MOM6 - - echo -en 'travis_fold:end:script.1\\r' - - make -k -s run.symmetric - - arch: arm64 env: - JOB="ARM64 verification testing" diff --git a/CMakeLists.txt b/CMakeLists.txt index 352ab228a9..6980a07cf1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -80,7 +80,7 @@ target_include_directories(${PROJECT_NAME} INTERFACE # Installed .h include locations target_include_directories(mom6 PUBLIC - $ + $ $ $) @@ -88,7 +88,7 @@ target_include_directories(mom6 PUBLIC # mom6.x executable #------------------------------------------------------------------------------- set ( mom6_exe_files - config_src/solo_driver/MOM_driver.F90 + config_src/drivers/solo_driver/MOM_driver.F90 ) ecbuild_add_executable( TARGET mom6.x diff --git a/ac/Makefile.in b/ac/Makefile.in index ce8173e6f1..7be6c5bf2b 100644 --- a/ac/Makefile.in +++ b/ac/Makefile.in @@ -10,19 +10,28 @@ # # The following variables are used by Makefiles generated by mkmf.
# -# CC: C compiler -# CXX: C++ compiler -# FC: Fortran compiler (f77 and f90) -# LD: Linker +# CC C compiler +# CXX C++ compiler +# FC Fortran compiler (f77 and f90) +# LD Linker +# AR Archiver # -# CPPDEFS: Preprocessor macros -# CPPFLAGS: C preprocessing flags -# CXXFLAGS: C++ preprocessing flags -# FPPFLAGS: Fortran preprocessing flags +# CPPDEFS Preprocessor macros +# CPPFLAGS C preprocessing flags +# CXXFLAGS C++ preprocessing flags +# FPPFLAGS Fortran preprocessing flags +# +# CFLAGS C compiler flags +# FFLAGS Fortran compiler flags +# LDFLAGS Linker flags + libraries +# ARFLAGS Archiver flags +# +# OTHERFLAGS Additional flags for all languages (C, C++, Fortran) +# OTHER_CFLAGS Optional C flags +# OTHER_CXXFLAGS Optional C++ flags +# OTHER_FFLAGS Optional Fortran flags +# TMPFILES Placeholder for `make clean` deletion (as `make neat`). # -# CFLAGS: C compiler flags -# FFLAGS: Fortran compiler flags -# LDFLAGS: Linker flags + libraries # # NOTES: # - FPPFLAGS and FFLAGS always appear as a pair, and autoconf does not use @@ -33,13 +42,6 @@ # # - LDFLAGS does not distinguish between autoconf's LDFLAGS and LIBS. # It also places both after the executable rather than just LIBS. -# -# OTHERFLAGS: Additional flags for all languages (C, C++, Fortran) -# OTHER_CFLAGS: Optional C flags -# OTHER_CXXFLAGS: Optional C++ flags -# OTHER_FFLAGS: Optional Fortran flags -# -# TMPFILES: Placeholder for `make clean` deletion (as `make neat`). FC = @FC@ LD = @FC@ diff --git a/ac/README.md b/ac/README.md index d5a5310ab8..f50275c3a0 100644 --- a/ac/README.md +++ b/ac/README.md @@ -28,25 +28,31 @@ support. The following instructions will allow a new user to quickly create a MOM6 executable for ocean-only simulations. -Each set of instructions is meant to be run from the root directory of the -repository. +Before starting, ensure that all submodules have been updated. +``` +$ git submodule update --init --recursive +``` -A separate Makefile in `ac/deps/` is provided to gather and build any GFDL -dependencies. +Next, fetch the GFDL `mkmf` build tool and build the FMS framework library. + +For new users, a separate Makefile in `./ac/deps/` is provided for this step. ``` $ cd ac/deps $ make -j ``` -This will fetch the `mkmf` tool and build the FMS library. -To build MOM6, first generate the Autoconf `configure` script. +To build MOM6, first generate the Autoconf `configure` script in `./ac`. ``` +$ cd ../.. # Return to the root directory $ cd ac $ autoreconf ``` -Then select your build directory, e.g. `./build`, run the configure script, and -build the model. +Then select your build directory, run the configure script, and build the +model. + +The instructions below build the model in the `./build` directory. ``` +$ cd .. # Return to the root directory $ mkdir -p build $ cd build $ ../ac/configure @@ -54,31 +60,32 @@ $ make -j ``` This will create the MOM6 executable in the build directory. -This executable is only useable for ocean-only simulations, and cannot be used -for coupled modeling. It also requires the necessary experiment configuration -files, such as `input.nml` and `MOM_input`. For more information, consult the -[MOM6-examples wiki](https://github.com/NOAA-GFDL/MOM6-examples/wiki). +The steps above will produce an executable for ocean-only simulations, and +cannot be used for coupled modeling. It also requires the necessary experiment +configuration files, such as `input.nml` and `MOM_input`. 
For more +information, consult the [MOM6-examples +wiki](https://github.com/NOAA-GFDL/MOM6-examples/wiki). # Build rules The Makefile produced by Autoconf provides the following rules. -``make`` +`make` Build the MOM6 executable. -``make clean`` +`make clean` Delete the executable and any object and module files, but preserve the Autoconf output. -``make distclean`` +`make distclean` Delete all of the files above, as well as any files generated by `./configure`. Note that this will delete the Makefile containing this rule. -``make ac-clean`` +`make ac-clean` Delete all of the files above, including `./configure` and any other files created by `autoreconf`. As with `make distclean`, this will also delete the @@ -124,31 +131,24 @@ For the complete list of settings, run `./configure --help`. # GFDL Dependencies -This section briefly describes the management of GFDL dependencies `mkmf` and -FMS. - -The `configure` script will first check if the compiler and its configured -flags (`FCFLAGS`, `LDFLAGS`, etc.) can find `mkmf` and the FMS library. If -unavailable, then it will search in the local `ac/deps` library. If still -unavailable, then the build will abort. - -Running `make -C ac/deps` will ensure that the libraries are available. But if -the user wishes to target an external FMS library, then they should add the -appropriate `FCFLAGS` and `LDFLAGS` to find the library. - -Similar options are provided for `mkmf` with respect to `PATH`, although it -is usually not necessary to use an external `mkmf` script. - -Some configuration options are provided by the `ac/deps` Makefile: +This section briefly describes the management of the `mkmf` and FMS +dependencies. -`PATH_ENV` +When building MOM6, the `configure` script will first check if the compiler and +its configured flags (`FCFLAGS`, `LDFLAGS`, etc.) can locate `mkmf` and the FMS +library. If unavailable, then it will search in the local `ac/deps` library. +If still unavailable, then the build will abort. - This variable will override the value of `PATH` when building the dependencies. +The dependencies are not automatically provided in `ac/deps`. However, running +`make -C ac/deps` will fetch and build them. If the user wishes to target an +external FMS library or `mkmf` tools, then they should set `PATH`, `FCFLAGS` +and `LDFLAGS` so that `configure` can locate them. -`FCFLAGS_ENV` +Exported environment variables such as `FC` or `FCFLAGS` will be passed to the +corresponding `configure` scripts. - Used to override the default Autoconf flags, `-g -O2`. This is useful if, - for example, one wants to build with `-O0` to speed up the build time. +The following configuration options are also provided, which can be used to +specify the git URL and commit of the dependencies. `MKMF_URL` (*default:* https://github.com/NOAA-GFDL/mkmf.git) @@ -157,12 +157,6 @@ Some configuration options are provided by the `ac/deps` Makefile: `FMS_URL` (*default:* https://github.com/NOAA-GFDL/FMS.git) `FMS_COMMIT` (*default:* `2019.01.03`) - - These are used to specify where to check out the source code for each - respective project. - -Additional hooks for FMS builds do not yet exist, but can be added if -necessary. # Known issues / Future development diff --git a/ac/configure.ac b/ac/configure.ac index ee6b76dacb..3d1af81b05 100644 --- a/ac/configure.ac +++ b/ac/configure.ac @@ -8,6 +8,8 @@ # - We would probably run this inside of a script to avoid the explicit # dependency on git. +AC_PREREQ([2.63]) + AC_INIT( [MOM6], [ ], @@ -40,17 +42,35 @@ srcdir=$srcdir/.. 
# Default to symmetric grid # NOTE: --enable is more properly used to add a feature, rather than to select # a compile-time mode, so this is not exactly being used as intended. -MEM_LAYOUT=${srcdir}/config_src/dynamic_symmetric +MEM_LAYOUT=${srcdir}/config_src/memory/dynamic_symmetric AC_ARG_ENABLE([asymmetric], AS_HELP_STRING([--enable-asymmetric], [Use the asymmetric grid])) AS_IF([test "$enable_asymmetric" = yes], - [MEM_LAYOUT=${srcdir}/config_src/dynamic]) + [MEM_LAYOUT=${srcdir}/config_src/memory/dynamic_nonsymmetric]) +# Default to solo_driver +DRIVER_DIR=${srcdir}/config_src/drivers/solo_driver +AC_ARG_WITH([driver], + AS_HELP_STRING([--with-driver=coupled_driver|solo_driver], [Select directory for driver source code])) +AS_IF([test "x$with_driver" != "x"], + [DRIVER_DIR=${srcdir}/config_src/drivers/${with_driver}]) # TODO: Rather than point to a pre-configured header file, autoconf could be # used to configure a header based on a template. #AC_CONFIG_HEADERS(["$MEM_LAYOUT/MOM_memory.h"]) +# Select the model framework (default: FMS1) +# NOTE: We can phase this out after the FMS1 I/O has been removed from FMS and +# replace with a detection test. For now, it is a user-defined switch. +MODEL_FRAMEWORK=${srcdir}/config_src/infra/FMS1 +AC_ARG_WITH([framework], + AS_HELP_STRING([--with-framework=fms1|fms2], [Select the model framework])) +AS_CASE(["$with_framework"], + [fms1], [MODEL_FRAMEWORK=${srcdir}/config_src/infra/FMS1], + [fms2], [MODEL_FRAMEWORK=${srcdir}/config_src/infra/FMS2], + [MODEL_FRAMEWORK=${srcdir}/config_src/infra/FMS1] +) + # Explicitly assume free-form Fortran AC_LANG(Fortran) @@ -80,18 +100,57 @@ AX_FC_CHECK_MODULE([mpi], # netCDF configuration -AC_PATH_PROG([NC_CONFIG], [nc-config]) -AS_IF([test -n "$NC_CONFIG"], - [CPPFLAGS="$CPPFLAGS -I$($NC_CONFIG --includedir)" - FCFLAGS="$FCFLAGS -I$($NC_CONFIG --includedir)" - LDFLAGS="$LDFLAGS -L$($NC_CONFIG --libdir)"], - [AC_MSG_ERROR([Could not find nc-config.])]) - -AX_FC_CHECK_MODULE([netcdf], - [], [AC_MSG_ERROR([Could not find FMS library.])]) -AX_FC_CHECK_LIB([netcdff], [nf_create], [netcdf], - [], [AC_MSG_ERROR([Could not link netcdff library.])] -) + +# Search for the Fortran netCDF module, fallback to nf-config. +AX_FC_CHECK_MODULE([netcdf], [], [ + AS_UNSET([ax_fc_cv_mod_netcdf]) + AC_PATH_PROG([NF_CONFIG], [nf-config]) + AS_IF([test -n "$NF_CONFIG"], [ + AC_SUBST([FCFLAGS], ["$FCFLAGS -I$($NF_CONFIG --includedir)"]) + ], [AC_MSG_ERROR([Could not find nf-config.])] + ) + AX_FC_CHECK_MODULE([netcdf], [], [ + AC_MSG_ERROR([Could not find netcdf module.]) + ]) +]) + +# FMS may invoke netCDF C calls, so we link to libnetcdf. +AC_LANG_PUSH([C]) +AC_CHECK_LIB([netcdf], [nc_create], [], [ + AS_UNSET([ac_cv_lib_netcdf_nc_create]) + AC_PATH_PROG([NC_CONFIG], [nc-config]) + AS_IF([test -n "$NC_CONFIG"], [ + AC_SUBST([LDFLAGS], + ["$LDFLAGS -L$($NC_CONFIG --libdir)"] + ) + ], [AC_MSG_ERROR([Could not find nc-config.])] + ) + AC_CHECK_LIB([netcdf], [nc_create], [], [ + AC_MSG_ERROR([Could not find libnetcdf.]) + ]) +]) +AC_LANG_POP([C]) + +# NOTE: We test for nf_create, rather than nf90_create, because AX_FC_CHECK_LIB +# is currently not yet able to properly probe inside modules. +# Testing of the nf90_* functions will require a macro update. +# NOTE: nf-config does not have a --libdir flag, so we use --prefix and assume +# that libraries are in the $prefix/lib directory. 
+ +# Link to Fortran netCDF library, netcdff +AX_FC_CHECK_LIB([netcdff], [nf_create], [], [], [ + AS_UNSET([ax_fc_cv_lib_netcdff_nf_create]) + AC_PATH_PROG([NF_CONFIG], [nf-config]) + AS_IF([test -n "$NF_CONFIG"], [ + AC_SUBST([LDFLAGS], + ["$LDFLAGS -L$($NF_CONFIG --prefix)/lib"] + ) + ], [AC_MSG_ERROR([Could not find nf-config.])] + ) + AX_FC_CHECK_LIB([netcdff], [nf_create], [], [], [ + AC_MSG_ERROR([Could not find libnetcdff.]) + ]) +]) # Force 8-byte reals @@ -102,11 +161,22 @@ AS_IF( # OpenMP configuration -AC_OPENMP + +# NOTE: AC_OPENMP fails on `Fortran` for Autoconf <2.69 due to a m4 bug. +# For older versions, we test against CC and use the result for FC. +m4_version_prereq([2.69], [AC_OPENMP], [ + AC_LANG_PUSH([C]) + AC_OPENMP + AC_LANG_POP([C]) + OPENMP_FCFLAGS="$OPENMP_CFLAGS" +]) + +# NOTE: Only apply OpenMP flags if explicitly enabled. AS_IF( - [test "$enable_openmp" = yes], - [FCFLAGS="$FCFLAGS $OPENMP_FCFLAGS" - LDFLAGS="$LDFLAGS $OPENMP_FCFLAGS"]) + [test "$enable_openmp" = yes], [ + FCFLAGS="$FCFLAGS $OPENMP_FCFLAGS" + LDFLAGS="$LDFLAGS $OPENMP_FCFLAGS" +]) # FMS support @@ -134,6 +204,22 @@ AX_FC_CHECK_LIB([FMS], [fms_init], [fms_mod], ) +# Verify that FMS is at least 2019.01.02 +# NOTE: 2019.01.02 introduced two changes: +# - diag_axis_init supports an optional domain_position argument +# - position values NORTH, EAST, CENTER were added to diag_axis_mod +# For our versioning test, we check the second feature. +AC_MSG_CHECKING([if diag_axis_mod supports domain positions]) +AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM([], [use diag_axis_mod, only: NORTH, EAST, CENTER])], + [AC_MSG_RESULT([yes])], + [ + AC_MSG_RESULT([no]) + AC_MSG_ERROR([diag_axis_mod in MOM6 requires FMS 2019.01.02 or newer.]) + ] +) + + # Search for mkmf build tools AC_PATH_PROG([LIST_PATHS], [list_paths]) AS_IF([test -z "$LIST_PATHS"], [ @@ -158,10 +244,14 @@ AS_IF([test -z "$MKMF"], [ AC_CONFIG_COMMANDS([path_names], [list_paths -l \ ${srcdir}/src \ - ${srcdir}/config_src/solo_driver \ + ${MODEL_FRAMEWORK} \ ${srcdir}/config_src/ext* \ - ${MEM_LAYOUT} -], [MEM_LAYOUT=$MEM_LAYOUT]) + ${DRIVER_DIR} \ + ${MEM_LAYOUT}], + [MODEL_FRAMEWORK=$MODEL_FRAMEWORK + MEM_LAYOUT=$MEM_LAYOUT + DRIVER_DIR=$DRIVER_DIR] +) AC_CONFIG_COMMANDS([Makefile.mkmf], diff --git a/ac/deps/Makefile b/ac/deps/Makefile index 91fe343047..0ed4fd19a7 100644 --- a/ac/deps/Makefile +++ b/ac/deps/Makefile @@ -1,22 +1,10 @@ SHELL = bash -.SUFFIXES: - -# FMS build configuration -PATH_ENV ?= -FCFLAGS_ENV ?= - -# Only set FCFLAGS if an argument is provided. -FMS_FCFLAGS = -ifneq ($(FCFLAGS_ENV),) - FMS_FCFLAGS := FCFLAGS="$(FCFLAGS_ENV)" -endif +# Disable implicit rules +.SUFFIXES: -# Ditto for path -FMS_PATH = -ifneq ($(PATH_ENV),) - FMS_PATH := PATH="$(PATH_ENV)" -endif +# Disable implicit variables +MAKEFLAGS += -R # mkmf, list_paths (GFDL build toolchain) @@ -44,7 +32,6 @@ FMS_SOURCE = $(call SOURCE,fms/src) .PHONY: all all: bin/mkmf bin/list_paths lib/libFMS.a - #--- # mkmf checkout @@ -65,6 +52,7 @@ mkmf: # This is a flawed approach, since module files are untracked and could be # handled more safely, but this is adequate for now. + # TODO: track *.mod copy? lib/libFMS.a: fms/build/libFMS.a fms/build/Makefile mkdir -p {lib,include} @@ -76,13 +64,10 @@ fms/build/libFMS.a: fms/build/Makefile make -C fms/build libFMS.a -# TODO: Include FC, CC, CFLAGS? 
-fms/build/Makefile: FMS_ENV=$(FMS_PATH) $(FMS_FCFLAGS) - fms/build/Makefile: Makefile.fms.in fms/src/configure bin/mkmf bin/list_paths mkdir -p fms/build cp Makefile.fms.in fms/src/Makefile.in - cd $(@D) && $(FMS_ENV) ../src/configure --srcdir=../src + cd $(@D) && ../src/configure --srcdir=../src # TODO: Track m4 macros? @@ -100,7 +85,6 @@ fms/src: clean: rm -rf fms/build lib include bin - .PHONY: distclean distclean: clean rm -rf fms mkmf diff --git a/ac/deps/Makefile.fms.in b/ac/deps/Makefile.fms.in index 694ad8e0b0..0286d94b58 100644 --- a/ac/deps/Makefile.fms.in +++ b/ac/deps/Makefile.fms.in @@ -1,46 +1,58 @@ -# Makefile template for MOM6 +# Makefile template for autoconf builds using mkmf # -# Previously this would have been generated by mkmf using a template file. +# Compiler flags are configured by autoconf's configure script. # -# The proposed autoconf build inverts this approach by constructing the -# information previously stored in the mkmf template, such as compiler names -# and flags, and importing the un-templated mkmf output for its rules and -# dependencies. +# Source code dependencies are configured by mkmf and list_paths, specified in +# the `Makefile.mkmf` file. # -# While this approach does not eliminate our dependency on mkmf, it does -# promises to eliminate our reliance on platform-specific templates, and -# instead allows us to provide a configure script for determining our compilers -# and flags. As a last resort, we provide hooks to override such settings. - -# NOTE: mkmf conventions are close, but not identical, to autoconf. -# -# CC: C compiler -# CXX: C++ compiler -# FC: Fortran compiler (f77 and f90) -# LD: Linker -# -# CPPDEFS: Preprocessor macros -# CPPFLAGS: C preprocessing flags -# CXXFLAGS: C++ preprocessing flags -# FPPFLAGS: Fortran preprocessing flags -# -# CFLAGS: C compiler flags -# FFLAGS: Fortran compiler flags (f77 and f90) -# LDFLAGS: Linker flags -# -# OTHERFLAGS: Additional flags for all languages (C, C++, Fortran) -# OTHER_CFLAGS: Optional C flags -# OTHER_CXXFLAGS: Optional C++ flags -# OTHER_FFLAGS: Optional Fortran flags +# mkmf conventions are close, but not identical, to autoconf. We attempt to +# map the autoconf variables to the mkmf variables. +# +# The following variables are used by Makefiles generated by mkmf. +# +# CC C compiler +# CXX C++ compiler +# FC Fortran compiler (f77 and f90) +# LD Linker +# AR Archiver +# +# CPPDEFS Preprocessor macros +# CPPFLAGS C preprocessing flags +# CXXFLAGS C++ preprocessing flags +# FPPFLAGS Fortran preprocessing flags +# +# CFLAGS C compiler flags +# FFLAGS Fortran compiler flags +# LDFLAGS Linker flags + libraries +# ARFLAGS Archiver flags +# +# OTHERFLAGS Additional flags for all languages (C, C++, Fortran) +# OTHER_CFLAGS Optional C flags +# OTHER_CXXFLAGS Optional C++ flags +# OTHER_FFLAGS Optional Fortran flags +# TMPFILES Placeholder for `make clean` deletion (as `make neat`). +# +# +# NOTES: +# - FPPFLAGS and FFLAGS always appear as a pair, and autoconf does not use +# FPPFLAGS, so FPPFLAGS does not serve much purpose. +# +# - mkmf's FFLAGS does not distinguish between autoconf's fixed-format +# FFLAGS and free-format FCFLAGS. +# +# - LDFLAGS does not distinguish between autoconf's LDFLAGS and LIBS. +# It also places both after the executable rather than just LIBS. 
CC = @CC@ FC = @FC@ LD = @FC@ +AR = @AR@ CPPDEFS = @DEFS@ CPPFLAGS = @CPPFLAGS@ FFLAGS = @FCFLAGS@ LDFLAGS = @LDFLAGS@ +ARFLAGS = @ARFLAGS@ # Gather modulefiles TMPFILES = $(wildcard *.mod) diff --git a/ac/deps/configure.fms.ac b/ac/deps/configure.fms.ac index 1d66194c81..bf899126cc 100644 --- a/ac/deps/configure.fms.ac +++ b/ac/deps/configure.fms.ac @@ -1,4 +1,6 @@ # Autoconf configuration +AC_PREREQ([2.63]) + AC_INIT( [FMS], [ ], @@ -13,13 +15,14 @@ AC_PROG_CC AX_MPI CC=$MPICC + # FMS configuration -# Linux and OSX have a gettid system call, but it is not implemented in older +# Linux and macOS have a gettid system call, but it is not implemented in older # glibc implementations. When unavailable, a native syscall is used. # # On Linux, this is defined in unistd.h as __NR_gettid, and FMS is hard-coded -# to use this value. In OS X, this is defined in sys/syscall.h as SYS_gettid, +# to use this value. In macOS, this is defined in sys/syscall.h as SYS_gettid, # so we override this macro if __NR_gettid is unavailable. AC_CHECK_FUNCS([gettid], [], [ AC_MSG_CHECKING([if __NR_gettid must be redefined]) @@ -71,18 +74,39 @@ AC_DEFINE([use_libMPI]) # netCDF configuration -AC_PATH_PROG([NC_CONFIG], [nc-config]) -AS_IF([test -n "$NC_CONFIG"], - [CPPFLAGS="$CPPFLAGS -I$($NC_CONFIG --includedir)" - FCFLAGS="$FCFLAGS -I$($NC_CONFIG --includedir)" - LDFLAGS="$LDFLAGS -L$($NC_CONFIG --libdir)"], - [AC_MSG_ERROR([Could not find nc-config.])]) - -AX_FC_CHECK_MODULE([netcdf], - [], [AC_MSG_ERROR([Could not find FMS library.])]) -AX_FC_CHECK_LIB([netcdff], [nf_create], [netcdf], - [], [AC_MSG_ERROR([Could not link netcdff library.])] -) + +# Check for netcdf.h header function declarations. +# If unavailable, then try to invoke nc-create. +AC_LANG_PUSH([C]) +AC_CHECK_HEADERS([netcdf.h], [], [ + AS_UNSET([ac_cv_header_netcdf_h]) + AC_PATH_PROG([NC_CONFIG], [nc-config]) + AS_IF([test -n "$NC_CONFIG"], [ + AC_SUBST([CPPFLAGS], ["$CPPFLAGS -I$($NC_CONFIG --includedir)"]) + ], + [AC_MSG_ERROR([Could not find nc-config.])] + ) + AC_CHECK_HEADERS([netcdf.h], [], [ + AC_MSG_ERROR([Could not find netcdf.h]) + ]) +]) +AC_LANG_POP([C]) + +# Search for the Fortran netCDF module, fallback to nf-config. +AX_FC_CHECK_MODULE([netcdf], [], [ + AS_UNSET([ax_fc_cv_mod_netcdf]) + AC_PATH_PROG([NF_CONFIG], [nf-config]) + AS_IF([test -n "$NF_CONFIG"], [ + AC_SUBST([FCFLAGS], ["$FCFLAGS -I$($NF_CONFIG --includedir)"]) + ], + [AC_MSG_ERROR([Could not find nf-config.])] + ) + AX_FC_CHECK_MODULE([netcdf], [], [ + AC_MSG_ERROR([Could not find netcdf module.]) + ]) +]) + +# FMS requires this macro to signal netCDF support. AC_DEFINE([use_netCDF]) @@ -98,15 +122,31 @@ AS_IF( # OpenMP configuration -AC_OPENMP + +# NOTE: AC_OPENMP fails in Autoconf <2.69 when LANG is Fortran or Fortran 77. +# For older versions, we test against CC and use the result for FC. +m4_version_prereq([2.69], [AC_OPENMP], [ + AC_LANG_PUSH([C]) + AC_OPENMP + AC_LANG_POP([C]) + OPENMP_FCFLAGS="$OPENMP_CFLAGS" +]) + +# NOTE: Only apply OpenMP flags if explicitly enabled. AS_IF( - [test "$enable_openmp" = yes], - [FCFLAGS="$FCFLAGS $OPENMP_FCFLAGS" - LDFLAGS="$LDFLAGS $OPENMP_FCFLAGS"]) + [test "$enable_openmp" = yes], [ + FCFLAGS="$FCFLAGS $OPENMP_FCFLAGS" + LDFLAGS="$LDFLAGS $OPENMP_FCFLAGS" +]) + +# Unlimited line length (2.67) +# AC_FC_LINE_LENGTH was added in 2.67. 
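The header and module checks above each try the compiler first and fall back to the nc-config or nf-config helper only on failure. On the Fortran side, AX_FC_CHECK_MODULE compiles something close to the sketch below; if that fails, configure appends the include path reported by nf-config to FCFLAGS and retries (program name illustrative, module name as in the macro call):

! Rough shape of the module-availability probe: success means "use netcdf"
! resolves with the current FCFLAGS; failure triggers the nf-config fallback.
program conftest
  use netcdf
end program conftest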
+m4_version_prereq([2.67], + [AC_FC_LINE_LENGTH([unlimited])], + [AX_FC_LINE_LENGTH([unlimited])] +) -# Unlimited line length -AC_FC_LINE_LENGTH([unlimited]) # Allow invaliz BOZ assignment AX_FC_ALLOW_INVALID_BOZ @@ -141,12 +181,22 @@ AS_IF([test -z "$MKMF"], [ # MKMF commands AC_CONFIG_COMMANDS([path_names], [${LIST_PATHS} -l ${srcdir}], - [LIST_PATHS=${LIST_PATHS}]) + [LIST_PATHS=${LIST_PATHS}] +) AC_CONFIG_COMMANDS([mkmf], [${MKMF} -p libFMS.a -m Makefile.mkmf path_names], - [MKMF=${MKMF}]) + [MKMF=${MKMF}] +) + + +# Autoconf does not configure the archiver (ar), as it is handled by Automake. +# TODO: Properly configure this tool. For now, we hard-set this to `ar`. +AR=ar +ARFLAGS=rv +AC_SUBST(AR) +AC_SUBST(ARFLAGS) # Prepare output diff --git a/ac/deps/m4/ax_fc_cray_pointer.m4 b/ac/deps/m4/ax_fc_cray_pointer.m4 index a9f5d9bbe3..57ed186afa 100644 --- a/ac/deps/m4/ax_fc_cray_pointer.m4 +++ b/ac/deps/m4/ax_fc_cray_pointer.m4 @@ -19,8 +19,8 @@ dnl AC_DEFUN([AX_FC_CRAY_POINTER], [ AC_LANG_ASSERT([Fortran]) AC_MSG_CHECKING([for $FC option to support Cray pointers]) - AC_CACHE_VAL([ac_cv_prog_fc_cray_ptr], [ - ac_cv_prog_fc_cray_ptr='unknown' + AC_CACHE_VAL([ac_cv_fc_cray_ptr], [ + ac_cv_fc_cray_ptr='unknown' ac_save_FCFLAGS=$FCFLAGS for ac_option in none -fcray-pointer -Mcray=pointer; do test "$ac_option" != none && FCFLAGS="$ac_save_FCFLAGS $ac_option" @@ -29,21 +29,21 @@ AC_DEFUN([AX_FC_CRAY_POINTER], [ integer aptr(2) pointer (iptr, aptr) ])], - [ac_cv_prog_fc_cray_ptr=$ac_option], + [ac_cv_fc_cray_ptr=$ac_option], ) FCFLAGS=$ac_save_FCFLAGS - AS_IF([test "$ac_cv_prog_fc_cray_ptr" != unknown], [break]) + AS_IF([test "$ac_cv_fc_cray_ptr" != unknown], [break]) done ]) - AS_CASE([ac_cv_prog_fc_cray_ptr], + AS_CASE([ac_cv_fc_cray_ptr], [none], [AC_MSG_RESULT([none_needed])], [unknown], [AC_MSG_RESULT([unsupported])], - [AC_MSG_RESULT([$ac_cv_prog_fc_cray_ptr])] + [AC_MSG_RESULT([$ac_cv_fc_cray_ptr])] ) - AS_IF([test "$ac_cv_prog_fc_cray_ptr" != unknown], [ + AS_IF([test "$ac_cv_fc_cray_ptr" != unknown], [ m4_default([$1], [ - AS_IF([test "$ac_cv_prog_fc_cray_ptr" != none], - [FCFLAGS="$FCFLAGS $ac_cv_prog_fc_cray_ptr"] + AS_IF([test "$ac_cv_fc_cray_ptr" != none], + [FCFLAGS="$FCFLAGS $ac_cv_fc_cray_ptr"] ) ])], [m4_default([$2], [AC_MSG_ERROR(["$FC does not support Cray pointers"])])] diff --git a/ac/deps/m4/ax_fc_line_length.m4 b/ac/deps/m4/ax_fc_line_length.m4 new file mode 100644 index 0000000000..97271da1f6 --- /dev/null +++ b/ac/deps/m4/ax_fc_line_length.m4 @@ -0,0 +1,101 @@ +# AX_FC_LINE_LENGTH([LENGTH], [ACTION-IF-SUCCESS], +# [ACTION-IF-FAILURE = FAILURE]) +# ------------------------------------------------ +# This is a backport of the AC_FC_LINE_LENGTH macro in Autoconf 2.67 and newer. +# Comments below are from the Autoconf 2.69 implementation. +# +# Look for a compiler flag to make the Fortran (FC) compiler accept long lines +# in the current (free- or fixed-format) source code, and adds it to FCFLAGS. +# The optional LENGTH may be 80, 132 (default), or `unlimited' for longer +# lines. Note that line lengths above 250 columns are not portable, and some +# compilers (hello ifort) do not accept more than 132 columns at least for +# fixed format. Call ACTION-IF-SUCCESS (defaults to nothing) if successful +# (i.e. can compile code using new extension) and ACTION-IF-FAILURE (defaults +# to failing with an error message) if not. (Defined via DEFUN_ONCE to +# prevent flag from being added to FCFLAGS multiple times.) 
+# You should call AC_FC_FREEFORM or AC_FC_FIXEDFORM to set the desired format +# prior to using this macro. +# +# The known flags are: +# -f{free,fixed}-line-length-N with N 72, 80, 132, or 0 or none for none. +# -ffree-line-length-none: GNU gfortran +# -ffree-line-length-huge: g95 (also -ffixed-line-length-N as above) +# -qfixed=132 80 72: IBM compiler (xlf) +# -Mextend: Cray +# -132 -80 -72: Intel compiler (ifort) +# Needs to come before -extend_source because ifort +# accepts that as well with an optional parameter and +# doesn't fail but only warns about unknown arguments. +# -extend_source: SGI compiler +# -W, -WNN (132, 80, 72): Absoft Fortran +# +es, +extend_source: HP Fortran (254 in either form, default is 72 fixed, +# 132 free) +# -w, (-)-wide: Lahey/Fujitsu Fortran (255 cols in fixed form) +# -e: Sun Fortran compiler (132 characters) +# -132: NAGWare +# -72, -f, -Wf,-f: f2c (a weak form of "free-form" and long lines). +# /XLine: Open Watcom + +AC_DEFUN_ONCE([AX_FC_LINE_LENGTH], [ + AC_LANG_ASSERT([Fortran]) + m4_case(m4_default([$1], [132]), + [unlimited], [ + ac_fc_line_len_string=unlimited + ac_fc_line_len=0 + ac_fc_line_length_test=' + subroutine longer_than_132(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,'\ +'arg9,arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19)' + ], + [132], [ + ac_fc_line_len=132 + ac_fc_line_length_test=' + subroutine longer_than_80(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,'\ +'arg10)' + ], + [80], [ + ac_fc_line_len=80 + ac_fc_line_length_test=' + subroutine longer_than_72(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9)' + ], + [m4_warning([Invalid length argument `$1'])] + ) + : ${ac_fc_line_len_string=$ac_fc_line_len} + AC_MSG_CHECKING([for Fortran flag needed to accept $ac_fc_line_len_string column source lines]) + AC_CACHE_VAL([ac_cv_fc_line_length], [ + ac_cv_fc_line_length=unknown + ac_save_FCFLAGS=$FCFLAGS + for ac_flag in none \ + -ffree-line-length-none \ + -ffixed-line-length-none \ + -ffree-line-length-huge \ + -ffree-line-length-$ac_fc_line_len \ + -ffixed-line-length-$ac_fc_line_len \ + -qfixed=$ac_fc_line_len \ + -Mextend \ + -$ac_fc_line_len \ + -extend_source \ + -W$ac_fc_line_len \ + -W +extend_source +es -wide --wide -w -e -f -Wf,-f -xline + do + test "$ac_flag" != none && FCFLAGS="$ac_save_FCFLAGS $ac_flag" + AC_COMPILE_IFELSE([$ac_fc_line_length_test + end subroutine + ], [ac_cv_fc_line_length=$ac_flag] + ) + FCFLAGS=$ac_save_FCFLAGS + dnl TODO: Remove conftest.{err,$ac_objext,$ac_ext} ?? 
+ AS_IF([test "$ac_cv_fc_line_length" != unknown], [break]) + done + ]) + AC_MSG_RESULT([$ac_cv_fc_line_length]) + AS_IF([test "$ac_cv_fc_line_length" != unknown], [ + m4_default([$2], [ + AS_IF([test "$ac_cv_fc_line_length" != none], [ + FCFLAGS="$FCFLAGS $ac_cv_fc_line_length" + ]) + ])], [ + m4_default([$3], [ + AC_MSG_ERROR([Fortran does not accept long source lines], 77) + ]) + ]) +]) diff --git a/ac/deps/m4/ax_mpi.m4 b/ac/deps/m4/ax_mpi.m4 index ecce2e141a..3d9966a19d 100644 --- a/ac/deps/m4/ax_mpi.m4 +++ b/ac/deps/m4/ax_mpi.m4 @@ -67,7 +67,7 @@ AU_ALIAS([ACX_MPI], [AX_MPI]) AC_DEFUN([AX_MPI], [ -AC_PREREQ(2.50) dnl for AC_LANG_CASE +AC_PREREQ([2.50]) dnl for AC_LANG_CASE AC_LANG_CASE([C], [ AC_REQUIRE([AC_PROG_CC]) @@ -135,16 +135,16 @@ if test x = x"$MPILIBS"; then AC_CHECK_LIB(mpich, MPI_Init, [MPILIBS="-lmpich"]) fi -dnl We have to use AC_TRY_COMPILE and not AC_CHECK_HEADER because the +dnl We have to use AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[]])],[],[]) and not AC_CHECK_HEADER because the dnl latter uses $CPP, not $CC (which may be mpicc). AC_LANG_CASE([C], [if test x != x"$MPILIBS"; then AC_MSG_CHECKING([for mpi.h]) - AC_TRY_COMPILE([#include ],[],[AC_MSG_RESULT(yes)], [MPILIBS="" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[]])],[AC_MSG_RESULT(yes)],[MPILIBS="" AC_MSG_RESULT(no)]) fi], [C++], [if test x != x"$MPILIBS"; then AC_MSG_CHECKING([for mpi.h]) - AC_TRY_COMPILE([#include ],[],[AC_MSG_RESULT(yes)], [MPILIBS="" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[]])],[AC_MSG_RESULT(yes)],[MPILIBS="" AC_MSG_RESULT(no)]) fi], [Fortran 77], [if test x != x"$MPILIBS"; then diff --git a/ac/m4/ax_fc_check_lib.m4 b/ac/m4/ax_fc_check_lib.m4 index c0accab6cd..a7f848cd60 100644 --- a/ac/m4/ax_fc_check_lib.m4 +++ b/ac/m4/ax_fc_check_lib.m4 @@ -18,7 +18,7 @@ dnl library with different -L flags, or perhaps other ld configurations. dnl dnl Results are cached in the ax_fc_cv_lib_LIBRARY_FUNCTION variable. dnl -AC_DEFUN([AX_FC_CHECK_LIB],[dnl +AC_DEFUN([AX_FC_CHECK_LIB],[ AS_VAR_PUSHDEF([ax_fc_Lib], [ax_fc_cv_lib_$1_$2]) m4_ifval([$6], [ax_fc_lib_msg_LDFLAGS=" with $6"], @@ -29,14 +29,15 @@ AC_DEFUN([AX_FC_CHECK_LIB],[dnl LDFLAGS="$6 $LDFLAGS" ax_fc_check_lib_save_LIBS=$LIBS LIBS="-l$1 $7 $LIBS" - AS_IF([test -n $3], + AS_IF([test -n "$3"], [ax_fc_use_mod="use $3"], [ax_fc_use_mod=""]) - AC_LINK_IFELSE([ - AC_LANG_PROGRAM([], [dnl + AC_LINK_IFELSE([dnl +dnl Begin 7-column code block +AC_LANG_PROGRAM([], [dnl $ax_fc_use_mod - call $2]dnl - ) + call $2])dnl +dnl End code block ], [AS_VAR_SET([ax_fc_Lib], [yes])], [AS_VAR_SET([ax_fc_Lib], [no])] diff --git a/ac/m4/ax_mpi.m4 b/ac/m4/ax_mpi.m4 index ecce2e141a..3d9966a19d 100644 --- a/ac/m4/ax_mpi.m4 +++ b/ac/m4/ax_mpi.m4 @@ -67,7 +67,7 @@ AU_ALIAS([ACX_MPI], [AX_MPI]) AC_DEFUN([AX_MPI], [ -AC_PREREQ(2.50) dnl for AC_LANG_CASE +AC_PREREQ([2.50]) dnl for AC_LANG_CASE AC_LANG_CASE([C], [ AC_REQUIRE([AC_PROG_CC]) @@ -135,16 +135,16 @@ if test x = x"$MPILIBS"; then AC_CHECK_LIB(mpich, MPI_Init, [MPILIBS="-lmpich"]) fi -dnl We have to use AC_TRY_COMPILE and not AC_CHECK_HEADER because the +dnl We have to use AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[]])],[],[]) and not AC_CHECK_HEADER because the dnl latter uses $CPP, not $CC (which may be mpicc). 
AC_LANG_CASE([C], [if test x != x"$MPILIBS"; then AC_MSG_CHECKING([for mpi.h]) - AC_TRY_COMPILE([#include ],[],[AC_MSG_RESULT(yes)], [MPILIBS="" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[]])],[AC_MSG_RESULT(yes)],[MPILIBS="" AC_MSG_RESULT(no)]) fi], [C++], [if test x != x"$MPILIBS"; then AC_MSG_CHECKING([for mpi.h]) - AC_TRY_COMPILE([#include ],[],[AC_MSG_RESULT(yes)], [MPILIBS="" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[]])],[AC_MSG_RESULT(yes)],[MPILIBS="" AC_MSG_RESULT(no)]) fi], [Fortran 77], [if test x != x"$MPILIBS"; then diff --git a/config_src/CMakeLists.txt b/config_src/CMakeLists.txt index 3d966a409e..072ecf5cfc 100644 --- a/config_src/CMakeLists.txt +++ b/config_src/CMakeLists.txt @@ -1,17 +1,35 @@ list ( APPEND _files -config_src/dynamic/MOM_memory.h +config_src/memory/dynamic_nonsymmetric/MOM_memory.h config_src/external/ODA_hooks/kdtree.f90 +config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 +config_src/drivers/FMS_cap/ocean_model_MOM.F90 +config_src/drivers/solo_driver/atmos_ocean_fluxes.F90 +config_src/drivers/solo_driver/MESO_surface_forcing.F90 +config_src/drivers/solo_driver/MOM_driver.F90 +config_src/drivers/solo_driver/MOM_surface_forcing.F90 +config_src/drivers/solo_driver/user_surface_forcing.F90 +config_src/external/drifters/MOM_particles.F90 +config_src/external/drifters/MOM_particles_types.F90 config_src/external/ODA_hooks/ocean_da_core.F90 config_src/external/ODA_hooks/ocean_da_types.F90 config_src/external/ODA_hooks/write_ocean_obs.F90 -config_src/solo_driver/atmos_ocean_fluxes.F90 -config_src/solo_driver/MESO_surface_forcing.F90 -config_src/solo_driver/MOM_surface_forcing.F90 -config_src/solo_driver/user_surface_forcing.F90 +config_src/infra/FMS1/MOM_coms_infra.F90 +config_src/infra/FMS1/MOM_constants.F90 +config_src/infra/FMS1/MOM_couplertype_infra.F90 +config_src/infra/FMS1/MOM_cpu_clock_infra.F90 +config_src/infra/FMS1/MOM_data_override_infra.F90 +config_src/infra/FMS1/MOM_diag_manager_infra.F90 +config_src/infra/FMS1/MOM_domain_infra.F90 +config_src/infra/FMS1/MOM_ensemble_manager_infra.F90 +config_src/infra/FMS1/MOM_error_infra.F90 +config_src/infra/FMS1/MOM_interp_infra.F90 +config_src/infra/FMS1/MOM_io_infra.F90 +config_src/infra/FMS1/MOM_time_manager.F90 ) if ( NOT ENABLE_OCEAN_BGC ) list ( APPEND _files + config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 config_src/external/GFDL_ocean_BGC/generic_tracer.F90 config_src/external/GFDL_ocean_BGC/generic_tracer_utils.F90 ) diff --git a/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 similarity index 93% rename from config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 rename to config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 index 7075fb7c10..6479549eb7 100644 --- a/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 +++ b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 @@ -5,10 +5,14 @@ module MOM_surface_forcing_gfdl !#CTRL# use MOM_controlled_forcing, only : apply_ctrl_forcing, register_ctrl_forcing_restarts !#CTRL# use MOM_controlled_forcing, only : controlled_forcing_init, controlled_forcing_end !#CTRL# use MOM_controlled_forcing, only : ctrl_forcing_CS -use MOM_coms, only : reproducing_sum +use MOM_coms, only : reproducing_sum, field_chksum use MOM_constants, only : hlv, hlf +use MOM_coupler_types, only : coupler_2d_bc_type, coupler_type_write_chksums +use MOM_coupler_types, only : coupler_type_initialized, coupler_type_spawn +use MOM_coupler_types, only : coupler_type_copy_data use 
MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end use MOM_cpu_clock, only : CLOCK_SUBCOMPONENT +use MOM_data_override, only : data_override_init, data_override use MOM_diag_mediator, only : diag_ctrl, safe_alloc_ptr, time_type use MOM_domains, only : pass_vector, pass_var, fill_symmetric_edges use MOM_domains, only : AGRID, BGRID_NE, CGRID_NE, To_All @@ -21,7 +25,10 @@ module MOM_surface_forcing_gfdl use MOM_forcing_type, only : allocate_mech_forcing, deallocate_mech_forcing use MOM_get_input, only : Get_MOM_Input, directories use MOM_grid, only : ocean_grid_type +use MOM_interpolate, only : init_external_field, time_interp_external +use MOM_interpolate, only : time_interp_external_init use MOM_io, only : slasher, write_version_number, MOM_read_data +use MOM_io, only : stdout_if_root use MOM_restart, only : register_restart_field, restart_init, MOM_restart_CS use MOM_restart, only : restart_init_end, save_restart, restore_state use MOM_string_functions, only : uppercase @@ -30,15 +37,7 @@ module MOM_surface_forcing_gfdl use MOM_variables, only : surface use user_revise_forcing, only : user_alter_forcing, user_revise_forcing_init use user_revise_forcing, only : user_revise_forcing_CS - -use coupler_types_mod, only : coupler_2d_bc_type, coupler_type_write_chksums -use coupler_types_mod, only : coupler_type_initialized, coupler_type_spawn -use coupler_types_mod, only : coupler_type_copy_data -use data_override_mod, only : data_override_init, data_override -use fms_mod, only : stdout -use mpp_mod, only : mpp_chksum -use time_interp_external_mod, only : init_external_field, time_interp_external -use time_interp_external_mod, only : time_interp_external_init +use iso_fortran_env, only : int64 implicit none ; private @@ -112,6 +111,8 @@ module MOM_surface_forcing_gfdl logical :: restore_temp !< If true, the coupled MOM driver adds a term to restore sea !! surface temperature to a specified value. real :: Flux_const !< Piston velocity for surface restoring [Z T-1 ~> m s-1] + real :: Flux_const_salt !< Piston velocity for surface salt restoring [Z T-1 ~> m s-1] + real :: Flux_const_temp !< Piston velocity for surface temp restoring [Z T-1 ~> m s-1] logical :: salt_restore_as_sflux !< If true, SSS restore as salt flux instead of water flux logical :: adjust_net_srestore_to_zero !< Adjust srestore to zero (for both salt_flux or vprec) logical :: adjust_net_srestore_by_scaling !< Adjust srestore w/o moving zero contour @@ -292,22 +293,18 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, call safe_alloc_ptr(fluxes%salt_flux,isd,ied,jsd,jed) call safe_alloc_ptr(fluxes%salt_flux_in,isd,ied,jsd,jed) - call safe_alloc_ptr(fluxes%salt_flux_added,isd,ied,jsd,jed) call safe_alloc_ptr(fluxes%TKE_tidal,isd,ied,jsd,jed) call safe_alloc_ptr(fluxes%ustar_tidal,isd,ied,jsd,jed) - if (CS%allow_flux_adjustments) then - call safe_alloc_ptr(fluxes%heat_added,isd,ied,jsd,jed) - call safe_alloc_ptr(fluxes%salt_flux_added,isd,ied,jsd,jed) - endif + call safe_alloc_ptr(fluxes%heat_added,isd,ied,jsd,jed) + call safe_alloc_ptr(fluxes%salt_flux_added,isd,ied,jsd,jed) do j=js-2,je+2 ; do i=is-2,ie+2 fluxes%TKE_tidal(i,j) = CS%TKE_tidal(i,j) fluxes%ustar_tidal(i,j) = CS%ustar_tidal(i,j) enddo ; enddo - if (CS%restore_temp) call safe_alloc_ptr(fluxes%heat_added,isd,ied,jsd,jed) endif ! 
endif for allocation and initialization @@ -319,8 +316,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, if ((.not.coupler_type_initialized(fluxes%tr_fluxes)) .and. & coupler_type_initialized(IOB%fluxes)) & - call coupler_type_spawn(IOB%fluxes, fluxes%tr_fluxes, & - (/is,is,ie,ie/), (/js,js,je,je/)) + call coupler_type_spawn(IOB%fluxes, fluxes%tr_fluxes, (/is,is,ie,ie/), (/js,js,je,je/)) ! It might prove valuable to use the same array extents as the rest of the ! ocean model, rather than using haloless arrays, in which case the last line ! would be: ( (/isd,is,ie,ied/), (/jsd,js,je,jed/)) @@ -338,10 +334,8 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, fluxes%fluxes_used = .false. fluxes%dt_buoy_accum = US%s_to_T*valid_time - if (CS%allow_flux_adjustments) then - fluxes%heat_added(:,:) = 0.0 - fluxes%salt_flux_added(:,:) = 0.0 - endif + fluxes%heat_added(:,:) = 0.0 + fluxes%salt_flux_added(:,:) = 0.0 do j=js,je ; do i=is,ie fluxes%salt_flux(i,j) = 0.0 @@ -350,7 +344,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! Salinity restoring logic if (CS%restore_salt) then - call time_interp_external(CS%id_srestore,Time,data_restore) + call time_interp_external(CS%id_srestore, Time, data_restore) ! open_ocn_mask indicates where to restore salinity (1 means restore, 0 does not) open_ocn_mask(:,:) = 1.0 if (CS%mask_srestore_under_ice) then ! Do not restore under sea-ice @@ -362,7 +356,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, do j=js,je ; do i=is,ie delta_sss = data_restore(i,j)- sfc_state%SSS(i,j) delta_sss = sign(1.0,delta_sss)*min(abs(delta_sss),CS%max_delta_srestore) - fluxes%salt_flux(i,j) = 1.e-3*G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const)* & + fluxes%salt_flux(i,j) = 1.e-3*G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const_salt)* & (CS%basin_mask(i,j)*open_ocn_mask(i,j)*CS%srestore_mask(i,j)) *delta_sss ! R Z T-1 ~> kg Salt m-2 s-1 enddo ; enddo if (CS%adjust_net_srestore_to_zero) then @@ -372,9 +366,10 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, fluxes%saltFluxGlobalAdj = 0. else work_sum(is:ie,js:je) = US%L_to_m**2*US%RZ_T_to_kg_m2s * & - G%areaT(is:ie,js:je)*fluxes%salt_flux(is:ie,js:je) + G%areaT(is:ie,js:je)*fluxes%salt_flux(is:ie,js:je) * G%mask2dT(is:ie,js:je) fluxes%saltFluxGlobalAdj = reproducing_sum(work_sum(:,:), isr,ier, jsr,jer)/CS%area_surf - fluxes%salt_flux(is:ie,js:je) = fluxes%salt_flux(is:ie,js:je) - kg_m2_s_conversion * fluxes%saltFluxGlobalAdj + fluxes%salt_flux(is:ie,js:je) = fluxes%salt_flux(is:ie,js:je) - & + kg_m2_s_conversion * fluxes%saltFluxGlobalAdj * G%mask2dT(is:ie,js:je) endif endif fluxes%salt_flux_added(is:ie,js:je) = fluxes%salt_flux(is:ie,js:je) ! Diagnostic @@ -384,7 +379,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, delta_sss = sfc_state%SSS(i,j) - data_restore(i,j) delta_sss = sign(1.0,delta_sss)*min(abs(delta_sss),CS%max_delta_srestore) fluxes%vprec(i,j) = (CS%basin_mask(i,j)*open_ocn_mask(i,j)*CS%srestore_mask(i,j))* & - (CS%Rho0*CS%Flux_const) * & + (CS%Rho0*CS%Flux_const_salt) * & delta_sss / (0.5*(sfc_state%SSS(i,j) + data_restore(i,j))) endif enddo ; enddo @@ -407,12 +402,12 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! 
SST restoring logic if (CS%restore_temp) then - call time_interp_external(CS%id_trestore,Time,data_restore) + call time_interp_external(CS%id_trestore, Time, data_restore) do j=js,je ; do i=is,ie delta_sst = data_restore(i,j)- sfc_state%SST(i,j) delta_sst = sign(1.0,delta_sst)*min(abs(delta_sst),CS%max_delta_trestore) fluxes%heat_added(i,j) = G%mask2dT(i,j) * CS%trestore_mask(i,j) * & - rhoXcp * delta_sst * CS%Flux_const ! W m-2 + rhoXcp * delta_sst * CS%Flux_const_temp ! W m-2 enddo ; enddo endif @@ -1114,35 +1109,34 @@ subroutine apply_flux_adjustments(G, US, CS, Time, fluxes) type(forcing), intent(inout) :: fluxes !< Surface fluxes structure ! Local variables - real, dimension(SZI_(G),SZJ_(G)) :: temp_at_h ! Various fluxes at h points [W m-2] or [kg m-2 s-1] + real, dimension(G%isc:G%iec,G%jsc:G%jec) :: temp_at_h ! Various fluxes at h points [W m-2] or [kg m-2 s-1] integer :: isc, iec, jsc, jec, i, j logical :: overrode_h isc = G%isc; iec = G%iec ; jsc = G%jsc; jec = G%jec - overrode_h = .false. - call data_override('OCN', 'hflx_adj', temp_at_h(isc:iec,jsc:jec), Time, override=overrode_h) + call data_override(G%Domain, 'hflx_adj', temp_at_h, Time, override=overrode_h, & + scale=US%W_m2_to_QRZ_T) if (overrode_h) then ; do j=jsc,jec ; do i=isc,iec - fluxes%heat_added(i,j) = fluxes%heat_added(i,j) + US%W_m2_to_QRZ_T*temp_at_h(i,j)* G%mask2dT(i,j) + fluxes%heat_added(i,j) = fluxes%heat_added(i,j) + temp_at_h(i,j)* G%mask2dT(i,j) enddo ; enddo ; endif ! Not needed? ! if (overrode_h) call pass_var(fluxes%heat_added, G%Domain) - overrode_h = .false. - call data_override('OCN', 'sflx_adj', temp_at_h(isc:iec,jsc:jec), Time, override=overrode_h) + call data_override(G%Domain, 'sflx_adj', temp_at_h, Time, override=overrode_h, & + scale=US%kg_m2s_to_RZ_T) if (overrode_h) then ; do j=jsc,jec ; do i=isc,iec - fluxes%salt_flux_added(i,j) = fluxes%salt_flux_added(i,j) + & - US%kg_m2s_to_RZ_T * temp_at_h(i,j)* G%mask2dT(i,j) + fluxes%salt_flux_added(i,j) = fluxes%salt_flux_added(i,j) + temp_at_h(i,j) * G%mask2dT(i,j) enddo ; enddo ; endif ! Not needed? ! if (overrode_h) call pass_var(fluxes%salt_flux_added, G%Domain) - overrode_h = .false. - call data_override('OCN', 'prcme_adj', temp_at_h(isc:iec,jsc:jec), Time, override=overrode_h) + call data_override(G%Domain, 'prcme_adj', temp_at_h, Time, override=overrode_h, & + scale=US%kg_m2s_to_RZ_T) if (overrode_h) then ; do j=jsc,jec ; do i=isc,iec - fluxes%vprec(i,j) = fluxes%vprec(i,j) + US%kg_m2s_to_RZ_T * temp_at_h(i,j)* G%mask2dT(i,j) + fluxes%vprec(i,j) = fluxes%vprec(i,j) + temp_at_h(i,j)* G%mask2dT(i,j) enddo ; enddo ; endif ! Not needed? ! if (overrode_h) call pass_var(fluxes%vprec, G%Domain) end subroutine apply_flux_adjustments @@ -1174,8 +1168,8 @@ subroutine apply_force_adjustments(G, US, CS, Time, forces) tempx_at_h(:,:) = 0.0 ; tempy_at_h(:,:) = 0.0 ! Either reads data or leaves contents unchanged overrode_x = .false. ; overrode_y = .false. - call data_override('OCN', 'taux_adj', tempx_at_h(isc:iec,jsc:jec), Time, override=overrode_x) - call data_override('OCN', 'tauy_adj', tempy_at_h(isc:iec,jsc:jec), Time, override=overrode_y) + call data_override(G%Domain, 'taux_adj', tempx_at_h, Time, override=overrode_x, scale=Pa_conversion) + call data_override(G%Domain, 'tauy_adj', tempy_at_h, Time, override=overrode_y, scale=Pa_conversion) if (overrode_x .or. overrode_y) then if (.not. (overrode_x .and. 
overrode_y)) call MOM_error(FATAL,"apply_flux_adjustments: "//& @@ -1190,8 +1184,8 @@ subroutine apply_force_adjustments(G, US, CS, Time, forces) if (rDlon > 0.) rDlon = 1. / rDlon cosA = dLonDx * rDlon sinA = dLonDy * rDlon - zonal_tau = Pa_conversion * tempx_at_h(i,j) - merid_tau = Pa_conversion * tempy_at_h(i,j) + zonal_tau = tempx_at_h(i,j) + merid_tau = tempy_at_h(i,j) tempx_at_h(i,j) = cosA * zonal_tau - sinA * merid_tau tempy_at_h(i,j) = sinA * zonal_tau + cosA * merid_tau enddo ; enddo @@ -1255,6 +1249,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) character(len=48) :: flnam character(len=240) :: basin_file integer :: i, j, isd, ied, jsd, jed + real :: unscaled_fluxconst isd = G%isd ; ied = G%ied ; jsd = G%jsd ; jed = G%jed @@ -1378,9 +1373,14 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) call get_param(param_file, mdl, "FLUXCONST", CS%Flux_const, & "The constant that relates the restoring surface fluxes to the relative "//& "surface anomalies (akin to a piston velocity). Note the non-MKS units.", & - default=0.0, units="m day-1", scale=US%m_to_Z*US%T_to_s) + default=0.0, units="m day-1", scale=US%m_to_Z*US%T_to_s,unscaled=unscaled_fluxconst) + call get_param(param_file, mdl, "FLUXCONST_SALT", CS%Flux_const_salt, & + "The constant that relates the restoring surface salt fluxes to the relative "//& + "surface anomalies (akin to a piston velocity). Note the non-MKS units.", & + fail_if_missing=.false.,default=unscaled_fluxconst, units="m day-1", scale=US%m_to_Z*US%T_to_s) ! Convert CS%Flux_const from m day-1 to m s-1. CS%Flux_const = CS%Flux_const / 86400.0 + CS%Flux_const_salt = CS%Flux_const_salt / 86400.0 call get_param(param_file, mdl, "SALT_RESTORE_FILE", CS%salt_restore_file, & "A file in which to find the surface salinity to use for restoring.", & default="salt_restore.nc") @@ -1425,9 +1425,14 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) call get_param(param_file, mdl, "FLUXCONST", CS%Flux_const, & "The constant that relates the restoring surface fluxes to the relative "//& "surface anomalies (akin to a piston velocity). Note the non-MKS units.", & - default=0.0, units="m day-1", scale=US%m_to_Z*US%T_to_s) + default=0.0, units="m day-1", scale=US%m_to_Z*US%T_to_s,unscaled=unscaled_fluxconst) + call get_param(param_file, mdl, "FLUXCONST_TEMP", CS%Flux_const_temp, & + "The constant that relates the restoring surface temperature fluxes to the relative "//& + "surface anomalies (akin to a piston velocity). Note the non-MKS units.", & + fail_if_missing=.false.,default=unscaled_fluxconst, units="m day-1", scale=US%m_to_Z*US%T_to_s) ! Convert CS%Flux_const from m day-1 to m s-1. CS%Flux_const = CS%Flux_const / 86400.0 + CS%Flux_const_temp = CS%Flux_const_temp / 86400.0 call get_param(param_file, mdl, "SST_RESTORE_FILE", CS%temp_restore_file, & "A file in which to find the surface temperature to use for restoring.", & default="temp_restore.nc") @@ -1486,7 +1491,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) enddo ; enddo endif - call time_interp_external_init + call time_interp_external_init() ! Optionally read a x-y gustiness field in place of a global constant. call get_param(param_file, mdl, "READ_GUST_2D", CS%read_gust_2d, & @@ -1554,11 +1559,11 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) "above land points (i.e. G%mask2dT = 0).", default=.false., & debuggingParam=.true.) 
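The split restoring constants introduced above stay backward compatible through get_param's `unscaled` output: FLUXCONST is read first, and its unscaled value becomes the default for FLUXCONST_SALT and FLUXCONST_TEMP, so parameter files that only set FLUXCONST behave as before. A condensed sketch of that pattern, with the description strings elided as "...":

! If FLUXCONST_SALT is absent from the parameter file it inherits the unscaled
! FLUXCONST value; both constants are then converted from m day-1 to the
! internal per-second units, as in surface_forcing_init above.
call get_param(param_file, mdl, "FLUXCONST", CS%Flux_const, "...", &
               default=0.0, units="m day-1", scale=US%m_to_Z*US%T_to_s, &
               unscaled=unscaled_fluxconst)
call get_param(param_file, mdl, "FLUXCONST_SALT", CS%Flux_const_salt, "...", &
               default=unscaled_fluxconst, units="m day-1", scale=US%m_to_Z*US%T_to_s)
CS%Flux_const      = CS%Flux_const / 86400.0
CS%Flux_const_salt = CS%Flux_const_salt / 86400.0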
- call data_override_init(Ocean_domain_in=G%Domain%mpp_domain) + call data_override_init(G%Domain) if (CS%restore_salt) then salt_file = trim(CS%inputdir) // trim(CS%salt_restore_file) - CS%id_srestore = init_external_field(salt_file, CS%salt_restore_var_name, domain=G%Domain%mpp_domain) + CS%id_srestore = init_external_field(salt_file, CS%salt_restore_var_name, MOM_domain=G%Domain) call safe_alloc_ptr(CS%srestore_mask,isd,ied,jsd,jed); CS%srestore_mask(:,:) = 1.0 if (CS%mask_srestore) then ! read a 2-d file containing a mask for restoring fluxes flnam = trim(CS%inputdir) // 'salt_restore_mask.nc' @@ -1568,7 +1573,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) if (CS%restore_temp) then temp_file = trim(CS%inputdir) // trim(CS%temp_restore_file) - CS%id_trestore = init_external_field(temp_file, CS%temp_restore_var_name, domain=G%Domain%mpp_domain) + CS%id_trestore = init_external_field(temp_file, CS%temp_restore_var_name, MOM_domain=G%Domain) call safe_alloc_ptr(CS%trestore_mask,isd,ied,jsd,jed); CS%trestore_mask(:,:) = 1.0 if (CS%mask_trestore) then ! read a 2-d file containing a mask for restoring fluxes flnam = trim(CS%inputdir) // 'temp_restore_mask.nc' @@ -1627,32 +1632,39 @@ subroutine ice_ocn_bnd_type_chksum(id, timestep, iobt) type(ice_ocean_boundary_type), & intent(in) :: iobt !< An ice-ocean boundary type with fluxes to drive the !! ocean in a coupled model whose checksums are reported - integer :: n,m, outunit - - outunit = stdout() - - write(outunit,*) "BEGIN CHECKSUM(ice_ocean_boundary_type):: ", id, timestep - write(outunit,100) 'iobt%u_flux ', mpp_chksum( iobt%u_flux ) - write(outunit,100) 'iobt%v_flux ', mpp_chksum( iobt%v_flux ) - write(outunit,100) 'iobt%t_flux ', mpp_chksum( iobt%t_flux ) - write(outunit,100) 'iobt%q_flux ', mpp_chksum( iobt%q_flux ) - write(outunit,100) 'iobt%salt_flux ', mpp_chksum( iobt%salt_flux ) - write(outunit,100) 'iobt%lw_flux ', mpp_chksum( iobt%lw_flux ) - write(outunit,100) 'iobt%sw_flux_vis_dir', mpp_chksum( iobt%sw_flux_vis_dir) - write(outunit,100) 'iobt%sw_flux_vis_dif', mpp_chksum( iobt%sw_flux_vis_dif) - write(outunit,100) 'iobt%sw_flux_nir_dir', mpp_chksum( iobt%sw_flux_nir_dir) - write(outunit,100) 'iobt%sw_flux_nir_dif', mpp_chksum( iobt%sw_flux_nir_dif) - write(outunit,100) 'iobt%lprec ', mpp_chksum( iobt%lprec ) - write(outunit,100) 'iobt%fprec ', mpp_chksum( iobt%fprec ) - write(outunit,100) 'iobt%runoff ', mpp_chksum( iobt%runoff ) - write(outunit,100) 'iobt%calving ', mpp_chksum( iobt%calving ) - write(outunit,100) 'iobt%p ', mpp_chksum( iobt%p ) - if (associated(iobt%ustar_berg)) & - write(outunit,100) 'iobt%ustar_berg ', mpp_chksum( iobt%ustar_berg ) - if (associated(iobt%area_berg)) & - write(outunit,100) 'iobt%area_berg ', mpp_chksum( iobt%area_berg ) - if (associated(iobt%mass_berg)) & - write(outunit,100) 'iobt%mass_berg ', mpp_chksum( iobt%mass_berg ) + ! Local variables + integer(kind=int64) :: chks ! A checksum for the field + logical :: root ! True only on the root PE + integer :: outunit ! 
The output unit to write to + + root = is_root_pe() + outunit = stdout_if_root() + + if (root) write(outunit,*) "BEGIN CHECKSUM(ice_ocean_boundary_type):: ", id, timestep + chks = field_chksum( iobt%u_flux ) ; if (root) write(outunit,100) 'iobt%u_flux ', chks + chks = field_chksum( iobt%v_flux ) ; if (root) write(outunit,100) 'iobt%v_flux ', chks + chks = field_chksum( iobt%t_flux ) ; if (root) write(outunit,100) 'iobt%t_flux ', chks + chks = field_chksum( iobt%q_flux ) ; if (root) write(outunit,100) 'iobt%q_flux ', chks + chks = field_chksum( iobt%salt_flux ) ; if (root) write(outunit,100) 'iobt%salt_flux ', chks + chks = field_chksum( iobt%lw_flux ) ; if (root) write(outunit,100) 'iobt%lw_flux ', chks + chks = field_chksum( iobt%sw_flux_vis_dir) ; if (root) write(outunit,100) 'iobt%sw_flux_vis_dir', chks + chks = field_chksum( iobt%sw_flux_vis_dif) ; if (root) write(outunit,100) 'iobt%sw_flux_vis_dif', chks + chks = field_chksum( iobt%sw_flux_nir_dir) ; if (root) write(outunit,100) 'iobt%sw_flux_nir_dir', chks + chks = field_chksum( iobt%sw_flux_nir_dif) ; if (root) write(outunit,100) 'iobt%sw_flux_nir_dif', chks + chks = field_chksum( iobt%lprec ) ; if (root) write(outunit,100) 'iobt%lprec ', chks + chks = field_chksum( iobt%fprec ) ; if (root) write(outunit,100) 'iobt%fprec ', chks + chks = field_chksum( iobt%runoff ) ; if (root) write(outunit,100) 'iobt%runoff ', chks + chks = field_chksum( iobt%calving ) ; if (root) write(outunit,100) 'iobt%calving ', chks + chks = field_chksum( iobt%p ) ; if (root) write(outunit,100) 'iobt%p ', chks + if (associated(iobt%ustar_berg)) then + chks = field_chksum( iobt%ustar_berg ) ; if (root) write(outunit,100) 'iobt%ustar_berg ', chks + endif + if (associated(iobt%area_berg)) then + chks = field_chksum( iobt%area_berg ) ; if (root) write(outunit,100) 'iobt%area_berg ', chks + endif + if (associated(iobt%mass_berg)) then + chks = field_chksum( iobt%mass_berg ) ; if (root) write(outunit,100) 'iobt%mass_berg ', chks + endif 100 FORMAT(" CHECKSUM::",A20," = ",Z20) call coupler_type_write_chksums(iobt%fluxes, outunit, 'iobt%') @@ -1664,7 +1676,8 @@ subroutine check_mask_val_consistency(val, mask, i, j, varname, G) real, intent(in) :: val !< value of flux/variable passed by IOB real, intent(in) :: mask !< value of ocean mask - integer, intent(in) :: i, j !< model grid cell indices + integer, intent(in) :: i !< model grid cell indices + integer, intent(in) :: j !< model grid cell indices character(len=*), intent(in) :: varname !< variable name type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure ! 
Local variables diff --git a/config_src/coupled_driver/ocean_model_MOM.F90 b/config_src/drivers/FMS_cap/ocean_model_MOM.F90 similarity index 90% rename from config_src/coupled_driver/ocean_model_MOM.F90 rename to config_src/drivers/FMS_cap/ocean_model_MOM.F90 index 082099158c..50ea6c943d 100644 --- a/config_src/coupled_driver/ocean_model_MOM.F90 +++ b/config_src/drivers/FMS_cap/ocean_model_MOM.F90 @@ -15,13 +15,19 @@ module ocean_model_mod use MOM, only : extract_surface_state, allocate_surface_state, finish_MOM_initialization use MOM, only : get_MOM_state_elements, MOM_state_is_synchronized use MOM, only : get_ocean_stocks, step_offline +use MOM_coms, only : field_chksum use MOM_constants, only : CELSIUS_KELVIN_OFFSET, hlf +use MOM_coupler_types, only : coupler_1d_bc_type, coupler_2d_bc_type +use MOM_coupler_types, only : coupler_type_spawn, coupler_type_write_chksums +use MOM_coupler_types, only : coupler_type_initialized, coupler_type_copy_data +use MOM_coupler_types, only : coupler_type_set_diags, coupler_type_send_data use MOM_diag_mediator, only : diag_ctrl, enable_averaging, disable_averaging use MOM_diag_mediator, only : diag_mediator_close_registration, diag_mediator_end -use MOM_domains, only : pass_var, pass_vector, AGRID, BGRID_NE, CGRID_NE -use MOM_domains, only : TO_ALL, Omit_Corners +use MOM_domains, only : MOM_domain_type, domain2d, clone_MOM_domain, get_domain_extent +use MOM_domains, only : pass_var, pass_vector, AGRID, BGRID_NE, CGRID_NE, TO_ALL, Omit_Corners use MOM_error_handler, only : MOM_error, MOM_mesg, FATAL, WARNING, is_root_pe use MOM_error_handler, only : callTree_enter, callTree_leave +use MOM_EOS, only : gsw_sp_from_sr, gsw_pt_from_ct use MOM_file_parser, only : get_param, log_version, close_param_file, param_file_type use MOM_forcing_type, only : forcing, mech_forcing, allocate_forcing_type use MOM_forcing_type, only : fluxes_accumulate, get_net_mass_forcing @@ -29,7 +35,7 @@ module ocean_model_mod use MOM_forcing_type, only : forcing_diagnostics, mech_forcing_diags use MOM_get_input, only : Get_MOM_Input, directories use MOM_grid, only : ocean_grid_type -use MOM_io, only : close_file, file_exists, read_data, write_version_number +use MOM_io, only : write_version_number, stdout_if_root use MOM_marine_ice, only : iceberg_forces, iceberg_fluxes, marine_ice_init, marine_ice_CS use MOM_restart, only : MOM_restart_CS, save_restart use MOM_string_functions, only : uppercase @@ -48,18 +54,9 @@ module ocean_model_mod use MOM_verticalGrid, only : verticalGrid_type use MOM_ice_shelf, only : initialize_ice_shelf, shelf_calc_flux, ice_shelf_CS use MOM_ice_shelf, only : add_shelf_forces, ice_shelf_end, ice_shelf_save_restart -use coupler_types_mod, only : coupler_1d_bc_type, coupler_2d_bc_type -use coupler_types_mod, only : coupler_type_spawn, coupler_type_write_chksums -use coupler_types_mod, only : coupler_type_initialized, coupler_type_copy_data -use coupler_types_mod, only : coupler_type_set_diags, coupler_type_send_data -use mpp_domains_mod, only : domain2d, mpp_get_layout, mpp_get_global_domain -use mpp_domains_mod, only : mpp_define_domains, mpp_get_compute_domain, mpp_get_data_domain -use atmos_ocean_fluxes_mod, only : aof_set_coupler_flux -use fms_mod, only : stdout -use mpp_mod, only : mpp_chksum -use MOM_EOS, only : gsw_sp_from_sr, gsw_pt_from_ct use MOM_wave_interface, only: wave_parameters_CS, MOM_wave_interface_init -use MOM_wave_interface, only: MOM_wave_interface_init_lite, Update_Surface_Waves +use MOM_wave_interface, only: Update_Surface_Waves +use 
iso_fortran_env, only : int64 #include @@ -107,7 +104,7 @@ module ocean_model_mod !! points of the two velocity components. Valid entries !! include AGRID, BGRID_NE, CGRID_NE, BGRID_SW, and CGRID_SW, !! corresponding to the community-standard Arakawa notation. - !! (These are named integers taken from mpp_parameter_mod.) + !! (These are named integers taken from the MOM_domains module.) !! Following MOM5, stagger is BGRID_NE by default when the !! ocean is initialized, but here it is set to -999 so that !! a global max across ocean and non-ocean processors can be @@ -121,6 +118,8 @@ module ocean_model_mod !! i.e. dzt(1) + eta_t + patm/rho0/grav [m] frazil =>NULL(), & !< Accumulated heating [J m-2] from frazil !! formation in the ocean. + melt_potential => NULL(), & !< Instantaneous heat used to melt sea ice [J m-2]. + OBLD => NULL(), & !< Ocean boundary layer depth [m]. area => NULL() !< cell area of the ocean surface [m2]. type(coupler_2d_bc_type) :: fields !< A structure that may contain named !! arrays of tracer-related surface fields. @@ -179,13 +178,13 @@ module ocean_model_mod !! processes before time stepping the dynamics. type(directories) :: dirs !< A structure containing several relevant directory paths. - type(mech_forcing) :: forces !< A structure with the driving mechanical surface forces - type(forcing) :: fluxes !< A structure containing pointers to - !! the thermodynamic ocean forcing fields. - type(forcing) :: flux_tmp !< A secondary structure containing pointers to the + type(mech_forcing) :: forces !< A structure with the driving mechanical surface forces + type(forcing) :: fluxes !< A structure containing pointers to + !! the thermodynamic ocean forcing fields. + type(forcing) :: flux_tmp !< A secondary structure containing pointers to the !! ocean forcing fields for when multiple coupled !! timesteps are taken per thermodynamic step. - type(surface) :: sfc_state !< A structure containing pointers to + type(surface) :: sfc_state !< A structure containing pointers to !! the ocean surface state fields. type(ocean_grid_type), pointer :: & grid => NULL() !< A pointer to a grid structure containing metrics @@ -196,8 +195,8 @@ module ocean_model_mod type(unit_scale_type), pointer :: & US => NULL() !< A pointer to a structure containing dimensional !! unit scaling factors. - type(MOM_control_struct), pointer :: & - MOM_CSp => NULL() !< A pointer to the MOM control structure + type(MOM_control_struct) :: MOM_CSp + !< MOM control structure type(ice_shelf_CS), pointer :: & Ice_shelf_CSp => NULL() !< A pointer to the control structure for the !! ice shelf model that couples with MOM6. This @@ -206,7 +205,7 @@ module ocean_model_mod marine_ice_CSp => NULL() !< A pointer to the control structure for the !! marine ice effects module. type(wave_parameters_cs), pointer :: & - Waves !< A structure containing pointers to the surface wave fields + Waves => NULL() !< A pointer to the surface wave control structure type(surface_forcing_CS), pointer :: & forcing_CSp => NULL() !< A pointer to the MOM forcing control structure type(MOM_restart_CS), pointer :: & @@ -267,6 +266,10 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, wind_stagger, gas endif allocate(OS) +! allocate(OS%fluxes) +! allocate(OS%forces) +! allocate(OS%flux_tmp) + OS%is_ocean_pe = Ocean_sfc%is_ocean_pe if (.not.OS%is_ocean_pe) return @@ -355,6 +358,7 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, wind_stagger, gas use_melt_pot=.false. 
endif + !allocate(OS%sfc_state) call allocate_surface_state(OS%sfc_state, OS%grid, use_temperature, do_integrals=.true., & gas_fields_ocn=gas_fields_ocn, use_meltpot=use_melt_pot) @@ -378,20 +382,12 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, wind_stagger, gas call get_param(param_file, mdl, "USE_WAVES", OS%Use_Waves, & "If true, enables surface wave modules.", default=.false.) - if (OS%use_waves) then - call MOM_wave_interface_init(OS%Time, OS%grid, OS%GV, OS%US, param_file, OS%Waves, OS%diag) - else - call MOM_wave_interface_init_lite(param_file) - endif + ! MOM_wave_interface_init is called regardless of the value of USE_WAVES because + ! it also initializes statistical waves. + call MOM_wave_interface_init(OS%Time, OS%grid, OS%GV, OS%US, param_file, OS%Waves, OS%diag) - if (associated(OS%grid%Domain%maskmap)) then - call initialize_ocean_public_type(OS%grid%Domain%mpp_domain, Ocean_sfc, & - OS%diag, maskmap=OS%grid%Domain%maskmap, & - gas_fields_ocn=gas_fields_ocn) - else - call initialize_ocean_public_type(OS%grid%Domain%mpp_domain, Ocean_sfc, & - OS%diag, gas_fields_ocn=gas_fields_ocn) - endif + call initialize_ocean_public_type(OS%grid%Domain, Ocean_sfc, OS%diag, & + gas_fields_ocn=gas_fields_ocn) ! This call can only occur here if the coupler_bc_type variables have been ! initialized already using the information from gas_fields_ocn. @@ -506,8 +502,7 @@ subroutine update_ocean_model(Ice_ocean_boundary, OS, Ocean_sfc, time_start_upda (/is,is,ie,ie/), (/js,js,je,je/), as_needed=.true.) ! Translate Ice_ocean_boundary into fluxes and forces. - call mpp_get_compute_domain(Ocean_sfc%Domain, index_bnds(1), index_bnds(2), & - index_bnds(3), index_bnds(4)) + call get_domain_extent(Ocean_sfc%Domain, index_bnds(1), index_bnds(2), index_bnds(3), index_bnds(4)) if (do_dyn) then call convert_IOB_to_forces(Ice_ocean_boundary, OS%forces, index_bnds, OS%Time_dyn, OS%grid, OS%US, & @@ -726,7 +721,7 @@ end subroutine ocean_model_end subroutine ocean_model_save_restart(OS, Time, directory, filename_suffix) type(ocean_state_type), pointer :: OS !< A pointer to the structure containing the !! internal ocean state (in). - type(time_type), intent(in) :: Time !< The model time at this call, needed for mpp_write calls. + type(time_type), intent(in) :: Time !< The model time at this call, needed for writing files. character(len=*), optional, intent(in) :: directory !< An optional directory into which to !! write these restart files. character(len=*), optional, intent(in) :: filename_suffix !< An optional suffix (e.g., a time-stamp) @@ -758,16 +753,12 @@ subroutine ocean_model_save_restart(OS, Time, directory, filename_suffix) end subroutine ocean_model_save_restart !> Initialize the public ocean type -subroutine initialize_ocean_public_type(input_domain, Ocean_sfc, diag, maskmap, & - gas_fields_ocn) - type(domain2D), intent(in) :: input_domain !< The ocean model domain description +subroutine initialize_ocean_public_type(input_domain, Ocean_sfc, diag, gas_fields_ocn) + type(MOM_domain_type), intent(in) :: input_domain !< The ocean model domain description type(ocean_public_type), intent(inout) :: Ocean_sfc !< A structure containing various publicly - !! visible ocean surface properties after initialization, whose - !! elements are allocated here. - type(diag_ctrl), intent(in) :: diag !< A structure that regulates diagnsotic output - logical, dimension(:,:), & - optional, intent(in) :: maskmap !< A mask indicating which virtual processors - !! are actually in use. 
If missing, all are used. + !! visible ocean surface properties after + !! initialization, whose elements are allocated here. + type(diag_ctrl), intent(in) :: diag !< A structure that regulates diagnostic output type(coupler_1d_bc_type), & optional, intent(in) :: gas_fields_ocn !< If present, this type describes the !! ocean and surface-ice fields that will participate @@ -779,14 +770,9 @@ subroutine initialize_ocean_public_type(input_domain, Ocean_sfc, diag, maskmap, ! and have no halos. integer :: isc, iec, jsc, jec - call mpp_get_layout(input_domain,layout) - call mpp_get_global_domain(input_domain, xsize=xsz, ysize=ysz) - if (PRESENT(maskmap)) then - call mpp_define_domains((/1,xsz,1,ysz/),layout,Ocean_sfc%Domain, maskmap=maskmap) - else - call mpp_define_domains((/1,xsz,1,ysz/),layout,Ocean_sfc%Domain) - endif - call mpp_get_compute_domain(Ocean_sfc%Domain, isc, iec, jsc, jec) + call clone_MOM_domain(input_domain, Ocean_sfc%Domain, halo_size=0, symmetric=.false.) + + call get_domain_extent(Ocean_sfc%Domain, isc, iec, jsc, jec) allocate ( Ocean_sfc%t_surf (isc:iec,jsc:jec), & Ocean_sfc%s_surf (isc:iec,jsc:jec), & @@ -794,6 +780,8 @@ subroutine initialize_ocean_public_type(input_domain, Ocean_sfc, diag, maskmap, Ocean_sfc%v_surf (isc:iec,jsc:jec), & Ocean_sfc%sea_lev(isc:iec,jsc:jec), & Ocean_sfc%area (isc:iec,jsc:jec), & + Ocean_sfc%melt_potential(isc:iec,jsc:jec), & + Ocean_sfc%OBLD (isc:iec,jsc:jec), & Ocean_sfc%frazil (isc:iec,jsc:jec)) Ocean_sfc%t_surf(:,:) = 0.0 ! time averaged sst (Kelvin) passed to atmosphere/ice model @@ -802,6 +790,8 @@ subroutine initialize_ocean_public_type(input_domain, Ocean_sfc, diag, maskmap, Ocean_sfc%v_surf(:,:) = 0.0 ! time averaged v-current (m/sec) passed to atmosphere/ice models Ocean_sfc%sea_lev(:,:) = 0.0 ! time averaged thickness of top model grid cell (m) plus patm/rho0/grav Ocean_sfc%frazil(:,:) = 0.0 ! time accumulated frazil (J/m^2) passed to ice model + Ocean_sfc%melt_potential(:,:) = 0.0 ! time accumulated melt potential (J/m^2) passed to ice model + Ocean_sfc%OBLD(:,:) = 0.0 ! ocean boundary layer depth (m) Ocean_sfc%area(:,:) = 0.0 Ocean_sfc%axes = diag%axesT1%handles !diag axes to be used by coupler tracer flux diagnostics @@ -838,8 +828,7 @@ subroutine convert_state_to_ocean_type(sfc_state, Ocean_sfc, G, US, patm, press_ is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec call pass_vector(sfc_state%u, sfc_state%v, G%Domain) - call mpp_get_compute_domain(Ocean_sfc%Domain, isc_bnd, iec_bnd, & - jsc_bnd, jec_bnd) + call get_domain_extent(Ocean_sfc%Domain, isc_bnd, iec_bnd, jsc_bnd, jec_bnd) if (present(patm)) then ! Check that the inidicies in patm are (isc_bnd:iec_bnd,jsc_bnd:jec_bnd). 
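The rewritten initialize_ocean_public_type above no longer calls mpp_domains_mod directly; the coupler-facing domain is a haloless, non-symmetric clone of the model domain, and its extents come from get_domain_extent. Reduced to its essentials (call signatures as used above, allocation list shortened):

! Framework-neutral domain setup for the public surface type: clone the model
! domain without halos, query its compute extents, then allocate the export
! fields (including the new melt_potential and OBLD arrays) on those extents.
call clone_MOM_domain(input_domain, Ocean_sfc%Domain, halo_size=0, symmetric=.false.)
call get_domain_extent(Ocean_sfc%Domain, isc, iec, jsc, jec)
allocate(Ocean_sfc%t_surf(isc:iec,jsc:jec), Ocean_sfc%OBLD(isc:iec,jsc:jec), &
         Ocean_sfc%melt_potential(isc:iec,jsc:jec))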
if (.not.present(press_to_z)) call MOM_error(FATAL, & @@ -887,6 +876,18 @@ subroutine convert_state_to_ocean_type(sfc_state, Ocean_sfc, G, US, patm, press_ enddo ; enddo endif + if (allocated(sfc_state%melt_potential)) then + do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd + Ocean_sfc%melt_potential(i,j) = US%Q_to_J_kg*US%RZ_to_kg_m2 * sfc_state%melt_potential(i+i0,j+j0) + enddo ; enddo + endif + + if (allocated(sfc_state%Hml)) then + do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd + Ocean_sfc%OBLD(i,j) = US%Z_to_m * sfc_state%Hml(i+i0,j+j0) + enddo ; enddo + endif + if (Ocean_sfc%stagger == AGRID) then do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd Ocean_sfc%u_surf(i,j) = G%mask2dT(i+i0,j+j0) * US%L_T_to_m_s * & @@ -1021,51 +1022,52 @@ subroutine ocean_model_data2D_get(OS, Ocean, name, array2D, isc, jsc) integer , intent(in) :: isc !< The starting i-index of array2D integer , intent(in) :: jsc !< The starting j-index of array2D - integer :: g_isc, g_iec, g_jsc, g_jec,g_isd, g_ied, g_jsd, g_jed, i, j + integer :: g_isc, g_iec, g_jsc, g_jec, g_isd, g_ied, g_jsd, g_jed, i, j if (.not.associated(OS)) return if (.not.OS%is_ocean_pe) return -! The problem is %areaT is on MOM domain but Ice_Ocean_Boundary%... is on mpp domain. -! We want to return the MOM data on the mpp (compute) domain -! Get MOM domain extents - call mpp_get_compute_domain(OS%grid%Domain%mpp_domain, g_isc, g_iec, g_jsc, g_jec) - call mpp_get_data_domain (OS%grid%Domain%mpp_domain, g_isd, g_ied, g_jsd, g_jed) + ! The problem is that %areaT is on MOM domain but Ice_Ocean_Boundary%... is on a haloless domain. + ! We want to return the MOM data on the haloless (compute) domain + call get_domain_extent(OS%grid%Domain, g_isc, g_iec, g_jsc, g_jec, g_isd, g_ied, g_jsd, g_jed) g_isc = g_isc-g_isd+1 ; g_iec = g_iec-g_isd+1 ; g_jsc = g_jsc-g_jsd+1 ; g_jec = g_jec-g_jsd+1 - select case(name) - case('area') - array2D(isc:,jsc:) = OS%US%L_to_m**2*OS%grid%areaT(g_isc:g_iec,g_jsc:g_jec) - case('mask') - array2D(isc:,jsc:) = OS%grid%mask2dT(g_isc:g_iec,g_jsc:g_jec) + case('area') + array2D(isc:,jsc:) = OS%US%L_to_m**2*OS%grid%areaT(g_isc:g_iec,g_jsc:g_jec) + case('mask') + array2D(isc:,jsc:) = OS%grid%mask2dT(g_isc:g_iec,g_jsc:g_jec) !OR same result ! do j=g_jsc,g_jec ; do i=g_isc,g_iec ! array2D(isc+i-g_isc,jsc+j-g_jsc) = OS%grid%mask2dT(i,j) ! enddo ; enddo - case('t_surf') - array2D(isc:,jsc:) = Ocean%t_surf(isc:,jsc:)-CELSIUS_KELVIN_OFFSET - case('t_pme') - array2D(isc:,jsc:) = Ocean%t_surf(isc:,jsc:)-CELSIUS_KELVIN_OFFSET - case('t_runoff') - array2D(isc:,jsc:) = Ocean%t_surf(isc:,jsc:)-CELSIUS_KELVIN_OFFSET - case('t_calving') - array2D(isc:,jsc:) = Ocean%t_surf(isc:,jsc:)-CELSIUS_KELVIN_OFFSET - case('btfHeat') - array2D(isc:,jsc:) = 0 - case('cos_rot') - array2D(isc:,jsc:) = OS%grid%cos_rot(g_isc:g_iec,g_jsc:g_jec) ! =1 - case('sin_rot') - array2D(isc:,jsc:) = OS%grid%sin_rot(g_isc:g_iec,g_jsc:g_jec) ! 
=0 - case('s_surf') - array2D(isc:,jsc:) = Ocean%s_surf(isc:,jsc:) - case('sea_lev') - array2D(isc:,jsc:) = Ocean%sea_lev(isc:,jsc:) - case('frazil') - array2D(isc:,jsc:) = Ocean%frazil(isc:,jsc:) - case default - call MOM_error(FATAL,'get_ocean_grid_data2D: unknown argument name='//name) + case('t_surf') + array2D(isc:,jsc:) = Ocean%t_surf(isc:,jsc:)-CELSIUS_KELVIN_OFFSET + case('t_pme') + array2D(isc:,jsc:) = Ocean%t_surf(isc:,jsc:)-CELSIUS_KELVIN_OFFSET + case('t_runoff') + array2D(isc:,jsc:) = Ocean%t_surf(isc:,jsc:)-CELSIUS_KELVIN_OFFSET + case('t_calving') + array2D(isc:,jsc:) = Ocean%t_surf(isc:,jsc:)-CELSIUS_KELVIN_OFFSET + case('btfHeat') + array2D(isc:,jsc:) = 0 + case('cos_rot') + array2D(isc:,jsc:) = OS%grid%cos_rot(g_isc:g_iec,g_jsc:g_jec) ! =1 + case('sin_rot') + array2D(isc:,jsc:) = OS%grid%sin_rot(g_isc:g_iec,g_jsc:g_jec) ! =0 + case('s_surf') + array2D(isc:,jsc:) = Ocean%s_surf(isc:,jsc:) + case('sea_lev') + array2D(isc:,jsc:) = Ocean%sea_lev(isc:,jsc:) + case('frazil') + array2D(isc:,jsc:) = Ocean%frazil(isc:,jsc:) + case('melt_pot') + array2D(isc:,jsc:) = Ocean%melt_potential(isc:,jsc:) + case('obld') + array2D(isc:,jsc:) = Ocean%OBLD(isc:,jsc:) + case default + call MOM_error(FATAL,'get_ocean_grid_data2D: unknown argument name='//name) end select end subroutine ocean_model_data2D_get @@ -1091,25 +1093,29 @@ subroutine ocean_model_data1D_get(OS, Ocean, name, value) end subroutine ocean_model_data1D_get -!> Write out FMS-format checsums on fields from the ocean surface state +!> Write out checksums for fields from the ocean surface state subroutine ocean_public_type_chksum(id, timestep, ocn) character(len=*), intent(in) :: id !< An identifying string for this call integer, intent(in) :: timestep !< The number of elapsed timesteps type(ocean_public_type), intent(in) :: ocn !< A structure containing various publicly !! visible ocean surface fields. - integer :: n, m, outunit - - outunit = stdout() - - write(outunit,*) "BEGIN CHECKSUM(ocean_type):: ", id, timestep - write(outunit,100) 'ocean%t_surf ',mpp_chksum(ocn%t_surf ) - write(outunit,100) 'ocean%s_surf ',mpp_chksum(ocn%s_surf ) - write(outunit,100) 'ocean%u_surf ',mpp_chksum(ocn%u_surf ) - write(outunit,100) 'ocean%v_surf ',mpp_chksum(ocn%v_surf ) - write(outunit,100) 'ocean%sea_lev ',mpp_chksum(ocn%sea_lev) - write(outunit,100) 'ocean%frazil ',mpp_chksum(ocn%frazil ) - + ! Local variables + integer(kind=int64) :: chks ! A checksum for the field + logical :: root ! True only on the root PE + integer :: outunit ! 
The output unit to write to + + root = is_root_pe() + outunit = stdout_if_root() + + if (root) write(outunit,*) "BEGIN CHECKSUM(ocean_type):: ", id, timestep + chks = field_chksum(ocn%t_surf ) ; if (root) write(outunit,100) 'ocean%t_surf ', chks + chks = field_chksum(ocn%s_surf ) ; if (root) write(outunit,100) 'ocean%s_surf ', chks + chks = field_chksum(ocn%u_surf ) ; if (root) write(outunit,100) 'ocean%u_surf ', chks + chks = field_chksum(ocn%v_surf ) ; if (root) write(outunit,100) 'ocean%v_surf ', chks + chks = field_chksum(ocn%sea_lev) ; if (root) write(outunit,100) 'ocean%sea_lev ', chks + chks = field_chksum(ocn%frazil ) ; if (root) write(outunit,100) 'ocean%frazil ', chks + chks = field_chksum(ocn%melt_potential) ; if (root) write(outunit,100) 'ocean%melt_potential ', chks call coupler_type_write_chksums(ocn%fields, outunit, 'ocean%') 100 FORMAT(" CHECKSUM::",A20," = ",Z20) @@ -1153,13 +1159,14 @@ subroutine ocean_model_get_UV_surf(OS, Ocean, name, array2D, isc, jsc) G => OS%grid is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec - call mpp_get_compute_domain(Ocean%Domain, isc_bnd, iec_bnd, & - jsc_bnd, jec_bnd) + call get_domain_extent(Ocean%Domain, isc_bnd, iec_bnd, jsc_bnd, jec_bnd) i0 = is - isc_bnd ; j0 = js - jsc_bnd sfc_state => OS%sfc_state + call pass_vector(sfc_state%u, sfc_state%v, G%Domain) + select case(name) case('ua') do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd @@ -1182,7 +1189,7 @@ subroutine ocean_model_get_UV_surf(OS, Ocean, name, array2D, isc, jsc) 0.5*(sfc_state%v(i+i0,J+j0)+sfc_state%v(i+i0+1,J+j0)) enddo ; enddo case default - call MOM_error(FATAL,'ocean_model_get_UV_surf: unknown argument name='//name) + call MOM_error(FATAL,'ocean_model_get_UV_surf: unknown argument name='//name) end select end subroutine ocean_model_get_UV_surf diff --git a/config_src/ice_solo_driver/atmos_ocean_fluxes.F90 b/config_src/drivers/ice_solo_driver/atmos_ocean_fluxes.F90 similarity index 100% rename from config_src/ice_solo_driver/atmos_ocean_fluxes.F90 rename to config_src/drivers/ice_solo_driver/atmos_ocean_fluxes.F90 diff --git a/config_src/ice_solo_driver/ice_shelf_driver.F90 b/config_src/drivers/ice_solo_driver/ice_shelf_driver.F90 similarity index 94% rename from config_src/ice_solo_driver/ice_shelf_driver.F90 rename to config_src/drivers/ice_solo_driver/ice_shelf_driver.F90 index b1323a5485..959e4676d0 100644 --- a/config_src/ice_solo_driver/ice_shelf_driver.F90 +++ b/config_src/drivers/ice_solo_driver/ice_shelf_driver.F90 @@ -38,9 +38,9 @@ program Shelf_main use MOM_get_input, only : Get_MOM_Input, directories use MOM_grid, only : ocean_grid_type, MOM_grid_init, MOM_grid_end use MOM_hor_index, only : hor_index_type, hor_index_init - use MOM_io, only : MOM_io_init, file_exists, open_file, close_file + use MOM_io, only : MOM_io_init, file_exists, open_ASCII_file, close_file use MOM_io, only : check_nml_error, io_infra_init, io_infra_end - use MOM_io, only : APPEND_FILE, ASCII_FILE, READONLY_FILE, SINGLE_FILE + use MOM_io, only : APPEND_FILE, READONLY_FILE, SINGLE_FILE use MOM_open_boundary, only : ocean_OBC_type use MOM_restart, only : save_restart use MOM_string_functions,only : uppercase @@ -176,7 +176,7 @@ program Shelf_main if (file_exists('input.nml')) then ! Provide for namelist specification of the run length and calendar data. 
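Both checksum writers in this patch now follow the same infrastructure-neutral pattern: field_chksum from MOM_coms produces the checksum, and only the root PE writes it through the unit returned by stdout_if_root. Stripped down to a single field, the pattern used by ice_ocn_bnd_type_chksum and ocean_public_type_chksum is sketched below (names as imported above):

! The checksum is computed on every PE; the formatted write happens only on
! the root PE.
integer(kind=int64) :: chks
logical :: root
integer :: outunit

root = is_root_pe()
outunit = stdout_if_root()
chks = field_chksum(ocn%t_surf)
if (root) write(outunit,'(" CHECKSUM::",A20," = ",Z20)') 'ocean%t_surf        ', chks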
- call open_file(unit, 'input.nml', form=ASCII_FILE, action=READONLY_FILE) + call open_ASCII_file(unit, 'input.nml', action=READONLY_FILE) read(unit, ice_solo_nml, iostat=io_status) call close_file(unit) ierr = check_nml_error(io_status,'ice_solo_nml') @@ -185,17 +185,18 @@ program Shelf_main endif endif + call Get_MOM_Input(param_file, dirs) + ! Read ocean_solo restart, which can override settings from the namelist. if (file_exists(trim(dirs%restart_input_dir)//'ice_solo.res')) then - call open_file(unit,trim(dirs%restart_input_dir)//'ice_solo.res', & - form=ASCII_FILE,action=READONLY_FILE) + call open_ASCII_file(unit, trim(dirs%restart_input_dir)//'ice_solo.res', action=READONLY_FILE) read(unit,*) calendar_type read(unit,*) date_init read(unit,*) date call close_file(unit) else calendar = uppercase(calendar) - if (calendar(1:6) == 'JULIAN') then ; calendar_type = JULIAN + if (calendar(1:6) == 'JULIAN') then ; calendar_type = JULIAN elseif (calendar(1:9) == 'GREGORIAN') then ; calendar_type = GREGORIAN elseif (calendar(1:6) == 'NOLEAP') then ; calendar_type = NOLEAP elseif (calendar(1:10)=='THIRTY_DAY') then ; calendar_type = THIRTY_DAY_MONTHS @@ -216,7 +217,6 @@ program Shelf_main Start_time = real_to_time(0.0) endif - call Get_MOM_Input(param_file, dirs) ! Determining the internal unit scaling factors for this run. call unit_scaling_init(param_file, US) @@ -341,15 +341,14 @@ program Shelf_main call diag_mediator_close_registration(diag) ! Write out a time stamp file. - if (calendar_type /= NO_CALENDAR) then - call open_file(unit, 'time_stamp.out', form=ASCII_FILE, action=APPEND_FILE, & - threading=SINGLE_FILE) + if (is_root_pe() .and. (calendar_type /= NO_CALENDAR)) then + call open_ASCII_file(unit, 'time_stamp.out', action=APPEND_FILE) call get_date(Time, date(1), date(2), date(3), date(4), date(5), date(6)) month = month_name(date(2)) - if (is_root_pe()) write(unit,'(6i4,2x,a3)') date, month(1:3) + write(unit,'(6i4,2x,a3)') date, month(1:3) call get_date(Time_end, date(1), date(2), date(3), date(4), date(5), date(6)) month = month_name(date(2)) - if (is_root_pe()) write(unit,'(6i4,2x,a3)') date, month(1:3) + write(unit,'(6i4,2x,a3)') date, month(1:3) call close_file(unit) endif @@ -428,19 +427,19 @@ program Shelf_main dirs%restart_output_dir) ! Write ice shelf solo restart file. - call open_file(unit, trim(dirs%restart_output_dir)//'shelf.res', nohdrs=.true.) 
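The time_stamp.out hunk above and the shelf.res hunk that continues below apply the same refactoring: rather than having every PE open the ASCII file and guarding only the individual writes with is_root_pe(), the whole open/write/close block now runs on the root PE alone. Schematically (a sketch assembled from calls shown in this patch):

    if (is_root_pe()) then
      call open_ASCII_file(unit, 'time_stamp.out', action=APPEND_FILE)
      write(unit,'(6i4,2x,a3)') date, month(1:3)   ! only the root PE ever touches the file
      call close_file(unit)
    endif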
if (is_root_pe())then - write(unit, '(i6,8x,a)') calendar_type, & - '(Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4)' - - call get_date(Start_time, yr, mon, day, hr, mins, sec) - write(unit, '(6i6,8x,a)') yr, mon, day, hr, mins, sec, & - 'Model start time: year, month, day, hour, minute, second' - call get_date(Time, yr, mon, day, hr, mins, sec) - write(unit, '(6i6,8x,a)') yr, mon, day, hr, mins, sec, & - 'Current model time: year, month, day, hour, minute, second' + call open_ASCII_file(unit, trim(dirs%restart_output_dir)//'shelf.res') + write(unit, '(i6,8x,a)') calendar_type, & + '(Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4)' + + call get_date(Start_time, yr, mon, day, hr, mins, sec) + write(unit, '(6i6,8x,a)') yr, mon, day, hr, mins, sec, & + 'Model start time: year, month, day, hour, minute, second' + call get_date(Time, yr, mon, day, hr, mins, sec) + write(unit, '(6i6,8x,a)') yr, mon, day, hr, mins, sec, & + 'Current model time: year, month, day, hour, minute, second' + call close_file(unit) endif - call close_file(unit) endif if (is_root_pe()) then diff --git a/config_src/mct_driver/mom_ocean_model_mct.F90 b/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 similarity index 97% rename from config_src/mct_driver/mom_ocean_model_mct.F90 rename to config_src/drivers/mct_cap/mom_ocean_model_mct.F90 index 2f94c9b7f9..3bd0e1e28d 100644 --- a/config_src/mct_driver/mom_ocean_model_mct.F90 +++ b/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 @@ -15,6 +15,7 @@ module MOM_ocean_model_mct use MOM, only : extract_surface_state, allocate_surface_state, finish_MOM_initialization use MOM, only : get_MOM_state_elements, MOM_state_is_synchronized use MOM, only : get_ocean_stocks, step_offline +use MOM_coms, only : field_chksum use MOM_constants, only : CELSIUS_KELVIN_OFFSET, hlf use MOM_diag_mediator, only : diag_ctrl, enable_averaging, disable_averaging use MOM_diag_mediator, only : diag_mediator_close_registration, diag_mediator_end @@ -56,15 +57,16 @@ module MOM_ocean_model_mct use coupler_types_mod, only : coupler_type_set_diags, coupler_type_send_data use mpp_domains_mod, only : domain2d, mpp_get_layout, mpp_get_global_domain use mpp_domains_mod, only : mpp_define_domains, mpp_get_compute_domain, mpp_get_data_domain -use fms_mod, only : stdout +use MOM_io, only : stdout use mpp_mod, only : mpp_chksum use MOM_EOS, only : gsw_sp_from_sr, gsw_pt_from_ct use MOM_wave_interface, only : wave_parameters_CS, MOM_wave_interface_init -use MOM_wave_interface, only : MOM_wave_interface_init_lite, Update_Surface_Waves +use MOM_wave_interface, only : Update_Surface_Waves use time_interp_external_mod, only : time_interp_external_init ! MCT specfic routines use MOM_domains, only : MOM_infra_end +use iso_fortran_env, only : int64 #include @@ -193,8 +195,8 @@ module MOM_ocean_model_mct !! about the vertical grid. type(unit_scale_type), pointer :: US => NULL() !< A pointer to a structure containing !! dimensional unit scaling factors. - type(MOM_control_struct), pointer :: & - MOM_CSp => NULL() !< A pointer to the MOM control structure + type(MOM_control_struct) :: MOM_CSp + !< MOM control structure type(ice_shelf_CS), pointer :: & Ice_shelf_CSp => NULL() !< A pointer to the control structure for the !! ice shelf model that couples with MOM6. This @@ -203,7 +205,7 @@ module MOM_ocean_model_mct marine_ice_CSp => NULL() !< A pointer to the control structure for the !! marine ice effects module. 
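The Waves component declared just below also gains a null default initialization. Without "=> NULL()" a pointer component starts with undefined association status, so tests such as associated(OS%Waves) elsewhere in this module are not well defined; with it, the pointer is guaranteed to be disassociated until it is explicitly associated (here, by MOM_wave_interface_init). A sketch of the two forms (declarations only):

    ! Undefined association status until explicitly nullified:
    !   type(wave_parameters_CS), pointer :: Waves
    ! Disassociated from the moment the structure exists:
    type(wave_parameters_CS), pointer :: Waves => NULL()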
type(wave_parameters_cs), pointer :: & - Waves !< A structure containing pointers to the surface wave fields + Waves => NULL() !< A pointer to the surface wave control structure type(surface_forcing_CS), pointer :: & forcing_CSp => NULL() !< A pointer to the MOM forcing control structure type(MOM_restart_CS), pointer :: & @@ -381,11 +383,9 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i call get_param(param_file, mdl, "USE_WAVES", OS%Use_Waves, & "If true, enables surface wave modules.", default=.false.) - if (OS%use_waves) then - call MOM_wave_interface_init(OS%Time, OS%grid, OS%GV, OS%US, param_file, OS%Waves, OS%diag) - else - call MOM_wave_interface_init_lite(param_file) - endif + ! MOM_wave_interface_init is called regardless of the value of USE_WAVES because + ! it also initializes statistical waves. + call MOM_wave_interface_init(OS%Time, OS%grid, OS%GV, OS%US, param_file, OS%Waves, OS%diag) if (associated(OS%grid%Domain%maskmap)) then call initialize_ocean_public_type(OS%grid%Domain%mpp_domain, Ocean_sfc, & @@ -409,10 +409,6 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i call close_param_file(param_file) call diag_mediator_close_registration(OS%diag) - - if (is_root_pe()) & - write(*,'(/12x,a/)') '======== COMPLETED MOM INITIALIZATION ========' - call callTree_leave("ocean_model_init(") end subroutine ocean_model_init @@ -1046,27 +1042,27 @@ subroutine Ocean_stock_pe(OS, index, value, time_index) end subroutine Ocean_stock_pe -!> Write out FMS-format checsums on fields from the ocean surface state +!> Write out checksums for fields from the ocean surface state subroutine ocean_public_type_chksum(id, timestep, ocn) character(len=*), intent(in) :: id !< An identifying string for this call integer, intent(in) :: timestep !< The number of elapsed timesteps type(ocean_public_type), intent(in) :: ocn !< A structure containing various publicly !! visible ocean surface fields. - integer :: n, m, outunit - - outunit = stdout() - - write(outunit,*) "BEGIN CHECKSUM(ocean_type):: ", id, timestep - write(outunit,100) 'ocean%t_surf ',mpp_chksum(ocn%t_surf ) - write(outunit,100) 'ocean%s_surf ',mpp_chksum(ocn%s_surf ) - write(outunit,100) 'ocean%u_surf ',mpp_chksum(ocn%u_surf ) - write(outunit,100) 'ocean%v_surf ',mpp_chksum(ocn%v_surf ) - write(outunit,100) 'ocean%sea_lev ',mpp_chksum(ocn%sea_lev) - write(outunit,100) 'ocean%frazil ',mpp_chksum(ocn%frazil ) - write(outunit,100) 'ocean%melt_potential ',mpp_chksum(ocn%melt_potential) - - call coupler_type_write_chksums(ocn%fields, outunit, 'ocean%') + ! Local variables + integer(kind=int64) :: chks ! A checksum for the field + logical :: root ! True only on the root PE + integer :: outunit ! 
The output unit to write to
+
+  outunit = stdout
+  root = is_root_pe()
+
+  if (root) write(outunit,*) "BEGIN CHECKSUM(ocean_type):: ", id, timestep
+  chks = field_chksum(ocn%t_surf ) ; if (root) write(outunit,100) 'ocean%t_surf ', chks
+  chks = field_chksum(ocn%s_surf ) ; if (root) write(outunit,100) 'ocean%s_surf ', chks
+  chks = field_chksum(ocn%u_surf ) ; if (root) write(outunit,100) 'ocean%u_surf ', chks
+  chks = field_chksum(ocn%v_surf ) ; if (root) write(outunit,100) 'ocean%v_surf ', chks
+  chks = field_chksum(ocn%sea_lev) ; if (root) write(outunit,100) 'ocean%sea_lev ', chks
+  chks = field_chksum(ocn%frazil ) ; if (root) write(outunit,100) 'ocean%frazil ', chks
+  chks = field_chksum(ocn%melt_potential) ; if (root) write(outunit,100) 'ocean%melt_potential ', chks
+  call coupler_type_write_chksums(ocn%fields, stdout, 'ocean%')
100 FORMAT("   CHECKSUM::",A20," = ",Z20)
 end subroutine ocean_public_type_chksum
diff --git a/config_src/mct_driver/mom_surface_forcing_mct.F90 b/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90
similarity index 95%
rename from config_src/mct_driver/mom_surface_forcing_mct.F90
rename to config_src/drivers/mct_cap/mom_surface_forcing_mct.F90
index 92b5d148bb..7b32270b4c 100644
--- a/config_src/mct_driver/mom_surface_forcing_mct.F90
+++ b/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90
@@ -2,7 +2,7 @@ module MOM_surface_forcing_mct
 ! This file is part of MOM6. See LICENSE.md for the license.
-use MOM_coms, only : reproducing_sum
+use MOM_coms, only : reproducing_sum, field_chksum
 use MOM_constants, only : hlv, hlf
 use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end
 use MOM_cpu_clock, only : CLOCK_SUBCOMPONENT
@@ -34,10 +34,11 @@ module MOM_surface_forcing_mct
 use coupler_types_mod, only : coupler_type_initialized, coupler_type_spawn
 use coupler_types_mod, only : coupler_type_copy_data
 use data_override_mod, only : data_override_init, data_override
-use fms_mod, only : stdout
 use mpp_mod, only : mpp_chksum
 use time_interp_external_mod, only : init_external_field, time_interp_external
 use time_interp_external_mod, only : time_interp_external_init
+use MOM_io, only : stdout
+use iso_fortran_env, only : int64
 implicit none ; private
@@ -486,17 +487,18 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G,
     ! latent heat flux (W/m^2)
     fluxes%latent(i,j) = 0.0
-    ! contribution from frozen ppt
+    ! contribution from frozen ppt (notice minus sign since fprec is positive into the ocean)
     if (associated(IOB%fprec)) then
-      fluxes%latent(i,j) = fluxes%latent(i,j) + &
+      fluxes%latent(i,j) = fluxes%latent(i,j) - &
        IOB%fprec(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion
-      fluxes%latent_fprec_diag(i,j) = G%mask2dT(i,j) * IOB%fprec(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion
+      fluxes%latent_fprec_diag(i,j) = - G%mask2dT(i,j) * IOB%fprec(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion
     endif
-    ! contribution from frozen runoff
+    ! contribution from frozen runoff (notice minus sign since rofi_flux is positive into the ocean)
     if (associated(fluxes%frunoff)) then
-      fluxes%latent(i,j) = fluxes%latent(i,j) + &
+      fluxes%latent(i,j) = fluxes%latent(i,j) - &
        IOB%rofi_flux(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion
-      fluxes%latent_frunoff_diag(i,j) = G%mask2dT(i,j) * IOB%rofi_flux(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion
+      fluxes%latent_frunoff_diag(i,j) = -G%mask2dT(i,j) &
+         * IOB%rofi_flux(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion
     endif
     !
contribution from evaporation if (associated(IOB%q_flux)) then @@ -1243,7 +1245,8 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, "If true, use a 2-dimensional gustiness supplied from "//& "an input file", default=.false.) call get_param(param_file, mdl, "GUST_CONST", CS%gust_const, & - "The background gustiness in the winds.", units="Pa", default=0.0) + "The background gustiness in the winds.", units="Pa", default=0.0, & + scale=US%kg_m3_to_R*US%m_s_to_L_T**2*US%L_to_Z) if (CS%read_gust_2d) then call get_param(param_file, mdl, "GUST_2D_FILE", gust_file, & "The file in which the wind gustiness is found in "//& @@ -1361,37 +1364,43 @@ subroutine ice_ocn_bnd_type_chksum(id, timestep, iobt) !! ocean in a coupled model whose checksums are reported ! local variables - integer :: n,m, outunit - - outunit = stdout() - - write(outunit,*) "BEGIN CHECKSUM(ice_ocean_boundary_type):: ", id, timestep - write(outunit,100) 'iobt%u_flux ' , mpp_chksum( iobt%u_flux ) - write(outunit,100) 'iobt%v_flux ' , mpp_chksum( iobt%v_flux ) - write(outunit,100) 'iobt%t_flux ' , mpp_chksum( iobt%t_flux ) - write(outunit,100) 'iobt%q_flux ' , mpp_chksum( iobt%q_flux ) - write(outunit,100) 'iobt%salt_flux ' , mpp_chksum( iobt%salt_flux ) - write(outunit,100) 'iobt%seaice_melt_heat' , mpp_chksum( iobt%seaice_melt_heat) - write(outunit,100) 'iobt%seaice_melt ' , mpp_chksum( iobt%seaice_melt ) - write(outunit,100) 'iobt%lw_flux ' , mpp_chksum( iobt%lw_flux ) - write(outunit,100) 'iobt%sw_flux_vis_dir' , mpp_chksum( iobt%sw_flux_vis_dir) - write(outunit,100) 'iobt%sw_flux_vis_dif' , mpp_chksum( iobt%sw_flux_vis_dif) - write(outunit,100) 'iobt%sw_flux_nir_dir' , mpp_chksum( iobt%sw_flux_nir_dir) - write(outunit,100) 'iobt%sw_flux_nir_dif' , mpp_chksum( iobt%sw_flux_nir_dif) - write(outunit,100) 'iobt%lprec ' , mpp_chksum( iobt%lprec ) - write(outunit,100) 'iobt%fprec ' , mpp_chksum( iobt%fprec ) - write(outunit,100) 'iobt%runoff ' , mpp_chksum( iobt%runoff ) - write(outunit,100) 'iobt%calving ' , mpp_chksum( iobt%calving ) - write(outunit,100) 'iobt%p ' , mpp_chksum( iobt%p ) - if (associated(iobt%ustar_berg)) & - write(outunit,100) 'iobt%ustar_berg ' , mpp_chksum( iobt%ustar_berg ) - if (associated(iobt%area_berg)) & - write(outunit,100) 'iobt%area_berg ' , mpp_chksum( iobt%area_berg ) - if (associated(iobt%mass_berg)) & - write(outunit,100) 'iobt%mass_berg ' , mpp_chksum( iobt%mass_berg ) + integer(kind=int64) :: chks ! A checksum for the field + logical :: root ! True only on the root PE + integer :: outunit ! 
The output unit to write to + + outunit = stdout + root = is_root_pe() + + if (root) write(outunit,*) "BEGIN CHECKSUM(ice_ocean_boundary_type):: ", id, timestep + chks = field_chksum( iobt%u_flux ) ; if (root) write(outunit,100) 'iobt%u_flux ', chks + chks = field_chksum( iobt%v_flux ) ; if (root) write(outunit,100) 'iobt%v_flux ', chks + chks = field_chksum( iobt%t_flux ) ; if (root) write(outunit,100) 'iobt%t_flux ', chks + chks = field_chksum( iobt%q_flux ) ; if (root) write(outunit,100) 'iobt%q_flux ', chks + chks = field_chksum( iobt%seaice_melt_heat); if (root) write(outunit,100) 'iobt%seaice_melt_heat', chks + chks = field_chksum( iobt%seaice_melt) ; if (root) write(outunit,100) 'iobt%seaice_melt ', chks + chks = field_chksum( iobt%salt_flux ) ; if (root) write(outunit,100) 'iobt%salt_flux ', chks + chks = field_chksum( iobt%lw_flux ) ; if (root) write(outunit,100) 'iobt%lw_flux ', chks + chks = field_chksum( iobt%sw_flux_vis_dir) ; if (root) write(outunit,100) 'iobt%sw_flux_vis_dir', chks + chks = field_chksum( iobt%sw_flux_vis_dif) ; if (root) write(outunit,100) 'iobt%sw_flux_vis_dif', chks + chks = field_chksum( iobt%sw_flux_nir_dir) ; if (root) write(outunit,100) 'iobt%sw_flux_nir_dir', chks + chks = field_chksum( iobt%sw_flux_nir_dif) ; if (root) write(outunit,100) 'iobt%sw_flux_nir_dif', chks + chks = field_chksum( iobt%lprec ) ; if (root) write(outunit,100) 'iobt%lprec ', chks + chks = field_chksum( iobt%fprec ) ; if (root) write(outunit,100) 'iobt%fprec ', chks + chks = field_chksum( iobt%rofl_flux ) ; if (root) write(outunit,100) 'rofl_flux ', chks + chks = field_chksum( iobt%rofi_flux ) ; if (root) write(outunit,100) 'rofi_flux ', chks + chks = field_chksum( iobt%p ) ; if (root) write(outunit,100) 'iobt%p ', chks + if (associated(iobt%ustar_berg)) then + chks = field_chksum( iobt%ustar_berg ) ; if (root) write(outunit,100) 'iobt%ustar_berg ', chks + endif + if (associated(iobt%area_berg)) then + chks = field_chksum( iobt%area_berg ) ; if (root) write(outunit,100) 'iobt%area_berg ', chks + endif + if (associated(iobt%mass_berg)) then + chks = field_chksum( iobt%mass_berg ) ; if (root) write(outunit,100) 'iobt%mass_berg ', chks + endif 100 FORMAT(" CHECKSUM::",A20," = ",Z20) - call coupler_type_write_chksums(iobt%fluxes, outunit, 'iobt%') + call coupler_type_write_chksums(iobt%fluxes, stdout, 'iobt%') end subroutine ice_ocn_bnd_type_chksum diff --git a/config_src/mct_driver/ocn_cap_methods.F90 b/config_src/drivers/mct_cap/ocn_cap_methods.F90 similarity index 100% rename from config_src/mct_driver/ocn_cap_methods.F90 rename to config_src/drivers/mct_cap/ocn_cap_methods.F90 diff --git a/config_src/mct_driver/ocn_comp_mct.F90 b/config_src/drivers/mct_cap/ocn_comp_mct.F90 similarity index 94% rename from config_src/mct_driver/ocn_comp_mct.F90 rename to config_src/drivers/mct_cap/ocn_comp_mct.F90 index 741ce832e8..1872fff335 100644 --- a/config_src/mct_driver/ocn_comp_mct.F90 +++ b/config_src/drivers/mct_cap/ocn_comp_mct.F90 @@ -42,6 +42,7 @@ module ocn_comp_mct use MOM_constants, only: CELSIUS_KELVIN_OFFSET use MOM_domains, only: AGRID, BGRID_NE, CGRID_NE, pass_vector use mpp_domains_mod, only: mpp_get_compute_domain +use MOM_io, only: stdout ! Previously inlined - now in separate modules use MOM_ocean_model_mct, only: ocean_public_type, ocean_state_type @@ -88,7 +89,6 @@ module ocn_comp_mct type(cpl_indices_type) :: ind !< Variable IDs logical :: sw_decomp !< Controls whether shortwave is decomposed into 4 components real :: c1, c2, c3, c4 !< Coeffs. 
used in the shortwave decomposition i/o - integer :: stdout !< standard output unit. (by default, points to ocn.log.* ) character(len=384) :: pointer_filename !< Name of the ascii file that contains the path !! and filename of the latest restart file. end type MCT_MOM_Data @@ -194,14 +194,14 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) call shr_file_getLogUnit (shrlogunit) call shr_file_getLogLevel(shrloglev) - glb%stdout = shr_file_getUnit() ! get an unused unit number + stdout = shr_file_getUnit() ! get an unused unit number ! open the ocn_modelio.nml file and then open a log file associated with stdout ocn_modelio_name = 'ocn_modelio.nml' // trim(inst_suffix) - call shr_file_setIO(ocn_modelio_name,glb%stdout) + call shr_file_setIO(ocn_modelio_name,stdout) ! set the shr log io unit number - call shr_file_setLogUnit(glb%stdout) + call shr_file_setLogUnit(stdout) end if call set_calendar_type(NOLEAP) !TODO: confirm this @@ -218,23 +218,23 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) ! Debugging clocks if (debug .and. is_root_pe()) then - write(glb%stdout,*) 'ocn_init_mct, current time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_init_mct, current time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, StartTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, start time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_init_mct, start time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, StopTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, stop time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_init_mct, stop time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, PrevTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, previous time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_init_mct, previous time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, TimeStep=ocn_cpl_interval, rc=rc) call ESMF_TimeIntervalGet(ocn_cpl_interval, yy=year, mm=month, d=day, s=seconds, sn=seconds_n, sd=seconds_d, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, time step: y,m,d-',year,month,day,'s,sn,sd=',seconds,seconds_n,seconds_d + write(stdout,*) 'ocn_init_mct, time step: y,m,d-',year,month,day,'s,sn,sd=',seconds,seconds_n,seconds_d endif npes = num_pes() @@ -298,7 +298,7 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) ! 
read name of restart file in the pointer file nu = shr_file_getUnit() restart_pointer_file = trim(glb%pointer_filename) - if (is_root_pe()) write(glb%stdout,*) 'Reading ocn pointer file: ',restart_pointer_file + if (is_root_pe()) write(stdout,*) 'Reading ocn pointer file: ',restart_pointer_file restartfile = ""; restartfiles = ""; open(nu, file=restart_pointer_file, form='formatted', status='unknown') do @@ -316,13 +316,13 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) enddo close(nu) if (is_root_pe()) then - write(glb%stdout,*) 'Reading restart file(s): ',trim(restartfiles) + write(stdout,*) 'Reading restart file(s): ',trim(restartfiles) end if call shr_file_freeUnit(nu) call ocean_model_init(glb%ocn_public, glb%ocn_state, time0, time_start, input_restart_file=trim(restartfiles)) endif if (is_root_pe()) then - write(glb%stdout,'(/12x,a/)') '======== COMPLETED MOM INITIALIZATION ========' + write(stdout,'(/12x,a/)') '======== COMPLETED MOM INITIALIZATION ========' end if ! Initialize ocn_state%sfc_state out of sight @@ -383,7 +383,7 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) ncouple_per_day = seconds_in_day / ocn_cpl_dt mom_cpl_dt = seconds_in_day / ncouple_per_day if (mom_cpl_dt /= ocn_cpl_dt) then - write(glb%stdout,*) 'ERROR mom_cpl_dt and ocn_cpl_dt must be identical' + write(stdout,*) 'ERROR mom_cpl_dt and ocn_cpl_dt must be identical' call exit(0) end if @@ -457,7 +457,7 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) if (is_root_pe()) then call shr_file_getLogUnit(shrlogunit) call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit(glb%stdout) + call shr_file_setLogUnit(stdout) endif ! Query the beginning time of the current coupling interval @@ -484,7 +484,7 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) if (runtype /= "continue" .and. runtype /= "branch") then if (debug .and. is_root_pe()) then - write(glb%stdout,*) 'doubling first interval duration!' + write(stdout,*) 'doubling first interval duration!' endif ! shift back the start time by one coupling interval (to align the start time with other components) @@ -500,19 +500,19 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) if (debug .and. 
is_root_pe()) then call ESMF_ClockGet(EClock, CurrTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_run_mct, current time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_run_mct, current time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, StartTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_run_mct, start time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_run_mct, start time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, StopTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_run_mct, stop time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_run_mct, stop time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, PrevTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_run_mct, previous time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_run_mct, previous time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, TimeStep=ocn_cpl_interval, rc=rc) call ESMF_TimeIntervalGet(ocn_cpl_interval, yy=year, mm=month, d=day, s=seconds, sn=seconds_n, sd=seconds_d, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, time step: y,m,d-',year,month,day,'s,sn,sd=',seconds,seconds_n,seconds_d + write(stdout,*) 'ocn_init_mct, time step: y,m,d-',year,month,day,'s,sn,sd=',seconds,seconds_n,seconds_d endif ! set the cdata pointers: @@ -525,10 +525,10 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) !glb%sw_decomp = .false. !END TODO: if (glb%sw_decomp) then - call ocn_import(x2o_o%rattr, glb%ind, glb%grid, Ice_ocean_boundary, glb%ocn_public, glb%stdout, Eclock, & + call ocn_import(x2o_o%rattr, glb%ind, glb%grid, Ice_ocean_boundary, glb%ocn_public, stdout, Eclock, & c1=glb%c1, c2=glb%c2, c3=glb%c3, c4=glb%c4) else - call ocn_import(x2o_o%rattr, glb%ind, glb%grid, Ice_ocean_boundary, glb%ocn_public, glb%stdout, Eclock ) + call ocn_import(x2o_o%rattr, glb%ind, glb%grid, Ice_ocean_boundary, glb%ocn_public, stdout, Eclock ) end if ! Update internal ocean @@ -540,7 +540,7 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) !--- write out intermediate restart file when needed. ! Check alarms for flag to write restart at end of day write_restart_at_eod = seq_timemgr_RestartAlarmIsOn(EClock) - if (debug .and. is_root_pe()) write(glb%stdout,*) 'ocn_run_mct, write_restart_at_eod=', write_restart_at_eod + if (debug .and. is_root_pe()) write(stdout,*) 'ocn_run_mct, write_restart_at_eod=', write_restart_at_eod if (write_restart_at_eod) then ! 
case name @@ -575,7 +575,7 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) endif close(nu) - write(glb%stdout,*) 'ocn restart pointer file written: ',trim(restartname) + write(stdout,*) 'ocn restart pointer file written: ',trim(restartname) endif call shr_file_freeUnit(nu) @@ -761,7 +761,7 @@ end subroutine ocn_domain_mct else if (trim(starttype) == trim(seq_infodata_start_type_brnch)) then get_runtype = "branch" else - write(glb%stdout,*) 'ocn_comp_mct ERROR: unknown starttype' + write(stdout,*) 'ocn_comp_mct ERROR: unknown starttype' call exit(0) end if return diff --git a/config_src/mct_driver/ocn_cpl_indices.F90 b/config_src/drivers/mct_cap/ocn_cpl_indices.F90 similarity index 100% rename from config_src/mct_driver/ocn_cpl_indices.F90 rename to config_src/drivers/mct_cap/ocn_cpl_indices.F90 diff --git a/config_src/nuopc_driver/mom_cap.F90 b/config_src/drivers/nuopc_cap/mom_cap.F90 similarity index 75% rename from config_src/nuopc_driver/mom_cap.F90 rename to config_src/drivers/nuopc_cap/mom_cap.F90 index c2a2e98838..ee498f4184 100644 --- a/config_src/nuopc_driver/mom_cap.F90 +++ b/config_src/drivers/nuopc_cap/mom_cap.F90 @@ -25,31 +25,35 @@ module MOM_cap_mod use time_manager_mod, only: fms_get_calendar_type => get_calendar_type use MOM_domains, only: MOM_infra_init, num_pes, root_pe, pe_here use MOM_file_parser, only: get_param, log_version, param_file_type, close_param_file -use MOM_get_input, only: Get_MOM_Input, directories +use MOM_get_input, only: get_MOM_input, directories use MOM_domains, only: pass_var use MOM_error_handler, only: MOM_error, FATAL, is_root_pe -use MOM_ocean_model_nuopc, only: ice_ocean_boundary_type use MOM_grid, only: ocean_grid_type, get_global_grid_size +use MOM_ocean_model_nuopc, only: ice_ocean_boundary_type use MOM_ocean_model_nuopc, only: ocean_model_restart, ocean_public_type, ocean_state_type -use MOM_ocean_model_nuopc, only: ocean_model_init_sfc +use MOM_ocean_model_nuopc, only: ocean_model_init_sfc, ocean_model_flux_init use MOM_ocean_model_nuopc, only: ocean_model_init, update_ocean_model, ocean_model_end -use MOM_ocean_model_nuopc, only: get_ocean_grid, get_eps_omesh +use MOM_ocean_model_nuopc, only: get_ocean_grid, get_eps_omesh, query_ocean_state use MOM_cap_time, only: AlarmInit -use MOM_cap_methods, only: mom_import, mom_export, mom_set_geomtype +use MOM_cap_methods, only: mom_import, mom_export, mom_set_geomtype, mod2med_areacor +use MOM_cap_methods, only: med2mod_areacor, state_diagnose +use MOM_cap_methods, only: ChkErr + #ifdef CESMCOUPLED use shr_file_mod, only: shr_file_setLogUnit, shr_file_getLogUnit +use shr_mpi_mod, only : shr_mpi_min, shr_mpi_max #endif use time_utils_mod, only: esmf2fms_time use, intrinsic :: iso_fortran_env, only: output_unit -use ESMF, only: ESMF_ClockAdvance, ESMF_ClockGet, ESMF_ClockPrint +use ESMF, only: ESMF_ClockAdvance, ESMF_ClockGet, ESMF_ClockPrint, ESMF_VMget use ESMF, only: ESMF_ClockGetAlarm, ESMF_ClockGetNextTime, ESMF_ClockAdvance use ESMF, only: ESMF_ClockSet, ESMF_Clock, ESMF_GeomType_Flag, ESMF_LOGMSG_INFO use ESMF, only: ESMF_Grid, ESMF_GridCreate, ESMF_GridAddCoord use ESMF, only: ESMF_GridGetCoord, ESMF_GridAddItem, ESMF_GridGetItem use ESMF, only: ESMF_GridComp, ESMF_GridCompSetEntryPoint, ESMF_GridCompGet -use ESMF, only: ESMF_LogFoundError, ESMF_LogWrite, ESMF_LogSetError +use ESMF, only: ESMF_LogWrite, ESMF_LogSetError use ESMF, only: ESMF_LOGERR_PASSTHRU, ESMF_KIND_R8, ESMF_RC_VAL_WRONG use ESMF, only: ESMF_GEOMTYPE_MESH, ESMF_GEOMTYPE_GRID, ESMF_SUCCESS use ESMF, only: 
ESMF_METHOD_INITIALIZE, ESMF_MethodRemove, ESMF_State @@ -68,12 +72,14 @@ module MOM_cap_mod use ESMF, only: ESMF_COORDSYS_SPH_DEG, ESMF_GridCreate, ESMF_INDEX_DELOCAL use ESMF, only: ESMF_MESHLOC_ELEMENT, ESMF_RC_VAL_OUTOFRANGE, ESMF_StateGet use ESMF, only: ESMF_TimePrint, ESMF_AlarmSet, ESMF_FieldGet, ESMF_Array +use ESMF, only: ESMF_FieldRegridGetArea use ESMF, only: ESMF_ArrayCreate use ESMF, only: ESMF_RC_FILE_OPEN, ESMF_RC_FILE_READ, ESMF_RC_FILE_WRITE -use ESMF, only: ESMF_VMBroadcast +use ESMF, only: ESMF_VMBroadcast, ESMF_VMReduce, ESMF_REDUCE_MAX, ESMF_REDUCE_MIN use ESMF, only: ESMF_AlarmCreate, ESMF_ClockGetAlarmList, ESMF_AlarmList_Flag use ESMF, only: ESMF_AlarmGet, ESMF_AlarmIsCreated, ESMF_ALARMLIST_ALL, ESMF_AlarmIsEnabled use ESMF, only: ESMF_STATEITEM_NOTFOUND, ESMF_FieldWrite +use ESMF, only: ESMF_END_ABORT, ESMF_Finalize use ESMF, only: operator(==), operator(/=), operator(+), operator(-) ! TODO ESMF_GridCompGetInternalState does not have an explicit Fortran interface. @@ -91,6 +97,7 @@ module MOM_cap_mod use NUOPC_Model, only: model_label_SetRunClock => label_SetRunClock use NUOPC_Model, only: model_label_Finalize => label_Finalize use NUOPC_Model, only: SetVM +!$use omp_lib , only : omp_set_num_threads implicit none; private @@ -123,7 +130,7 @@ module MOM_cap_mod integer :: fldsFrOcn_num = 0 type (fld_list_type) :: fldsFrOcn(fldsMax) -integer :: debug = 0 +integer :: dbug = 0 integer :: import_slice = 1 integer :: export_slice = 1 character(len=256) :: tmpstr @@ -133,11 +140,14 @@ module MOM_cap_mod integer :: logunit !< stdout logging unit number logical :: profile_memory = .true. logical :: grid_attach_area = .false. +logical :: use_coldstart = .true. +logical :: use_mommesh = .true. character(len=128) :: scalar_field_name = '' integer :: scalar_field_count = 0 integer :: scalar_field_idx_grid_nx = 0 integer :: scalar_field_idx_grid_ny = 0 -character(len=*),parameter :: u_file_u = & +integer :: nthrds !< number of openmp threads per task +character(len=*),parameter :: u_FILE_u = & __FILE__ #ifdef CESMCOUPLED @@ -145,8 +155,9 @@ module MOM_cap_mod type(ESMF_GeomType_Flag) :: geomtype = ESMF_GEOMTYPE_MESH #else logical :: cesm_coupled = .false. -type(ESMF_GeomType_Flag) :: geomtype = ESMF_GEOMTYPE_GRID +type(ESMF_GeomType_Flag) :: geomtype #endif +character(len=8) :: restart_mode = 'alarms' contains @@ -168,32 +179,20 @@ subroutine SetServices(gcomp, rc) ! the NUOPC model component will register the generic methods call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! switching to IPD versions call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & userRoutine=InitializeP0, phase=0, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! set entry point for methods that require specific implementation call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & phaseLabelList=(/"IPDv03p1"/), userRoutine=InitializeAdvertise, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & phaseLabelList=(/"IPDv03p3"/), userRoutine=InitializeRealize, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !------------------ ! attach specializing method(s) @@ -201,36 +200,21 @@ subroutine SetServices(gcomp, rc) call NUOPC_CompSpecialize(gcomp, specLabel=model_label_DataInitialize, & specRoutine=DataInitialize, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, & specRoutine=ModelAdvance, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, & specRoutine=ModelSetRunClock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, & specRoutine=ocean_model_finalize, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine SetServices @@ -263,97 +247,64 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) ! Switch to IPDv03 by filtering all other phaseMap entries call NUOPC_CompFilterPhaseMap(gcomp, ESMF_METHOD_INITIALIZE, & acceptStringList=(/"IPDv03p"/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return write_diagnostics = .false. call NUOPC_CompAttributeGet(gcomp, name="DumpFields", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) write_diagnostics=(trim(value)=="true") write(logmsg,*) write_diagnostics - call ESMF_LogWrite('MOM_cap:DumpFields = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:DumpFields = '//trim(logmsg), ESMF_LOGMSG_INFO) overwrite_timeslice = .false. call NUOPC_CompAttributeGet(gcomp, name="OverwriteSlice", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. 
isSet) overwrite_timeslice=(trim(value)=="true") write(logmsg,*) overwrite_timeslice - call ESMF_LogWrite('MOM_cap:OverwriteSlice = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:OverwriteSlice = '//trim(logmsg), ESMF_LOGMSG_INFO) profile_memory = .false. call NUOPC_CompAttributeGet(gcomp, name="ProfileMemory", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) profile_memory=(trim(value)=="true") write(logmsg,*) profile_memory - call ESMF_LogWrite('MOM_cap:ProfileMemory = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ProfileMemory = '//trim(logmsg), ESMF_LOGMSG_INFO) grid_attach_area = .false. call NUOPC_CompAttributeGet(gcomp, name="GridAttachArea", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) grid_attach_area=(trim(value)=="true") write(logmsg,*) grid_attach_area - call ESMF_LogWrite('MOM_cap:GridAttachArea = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:GridAttachArea = '//trim(logmsg), ESMF_LOGMSG_INFO) + + call NUOPC_CompAttributeGet(gcomp, name='dbug_flag', value=value, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(value,*) dbug + end if + write(logmsg,'(i6)') dbug + call ESMF_LogWrite('MOM_cap:dbug = '//trim(logmsg), ESMF_LOGMSG_INFO) scalar_field_name = "" call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then scalar_field_name = trim(value) - call ESMF_LogWrite('MOM_cap:ScalarFieldName = '//trim(scalar_field_name), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ScalarFieldName = '//trim(scalar_field_name), ESMF_LOGMSG_INFO) endif scalar_field_count = 0 call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. 
isSet) then - read(value, '(i)', iostat=iostat) scalar_field_count + read(value, *, iostat=iostat) scalar_field_count if (iostat /= 0) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & msg=subname//": ScalarFieldCount not an integer: "//trim(value), & @@ -361,22 +312,15 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) return endif write(logmsg,*) scalar_field_count - call ESMF_LogWrite('MOM_cap:ScalarFieldCount = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ScalarFieldCount = '//trim(logmsg), ESMF_LOGMSG_INFO) endif scalar_field_idx_grid_nx = 0 call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then - read(value, '(i)', iostat=iostat) scalar_field_idx_grid_nx + read(value, *, iostat=iostat) scalar_field_idx_grid_nx if (iostat /= 0) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & msg=subname//": ScalarFieldIdxGridNX not an integer: "//trim(value), & @@ -384,22 +328,15 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) return endif write(logmsg,*) scalar_field_idx_grid_nx - call ESMF_LogWrite('MOM_cap:ScalarFieldIdxGridNX = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ScalarFieldIdxGridNX = '//trim(logmsg), ESMF_LOGMSG_INFO) endif scalar_field_idx_grid_ny = 0 call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then - read(value, '(i)', iostat=iostat) scalar_field_idx_grid_ny + read(value, *, iostat=iostat) scalar_field_idx_grid_ny if (iostat /= 0) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & msg=subname//": ScalarFieldIdxGridNY not an integer: "//trim(value), & @@ -407,11 +344,34 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) return endif write(logmsg,*) scalar_field_idx_grid_ny - call ESMF_LogWrite('MOM_cap:ScalarFieldIdxGridNY = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ScalarFieldIdxGridNY = '//trim(logmsg), ESMF_LOGMSG_INFO) + endif + + use_coldstart = .true. + call NUOPC_CompAttributeGet(gcomp, name="use_coldstart", value=value, & + isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) use_coldstart=(trim(value)=="true") + write(logmsg,*) use_coldstart + call ESMF_LogWrite('MOM_cap:use_coldstart = '//trim(logmsg), ESMF_LOGMSG_INFO) + + use_mommesh = .true. + call NUOPC_CompAttributeGet(gcomp, name="use_mommesh", value=value, & + isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. 
isSet) use_mommesh=(trim(value)=="true") + write(logmsg,*) use_mommesh + call ESMF_LogWrite('MOM_cap:use_mommesh = '//trim(logmsg), ESMF_LOGMSG_INFO) + + if(use_mommesh)then + geomtype = ESMF_GEOMTYPE_MESH + call NUOPC_CompAttributeGet(gcomp, name='mesh_ocn', isPresent=isPresent, isSet=isSet, rc=rc) + if (.not. isPresent .and. .not. isSet) then + call ESMF_LogWrite('geomtype set to mesh but mesh_ocn is not specified', ESMF_LOGMSG_INFO) + call ESMF_Finalize(endflag=ESMF_END_ABORT) + endif + else + geomtype = ESMF_GEOMTYPE_GRID endif end subroutine @@ -442,6 +402,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) type(ice_ocean_boundary_type), pointer :: Ice_ocean_boundary => NULL() type(ocean_internalstate_wrapper) :: ocean_internalstate type(ocean_grid_type), pointer :: ocean_grid => NULL() + type(directories) :: dirs type(time_type) :: Run_len !< length of experiment type(time_type) :: time0 !< Start time of coupled model's calendar. type(time_type) :: time_start !< The time at which to initialize the ocean model @@ -457,10 +418,14 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) character(len=512) :: diro character(len=512) :: logfile character(ESMF_MAXSTR) :: cvalue + character(len=64) :: logmsg logical :: isPresent, isPresentDiro, isPresentLogfile, isSet logical :: existflag + logical :: use_waves ! If true, the wave modules are active. + character(len=40) :: wave_method ! Wave coupling method. integer :: userRc integer :: localPet + integer :: localPeCount integer :: iostat integer :: readunit character(len=512) :: restartfile ! Path/Name of restart file @@ -472,11 +437,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' enter', ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(subname//' enter', ESMF_LOGMSG_INFO) allocate(Ice_ocean_boundary) !allocate(ocean_state) ! ocean_model_init allocate this pointer @@ -487,34 +448,43 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ocean_internalstate%ptr%ocean_state_type_ptr => ocean_state call ESMF_VMGetCurrent(vm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return - call ESMF_VMGet(VM, mpiCommunicator=mpi_comm_mom, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_VMGet(VM, mpiCommunicator=mpi_comm_mom, localPet=localPet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(CLOCK, currTIME=MyTime, TimeStep=TINT, RC=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet (MyTime, YY=YEAR, MM=MONTH, DD=DAY, H=HOUR, M=MINUTE, S=SECOND, RC=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return CALL ESMF_TimeIntervalGet(TINT, S=DT_OCEAN, RC=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !--------------------------------- + ! 
openmp threads + !--------------------------------- + + call ESMF_VMGet(vm, pet=localPet, peCount=localPeCount, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if(localPeCount == 1) then + call NUOPC_CompAttributeGet(gcomp, name="nthreads", value=cvalue, & + isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) nthrds + else + nthrds = localPeCount + endif + else + nthrds = localPeCount + endif + write(logmsg,*) nthrds + call ESMF_LogWrite(trim(subname)//': nthreads = '//trim(logmsg), ESMF_LOGMSG_INFO) + +!$ call omp_set_num_threads(nthrds) call fms_init(mpi_comm_mom) call constants_init @@ -524,10 +494,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) if (cesm_coupled) then call NUOPC_CompAttributeGet(gcomp, name="calendar", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) calendar select case (trim(calendar)) @@ -561,16 +528,10 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ! get start/reference time call ESMF_ClockGet(CLOCK, refTime=MyTime, RC=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet (MyTime, YY=YEAR, MM=MONTH, DD=DAY, H=HOUR, M=MINUTE, S=SECOND, RC=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return time0 = set_date (YEAR,MONTH,DAY,HOUR,MINUTE,SECOND) @@ -586,28 +547,16 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) if (is_root_pe()) then call NUOPC_CompAttributeGet(gcomp, name="diro", & isPresent=isPresentDiro, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompAttributeGet(gcomp, name="logfile", & isPresent=isPresentLogfile, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresentDiro .and. isPresentLogfile) then call NUOPC_CompAttributeGet(gcomp, name="diro", value=diro, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompAttributeGet(gcomp, name="logfile", value=logfile, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - open(newunit=logunit,file=trim(diro)//"/"//trim(logfile)) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + open(newunit=logunit,file=trim(diro)//"/"//trim(logfile)) else logunit = output_unit endif @@ -618,19 +567,12 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) starttype = "" call NUOPC_CompAttributeGet(gcomp, name='start_type', value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. 
isSet) then read(cvalue,*) starttype else call ESMF_LogWrite('MOM_cap:start_type unset - using input.nml for restart option', & - ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + ESMF_LOGMSG_INFO) endif runtype = "" @@ -648,26 +590,27 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) endif if (len_trim(runtype) > 0) then - call ESMF_LogWrite('MOM_cap:startup = '//trim(runtype), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:startup = '//trim(runtype), ESMF_LOGMSG_INFO) endif restartfile = ""; restartfiles = "" if (runtype == "initial") then - - restartfiles = "n" + if (cesm_coupled) then + restartfiles = "n" + else + call get_MOM_input(dirs=dirs) + restartfiles = dirs%input_filename(1:1) + endif + call ESMF_LogWrite('MOM_cap:restartfile = '//trim(restartfiles), ESMF_LOGMSG_INFO) else if (runtype == "continue") then ! hybrid or branch or continuos runs if (cesm_coupled) then call ESMF_LogWrite('MOM_cap: restart requested, using rpointer.ocn', ESMF_LOGMSG_WARNING) call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_VMGet(vm, localPet=localPet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (localPet == 0) then ! this hard coded for rpointer.ocn right now @@ -698,7 +641,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) endif ! broadcast attribute set on master task to all tasks call ESMF_VMBroadcast(vm, restartfiles, count=len(restartfiles), rootPet=0, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return else call ESMF_LogWrite('MOM_cap: restart requested, use input.nml', ESMF_LOGMSG_WARNING) endif @@ -708,6 +651,10 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ocean_public%is_ocean_pe = .true. call ocean_model_init(ocean_public, ocean_state, time0, time_start, input_restart_file=trim(restartfiles)) + ! GMM, this call is not needed for NCAR. Check with EMC. + ! 
If this can be deleted, perhaps we should also delete ocean_model_flux_init + call ocean_model_flux_init(ocean_state) + call ocean_model_init_sfc(ocean_state, ocean_public) call mpp_get_compute_domain(ocean_public%domain, isc, iec, jsc, jec) @@ -727,6 +674,8 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) Ice_ocean_boundary% seaice_melt_heat (isc:iec,jsc:jec),& Ice_ocean_boundary% seaice_melt (isc:iec,jsc:jec), & Ice_ocean_boundary% mi (isc:iec,jsc:jec), & + Ice_ocean_boundary% ice_fraction (isc:iec,jsc:jec), & + Ice_ocean_boundary% u10_sqr (isc:iec,jsc:jec), & Ice_ocean_boundary% p (isc:iec,jsc:jec), & Ice_ocean_boundary% lrunoff_hflx (isc:iec,jsc:jec), & Ice_ocean_boundary% frunoff_hflx (isc:iec,jsc:jec), & @@ -748,36 +697,45 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) Ice_ocean_boundary%seaice_melt = 0.0 Ice_ocean_boundary%seaice_melt_heat= 0.0 Ice_ocean_boundary%mi = 0.0 + Ice_ocean_boundary%ice_fraction = 0.0 + Ice_ocean_boundary%u10_sqr = 0.0 Ice_ocean_boundary%p = 0.0 Ice_ocean_boundary%lrunoff_hflx = 0.0 Ice_ocean_boundary%frunoff_hflx = 0.0 Ice_ocean_boundary%lrunoff = 0.0 Ice_ocean_boundary%frunoff = 0.0 + call query_ocean_state(ocean_state, use_waves=use_waves, wave_method=wave_method) + if (use_waves) then + call query_ocean_state(ocean_state, NumWaveBands=Ice_ocean_boundary%num_stk_bands) + if (wave_method == "EFACTOR") then + allocate( Ice_ocean_boundary%lamult(isc:iec,jsc:jec) ) + Ice_ocean_boundary%lamult = 0.0 + else + allocate ( Ice_ocean_boundary% ustk0 (isc:iec,jsc:jec), & + Ice_ocean_boundary% vstk0 (isc:iec,jsc:jec), & + Ice_ocean_boundary% ustkb (isc:iec,jsc:jec,Ice_ocean_boundary%num_stk_bands), & + Ice_ocean_boundary% vstkb (isc:iec,jsc:jec,Ice_ocean_boundary%num_stk_bands), & + Ice_ocean_boundary%stk_wavenumbers (Ice_ocean_boundary%num_stk_bands)) + Ice_ocean_boundary%ustk0 = 0.0 + Ice_ocean_boundary%vstk0 = 0.0 + call query_ocean_state(ocean_state, WaveNumbers=Ice_ocean_boundary%stk_wavenumbers, unscale=.true.) + Ice_ocean_boundary%ustkb = 0.0 + Ice_ocean_boundary%vstkb = 0.0 + endif + endif + ! Consider adding this: + ! if (.not.use_waves) Ice_ocean_boundary%num_stk_bands = 0 + ocean_internalstate%ptr%ocean_state_type_ptr => ocean_state call ESMF_GridCompSetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (len_trim(scalar_field_name) > 0) then call fld_list_add(fldsToOcn_num, fldsToOcn, trim(scalar_field_name), "will_provide") call fld_list_add(fldsFrOcn_num, fldsFrOcn, trim(scalar_field_name), "will_provide") end if - if (cesm_coupled) then - !call fld_list_add(fldsToOcn_num, fldsToOcn, "Sw_lamult" , "will provide") - !call fld_list_add(fldsToOcn_num, fldsToOcn, "Sw_ustokes" , "will provide") - !call fld_list_add(fldsToOcn_num, fldsToOcn, "Sw_vstokes" , "will provide") - !call fld_list_add(fldsToOcn_num, fldsToOcn, "Sw_hstokes" , "will provide") - !call fld_list_add(fldsToOcn_num, fldsToOcn, "Fioi_melth" , "will provide") - !call fld_list_add(fldsToOcn_num, fldsToOcn, "Fioi_meltw" , "will provide") - !call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_fswpen" , "will provide") - else - !call fld_list_add(fldsToOcn_num, fldsToOcn, "mass_of_overlying_sea_ice" , "will provide") - !call fld_list_add(fldsFrOcn_num, fldsFrOcn, "sea_lev" , "will provide") - endif !--------- import fields ------------- call fld_list_add(fldsToOcn_num, fldsToOcn, "mean_salt_rate" , "will provide") ! from ice @@ -795,11 +753,28 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call fld_list_add(fldsToOcn_num, fldsToOcn, "inst_pres_height_surface" , "will provide") call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_rofl" , "will provide") !-> liquid runoff call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_rofi" , "will provide") !-> ice runoff + call fld_list_add(fldsToOcn_num, fldsToOcn, "Si_ifrac" , "will provide") !-> ice fraction + call fld_list_add(fldsToOcn_num, fldsToOcn, "So_duu10n" , "will provide") !-> wind^2 at 10m call fld_list_add(fldsToOcn_num, fldsToOcn, "mean_fresh_water_to_ocean_rate", "will provide") call fld_list_add(fldsToOcn_num, fldsToOcn, "net_heat_flx_to_ocn" , "will provide") !These are not currently used and changing requires a nuopc dictionary change !call fld_list_add(fldsToOcn_num, fldsToOcn, "mean_runoff_heat_flx" , "will provide") !call fld_list_add(fldsToOcn_num, fldsToOcn, "mean_calving_heat_flx" , "will provide") + if (use_waves) then + if (wave_method == "EFACTOR") then + call fld_list_add(fldsToOcn_num, fldsToOcn, "Sw_lamult" , "will provide") + else + if (Ice_ocean_boundary%num_stk_bands > 3) then + call MOM_error(FATAL, "Number of Stokes Bands > 3, NUOPC cap not set up for this") + endif + call fld_list_add(fldsToOcn_num, fldsToOcn, "eastward_partitioned_stokes_drift_1" , "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "northward_partitioned_stokes_drift_1", "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "eastward_partitioned_stokes_drift_2" , "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "northward_partitioned_stokes_drift_2", "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "eastward_partitioned_stokes_drift_3" , "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "northward_partitioned_stokes_drift_3", "will provide") + endif + endif !--------- export fields ------------- call fld_list_add(fldsFrOcn_num, fldsFrOcn, "ocean_mask" , "will provide") @@ -814,18 +789,12 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) do n = 1,fldsToOcn_num call NUOPC_Advertise(importState, standardName=fldsToOcn(n)%stdname, name=fldsToOcn(n)%shortname, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return enddo do n = 1,fldsFrOcn_num call NUOPC_Advertise(exportState, standardName=fldsFrOcn(n)%stdname, name=fldsFrOcn(n)%shortname, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return enddo end subroutine InitializeAdvertise @@ -873,6 +842,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) integer :: lbnd3,ubnd3,lbnd4,ubnd4 integer :: nblocks_tot logical :: found + logical :: isPresent, isSet integer(ESMF_KIND_I4), pointer :: dataPtr_mask(:,:) real(ESMF_KIND_R8), pointer :: dataPtr_area(:,:) real(ESMF_KIND_R8), pointer :: dataPtr_xcen(:,:) @@ -881,6 +851,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) real(ESMF_KIND_R8), pointer :: dataPtr_ycor(:,:) integer :: mpicom integer :: localPet + integer :: localPeCount integer :: lsize integer :: ig,jg, ni,nj,k integer, allocatable :: gindex(:) ! global index space @@ -888,16 +859,29 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) character(len=256) :: cvalue character(len=256) :: frmt ! format specifier for several error msgs character(len=512) :: err_msg ! error messages + integer :: spatialDim + integer :: numOwnedElements + type(ESMF_Array) :: elemMaskArray + real(ESMF_KIND_R8) , pointer :: ownedElemCoords(:) + real(ESMF_KIND_R8) , pointer :: lat(:), latMesh(:) + real(ESMF_KIND_R8) , pointer :: lon(:), lonMesh(:) + integer(ESMF_KIND_I4) , pointer :: mask(:), maskMesh(:) + real(ESMF_KIND_R8) :: diff_lon, diff_lat + real :: eps_omesh + real(ESMF_KIND_R8) :: L2_to_rad2 + type(ESMF_Field) :: lfield + real(ESMF_KIND_R8), allocatable :: mesh_areas(:) + real(ESMF_KIND_R8), allocatable :: model_areas(:) + real(ESMF_KIND_R8), pointer :: dataPtr_mesh_areas(:) + real(ESMF_KIND_R8) :: max_mod2med_areacor + real(ESMF_KIND_R8) :: max_med2mod_areacor + real(ESMF_KIND_R8) :: min_mod2med_areacor + real(ESMF_KIND_R8) :: min_med2mod_areacor + real(ESMF_KIND_R8) :: max_mod2med_areacor_glob + real(ESMF_KIND_R8) :: max_med2mod_areacor_glob + real(ESMF_KIND_R8) :: min_mod2med_areacor_glob + real(ESMF_KIND_R8) :: min_med2mod_areacor_glob character(len=*), parameter :: subname='(MOM_cap:InitializeRealize)' - integer :: spatialDim - integer :: numOwnedElements - type(ESMF_Array) :: elemMaskArray - real(ESMF_KIND_R8) , pointer :: ownedElemCoords(:) - real(ESMF_KIND_R8) , pointer :: lat(:), latMesh(:) - real(ESMF_KIND_R8) , pointer :: lon(:), lonMesh(:) - integer(ESMF_KIND_I4) , pointer :: mask(:), maskMesh(:) - real(ESMF_KIND_R8) :: diff_lon, diff_lat - real :: eps_omesh !-------------------------------- rc = ESMF_SUCCESS @@ -909,10 +893,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) !---------------------------------------------------------------------------- call ESMF_GridCompGetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return Ice_ocean_boundary => ocean_internalstate%ptr%ice_ocean_boundary_type_ptr ocean_public => ocean_internalstate%ptr%ocean_public_type_ptr @@ -923,16 +904,32 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) !---------------------------------------------------------------------------- call ESMF_VMGetCurrent(vm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_VMGet(vm, petCount=npet, mpiCommunicator=mpicom, localPet=localPet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !--------------------------------- + ! openmp threads + !--------------------------------- + + call ESMF_VMGet(vm, pet=localPet, peCount=localPeCount, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if(localPeCount == 1) then + call NUOPC_CompAttributeGet(gcomp, name="nthreads", value=cvalue, & + isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) nthrds + else + nthrds = localPeCount + endif + else + nthrds = localPeCount + endif + +!$ call omp_set_num_threads(nthrds) !--------------------------------- ! global mom grid size @@ -940,11 +937,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) call mpp_get_global_domain(ocean_public%domain, xsize=nxg, ysize=nyg) write(tmpstr,'(a,2i6)') subname//' nxg,nyg = ',nxg,nyg - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) !--------------------------------- ! number of tiles per PET, assumed to be 1, and number of pes (tiles) total @@ -953,19 +946,11 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ntiles=mpp_get_ntile_count(ocean_public%domain) ! this is tiles on this pe if (ntiles /= 1) then rc = ESMF_FAILURE - call ESMF_LogWrite(subname//' ntiles must be 1', ESMF_LOGMSG_ERROR, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(subname//' ntiles must be 1', ESMF_LOGMSG_ERROR) endif ntiles=mpp_get_domain_npes(ocean_public%domain) write(tmpstr,'(a,1i6)') subname//' ntiles = ',ntiles - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) !--------------------------------- ! 
get start and end indices of each tile and their PET @@ -974,14 +959,10 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) allocate(xb(ntiles),xe(ntiles),yb(ntiles),ye(ntiles),pe(ntiles)) call mpp_get_compute_domains(ocean_public%domain, xbegin=xb, xend=xe, ybegin=yb, yend=ye) call mpp_get_pelist(ocean_public%domain, pe) - if (debug > 0) then + if (dbug > 1) then do n = 1,ntiles write(tmpstr,'(a,6i6)') subname//' tiles ',n,pe(n),xb(n),xe(n),yb(n),ye(n) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) enddo endif @@ -1014,23 +995,14 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) enddo DistGrid = ESMF_DistGridCreate(arbSeqIndexList=gindex, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! read in the mesh call NUOPC_CompAttributeGet(gcomp, name='mesh_ocn', value=cvalue, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return EMeshTemp = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (localPet == 0) then write(logunit,*)'mesh file for mom6 domain is ',trim(cvalue) @@ -1038,17 +1010,11 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ! recreate the mesh using the above distGrid EMesh = ESMF_MeshCreate(EMeshTemp, elementDistgrid=Distgrid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! 
Check for consistency of lat, lon and mask between mesh and mom6 grid call ESMF_MeshGet(Emesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return allocate(ownedElemCoords(spatialDim*numOwnedElements)) allocate(lonMesh(numOwnedElements), lon(numOwnedElements)) @@ -1056,25 +1022,16 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) allocate(maskMesh(numOwnedElements), mask(numOwnedElements)) call ESMF_MeshGet(Emesh, ownedElemCoords=ownedElemCoords, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return do n = 1,numOwnedElements lonMesh(n) = ownedElemCoords(2*n-1) latMesh(n) = ownedElemCoords(2*n) end do elemMaskArray = ESMF_ArrayCreate(Distgrid, maskMesh, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_MeshGet(Emesh, elemMaskArray=elemMaskArray, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call mpp_get_compute_domain(ocean_public%domain, isc, iec, jsc, jec) n = 0 @@ -1115,22 +1072,75 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) end if end do - deallocate(ownedElemCoords) - deallocate(lonMesh , lon ) - deallocate(latMesh , lat ) - deallocate(maskMesh, mask) ! realize the import and export fields using the mesh call MOM_RealizeFields(importState, fldsToOcn_num, fldsToOcn, "Ocn import", mesh=Emesh, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call MOM_RealizeFields(exportState, fldsFrOcn_num, fldsFrOcn, "Ocn export", mesh=Emesh, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !--------------------------------- + ! determine flux area correction factors - module variables in mom_cap_methods + !--------------------------------- + ! Area correction factors are ONLY valid for meshes that are read in - so do not need them for + ! grids that are calculated internally + + ! Determine mesh areas for regridding + call ESMF_MeshGet(Emesh, numOwnedElements=numOwnedElements, spatialDim=spatialDim, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + allocate (mod2med_areacor(numOwnedElements)) + allocate (med2mod_areacor(numOwnedElements)) + mod2med_areacor(:) = 1._ESMF_KIND_R8 + med2mod_areacor(:) = 1._ESMF_KIND_R8 + +#ifdef CESMCOUPLED + ! Determine model areas and flux correction factors (module variables in mom_) + call ESMF_StateGet(exportState, itemName=trim(fldsFrOcn(2)%stdname), field=lfield, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_FieldRegridGetArea(lfield, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_FieldGet(lfield, farrayPtr=dataPtr_mesh_areas, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + allocate(mesh_areas(numOwnedElements)) + allocate(model_areas(numOwnedElements)) + k = 0 + do j = ocean_grid%jsc, ocean_grid%jec + do i = ocean_grid%isc, ocean_grid%iec + k = k + 1 ! 
Increment position within gindex + if (mask(k) /= 0) then + mesh_areas(k) = dataPtr_mesh_areas(k) + model_areas(k) = ocean_grid%US%L_to_m**2 * ocean_grid%AreaT(i,j) / ocean_grid%Rad_Earth**2 + mod2med_areacor(k) = model_areas(k) / mesh_areas(k) + med2mod_areacor(k) = mesh_areas(k) / model_areas(k) + end if + end do + end do + deallocate(mesh_areas) + deallocate(model_areas) + + ! Write diagnostic output for correction factors + min_mod2med_areacor = minval(mod2med_areacor) + max_mod2med_areacor = maxval(mod2med_areacor) + min_med2mod_areacor = minval(med2mod_areacor) + max_med2mod_areacor = maxval(med2mod_areacor) + call shr_mpi_max(max_mod2med_areacor, max_mod2med_areacor_glob, mpicom) + call shr_mpi_min(min_mod2med_areacor, min_mod2med_areacor_glob, mpicom) + call shr_mpi_max(max_med2mod_areacor, max_med2mod_areacor_glob, mpicom) + call shr_mpi_min(min_med2mod_areacor, min_med2mod_areacor_glob, mpicom) + if (localPet == 0) then + write(logunit,'(2A,2g23.15,A )') trim(subname),' : min_mod2med_areacor, max_mod2med_areacor ',& + min_mod2med_areacor_glob, max_mod2med_areacor_glob, 'MOM6' + write(logunit,'(2A,2g23.15,A )') trim(subname),' : min_med2mod_areacor, max_med2mod_areacor ',& + min_med2mod_areacor_glob, max_med2mod_areacor_glob, 'MOM6' + end if +#endif + + deallocate(ownedElemCoords) + deallocate(lonMesh , lon ) + deallocate(latMesh , lat ) + deallocate(maskMesh, mask) else if (geomtype == ESMF_GEOMTYPE_GRID) then @@ -1152,19 +1162,16 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) deBlockList(2,2,n) = ye(n) petMap(n) = pe(n) ! write(tmpstr,'(a,3i8)') subname//' iglo = ',n,deBlockList(1,1,n),deBlockList(1,2,n) - ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) ! write(tmpstr,'(a,3i8)') subname//' jglo = ',n,deBlockList(2,1,n),deBlockList(2,2,n) - ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) ! write(tmpstr,'(a,2i8)') subname//' pe = ',n,petMap(n) - ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) !--- assume a tile with starting index of 1 has an equivalent wraparound tile on the other side enddo delayout = ESMF_DELayoutCreate(petMap, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! rsd this assumes tripole grid, but sometimes in CESM a bipole ! grid is used -- need to introduce conditional logic here @@ -1175,18 +1182,12 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) call ESMF_DistGridConnectionSet(connectionList(1), tileIndexA=1, & tileIndexB=1, positionVector=(/nxg+1, 2*nyg+1/), & orientationVector=(/-1, -2/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! periodic boundary condition along first dimension call ESMF_DistGridConnectionSet(connectionList(2), tileIndexA=1, & tileIndexB=1, positionVector=(/nxg, 0/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return distgrid = ESMF_DistGridCreate(minIndex=(/1,1/), maxIndex=(/nxg,nyg/), & ! 
indexflag = ESMF_INDEX_DELOCAL, & @@ -1195,10 +1196,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) delayout=delayout, & connectionList=connectionList, & rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(xb,xe,yb,ye,pe) deallocate(connectionList) @@ -1207,32 +1205,18 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) deallocate(petMap) call ESMF_DistGridGet(distgrid=distgrid, localDE=0, elementCount=cnt, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return allocate(indexList(cnt)) write(tmpstr,'(a,i8)') subname//' distgrid cnt= ',cnt - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) call ESMF_DistGridGet(distgrid=distgrid, localDE=0, seqIndexList=indexList, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return write(tmpstr,'(a,4i8)') subname//' distgrid list= ',& indexList(1),indexList(cnt),minval(indexList), maxval(indexList) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) deallocate(IndexList) @@ -1242,91 +1226,55 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) gridEdgeLWidth=(/0,0/), gridEdgeUWidth=(/0,1/), & coordSys = ESMF_COORDSYS_SPH_DEG, & rc = rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CORNER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridAddItem(gridIn, itemFlag=ESMF_GRIDITEM_MASK, itemTypeKind=ESMF_TYPEKIND_I4, & staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Attach area to the Grid optionally. By default the cell areas are computed. 
if(grid_attach_area) then call ESMF_GridAddItem(gridIn, itemFlag=ESMF_GRIDITEM_AREA, itemTypeKind=ESMF_TYPEKIND_R8, & staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif call ESMF_GridGetCoord(gridIn, coordDim=1, & staggerloc=ESMF_STAGGERLOC_CENTER, & farrayPtr=dataPtr_xcen, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridGetCoord(gridIn, coordDim=2, & staggerloc=ESMF_STAGGERLOC_CENTER, & farrayPtr=dataPtr_ycen, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridGetCoord(gridIn, coordDim=1, & staggerloc=ESMF_STAGGERLOC_CORNER, & farrayPtr=dataPtr_xcor, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridGetCoord(gridIn, coordDim=2, & staggerloc=ESMF_STAGGERLOC_CORNER, & farrayPtr=dataPtr_ycor, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridGetItem(gridIn, itemflag=ESMF_GRIDITEM_MASK, & staggerloc=ESMF_STAGGERLOC_CENTER, & farrayPtr=dataPtr_mask, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if(grid_attach_area) then call ESMF_GridGetItem(gridIn, itemflag=ESMF_GRIDITEM_AREA, & staggerloc=ESMF_STAGGERLOC_CENTER, & farrayPtr=dataPtr_area, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif ! load up area, mask, center and corner values @@ -1349,13 +1297,13 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ubnd4 = ubound(dataPtr_xcor,2) write(tmpstr,*) subname//' iscjsc = ',isc,iec,jsc,jec - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' lbub12 = ',lbnd1,ubnd1,lbnd2,ubnd2 - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' lbub34 = ',lbnd3,ubnd3,lbnd4,ubnd4 - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) if (iec-isc /= ubnd1-lbnd1 .or. 
jec-jsc /= ubnd2-lbnd2) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & @@ -1394,39 +1342,32 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) enddo write(tmpstr,*) subname//' mask = ',minval(dataPtr_mask),maxval(dataPtr_mask) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) if(grid_attach_area) then write(tmpstr,*) subname//' area = ',minval(dataPtr_area),maxval(dataPtr_area) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) endif write(tmpstr,*) subname//' xcen = ',minval(dataPtr_xcen),maxval(dataPtr_xcen) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' ycen = ',minval(dataPtr_ycen),maxval(dataPtr_ycen) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' xcor = ',minval(dataPtr_xcor),maxval(dataPtr_xcor) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' ycor = ',minval(dataPtr_ycor),maxval(dataPtr_ycor) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) gridOut = gridIn ! for now out same as in call MOM_RealizeFields(importState, fldsToOcn_num, fldsToOcn, "Ocn import", grid=gridIn, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call MOM_RealizeFields(exportState, fldsFrOcn_num, fldsFrOcn, "Ocn export", grid=gridOut, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif !--------------------------------- @@ -1434,20 +1375,13 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) !--------------------------------- if (len_trim(scalar_field_name) > 0) then - call State_SetScalar(dble(nxg),scalar_field_idx_grid_nx, exportState, localPet, & + call State_SetScalar(real(nxg,ESMF_KIND_R8),scalar_field_idx_grid_nx, exportState, localPet, & scalar_field_name, scalar_field_count, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return - call State_SetScalar(dble(nyg),scalar_field_idx_grid_ny, exportState, localPet, & + call State_SetScalar(real(nyg,ESMF_KIND_R8),scalar_field_idx_grid_ny, exportState, localPet, & scalar_field_name, scalar_field_count, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif !--------------------------------- @@ -1461,10 +1395,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) !call NUOPC_Write(exportState, fileNamePrefix='post_realize_field_ocn_export_', & ! timeslice=1, relaxedFlag=.true., rc=rc) - !if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - ! line=__LINE__, & - ! file=__FILE__)) & - ! return ! bail out + !if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine InitializeRealize @@ -1498,21 +1429,15 @@ subroutine DataInitialize(gcomp, rc) ! 
query the Component for its clock, importState and exportState call ESMF_GridCompGet(gcomp, clock=clock, importState=importState, exportState=exportState, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(currTime, timestring=timestr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridCompGetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return Ice_ocean_boundary => ocean_internalstate%ptr%ice_ocean_boundary_type_ptr ocean_public => ocean_internalstate%ptr%ocean_public_type_ptr @@ -1520,62 +1445,44 @@ subroutine DataInitialize(gcomp, rc) call get_ocean_grid(ocean_state, ocean_grid) call mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_StateGet(exportState, itemCount=fieldCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return allocate(fieldNameList(fieldCount)) call ESMF_StateGet(exportState, itemNameList=fieldNameList, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return do n=1, fieldCount call ESMF_StateGet(exportState, itemName=fieldNameList(n), field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_SetAttribute(field, name="Updated", value="true", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return enddo deallocate(fieldNameList) ! check whether all Fields in the exportState are "Updated" if (NUOPC_IsUpdated(exportState)) then call NUOPC_CompAttributeSet(gcomp, name="InitializeDataComplete", value="true", rc=rc) - call ESMF_LogWrite("MOM6 - Initialize-Data-Dependency SATISFIED!!!", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + call ESMF_LogWrite("MOM6 - Initialize-Data-Dependency SATISFIED!!!", ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif if(write_diagnostics) then do n = 1,fldsFrOcn_num fldname = fldsFrOcn(n)%shortname call ESMF_StateGet(exportState, itemName=trim(fldname), itemType=itemType, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (itemType /= ESMF_STATEITEM_NOTFOUND) then call ESMF_StateGet(exportState, itemName=trim(fldname), field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_FieldWrite(field, fileName='field_init_ocn_export_'//trim(timestr)//'.nc', & timeslice=1, overwrite=overwrite_timeslice, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif enddo endif @@ -1636,46 +1543,28 @@ subroutine ModelAdvance(gcomp, rc) call shr_file_setLogUnit (logunit) +!$ call omp_set_num_threads(nthrds) + ! query the Component for its clock, importState and exportState call ESMF_GridCompGet(gcomp, clock=clock, importState=importState, & exportState=exportState, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! HERE THE MODEL ADVANCES: currTime -> currTime + timeStep call ESMF_ClockPrint(clock, options="currTime", & preString="------>Advancing OCN from: ", unit=msgString, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - call ESMF_LogWrite(subname//trim(msgString), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_LogWrite(subname//trim(msgString), ESMF_LOGMSG_INFO) call ESMF_ClockGet(clock, startTime=startTime, currTime=currTime, & timeStep=timeStep, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimePrint(currTime + timeStep, & preString="--------------------------------> to: ", unit=msgString, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO) call ESMF_TimeGet(currTime, timestring=import_timestr, rc=rc) call ESMF_TimeGet(currTime+timestep, timestring=export_timestr, rc=rc) @@ -1687,16 +1576,12 @@ subroutine ModelAdvance(gcomp, rc) ! Apply ocean lag for startup runs: !--------------- - if (cesm_coupled) then + if (cesm_coupled .or. (.not.use_coldstart)) then if (trim(runtype) == "initial") then ! 
Do not call MOM6 timestepping routine if the first cpl tstep of a startup run if (currTime == startTime) then - call ESMF_LogWrite("MOM6 - Skipping the first coupling timestep", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_LogWrite("MOM6 - Skipping the first coupling timestep", ESMF_LOGMSG_INFO) do_advance = .false. else do_advance = .true. @@ -1705,18 +1590,10 @@ subroutine ModelAdvance(gcomp, rc) if (do_advance) then ! If the second cpl tstep of a startup run, step back a cpl tstep and advance for two cpl tsteps if (currTime == startTime + timeStep) then - call ESMF_LogWrite("MOM6 - Stepping back one coupling timestep", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_LogWrite("MOM6 - Stepping back one coupling timestep", ESMF_LOGMSG_INFO) Time = esmf2fms_time(currTime-timeStep) ! i.e., startTime - call ESMF_LogWrite("MOM6 - doubling the coupling timestep", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_LogWrite("MOM6 - doubling the coupling timestep", ESMF_LOGMSG_INFO) Time_step_coupled = 2 * esmf2fms_time(timeStep) endif end if @@ -1727,10 +1604,7 @@ subroutine ModelAdvance(gcomp, rc) if (do_advance) then call ESMF_GridCompGetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return Ice_ocean_boundary => ocean_internalstate%ptr%ice_ocean_boundary_type_ptr ocean_public => ocean_internalstate%ptr%ocean_public_type_ptr @@ -1741,22 +1615,27 @@ subroutine ModelAdvance(gcomp, rc) !--------------- if (write_diagnostics) then - do n = 1,fldsToOcn_num - fldname = fldsToOcn(n)%shortname - call ESMF_StateGet(importState, itemName=trim(fldname), itemType=itemType, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - if (itemType /= ESMF_STATEITEM_NOTFOUND) then - call ESMF_StateGet(importState, itemName=trim(fldname), field=lfield, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_FieldWrite(lfield, fileName='field_ocn_import_'//trim(import_timestr)//'.nc', & - timeslice=1, overwrite=overwrite_timeslice, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif - enddo + do n = 1,fldsToOcn_num + fldname = fldsToOcn(n)%shortname + call ESMF_StateGet(importState, itemName=trim(fldname), itemType=itemType, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (itemType /= ESMF_STATEITEM_NOTFOUND) then + call ESMF_StateGet(importState, itemName=trim(fldname), field=lfield, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + call ESMF_FieldWrite(lfield, fileName='field_ocn_import_'//trim(import_timestr)//'.nc', & + timeslice=1, overwrite=overwrite_timeslice, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + endif + enddo endif + if (dbug > 0) then + call state_diagnose(importState,subname//':IS ',rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + end if + !--------------- ! 
Get ocean grid !--------------- @@ -1768,10 +1647,7 @@ subroutine ModelAdvance(gcomp, rc) !--------------- call mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !--------------- ! Update MOM6 @@ -1786,11 +1662,12 @@ subroutine ModelAdvance(gcomp, rc) !--------------- call mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (dbug > 0) then + call state_diagnose(exportState,subname//':ES ',rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + end if endif !--------------- @@ -1798,61 +1675,42 @@ subroutine ModelAdvance(gcomp, rc) !--------------- call ESMF_ClockGetAlarm(clock, alarmname='stop_alarm', alarm=stop_alarm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !--------------- ! If restart alarm exists and is ringing - write restart file !--------------- - call ESMF_ClockGetAlarm(clock, alarmname='restart_alarm', alarm=restart_alarm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - - if (ESMF_AlarmIsRinging(restart_alarm, rc=rc)) then - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - - ! turn off the alarm - call ESMF_AlarmRingerOff(restart_alarm, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (restart_mode == 'alarms') then + call ESMF_ClockGetAlarm(clock, alarmname='restart_alarm', alarm=restart_alarm, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (ESMF_AlarmIsRinging(restart_alarm, rc=rc)) then + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! turn off the alarm + call ESMF_AlarmRingerOff(restart_alarm, rc=rc ) + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! determine restart filename - call ESMF_ClockGetNextTime(clock, MyTime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - call ESMF_TimeGet (MyTime, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out - if (cesm_coupled) then + call ESMF_ClockGetNextTime(clock, MyTime, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_TimeGet (MyTime, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc ) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (cesm_coupled) then call NUOPC_CompAttributeGet(gcomp, name='case_name', value=casename, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_VMGet(vm, localPet=localPet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return write(restartname,'(A,".mom6.r.",I4.4,"-",I2.2,"-",I2.2,"-",I5.5)') & trim(casename), year, month, day, seconds - - call ESMF_LogWrite("MOM_cap: Writing restart : "//trim(restartname), ESMF_LOGMSG_INFO, rc=rc) - + call ESMF_LogWrite("MOM_cap: Writing restart : "//trim(restartname), ESMF_LOGMSG_INFO) ! write restart file(s) call ocean_model_restart(ocean_state, restartname=restartname, num_rest_files=num_rest_files) - if (localPet == 0) then ! Write name of restart file in the rpointer file - this is currently hard-coded for the ocean open(newunit=writeunit, file='rpointer.ocn', form='formatted', status='unknown', iostat=iostat) @@ -1874,28 +1732,27 @@ subroutine ModelAdvance(gcomp, rc) write(writeunit,'(a)') trim(restartname) // trim(suffix) // '.nc' enddo endif - close(writeunit) endif - else - ! write the final restart without a timestamp - if (ESMF_AlarmIsRinging(stop_alarm, rc=rc)) then - write(restartname,'(A)')"MOM.res" - else - write(restartname,'(A,I4.4,"-",I2.2,"-",I2.2,"-",I2.2,"-",I2.2,"-",I2.2)') & - "MOM.res.", year, month, day, hour, minute, seconds - endif - - call ESMF_LogWrite("MOM_cap: Writing restart : "//trim(restartname), ESMF_LOGMSG_INFO, rc=rc) + else ! not cesm_coupled + ! write the final restart without a timestamp + if (ESMF_AlarmIsRinging(stop_alarm, rc=rc)) then + write(restartname,'(A)')"MOM.res" + else + write(restartname,'(A,I4.4,"-",I2.2,"-",I2.2,"-",I2.2,"-",I2.2,"-",I2.2)') & + "MOM.res.", year, month, day, hour, minute, seconds + endif + call ESMF_LogWrite("MOM_cap: Writing restart : "//trim(restartname), ESMF_LOGMSG_INFO) - ! write restart file(s) - call ocean_model_restart(ocean_state, restartname=restartname) - end if + ! write restart file(s) + call ocean_model_restart(ocean_state, restartname=restartname) + endif - if (is_root_pe()) then - write(logunit,*) subname//' writing restart file ',trim(restartname) - endif - endif + if (is_root_pe()) then + write(logunit,*) subname//' writing restart file ',trim(restartname) + endif + endif + end if ! restart_mode !--------------- ! 
Write diagnostics @@ -1905,15 +1762,15 @@ subroutine ModelAdvance(gcomp, rc) do n = 1,fldsFrOcn_num fldname = fldsFrOcn(n)%shortname call ESMF_StateGet(exportState, itemName=trim(fldname), itemType=itemType, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (itemType /= ESMF_STATEITEM_NOTFOUND) then call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_FieldWrite(lfield, fileName='field_ocn_export_'//trim(export_timestr)//'.nc', & timeslice=1, overwrite=overwrite_timeslice, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif enddo endif @@ -1949,23 +1806,14 @@ subroutine ModelSetRunClock(gcomp, rc) ! query the Component for its clock, importState and exportState call NUOPC_ModelGet(gcomp, driverClock=dclock, modelClock=mclock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(dclock, currTime=dcurrtime, timeStep=dtimestep, & stopTime=dstoptime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(mclock, currTime=mcurrtime, timeStep=mtimestep, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !-------------------------------- ! check that the current time in the model and driver are the same @@ -1973,16 +1821,10 @@ subroutine ModelSetRunClock(gcomp, rc) if (mcurrtime /= dcurrtime) then call ESMF_TimeGet(dcurrtime, timeString=dtimestring, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(mcurrtime, timeString=mtimestring, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_LogSetError(ESMF_RC_VAL_WRONG, & msg=subname//": ERROR in time consistency: "//trim(dtimestring)//" != "//trim(mtimestring), & @@ -1997,10 +1839,7 @@ subroutine ModelSetRunClock(gcomp, rc) mstoptime = mcurrtime + dtimestep call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (first_time) then !-------------------------------- @@ -2014,18 +1853,18 @@ subroutine ModelSetRunClock(gcomp, rc) if (cesm_coupled) then call NUOPC_CompAttributeGet(gcomp, name="restart_option", value=restart_option, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! 
If restart_option is set then must also have set either restart_n or restart_ymd call NUOPC_CompAttributeGet(gcomp, name="restart_n", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) restart_n endif call NUOPC_CompAttributeGet(gcomp, name="restart_ymd", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) restart_ymd endif @@ -2040,66 +1879,64 @@ subroutine ModelSetRunClock(gcomp, rc) else call NUOPC_CompAttributeGet(gcomp, name="restart_n", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return - ! If restart_option is set then must also have set either restart_n or restart_ymd + ! If restart_n is set and non-zero, then restart_option must be available from config if (isPresent .and. isSet) then - call ESMF_LogWrite(subname//" Restart_n = "//trim(cvalue), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(subname//" Restart_n = "//trim(cvalue), ESMF_LOGMSG_INFO) read(cvalue,*) restart_n if(restart_n /= 0)then call NUOPC_CompAttributeGet(gcomp, name="restart_option", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) restart_option call ESMF_LogWrite(subname//" Restart_option = "//restart_option, & - ESMF_LOGMSG_INFO, rc=rc) + ESMF_LOGMSG_INFO) + else + call ESMF_LogSetError(ESMF_RC_VAL_WRONG, & + msg=subname//": ERROR both restart_n and restart_option must be set ", & + line=__LINE__, file=__FILE__, rcToReturn=rc) + return endif - + ! not used in nems call NUOPC_CompAttributeGet(gcomp, name="restart_ymd", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) restart_ymd - call ESMF_LogWrite(subname//" Restart_ymd = "//trim(cvalue), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(subname//" Restart_ymd = "//trim(cvalue), ESMF_LOGMSG_INFO) endif else - restart_option = 'none' - call ESMF_LogWrite(subname//" Set restart option = "//restart_option, ESMF_LOGMSG_INFO, rc=rc) + ! restart_n is zero, restarts will be written at finalize only (no alarm control) + restart_mode = 'no_alarms' + call ESMF_LogWrite(subname//" Restarts will be written at finalize only", ESMF_LOGMSG_INFO) endif endif endif - call AlarmInit(mclock, & - alarm = restart_alarm, & - option = trim(restart_option), & - opt_n = restart_n, & - opt_ymd = restart_ymd, & - RefTime = mcurrTime, & - alarmname = 'restart_alarm', rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - - call ESMF_AlarmSet(restart_alarm, clock=mclock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out - call ESMF_LogWrite(subname//" Restart alarm is Created and Set", ESMF_LOGMSG_INFO, rc=rc) + if (restart_mode == 'alarms') then + call AlarmInit(mclock, & + alarm = restart_alarm, & + option = trim(restart_option), & + opt_n = restart_n, & + opt_ymd = restart_ymd, & + RefTime = mcurrTime, & + alarmname = 'restart_alarm', rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + call ESMF_AlarmSet(restart_alarm, clock=mclock, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_LogWrite(subname//" Restart alarm is Created and Set", ESMF_LOGMSG_INFO) + end if ! create a 1-shot alarm at the driver stop time stop_alarm = ESMF_AlarmCreate(mclock, ringtime=dstopTime, name = "stop_alarm", rc=rc) - call ESMF_LogWrite(subname//" Create Stop alarm", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + call ESMF_LogWrite(subname//" Create Stop alarm", ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(dstoptime, timestring=timestr, rc=rc) - call ESMF_LogWrite("Stop Alarm will ring at : "//trim(timestr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite("Stop Alarm will ring at : "//trim(timestr), ESMF_LOGMSG_INFO) first_time = .false. @@ -2110,20 +1947,13 @@ subroutine ModelSetRunClock(gcomp, rc) !-------------------------------- call ESMF_ClockAdvance(mclock,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine ModelSetRunClock - !=============================================================================== !> Called by NUOPC at the end of the run to clean up. @@ -2145,54 +1975,34 @@ subroutine ocean_model_finalize(gcomp, rc) type(ESMF_Alarm), allocatable :: alarmList(:) integer :: alarmCount character(len=64) :: timestamp - character(len=64) :: alarm_name logical :: write_restart - integer :: i character(len=*),parameter :: subname='(MOM_cap:ocean_model_finalize)' write(*,*) 'MOM: --- finalize called ---' rc = ESMF_SUCCESS call ESMF_GridCompGetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ocean_public => ocean_internalstate%ptr%ocean_public_type_ptr ocean_state => ocean_internalstate%ptr%ocean_state_type_ptr call NUOPC_ModelGet(gcomp, modelClock=clock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(clock, currTime=currTime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return Time = esmf2fms_time(currTime) - ! 
Check if the clock has a restart alarm - and if it does do not write a restart - call ESMF_ClockGet(clock, alarmCount=alarmCount, rc = rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return - - allocate(alarmList(1:alarmCount)) - call ESMF_ClockGetAlarmList(clock, alarmlistflag=ESMF_ALARMLIST_ALL, alarmList=alarmList, rc = rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return - - write_restart = .true. - do i = 1,alarmCount - call ESMF_AlarmGet(alarmlist(i), name=alarm_name, rc = rc) - if(trim(alarm_name) == 'restart_alarm' .and. ESMF_AlarmIsEnabled(alarmlist(i), rc=rc))write_restart = .false. - enddo - deallocate(alarmList) + ! Do not write a restart unless mode is no_alarms + if (restart_mode == 'no_alarms') then + write_restart = .true. + else + write_restart = .false. + end if + if (write_restart)call ESMF_LogWrite("No Restart Alarm, writing restart at Finalize ", & + ESMF_LOGMSG_INFO) - if(write_restart)call ESMF_LogWrite("No Restart Alarm, writing restart at Finalize ", ESMF_LOGMSG_INFO, rc=rc) call ocean_model_end(ocean_public, ocean_State, Time, write_restart=write_restart) call field_manager_end() @@ -2223,16 +2033,15 @@ subroutine State_SetScalar(value, scalar_id, State, mytask, scalar_name, scalar_ rc = ESMF_SUCCESS call ESMF_StateGet(State, itemName=trim(scalar_name), field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (mytask == 0) then call ESMF_FieldGet(field, farrayPtr=farrayptr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (scalar_id < 0 .or. scalar_id > scalar_count) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & - msg=subname//": ERROR in scalar_id", & - line=__LINE__, file=__FILE__, rcToReturn=rc) + msg=subname//": ERROR in scalar_id", line=__LINE__, file=__FILE__, rcToReturn=rc) return endif @@ -2270,57 +2079,36 @@ subroutine MOM_RealizeFields(state, nfields, field_defs, tag, grid, mesh, rc) if (field_defs(i)%shortname == scalar_field_name) then call ESMF_LogWrite(subname // tag // " Field "// trim(field_defs(i)%stdname) // " is connected on root pe.", & - ESMF_LOGMSG_INFO, & - line=__LINE__, & - file=__FILE__, & - rc=rc) + ESMF_LOGMSG_INFO) call SetScalarField(field, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return else call ESMF_LogWrite(subname // tag // " Field "// trim(field_defs(i)%stdname) // " is connected.", & - ESMF_LOGMSG_INFO, & - line=__LINE__, & - file=__FILE__, & - rc=rc) + ESMF_LOGMSG_INFO) if (present(grid)) then field = ESMF_FieldCreate(grid, ESMF_TYPEKIND_R8, indexflag=ESMF_INDEX_DELOCAL, & name=field_defs(i)%shortname, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! initialize fldptr to zero call ESMF_FieldGet(field, farrayPtr=fldptr2d, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return fldptr2d(:,:) = 0.0 else if (present(mesh)) then field = ESMF_FieldCreate(mesh=mesh, typekind=ESMF_TYPEKIND_R8, meshloc=ESMF_MESHLOC_ELEMENT, & name=field_defs(i)%shortname, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! initialize fldptr to zero call ESMF_FieldGet(field, farrayPtr=fldptr1d, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return fldptr1d(:) = 0.0 endif @@ -2329,24 +2117,16 @@ subroutine MOM_RealizeFields(state, nfields, field_defs, tag, grid, mesh, rc) ! Realize connected field call NUOPC_Realize(state, field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return else ! field is not connected call ESMF_LogWrite(subname // tag // " Field "// trim(field_defs(i)%stdname) // " is not connected.", & - ESMF_LOGMSG_INFO, & - line=__LINE__, & - file=__FILE__, & - rc=rc) + ESMF_LOGMSG_INFO) + ! remove a not connected Field from State call ESMF_StateRemove(state, (/field_defs(i)%shortname/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif @@ -2369,24 +2149,15 @@ subroutine SetScalarField(field, rc) ! create a DistGrid with a single index space element, which gets mapped onto DE 0. distgrid = ESMF_DistGridCreate(minIndex=(/1/), maxIndex=(/1/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return grid = ESMF_GridCreate(distgrid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! num of scalar values field = ESMF_FieldCreate(name=trim(scalar_field_name), grid=grid, typekind=ESMF_TYPEKIND_R8, & ungriddedLBound=(/1/), ungriddedUBound=(/scalar_field_count/), gridToFieldMap=(/2/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out
+    if (ChkErr(rc,__LINE__,u_FILE_u)) return

 end subroutine SetScalarField

diff --git a/config_src/nuopc_driver/mom_cap_methods.F90 b/config_src/drivers/nuopc_cap/mom_cap_methods.F90
similarity index 60%
rename from config_src/nuopc_driver/mom_cap_methods.F90
rename to config_src/drivers/nuopc_cap/mom_cap_methods.F90
index 70915d0e95..bb9743bb84 100644
--- a/config_src/nuopc_driver/mom_cap_methods.F90
+++ b/config_src/drivers/nuopc_cap/mom_cap_methods.F90
@@ -5,7 +5,7 @@ module MOM_cap_methods
 use ESMF, only: ESMF_TimeInterval, ESMF_TimeIntervalGet
 use ESMF, only: ESMF_State, ESMF_StateGet
 use ESMF, only: ESMF_Field, ESMF_FieldGet, ESMF_FieldCreate
-use ESMF, only: ESMF_GridComp, ESMF_Mesh, ESMF_Grid, ESMF_GridCreate
+use ESMF, only: ESMF_GridComp, ESMF_Mesh, ESMF_MeshGet, ESMF_Grid, ESMF_GridCreate
 use ESMF, only: ESMF_DistGrid, ESMF_DistGridCreate
 use ESMF, only: ESMF_KIND_R8, ESMF_SUCCESS, ESMF_LogFoundError
 use ESMF, only: ESMF_LOGERR_PASSTHRU, ESMF_LOGMSG_INFO, ESMF_LOGWRITE
@@ -13,7 +13,8 @@ module MOM_cap_methods
 use ESMF, only: ESMF_StateItem_Flag, ESMF_STATEITEM_NOTFOUND
 use ESMF, only: ESMF_GEOMTYPE_FLAG, ESMF_GEOMTYPE_GRID, ESMF_GEOMTYPE_MESH
 use ESMF, only: ESMF_RC_VAL_OUTOFRANGE, ESMF_INDEX_DELOCAL, ESMF_MESHLOC_ELEMENT
-use ESMF, only: ESMF_TYPEKIND_R8
+use ESMF, only: ESMF_TYPEKIND_R8, ESMF_FIELDSTATUS_COMPLETE
+use ESMF, only: ESMF_FieldStatus_Flag, ESMF_LOGMSG_ERROR, ESMF_FAILURE, ESMF_MAXSTR
 use ESMF, only: operator(/=), operator(==)
 use MOM_ocean_model_nuopc, only: ocean_public_type, ocean_state_type
 use MOM_surface_forcing_nuopc, only: ice_ocean_boundary_type
@@ -28,6 +29,8 @@ module MOM_cap_methods
 public :: mom_set_geomtype
 public :: mom_import
 public :: mom_export
+public :: state_diagnose
+public :: ChkErr

 private :: State_getImport
 private :: State_setExport
@@ -43,6 +46,14 @@ module MOM_cap_methods
 type(ESMF_GeomType_Flag) :: geomtype !< SMF type describing type of
                                      !! geometry (mesh or grid)

+! area correction factors for fluxes sent to and received from the mediator
+! these factors are ONLY valid for meshes that are read in - so do not need them for
+! grids that are calculated internally
+
+real(ESMF_KIND_R8), public, allocatable :: mod2med_areacor(:) ! ratios of model areas to input mesh areas
+real(ESMF_KIND_R8), public, allocatable :: med2mod_areacor(:) ! ratios of input mesh areas to model areas
+character(len=*),parameter :: u_FILE_u = __FILE__
+
 contains

 !> Sets module variable geometry type
@@ -70,6 +81,8 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary,
   character(len=128) :: fldname
   real(ESMF_KIND_R8), allocatable :: taux(:,:)
   real(ESMF_KIND_R8), allocatable :: tauy(:,:)
+  real(ESMF_KIND_R8), allocatable :: stkx1(:,:),stkx2(:,:),stkx3(:,:)
+  real(ESMF_KIND_R8), allocatable :: stky1(:,:),stky2(:,:),stky3(:,:)
   character(len=*) , parameter :: subname = '(mom_import)'

   rc = ESMF_SUCCESS
@@ -86,60 +99,42 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary,
   !----
   call state_getimport(importState, 'inst_pres_height_surface', &
        isc, iec, jsc, jec, ice_ocean_boundary%p, rc=rc)
-  if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
-      line=__LINE__, &
-      file=__FILE__)) &
-      return ! bail out
+  if (ChkErr(rc,__LINE__,u_FILE_u)) return

   !----
   !
near-IR, direct shortwave (W/m2) !---- call state_getimport(importState, 'mean_net_sw_ir_dir_flx', & - isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_nir_dir, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_nir_dir, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! near-IR, diffuse shortwave (W/m2) !---- call state_getimport(importState, 'mean_net_sw_ir_dif_flx', & - isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_nir_dif, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_nir_dif, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! visible, direct shortwave (W/m2) !---- call state_getimport(importState, 'mean_net_sw_vis_dir_flx', & - isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_vis_dir, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_vis_dir, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! visible, diffuse shortwave (W/m2) !---- call state_getimport(importState, 'mean_net_sw_vis_dif_flx', & - isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_vis_dif, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_vis_dif, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! ------- ! Net longwave radiation (W/m2) ! ------- call state_getimport(importState, 'mean_net_lw_flx', & - isc, iec, jsc, jec, ice_ocean_boundary%lw_flux, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%lw_flux, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! zonal and meridional surface stress @@ -147,17 +142,12 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, allocate (taux(isc:iec,jsc:jec)) allocate (tauy(isc:iec,jsc:jec)) - call state_getimport(importState, 'mean_zonal_moment_flx', isc, iec, jsc, jec, taux, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - call state_getimport(importState, 'mean_merid_moment_flx', isc, iec, jsc, jec, tauy, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - + call state_getimport(importState, 'mean_zonal_moment_flx', isc, iec, jsc, jec, taux, & + areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState, 'mean_merid_moment_flx', isc, iec, jsc, jec, tauy, & + areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! rotate taux and tauy from true zonal/meridional to local coordinates do j = jsc, jec @@ -177,41 +167,29 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, ! 
sensible heat flux (W/m2) !---- call state_getimport(importState, 'mean_sensi_heat_flx', & - isc, iec, jsc, jec, ice_ocean_boundary%t_flux, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%t_flux, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! evaporation flux (W/m2) !---- call state_getimport(importState, 'mean_evap_rate', & - isc, iec, jsc, jec, ice_ocean_boundary%q_flux, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%q_flux, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! liquid precipitation (rain) !---- call state_getimport(importState, 'mean_prec_rate', & - isc, iec, jsc, jec, ice_ocean_boundary%lprec, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%lprec, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! frozen precipitation (snow) !---- call state_getimport(importState, 'mean_fprec_rate', & - isc, iec, jsc, jec, ice_ocean_boundary%fprec, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%fprec, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! mass and heat content of liquid and frozen runoff @@ -222,85 +200,135 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, ! liquid runoff ice_ocean_boundary%lrunoff (:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'Foxx_rofl', & - isc, iec, jsc, jec, ice_ocean_boundary%lrunoff,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%lrunoff, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! ice runoff ice_ocean_boundary%frunoff (:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'Foxx_rofi', & - isc, iec, jsc, jec, ice_ocean_boundary%frunoff,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%frunoff, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! heat content of lrunoff ice_ocean_boundary%lrunoff_hflx(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mean_runoff_heat_flx', & - isc, iec, jsc, jec, ice_ocean_boundary%lrunoff_hflx, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%lrunoff_hflx, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! heat content of frunoff ice_ocean_boundary%frunoff_hflx(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mean_calving_heat_flx', & - isc, iec, jsc, jec, ice_ocean_boundary%frunoff_hflx, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%frunoff_hflx, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! 
salt flux from ice !---- ice_ocean_boundary%salt_flux(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mean_salt_rate', & - isc, iec, jsc, jec, ice_ocean_boundary%salt_flux,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - - ! !---- - ! ! snow&ice melt heat flux (W/m^2) - ! !---- + isc, iec, jsc, jec, ice_ocean_boundary%salt_flux, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !---- + ! snow&ice melt heat flux (W/m^2) + !---- ice_ocean_boundary%seaice_melt_heat(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'net_heat_flx_to_ocn', & - isc, iec, jsc, jec, ice_ocean_boundary%seaice_melt_heat,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - - ! !---- - ! ! snow&ice melt water flux (W/m^2) - ! !---- + isc, iec, jsc, jec, ice_ocean_boundary%seaice_melt_heat, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !---- + ! snow&ice melt water flux (W/m^2) + !---- ice_ocean_boundary%seaice_melt(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mean_fresh_water_to_ocean_rate', & - isc, iec, jsc, jec, ice_ocean_boundary%seaice_melt,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%seaice_melt, areacor=med2mod_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! mass of overlying ice !---- ! Note - preset values to 0, if field does not exist in importState, then will simply return ! and preset value will be used - ice_ocean_boundary%mi(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mass_of_overlying_ice', & - isc, iec, jsc, jec, ice_ocean_boundary%mi, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, ice_ocean_boundary%mi,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !---- + ! sea-ice fraction + !---- + ice_ocean_boundary%ice_fraction(:,:) = 0._ESMF_KIND_R8 + call state_getimport(importState, 'Si_ifrac', & + isc, iec, jsc, jec, ice_ocean_boundary%ice_fraction, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !---- + ! 10m wind squared + !---- + ice_ocean_boundary%u10_sqr(:,:) = 0._ESMF_KIND_R8 + call state_getimport(importState, 'So_duu10n', & + isc, iec, jsc, jec, ice_ocean_boundary%u10_sqr, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !---- + ! Langmuir enhancement factor + !---- + if ( associated(ice_ocean_boundary%lamult) ) then + ice_ocean_boundary%lamult (:,:) = 0._ESMF_KIND_R8 + call state_getimport(importState, 'Sw_lamult', & + isc, iec, jsc, jec, ice_ocean_boundary%lamult, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + endif + + !---- + ! 
Partitioned Stokes Drift Components + !---- + if ( associated(ice_ocean_boundary%ustkb) ) then + allocate(stkx1(isc:iec,jsc:jec)) + allocate(stky1(isc:iec,jsc:jec)) + allocate(stkx2(isc:iec,jsc:jec)) + allocate(stky2(isc:iec,jsc:jec)) + allocate(stkx3(isc:iec,jsc:jec)) + allocate(stky3(isc:iec,jsc:jec)) + + call state_getimport(importState,'eastward_partitioned_stokes_drift_1' , isc, iec, jsc, jec, stkx1,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'northward_partitioned_stokes_drift_1', isc, iec, jsc, jec, stky1,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'eastward_partitioned_stokes_drift_2' , isc, iec, jsc, jec, stkx2,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'northward_partitioned_stokes_drift_2', isc, iec, jsc, jec, stky2,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'eastward_partitioned_stokes_drift_3' , isc, iec, jsc, jec, stkx3,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'northward_partitioned_stokes_drift_3', isc, iec, jsc, jec, stky3,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! rotate from true zonal/meridional to local coordinates + do j = jsc, jec + jg = j + ocean_grid%jsc - jsc + do i = isc, iec + ig = i + ocean_grid%isc - isc + ice_ocean_boundary%ustkb(i,j,1) = ocean_grid%cos_rot(ig,jg)*stkx1(i,j) & + - ocean_grid%sin_rot(ig,jg)*stky1(i,j) + ice_ocean_boundary%vstkb(i,j,1) = ocean_grid%cos_rot(ig,jg)*stky1(i,j) & + + ocean_grid%sin_rot(ig,jg)*stkx1(i,j) + + ice_ocean_boundary%ustkb(i,j,2) = ocean_grid%cos_rot(ig,jg)*stkx2(i,j) & + - ocean_grid%sin_rot(ig,jg)*stky2(i,j) + ice_ocean_boundary%vstkb(i,j,2) = ocean_grid%cos_rot(ig,jg)*stky2(i,j) & + + ocean_grid%sin_rot(ig,jg)*stkx2(i,j) + + ice_ocean_boundary%ustkb(i,j,3) = ocean_grid%cos_rot(ig,jg)*stkx3(i,j) & + - ocean_grid%sin_rot(ig,jg)*stky3(i,j) + ice_ocean_boundary%vstkb(i,j,3) = ocean_grid%cos_rot(ig,jg)*stky3(i,j) & + + ocean_grid%sin_rot(ig,jg)*stkx3(i,j) + enddo + enddo + + deallocate(stkx1,stkx2,stkx3,stky1,stky2,stky3) + endif end subroutine mom_import @@ -339,16 +367,10 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, rc = ESMF_SUCCESS call ESMF_ClockGet( clock, timeStep=timeStep, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeIntervalGet( timeStep, s=dt_int, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Use Adcroft's rule of reciprocals; it does the right thing here. if (real(dt_int) > 0.0) then @@ -378,10 +400,7 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, call State_SetExport(exportState, 'ocean_mask', & isc, iec, jsc, jec, omask, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(omask) @@ -390,20 +409,14 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, ! 
------- call State_SetExport(exportState, 'sea_surface_temperature', & isc, iec, jsc, jec, ocean_public%t_surf, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! ------- ! Sea surface salinity ! ------- call State_SetExport(exportState, 's_surf', & isc, iec, jsc, jec, ocean_public%s_surf, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! ------- ! zonal and meridional currents @@ -430,17 +443,11 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, call State_SetExport(exportState, 'ocn_current_zonal', & isc, iec, jsc, jec, ocz_rot, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call State_SetExport(exportState, 'ocn_current_merid', & isc, iec, jsc, jec, ocm_rot, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(ocz, ocm, ocz_rot, ocm_rot) @@ -451,10 +458,7 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, if (itemFlag /= ESMF_STATEITEM_NOTFOUND) then call State_SetExport(exportState, 'So_bldepth', & isc, iec, jsc, jec, ocean_public%obld, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif ! ------- @@ -477,11 +481,8 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, enddo call State_SetExport(exportState, 'freezing_melting_potential', & - isc, iec, jsc, jec, melt_potential, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + isc, iec, jsc, jec, melt_potential, ocean_grid, areacor=mod2med_areacor, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(melt_potential) @@ -492,10 +493,7 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, if (itemFlag /= ESMF_STATEITEM_NOTFOUND) then call State_SetExport(exportState, 'sea_level', & isc, iec, jsc, jec, ocean_public%sea_lev, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif !---------------- @@ -598,17 +596,11 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, call State_SetExport(exportState, 'sea_surface_slope_zonal', & isc, iec, jsc, jec, dhdx_rot, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call State_SetExport(exportState, 'sea_surface_slope_merid', & isc, iec, jsc, jec, dhdy_rot, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(ssh, dhdx, dhdy, dhdx_rot, dhdy_rot) @@ -627,15 +619,9 @@ subroutine State_GetFldPtr_1d(State, fldname, fldptr, rc) character(len=*),parameter :: subname='(MOM_cap:State_GetFldPtr)' call ESMF_StateGet(State, itemName=trim(fldname), field=lfield, rc=lrc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_FieldGet(lfield, farrayPtr=fldptr, rc=lrc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (present(rc)) rc = lrc @@ -654,22 +640,16 @@ subroutine State_GetFldPtr_2d(State, fldname, fldptr, rc) character(len=*),parameter :: subname='(MOM_cap:State_GetFldPtr)' call ESMF_StateGet(State, itemName=trim(fldname), field=lfield, rc=lrc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_FieldGet(lfield, farrayPtr=fldptr, rc=lrc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (present(rc)) rc = lrc end subroutine State_GetFldPtr_2d !> Map import state field to output array -subroutine State_GetImport(state, fldname, isc, iec, jsc, jec, output, do_sum, rc) +subroutine State_GetImport(state, fldname, isc, iec, jsc, jec, output, do_sum, areacor, rc) type(ESMF_State) , intent(in) :: state !< ESMF state character(len=*) , intent(in) :: fldname !< Field name integer , intent(in) :: isc !< The start i-index of cell centers within @@ -682,6 +662,8 @@ subroutine State_GetImport(state, fldname, isc, iec, jsc, jec, output, do_sum, r !! the computational domain real (ESMF_KIND_R8) , intent(inout) :: output(isc:iec,jsc:jec)!< Output 2D array logical, optional , intent(in) :: do_sum !< If true, sums the data + real (ESMF_KIND_R8), optional, intent(in) :: areacor(:) !< flux area correction factors + !! applicable to meshes integer , intent(out) :: rc !< Return code ! local variables @@ -702,20 +684,25 @@ subroutine State_GetImport(state, fldname, isc, iec, jsc, jec, output, do_sum, r ! get field pointer call state_getfldptr(state, trim(fldname), dataptr1d, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return - ! determine output array + ! determine output array and apply area correction if present n = 0 do j = jsc,jec do i = isc,iec n = n + 1 if (present(do_sum)) then - output(i,j) = output(i,j) + dataPtr1d(n) + if (present(areacor)) then + output(i,j) = output(i,j) + dataPtr1d(n) * areacor(n) + else + output(i,j) = output(i,j) + dataPtr1d(n) + end if else - output(i,j) = dataPtr1d(n) + if (present(areacor)) then + output(i,j) = dataPtr1d(n) * areacor(n) + else + output(i,j) = dataPtr1d(n) + end if endif enddo enddo @@ -723,10 +710,7 @@ subroutine State_GetImport(state, fldname, isc, iec, jsc, jec, output, do_sum, r else if (geomtype == ESMF_GEOMTYPE_GRID) then call state_getfldptr(state, trim(fldname), dataptr2d, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return lbnd1 = lbound(dataPtr2d,1) lbnd2 = lbound(dataPtr2d,2) @@ -750,7 +734,7 @@ subroutine State_GetImport(state, fldname, isc, iec, jsc, jec, output, do_sum, r end subroutine State_GetImport !> Map input array to export state -subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid, rc) +subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid, areacor, rc) type(ESMF_State) , intent(inout) :: state !< ESMF state character(len=*) , intent(in) :: fldname !< Field name integer , intent(in) :: isc !< The start i-index of cell centers within @@ -763,6 +747,8 @@ subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid !! the computational domain real (ESMF_KIND_R8) , intent(in) :: input(isc:iec,jsc:jec)!< Input 2D array type(ocean_grid_type) , intent(in) :: ocean_grid !< Ocean horizontal grid + real (ESMF_KIND_R8), optional, intent(in) :: areacor(:) !< flux area correction factors + !! applicable to meshes integer , intent(out) :: rc !< Return code ! local variables @@ -786,10 +772,7 @@ subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid if (geomtype == ESMF_GEOMTYPE_MESH) then call state_getfldptr(state, trim(fldname), dataptr1d, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return n = 0 do j = jsc, jec @@ -800,14 +783,16 @@ subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid dataPtr1d(n) = input(i,j) * ocean_grid%mask2dT(ig,jg) enddo enddo + if (present(areacor)) then + do n = 1,(size(dataPtr1d)) + dataPtr1d(n) = dataPtr1d(n) * areacor(n) + enddo + end if else if (geomtype == ESMF_GEOMTYPE_GRID) then call state_getfldptr(state, trim(fldname), dataptr2d, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return lbnd1 = lbound(dataPtr2d,1) lbnd2 = lbound(dataPtr2d,2) @@ -828,4 +813,190 @@ subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid end subroutine State_SetExport +!> This subroutine writes the minimum, maximum and sum of each field +!! contained within an ESMF state. +subroutine state_diagnose(State, string, rc) + + ! ---------------------------------------------- + ! Diagnose status of State + ! ---------------------------------------------- + + type(ESMF_State), intent(in) :: state !< An ESMF State + character(len=*), intent(in) :: string !< A string indicating whether the State is an + !! import or export State + integer , intent(out) :: rc !< Return code + + ! local variables + integer :: i,j,n + type(ESMf_Field) :: lfield + integer :: fieldCount, lrank + character(ESMF_MAXSTR) ,pointer :: lfieldnamelist(:) + real(ESMF_KIND_R8), pointer :: dataPtr1d(:) + real(ESMF_KIND_R8), pointer :: dataPtr2d(:,:) + character(len=*),parameter :: subname='(state_diagnose)' + character(len=ESMF_MAXSTR) :: msgString + ! 
---------------------------------------------- + + call ESMF_StateGet(state, itemCount=fieldCount, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + allocate(lfieldnamelist(fieldCount)) + + call ESMF_StateGet(state, itemNameList=lfieldnamelist, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + do n = 1, fieldCount + + call ESMF_StateGet(state, itemName=lfieldnamelist(n), field=lfield, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + call field_getfldptr(lfield, fldptr1=dataPtr1d, fldptr2=dataPtr2d, rank=lrank, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (lrank == 0) then + ! no local data + elseif (lrank == 1) then + if (size(dataPtr1d) > 0) then + write(msgString,'(A,3g14.7,i8)') trim(string)//': '//trim(lfieldnamelist(n)), & + minval(dataPtr1d), maxval(dataPtr1d), sum(dataPtr1d), size(dataPtr1d) + else + write(msgString,'(A,a)') trim(string)//': '//trim(lfieldnamelist(n))," no data" + endif + elseif (lrank == 2) then + if (size(dataPtr2d) > 0) then + write(msgString,'(A,3g14.7,i8)') trim(string)//': '//trim(lfieldnamelist(n)), & + minval(dataPtr2d), maxval(dataPtr2d), sum(dataPtr2d), size(dataPtr2d) + else + write(msgString,'(A,a)') trim(string)//': '//trim(lfieldnamelist(n))," no data" + endif + else + call ESMF_LogWrite(trim(subname)//": ERROR rank not supported ", ESMF_LOGMSG_ERROR) + rc = ESMF_FAILURE + return + endif + call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO) + enddo + + deallocate(lfieldnamelist) + +end subroutine state_diagnose + +!> Obtain a pointer to a rank 1 or rank 2 ESMF field +subroutine field_getfldptr(field, fldptr1, fldptr2, rank, abort, rc) + + ! input/output variables + type(ESMF_Field) , intent(in) :: field !< An ESMF field + real(ESMF_KIND_R8), pointer , intent(inout), optional :: fldptr1(:) !< A pointer to a rank 1 ESMF field + real(ESMF_KIND_R8), pointer , intent(inout), optional :: fldptr2(:,:) !< A pointer to a rank 2 ESMF field + integer , intent(out) , optional :: rank !< Field rank + logical , intent(in) , optional :: abort !< Abort code + integer , intent(out) , optional :: rc !< Return code + + ! local variables + type(ESMF_GeomType_Flag) :: geomtype + type(ESMF_FieldStatus_Flag) :: status + type(ESMF_Mesh) :: lmesh + integer :: lrank, nnodes, nelements + logical :: labort + character(len=*), parameter :: subname='(field_getfldptr)' + ! ---------------------------------------------- + + if (.not.present(rc)) then + call ESMF_LogWrite(trim(subname)//": ERROR rc not present ", & + ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) + rc = ESMF_FAILURE + return + endif + + rc = ESMF_SUCCESS + + labort = .true. 
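! An illustrative, self-contained sketch (not from the MOM6 sources) of the optional
! areacor path added to State_GetImport above: while copying a flux from the 1-d mesh
! field into the 2-d model array, each element is scaled by a mediator-to-model area
! ratio so that the area-integrated flux is conserved.  Array shapes and values below
! are invented for illustration only.
program areacor_sketch
  implicit none
  integer, parameter :: isc=1, iec=3, jsc=1, jec=2
  real :: dataPtr1d((iec-isc+1)*(jec-jsc+1))   ! flux on the mesh, in element order
  real :: areacor((iec-isc+1)*(jec-jsc+1))     ! med2mod area ratios, one per element
  real :: output(isc:iec,jsc:jec)              ! flux on the model grid
  integer :: i, j, n
  dataPtr1d = 100.0                            ! e.g. a heat flux in W/m2
  areacor   = 0.98                             ! mediator cells slightly larger here
  n = 0
  do j = jsc, jec
    do i = isc, iec
      n = n + 1
      output(i,j) = dataPtr1d(n) * areacor(n)  ! apply the per-element area correction
    enddo
  enddo
  write(*,'(a,f8.3)') 'corrected flux at (1,1): ', output(1,1)
end program areacor_sketch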
+ if (present(abort)) then + labort = abort + endif + lrank = -99 + + call ESMF_FieldGet(field, status=status, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (status /= ESMF_FIELDSTATUS_COMPLETE) then + lrank = 0 + if (labort) then + call ESMF_LogWrite(trim(subname)//": ERROR data not allocated ", ESMF_LOGMSG_INFO) + rc = ESMF_FAILURE + return + else + call ESMF_LogWrite(trim(subname)//": WARNING data not allocated ", ESMF_LOGMSG_INFO) + endif + else + + call ESMF_FieldGet(field, geomtype=geomtype, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (geomtype == ESMF_GEOMTYPE_GRID) then + call ESMF_FieldGet(field, rank=lrank, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + elseif (geomtype == ESMF_GEOMTYPE_MESH) then + call ESMF_FieldGet(field, rank=lrank, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_FieldGet(field, mesh=lmesh, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_MeshGet(lmesh, numOwnedNodes=nnodes, numOwnedElements=nelements, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (nnodes == 0 .and. nelements == 0) lrank = 0 + else + call ESMF_LogWrite(trim(subname)//": ERROR geomtype not supported ", & + ESMF_LOGMSG_INFO) + rc = ESMF_FAILURE + return + endif ! geomtype + + if (lrank == 0) then + call ESMF_LogWrite(trim(subname)//": no local nodes or elements ", & + ESMF_LOGMSG_INFO) + elseif (lrank == 1) then + if (.not.present(fldptr1)) then + call ESMF_LogWrite(trim(subname)//": ERROR missing rank=1 array ", & + ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) + rc = ESMF_FAILURE + return + endif + call ESMF_FieldGet(field, farrayPtr=fldptr1, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + elseif (lrank == 2) then + if (.not.present(fldptr2)) then + call ESMF_LogWrite(trim(subname)//": ERROR missing rank=2 array ", & + ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) + rc = ESMF_FAILURE + return + endif + call ESMF_FieldGet(field, farrayPtr=fldptr2, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call ESMF_LogWrite(trim(subname)//": ERROR in rank ", & + ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) + rc = ESMF_FAILURE + return + endif + + endif ! status + + if (present(rank)) then + rank = lrank + endif + +end subroutine field_getfldptr + +!> Returns true if ESMF_LogFoundError() determines that rc is an error code. Otherwise false. +logical function ChkErr(rc, line, file) + integer, intent(in) :: rc !< return code to check + integer, intent(in) :: line !< Integer source line number + character(len=*), intent(in) :: file !< User-provided source file name + integer :: lrc + ChkErr = .false. + lrc = rc + if (ESMF_LogFoundError(rcToCheck=lrc, msg=ESMF_LOGERR_PASSTHRU, line=line, file=file)) then + ChkErr = .true. 
+ endif +end function ChkErr + end module MOM_cap_methods diff --git a/config_src/nuopc_driver/mom_cap_time.F90 b/config_src/drivers/nuopc_cap/mom_cap_time.F90 similarity index 79% rename from config_src/nuopc_driver/mom_cap_time.F90 rename to config_src/drivers/nuopc_cap/mom_cap_time.F90 index daf9889c43..7f210bda71 100644 --- a/config_src/nuopc_driver/mom_cap_time.F90 +++ b/config_src/drivers/nuopc_cap/mom_cap_time.F90 @@ -16,6 +16,7 @@ module MOM_cap_time use ESMF , only : ESMF_RC_ARG_BAD use ESMF , only : operator(<), operator(/=), operator(+), operator(-), operator(*) , operator(>=) use ESMF , only : operator(<=), operator(>), operator(==) +use MOM_cap_methods , only : ChkErr implicit none; private @@ -125,22 +126,13 @@ subroutine AlarmInit( clock, alarm, option, & endif call ESMF_ClockGet(clock, CurrTime=CurrTime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(CurrTime, yy=cyy, mm=cmm, dd=cdd, s=csec, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(CurrTime, yy=nyy, mm=nmm, dd=ndd, s=nsec, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! initial guess of next alarm, this will be updated below if (present(RefTime)) then @@ -151,25 +143,16 @@ subroutine AlarmInit( clock, alarm, option, & ! Determine calendar call ESMF_ClockGet(clock, calendar=cal, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Determine inputs for call to create alarm selectcase (trim(option)) case (optNONE, optNever) call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeSet( NextAlarm, yy=9999, mm=12, dd=1, s=0, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .false. case (optDate) @@ -188,15 +171,9 @@ subroutine AlarmInit( clock, alarm, option, & return endif call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call TimeInit(NextAlarm, lymd, cal, tod=ltod, desc="optDate", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .false. case (optIfdays0) @@ -208,104 +185,65 @@ subroutine AlarmInit( clock, alarm, option, & return endif call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=opt_n, s=0, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .true. 
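! An illustrative, ESMF-free sketch (not from the MOM6 sources) of the error-handling
! idiom this patch adopts throughout: the four-line ESMF_LogFoundError block becomes a
! single "if (ChkErr(rc,__LINE__,u_FILE_u)) return".  The SUCCESS constant, module name
! and caller below are stand-ins for ESMF_SUCCESS and the real callers.
module chkerr_sketch
  implicit none
  integer, parameter :: SUCCESS = 0
contains
  !> Return true, and note where the failure was detected, if rc is not SUCCESS.
  logical function ChkErr(rc, line, file)
    integer, intent(in) :: rc            ! return code to check
    integer, intent(in) :: line          ! source line number, e.g. __LINE__
    character(len=*), intent(in) :: file ! source file name, e.g. u_FILE_u
    ChkErr = (rc /= SUCCESS)
    if (ChkErr) write(*,'(a,i0)') 'error detected in '//trim(file)//' at line ', line
  end function ChkErr
end module chkerr_sketch

program use_chkerr_sketch
  use chkerr_sketch, only : ChkErr
  implicit none
  integer :: rc
  rc = 1                                      ! pretend a library call failed
  if (ChkErr(rc, 42, 'caller.F90')) stop 1    ! the caller bails out in a single line
  ! on success, further calls would follow here
end program use_chkerr_sketch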
case (optNSteps, optNStep) call ESMF_ClockGet(clock, TimeStep=AlarmInterval, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNSeconds, optNSecond) call ESMF_TimeIntervalSet(AlarmInterval, s=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNMinutes, optNMinute) call ESMF_TimeIntervalSet(AlarmInterval, s=60, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNHours, optNHour) call ESMF_TimeIntervalSet(AlarmInterval, s=3600, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNDays, optNDay) call ESMF_TimeIntervalSet(AlarmInterval, d=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNMonths, optNMonth) call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optMonthly) call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=1, s=0, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .true. case (optNYears, optNYear) call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optYearly) call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeSet( NextAlarm, yy=cyy, mm=1, dd=1, s=0, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .true. 
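! An illustrative sketch (not from the MOM6 sources) of the arithmetic behind the
! interval options above: each case sets a base interval for its unit, scales it by
! opt_n, and the alarm then rings on the next multiple of that interval.  Plain integer
! seconds stand in for ESMF_TimeInterval/ESMF_Time, and the advance-to-next-multiple
! step is a simplification of the update_nextalarm handling.
program alarm_interval_sketch
  implicit none
  integer, parameter :: opt_n = 6                    ! e.g. "every 6 hours"
  integer :: interval_s, curr_s, next_s
  interval_s = 3600 * opt_n                          ! optNHours: base unit times opt_n
  curr_s = 5*3600 + 1800                             ! hypothetical current time, 05:30:00
  next_s = ((curr_s / interval_s) + 1) * interval_s  ! first multiple after "now"
  write(*,'(a,i0,a)') 'next alarm rings at ', next_s, ' s (06:00:00)'
end program alarm_interval_sketch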
case default @@ -332,10 +270,7 @@ subroutine AlarmInit( clock, alarm, option, & endif alarm = ESMF_AlarmCreate( name=lalarmname, clock=clock, ringTime=NextAlarm, ringInterval=AlarmInterval, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine AlarmInit @@ -378,10 +313,7 @@ subroutine TimeInit( Time, ymd, cal, tod, desc, logunit, rc) call date2ymd (ymd,yr,mon,day) call ESMF_TimeSet( Time, yy=yr, mm=mon, dd=day, s=ltod, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine TimeInit diff --git a/config_src/nuopc_driver/mom_ocean_model_nuopc.F90 b/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 similarity index 92% rename from config_src/nuopc_driver/mom_ocean_model_nuopc.F90 rename to config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 index 1ba3484ef9..aab909e56e 100644 --- a/config_src/nuopc_driver/mom_ocean_model_nuopc.F90 +++ b/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 @@ -15,6 +15,7 @@ module MOM_ocean_model_nuopc use MOM, only : extract_surface_state, allocate_surface_state, finish_MOM_initialization use MOM, only : get_MOM_state_elements, MOM_state_is_synchronized use MOM, only : get_ocean_stocks, step_offline +use MOM_coms, only : field_chksum use MOM_constants, only : CELSIUS_KELVIN_OFFSET, hlf use MOM_diag_mediator, only : diag_ctrl, enable_averaging, disable_averaging use MOM_diag_mediator, only : diag_mediator_close_registration, diag_mediator_end @@ -40,7 +41,6 @@ module MOM_ocean_model_nuopc use MOM_time_manager, only : operator(/=), operator(<=), operator(>=) use MOM_time_manager, only : operator(<), real_to_time_type, time_type_to_real use time_interp_external_mod,only : time_interp_external_init -use MOM_tracer_flow_control, only : call_tracer_register, tracer_flow_control_init use MOM_tracer_flow_control, only : call_tracer_flux_init use MOM_unit_scaling, only : unit_scale_type use MOM_variables, only : surface @@ -56,12 +56,13 @@ module MOM_ocean_model_nuopc use fms_mod, only : stdout use mpp_mod, only : mpp_chksum use MOM_EOS, only : gsw_sp_from_sr, gsw_pt_from_ct -use MOM_wave_interface, only: wave_parameters_CS, MOM_wave_interface_init -use MOM_wave_interface, only: MOM_wave_interface_init_lite, Update_Surface_Waves +use MOM_wave_interface, only : wave_parameters_CS, MOM_wave_interface_init +use MOM_wave_interface, only : Update_Surface_Waves, query_wave_properties use MOM_surface_forcing_nuopc, only : surface_forcing_init, convert_IOB_to_fluxes use MOM_surface_forcing_nuopc, only : convert_IOB_to_forces, ice_ocn_bnd_type_chksum use MOM_surface_forcing_nuopc, only : ice_ocean_boundary_type, surface_forcing_CS use MOM_surface_forcing_nuopc, only : forcing_save_restart +use iso_fortran_env, only : int64 #include @@ -78,7 +79,7 @@ module MOM_ocean_model_nuopc public ocean_model_restart public ice_ocn_bnd_type_chksum public ocean_public_type_chksum -public get_ocean_grid +public get_ocean_grid, query_ocean_state public get_eps_omesh !> This type is used for communication with other components via the FMS coupler. @@ -144,7 +145,8 @@ module MOM_ocean_model_nuopc integer :: nstep = 0 !< The number of calls to update_ocean. logical :: use_ice_shelf !< If true, the ice shelf model is enabled. - logical :: use_waves !< If true use wave coupling. 
+ logical,public :: use_waves !< If true use wave coupling. + character(len=40) :: wave_method !< Wave coupling method. logical :: icebergs_alter_ocean !< If true, the icebergs can change ocean the !! ocean dynamics and forcing fluxes. @@ -195,8 +197,8 @@ module MOM_ocean_model_nuopc !! about the vertical grid. type(unit_scale_type), pointer :: US => NULL() !< A pointer to a structure containing !! dimensional unit scaling factors. - type(MOM_control_struct), pointer :: & - MOM_CSp => NULL() !< A pointer to the MOM control structure + type(MOM_control_struct) :: MOM_CSp + !< MOM control structure type(ice_shelf_CS), pointer :: & Ice_shelf_CSp => NULL() !< A pointer to the control structure for the !! ice shelf model that couples with MOM6. This @@ -204,8 +206,8 @@ module MOM_ocean_model_nuopc type(marine_ice_CS), pointer :: & marine_ice_CSp => NULL() !< A pointer to the control structure for the !! marine ice effects module. - type(wave_parameters_cs), pointer :: & - Waves !< A structure containing pointers to the surface wave fields + type(wave_parameters_CS), pointer, public :: & + Waves => NULL() !< A pointer to the surface wave control structure type(surface_forcing_CS), pointer :: & forcing_CSp => NULL() !< A pointer to the MOM forcing control structure type(MOM_restart_CS), pointer :: & @@ -240,6 +242,7 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i !! tracer fluxes, and can be used to spawn related !! internal variables in the ice model. character(len=*), optional, intent(in) :: input_restart_file !< If present, name of restart file to read + ! Local variables real :: Rho0 ! The Boussinesq ocean density, in kg m-3. real :: G_Earth ! The gravitational acceleration in m s-2. @@ -247,7 +250,9 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i !! The actual depth over which melt potential is computed will !! min(HFrz, OBLD), where OBLD is the boundary layer depth. !! If HFrz <= 0 (default), melt potential will not be computed. - logical :: use_melt_pot!< If true, allocate melt_potential array + logical :: use_melt_pot !< If true, allocate melt_potential array + logical :: use_CFC !< If true, allocated arrays for surface CFCs. + ! This include declares and sets the variable "version". #include "version_variable.h" @@ -366,13 +371,17 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i use_melt_pot=.false. endif + call get_param(param_file, mdl, "USE_CFC_CAP", use_CFC, & + default=.false., do_not_log=.true.) + ! Consider using a run-time flag to determine whether to do the diagnostic ! vertical integrals, since the related 3-d sums are not negligible in cost. call allocate_surface_state(OS%sfc_state, OS%grid, use_temperature, & - do_integrals=.true., gas_fields_ocn=gas_fields_ocn, use_meltpot=use_melt_pot) + do_integrals=.true., gas_fields_ocn=gas_fields_ocn, & + use_meltpot=use_melt_pot, use_cfcs=use_CFC) call surface_forcing_init(Time_in, OS%grid, OS%US, param_file, OS%diag, & - OS%forcing_CSp, OS%restore_salinity, OS%restore_temp) + OS%forcing_CSp, OS%restore_salinity, OS%restore_temp, OS%use_waves) if (OS%use_ice_shelf) then call initialize_ice_shelf(param_file, OS%grid, OS%Time, OS%ice_shelf_CSp, & @@ -384,13 +393,15 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i call allocate_forcing_type(OS%grid, OS%fluxes, shelf=.true.) endif + call allocate_forcing_type(OS%grid, OS%fluxes, waves=.true.) 
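! An illustrative sketch (not from the MOM6 sources) of the optional-flag idiom used by
! the new arguments in this patch (use_waves in surface_forcing_init, use_cfcs in
! allocate_surface_state, unscale in query_ocean_state): copy each optional logical
! into a local with a safe default before acting on it.  All names below are invented.
module optional_flag_sketch
  implicit none
contains
  subroutine init_forcing(restore_salt, use_waves)
    logical, optional, intent(in) :: restore_salt  ! optional caller switches
    logical, optional, intent(in) :: use_waves
    logical :: do_restore, do_waves                ! locals with safe defaults
    do_restore = .false. ; if (present(restore_salt)) do_restore = restore_salt
    do_waves   = .false. ; if (present(use_waves))   do_waves   = use_waves
    write(*,'(2(a,l1,2x))') 'restore_salt=', do_restore, 'use_waves=', do_waves
  end subroutine init_forcing
end module optional_flag_sketch

program demo_optional_flag
  use optional_flag_sketch, only : init_forcing
  implicit none
  call init_forcing()                   ! both flags fall back to .false.
  call init_forcing(use_waves=.true.)   ! only the wave path is switched on
end program demo_optional_flag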
call get_param(param_file, mdl, "USE_WAVES", OS%Use_Waves, & "If true, enables surface wave modules.", default=.false.) - if (OS%use_waves) then - call MOM_wave_interface_init(OS%Time, OS%grid, OS%GV, OS%US, param_file, OS%Waves, OS%diag) - else - call MOM_wave_interface_init_lite(param_file) + if (OS%Use_Waves) then + call get_param(param_file, mdl, "WAVE_METHOD", OS%wave_method, default="EMPTY", do_not_log=.true.) endif + ! MOM_wave_interface_init is called regardless of the value of USE_WAVES because + ! it also initializes statistical waves. + call MOM_wave_interface_init(OS%Time, OS%grid, OS%GV, OS%US, param_file, OS%Waves, OS%diag) if (associated(OS%grid%Domain%maskmap)) then call initialize_ocean_public_type(OS%grid%Domain%mpp_domain, Ocean_sfc, & @@ -413,6 +424,8 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i endif + call extract_surface_state(OS%MOM_CSp, OS%sfc_state) + call close_param_file(param_file) call diag_mediator_close_registration(OS%diag) @@ -572,7 +585,9 @@ subroutine update_ocean_model(Ice_ocean_boundary, OS, Ocean_sfc, & call set_net_mass_forcing(OS%fluxes, OS%forces, OS%grid, OS%US) if (OS%use_waves) then - call Update_Surface_Waves(OS%grid, OS%GV, OS%US, OS%time, ocean_coupling_time_step, OS%waves) + if (OS%wave_method /= "EFACTOR") then + call Update_Surface_Waves(OS%grid, OS%GV, OS%US, OS%time, ocean_coupling_time_step, OS%waves, OS%forces) + endif endif if (OS%nstep==0) then @@ -733,7 +748,7 @@ subroutine ocean_model_end(Ocean_sfc, Ocean_state, Time, write_restart) type(time_type), intent(in) :: Time !< The model time, used for writing restarts. logical, intent(in) :: write_restart !< true => write restart file - call ocean_model_save_restart(Ocean_state, Time) + if(write_restart)call ocean_model_save_restart(Ocean_state, Time) call diag_mediator_end(Time, Ocean_state%diag, end_diag_manager=.true.) call MOM_end(Ocean_state%MOM_CSp) if (Ocean_state%use_ice_shelf) call ice_shelf_end(Ocean_state%Ice_shelf_CSp) @@ -1000,6 +1015,33 @@ subroutine ocean_model_flux_init(OS, verbosity) end subroutine ocean_model_flux_init +!> This interface allows certain properties that are stored in the ocean_state_type to be +!! obtained. +subroutine query_ocean_state(OS, use_waves, NumWaveBands, Wavenumbers, unscale, wave_method) + type(ocean_state_type), intent(in) :: OS !< The structure with the complete ocean state + logical, optional, intent(out) :: use_waves !< Indicates whether surface waves are in use + integer, optional, intent(out) :: NumWaveBands !< If present, this gives the number of + !! wavenumber partitions in the wave discretization + real, dimension(:), optional, intent(out) :: Wavenumbers !< If present, this gives the characteristic + !! wavenumbers of the wave discretization [m-1 or Z-1 ~> m-1] + logical, optional, intent(in) :: unscale !< If present and true, undo any dimensional + !! rescaling and return dimensional values in MKS units + character(len=40), optional, intent(out) :: wave_method !< Wave coupling method. + + logical :: undo_scaling + undo_scaling = .false. ; if (present(unscale)) undo_scaling = unscale + + if (present(use_waves)) use_waves = OS%use_waves + if (present(NumWaveBands)) call query_wave_properties(OS%Waves, NumBands=NumWaveBands) + if (present(Wavenumbers) .and. 
undo_scaling) then + call query_wave_properties(OS%Waves, WaveNumbers=WaveNumbers, US=OS%US) + elseif (present(Wavenumbers)) then + call query_wave_properties(OS%Waves, WaveNumbers=WaveNumbers) + endif + if (present(wave_method)) wave_method = OS%wave_method + +end subroutine query_ocean_state + !> Ocean_stock_pe - returns the integrated stocks of heat, water, etc. for conservation checks. !! Because of the way FMS is coded, only the root PE has the integrated amount, !! while all other PEs get 0. @@ -1042,26 +1084,29 @@ subroutine Ocean_stock_pe(OS, index, value, time_index) end subroutine Ocean_stock_pe -!> Write out FMS-format checsums on fields from the ocean surface state +!> Write out checksums for fields from the ocean surface state subroutine ocean_public_type_chksum(id, timestep, ocn) character(len=*), intent(in) :: id !< An identifying string for this call integer, intent(in) :: timestep !< The number of elapsed timesteps type(ocean_public_type), intent(in) :: ocn !< A structure containing various publicly !! visible ocean surface fields. - integer :: n, m, outunit + ! Local variables + integer(kind=int64) :: chks ! A checksum for the field + logical :: root ! True only on the root PE + integer :: outunit ! The output unit to write to outunit = stdout() - - write(outunit,*) "BEGIN CHECKSUM(ocean_type):: ", id, timestep - write(outunit,100) 'ocean%t_surf ',mpp_chksum(ocn%t_surf ) - write(outunit,100) 'ocean%s_surf ',mpp_chksum(ocn%s_surf ) - write(outunit,100) 'ocean%u_surf ',mpp_chksum(ocn%u_surf ) - write(outunit,100) 'ocean%v_surf ',mpp_chksum(ocn%v_surf ) - write(outunit,100) 'ocean%sea_lev ',mpp_chksum(ocn%sea_lev) - write(outunit,100) 'ocean%frazil ',mpp_chksum(ocn%frazil ) - write(outunit,100) 'ocean%melt_potential ',mpp_chksum(ocn%melt_potential) - + root = is_root_pe() + + if (root) write(outunit,*) "BEGIN CHECKSUM(ocean_type):: ", id, timestep + chks = field_chksum(ocn%t_surf ) ; if (root) write(outunit,100) 'ocean%t_surf ', chks + chks = field_chksum(ocn%s_surf ) ; if (root) write(outunit,100) 'ocean%s_surf ', chks + chks = field_chksum(ocn%u_surf ) ; if (root) write(outunit,100) 'ocean%u_surf ', chks + chks = field_chksum(ocn%v_surf ) ; if (root) write(outunit,100) 'ocean%v_surf ', chks + chks = field_chksum(ocn%sea_lev) ; if (root) write(outunit,100) 'ocean%sea_lev ', chks + chks = field_chksum(ocn%frazil ) ; if (root) write(outunit,100) 'ocean%frazil ', chks + chks = field_chksum(ocn%melt_potential) ; if (root) write(outunit,100) 'ocean%melt_potential ', chks call coupler_type_write_chksums(ocn%fields, outunit, 'ocean%') 100 FORMAT(" CHECKSUM::",A20," = ",Z20) diff --git a/config_src/nuopc_driver/mom_surface_forcing_nuopc.F90 b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 similarity index 88% rename from config_src/nuopc_driver/mom_surface_forcing_nuopc.F90 rename to config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 index 9ecf8bb01a..c704214930 100644 --- a/config_src/nuopc_driver/mom_surface_forcing_nuopc.F90 +++ b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 @@ -3,7 +3,7 @@ module MOM_surface_forcing_nuopc ! This file is part of MOM6. See LICENSE.md for the license. 
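! An illustrative, serial sketch (not from the MOM6 sources) of the checksum-report
! pattern introduced in ocean_public_type_chksum above: compute the checksum on every
! PE, but write it only on the root PE, using the same FORMAT as the patch.  The
! simple_chksum function and the root flag are trivial stand-ins for MOM_coms'
! field_chksum and is_root_pe().
program chksum_report_sketch
  use iso_fortran_env, only : int64
  implicit none
  real :: t_surf(4,3), s_surf(4,3)
  logical :: root
  integer(int64) :: chks
  root = .true.                      ! stand-in for is_root_pe()
  t_surf = 285.0 ; s_surf = 35.0
  chks = simple_chksum(t_surf) ; if (root) write(*,100) 'ocean%t_surf', chks
  chks = simple_chksum(s_surf) ; if (root) write(*,100) 'ocean%s_surf', chks
100 format(" CHECKSUM::",A20," = ",Z20)
contains
  integer(int64) function simple_chksum(a)   ! illustrative stand-in, not field_chksum
    real, intent(in) :: a(:,:)
    simple_chksum = nint(1000.0d0 * sum(real(a, kind(1.0d0))), int64)
  end function simple_chksum
end program chksum_report_sketch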
-use MOM_coms, only : reproducing_sum +use MOM_coms, only : reproducing_sum, field_chksum use MOM_constants, only : hlv, hlf use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end use MOM_cpu_clock, only : CLOCK_SUBCOMPONENT @@ -21,7 +21,9 @@ module MOM_surface_forcing_nuopc use MOM_forcing_type, only : allocate_mech_forcing, deallocate_mech_forcing use MOM_get_input, only : Get_MOM_Input, directories use MOM_grid, only : ocean_grid_type +use MOM_CFC_cap, only : CFC_cap_fluxes use MOM_io, only : slasher, write_version_number, MOM_read_data +use MOM_io, only : stdout use MOM_restart, only : register_restart_field, restart_init, MOM_restart_CS use MOM_restart, only : restart_init_end, save_restart, restore_state use MOM_string_functions, only : uppercase @@ -35,10 +37,10 @@ module MOM_surface_forcing_nuopc use coupler_types_mod, only : coupler_type_initialized, coupler_type_spawn use coupler_types_mod, only : coupler_type_copy_data use data_override_mod, only : data_override_init, data_override -use fms_mod, only : stdout use mpp_mod, only : mpp_chksum use time_interp_external_mod, only : init_external_field, time_interp_external use time_interp_external_mod, only : time_interp_external_init +use iso_fortran_env, only : int64 implicit none ; private @@ -79,7 +81,7 @@ module MOM_surface_forcing_nuopc !! the correction for the atmospheric (and sea-ice) !! pressure limited by max_p_surf instead of the !! full atmospheric pressure. The default is true. - + logical :: use_CFC !< enables the MOM_CFC_cap tracer package. real :: gust_const !< constant unresolved background gustiness for ustar [R L Z T-1 ~> Pa] logical :: read_gust_2d !< If true, use a 2-dimensional gustiness supplied !! from an input file. @@ -128,6 +130,7 @@ module MOM_surface_forcing_nuopc type(diag_ctrl), pointer :: diag !< structure to regulate diagnostic output timing character(len=200) :: inputdir !< directory where NetCDF input files are + character(len=200) :: CFC_BC_file !< filename with cfc11 and cfc12 data character(len=200) :: salt_restore_file !< filename for salt restoring data character(len=30) :: salt_restore_var_name !< name of surface salinity in salt_restore_file logical :: mask_srestore !< if true, apply a 2-dimensional mask to the surface @@ -141,9 +144,13 @@ module MOM_surface_forcing_nuopc !! temperature restoring fluxes. The masking file should be !! in inputdir/temp_restore_mask.nc and the field should !! be named 'mask' + character(len=30) :: cfc11_var_name !< name of cfc11 in CFC_BC_file + character(len=30) :: cfc12_var_name !< name of cfc11 in CFC_BC_file real, pointer, dimension(:,:) :: trestore_mask => NULL() !< mask for SST restoring - integer :: id_srestore = -1 !< id number for time_interp_external. - integer :: id_trestore = -1 !< id number for time_interp_external. + integer :: id_srestore = -1 !< id number for time_interp_external. + integer :: id_trestore = -1 !< id number for time_interp_external. + integer :: id_cfc11_atm = -1 !< id number for time_interp_external. + integer :: id_cfc12_atm = -1 !< id number for time_interp_external. ! 
Diagnostics handles type(forcing_diags), public :: handles @@ -178,11 +185,24 @@ module MOM_surface_forcing_nuopc real, pointer, dimension(:,:) :: frunoff_hflx =>NULL() !< heat content of frozen runoff [W/m2] real, pointer, dimension(:,:) :: p =>NULL() !< pressure of overlying ice and atmosphere !< on ocean surface [Pa] + real, pointer, dimension(:,:) :: ice_fraction =>NULL() !< fractional ice area [nondim] + real, pointer, dimension(:,:) :: u10_sqr =>NULL() !< wind speed squared at 10m [m2/s2] real, pointer, dimension(:,:) :: mi =>NULL() !< mass of ice [kg/m2] real, pointer, dimension(:,:) :: ice_rigidity =>NULL() !< rigidity of the sea ice, sea-ice and !! ice-shelves, expressed as a coefficient !! for divergence damping, as determined !! outside of the ocean model in [m3/s] + real, pointer, dimension(:,:) :: lamult => NULL() !< Langmuir enhancement factor [nondim] + real, pointer, dimension(:,:) :: ustk0 => NULL() !< Surface Stokes drift, zonal [m/s] + real, pointer, dimension(:,:) :: vstk0 => NULL() !< Surface Stokes drift, meridional [m/s] + real, pointer, dimension(:) :: stk_wavenumbers => NULL() !< The central wave number of Stokes bands [rad/m] + real, pointer, dimension(:,:,:) :: ustkb => NULL() !< Stokes Drift spectrum, zonal [m/s] + !! Horizontal - u points + !! 3rd dimension - wavenumber + real, pointer, dimension(:,:,:) :: vstkb => NULL() !< Stokes Drift spectrum, meridional [m/s] + !! Horizontal - v points + !! 3rd dimension - wavenumber + integer :: num_stk_bands !< Number of Stokes drift bands passed through the coupler integer :: xtype !< The type of the exchange - REGRID, REDIST or DIRECT type(coupler_2d_bc_type) :: fluxes !< A structure that may contain an array of !! named fields used for passive tracer fluxes. @@ -224,6 +244,8 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! local variables real, dimension(SZI_(G),SZJ_(G)) :: & + cfc11_atm, & !< CFC11 concentration in the atmopshere [???????] + cfc12_atm, & !< CFC11 concentration in the atmopshere [???????] data_restore, & !< The surface value toward which to restore [g/kg or degC] SST_anom, & !< Instantaneous sea surface temperature anomalies from a target value [deg C] SSS_anom, & !< Instantaneous sea surface salinity anomalies from a target value [g/kg] @@ -283,7 +305,8 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! flux type has been used. if (fluxes%dt_buoy_accum < 0) then call allocate_forcing_type(G, fluxes, water=.true., heat=.true., ustar=.true., & - press=.true., fix_accum_bug=CS%fix_ustar_gustless_bug) + press=.true., fix_accum_bug=CS%fix_ustar_gustless_bug, & + cfc=CS%use_CFC) call safe_alloc_ptr(fluxes%sw_vis_dir,isd,ied,jsd,jed) call safe_alloc_ptr(fluxes%sw_vis_dif,isd,ied,jsd,jed) @@ -423,12 +446,16 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, enddo ; enddo endif + ! Check that liquid runoff has a place to go if (CS%liquid_runoff_from_data .and. .not. associated(IOB%lrunoff)) then call MOM_error(FATAL, "liquid runoff is being added via data_override but "// & "there is no associated runoff in the IOB") return - end if + endif + if (associated(IOB%lrunoff)) then + if(CS%liquid_runoff_from_data)call data_override('OCN', 'runoff', IOB%lrunoff, Time) + endif ! obtain fluxes from IOB; note the staggering of indices i0 = is - isc_bnd ; j0 = js - jsc_bnd @@ -445,7 +472,6 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! 
liquid runoff flux if (associated(IOB%lrunoff)) then - if(CS%liquid_runoff_from_data)call data_override('OCN', 'runoff', IOB%lrunoff, Time) fluxes%lrunoff(i,j) = kg_m2_s_conversion * IOB%lrunoff(i-i0,j-j0) * G%mask2dT(i,j) endif @@ -485,15 +511,17 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, fluxes%seaice_melt(i,j) = kg_m2_s_conversion * G%mask2dT(i,j) * IOB%seaice_melt(i-i0,j-j0) fluxes%latent(i,j) = 0.0 + ! notice minus sign since fprec is positive into the ocean if (associated(IOB%fprec)) then - fluxes%latent(i,j) = fluxes%latent(i,j) + & + fluxes%latent(i,j) = fluxes%latent(i,j) - & IOB%fprec(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion - fluxes%latent_fprec_diag(i,j) = G%mask2dT(i,j) * IOB%fprec(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion + fluxes%latent_fprec_diag(i,j) = - G%mask2dT(i,j) * IOB%fprec(i-i0,j-j0)*US%W_m2_to_QRZ_T*CS%latent_heat_fusion endif + ! notice minus sign since frunoff is positive into the ocean if (associated(IOB%frunoff)) then - fluxes%latent(i,j) = fluxes%latent(i,j) + & + fluxes%latent(i,j) = fluxes%latent(i,j) - & IOB%frunoff(i-i0,j-j0) * US%W_m2_to_QRZ_T * CS%latent_heat_fusion - fluxes%latent_frunoff_diag(i,j) = G%mask2dT(i,j) * & + fluxes%latent_frunoff_diag(i,j) = - G%mask2dT(i,j) * & IOB%frunoff(i-i0,j-j0) * US%W_m2_to_QRZ_T * CS%latent_heat_fusion endif if (associated(IOB%q_flux)) then @@ -518,8 +546,27 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, fluxes%sw(i,j) = fluxes%sw_vis_dir(i,j) + fluxes%sw_vis_dif(i,j) + & fluxes%sw_nir_dir(i,j) + fluxes%sw_nir_dif(i,j) + ! sea ice fraction [nondim] + if (associated(IOB%ice_fraction) .and. associated(fluxes%ice_fraction)) & + fluxes%ice_fraction(i,j) = G%mask2dT(i,j) * IOB%ice_fraction(i-i0,j-j0) + ! 10-m wind speed squared [m2/s2] + if (associated(IOB%u10_sqr) .and. associated(fluxes%u10_sqr)) & + fluxes%u10_sqr(i,j) = US%m_to_L**2 * US%T_to_s**2 * G%mask2dT(i,j) * IOB%u10_sqr(i-i0,j-j0) + enddo ; enddo + ! wave to ocean coupling + if ( associated(IOB%lamult)) then + do j=js,je; do i=is,ie + if (IOB%ice_fraction(i-i0,j-j0) <= 0.05 ) then + fluxes%lamult(i,j) = IOB%lamult(i-i0,j-j0) + else + fluxes%lamult(i,j) = 1.0 + endif + enddo ; enddo + call pass_var(fluxes%lamult, G%domain, halo=1 ) + endif + ! applied surface pressure from atmosphere and cryosphere if (associated(IOB%p)) then if (CS%max_p_surf >= 0.0) then @@ -536,6 +583,11 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, fluxes%accumulate_p_surf = .true. ! Multiple components may contribute to surface pressure. endif + ! 
CFCs + if (CS%use_CFC) then + call CFC_cap_fluxes(fluxes, sfc_state, G, US, CS%Rho0, Time, CS%id_cfc11_atm, CS%id_cfc12_atm) + endif + if (associated(IOB%salt_flux)) then do j=js,je ; do i=is,ie fluxes%salt_flux(i,j) = G%mask2dT(i,j)*(fluxes%salt_flux(i,j) + kg_m2_s_conversion*IOB%salt_flux(i-i0,j-j0)) @@ -624,7 +676,7 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) real :: mass_eff !< effective mass of sea ice for rigidity [R Z ~> kg m-2] integer :: wind_stagger !< AGRID, BGRID_NE, or CGRID_NE (integers from MOM_domains) - integer :: i, j, is, ie, js, je, Isq, Ieq, Jsq, Jeq, i0, j0 + integer :: i, j, is, ie, js, je, Isq, Ieq, Jsq, Jeq, i0, j0, istk integer :: isd, ied, jsd, jed, IsdB, IedB, JsdB, JedB, isr, ier, jsr, jer integer :: isc_bnd, iec_bnd, jsc_bnd, jec_bnd @@ -663,6 +715,7 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) if ( (associated(IOB%area_berg) .and. (.not. associated(forces%area_berg))) .or. & (associated(IOB%mass_berg) .and. (.not. associated(forces%mass_berg))) ) & call allocate_mech_forcing(G, forces, iceberg=.true.) + if (associated(IOB%ice_rigidity)) then rigidity_at_h(:,:) = 0.0 call safe_alloc_ptr(forces%rigidity_ice_u,IsdB,IedB,jsd,jed) @@ -673,6 +726,12 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) if (associated(forces%rigidity_ice_u)) forces%rigidity_ice_u(:,:) = 0.0 if (associated(forces%rigidity_ice_v)) forces%rigidity_ice_v(:,:) = 0.0 + if ( associated(IOB%lamult) ) then + call allocate_mech_forcing(G, forces, waves=.true., num_stk_bands=0) + elseif ( associated(IOB%ustkb) ) then + call allocate_mech_forcing(G, forces, waves=.true., num_stk_bands=IOB%num_stk_bands) + endif + ! applied surface pressure from atmosphere and cryosphere if (CS%use_limited_P_SSH) then forces%p_surf_SSH => forces%p_surf @@ -778,7 +837,7 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) endif forces%ustar(i,j) = sqrt(gustiness*Irho0 + Irho0*tau_mag) enddo ; enddo - + call pass_vector(forces%taux, forces%tauy, G%Domain, halo=1) elseif (wind_stagger == AGRID) then call pass_vector(taux_at_h, tauy_at_h, G%Domain, To_All+Omit_Corners, stagger=AGRID, halo=1) @@ -804,7 +863,7 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) forces%ustar(i,j) = sqrt(gustiness*Irho0 + Irho0 * G%mask2dT(i,j) * & sqrt(taux_at_h(i,j)**2 + tauy_at_h(i,j)**2)) enddo ; enddo - + call pass_vector(forces%taux, forces%tauy, G%Domain, halo=1) else ! C-grid wind stresses. if (G%symmetric) & call fill_symmetric_edges(forces%taux, forces%tauy, G%Domain) @@ -830,6 +889,24 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) endif ! endif for wind related fields + ! wave to ocean coupling + if ( associated(IOB%ustkb) ) then + + forces%stk_wavenumbers(:) = IOB%stk_wavenumbers + do j=js,je; do i=is,ie + forces%ustk0(i,j) = IOB%ustk0(i-I0,j-J0) ! TODO: verify that the IOB and forces domains are indexed consistently here. + forces%vstk0(i,j) = IOB%vstk0(i-I0,j-J0) + enddo ; enddo + call pass_vector(forces%ustk0,forces%vstk0, G%domain ) + do istk = 1,IOB%num_stk_bands + do j=js,je; do i=is,ie + forces%ustkb(i,j,istk) = IOB%ustkb(i-I0,j-J0,istk) + forces%vstkb(i,j,istk) = IOB%vstkb(i-I0,j-J0,istk) + enddo; enddo + call pass_vector(forces%ustkb(:,:,istk),forces%vstkb(:,:,istk), G%domain ) + enddo + endif + !
sea ice related dynamic fields if (associated(IOB%ice_rigidity)) then call pass_var(rigidity_at_h, G%Domain, halo=1) @@ -1005,7 +1082,7 @@ subroutine forcing_save_restart(CS, G, Time, directory, time_stamped, & end subroutine forcing_save_restart !> Initialize the surface forcing, including setting parameters and allocating permanent memory. -subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, restore_temp) +subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, restore_temp, use_waves) type(time_type), intent(in) :: Time !< The current model time type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type @@ -1018,6 +1095,8 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, !! restoring will be applied in this model. logical, optional, intent(in) :: restore_temp !< If present and true surface temperature !! restoring will be applied in this model. + logical, optional, intent(in) :: use_waves !< If present and true, use waves and activate + !! the corresponding wave forcing diagnostics ! Local variables real :: utide ! The RMS tidal velocity [Z T-1 ~> m s-1]. @@ -1125,6 +1204,9 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, "coupler. This is used for testing and should be =1.0 for any "//& "production runs.", default=1.0) + call get_param(param_file, mdl, "USE_CFC_CAP", CS%use_CFC, & + default=.false., do_not_log=.true.) + if (restore_salt) then call get_param(param_file, mdl, "FLUXCONST", CS%Flux_const, & "The constant that relates the restoring surface fluxes to the relative "//& @@ -1137,7 +1219,6 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, "The name of the surface salinity variable to read from "//& "SALT_RESTORE_FILE for restoring salinity.", & default="salt") - call get_param(param_file, mdl, "SRESTORE_AS_SFLUX", CS%salt_restore_as_sflux, & "If true, the restoring of salinity is applied as a salt "//& "flux instead of as a freshwater flux.", default=.false.) @@ -1233,7 +1314,8 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, enddo ; enddo endif - call time_interp_external_init + ! initialize time interpolator module + call time_interp_external_init() ! Optionally read a x-y gustiness field in place of a global ! constant. @@ -1284,8 +1366,9 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, call get_param(param_file, mdl, "ALLOW_ICEBERG_FLUX_DIAGNOSTICS", iceberg_flux_diags, & "If true, makes available diagnostics of fluxes from icebergs "//& "as seen by MOM6.", default=.false.) + call register_forcing_type_diags(Time, diag, US, CS%use_temperature, CS%handles, & - use_berg_fluxes=iceberg_flux_diags) + use_berg_fluxes=iceberg_flux_diags, use_waves=use_waves, use_cfcs=CS%use_CFC) call get_param(param_file, mdl, "ALLOW_FLUX_ADJUSTMENTS", CS%allow_flux_adjustments, & "If true, allows flux adjustments to specified via the "//& @@ -1319,6 +1402,29 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, endif endif ; endif + ! 
Do not log these params here since they are logged in the CFC cap module + if (CS%use_CFC) then + call get_param(param_file, mdl, "CFC_BC_FILE", CS%CFC_BC_file, & + "The file in which the CFC-11 and CFC-12 atm concentrations can be "//& + "found (units must be parts per trillion), or an empty string for "//& + "internal BC generation (TODO).", default=" ", do_not_log=.true.) + if ((len_trim(CS%CFC_BC_file) > 0) .and. (scan(CS%CFC_BC_file,'/') == 0)) then + ! Add the directory if CFC_BC_file is not already a complete path. + CS%CFC_BC_file = trim(CS%inputdir) // trim(CS%CFC_BC_file) + endif + if (len_trim(CS%CFC_BC_file) > 0) then + call get_param(param_file, mdl, "CFC11_VARIABLE", CS%cfc11_var_name, & + "The name of the variable representing CFC-11 in "//& + "CFC_BC_FILE.", default="CFC_11", do_not_log=.true.) + call get_param(param_file, mdl, "CFC12_VARIABLE", CS%cfc12_var_name, & + "The name of the variable representing CFC-12 in "//& + "CFC_BC_FILE.", default="CFC_12", do_not_log=.true.) + + CS%id_cfc11_atm = init_external_field(CS%CFC_BC_file, CS%cfc11_var_name, domain=G%Domain%mpp_domain) + CS%id_cfc12_atm = init_external_field(CS%CFC_BC_file, CS%cfc12_var_name, domain=G%Domain%mpp_domain) + endif + endif + ! Set up any restart fields associated with the forcing. call restart_init(param_file, CS%restart_CSp, "MOM_forcing.res") call restart_init_end(CS%restart_CSp) @@ -1365,35 +1471,48 @@ subroutine ice_ocn_bnd_type_chksum(id, timestep, iobt) intent(in) :: iobt !< An ice-ocean boundary type with fluxes to drive the !! ocean in a coupled model whose checksums are reported - ! local variables - integer :: n,m, outunit - - outunit = stdout() - - write(outunit,*) "BEGIN CHECKSUM(ice_ocean_boundary_type):: ", id, timestep - write(outunit,100) 'iobt%u_flux ' , mpp_chksum( iobt%u_flux ) - write(outunit,100) 'iobt%v_flux ' , mpp_chksum( iobt%v_flux ) - write(outunit,100) 'iobt%t_flux ' , mpp_chksum( iobt%t_flux ) - write(outunit,100) 'iobt%q_flux ' , mpp_chksum( iobt%q_flux ) - write(outunit,100) 'iobt%salt_flux ' , mpp_chksum( iobt%salt_flux ) - write(outunit,100) 'iobt%seaice_melt_heat' , mpp_chksum( iobt%seaice_melt_heat) - write(outunit,100) 'iobt%seaice_melt ' , mpp_chksum( iobt%seaice_melt ) - write(outunit,100) 'iobt%lw_flux ' , mpp_chksum( iobt%lw_flux ) - write(outunit,100) 'iobt%sw_flux_vis_dir' , mpp_chksum( iobt%sw_flux_vis_dir) - write(outunit,100) 'iobt%sw_flux_vis_dif' , mpp_chksum( iobt%sw_flux_vis_dif) - write(outunit,100) 'iobt%sw_flux_nir_dir' , mpp_chksum( iobt%sw_flux_nir_dir) - write(outunit,100) 'iobt%sw_flux_nir_dif' , mpp_chksum( iobt%sw_flux_nir_dif) - write(outunit,100) 'iobt%lprec ' , mpp_chksum( iobt%lprec ) - write(outunit,100) 'iobt%fprec ' , mpp_chksum( iobt%fprec ) - write(outunit,100) 'iobt%lrunoff ' , mpp_chksum( iobt%lrunoff ) - write(outunit,100) 'iobt%frunoff ' , mpp_chksum( iobt%frunoff ) - write(outunit,100) 'iobt%p ' , mpp_chksum( iobt%p ) - if (associated(iobt%ustar_berg)) & - write(outunit,100) 'iobt%ustar_berg ' , mpp_chksum( iobt%ustar_berg ) - if (associated(iobt%area_berg)) & - write(outunit,100) 'iobt%area_berg ' , mpp_chksum( iobt%area_berg ) - if (associated(iobt%mass_berg)) & - write(outunit,100) 'iobt%mass_berg ' , mpp_chksum( iobt%mass_berg ) + ! Local variables + integer(kind=int64) :: chks ! A checksum for the field + logical :: root ! True only on the root PE + integer :: outunit ! 
The output unit to write to + + outunit = stdout + root = is_root_pe() + + if (root) write(outunit,*) "BEGIN CHECKSUM(ice_ocean_boundary_type):: ", id, timestep + chks = field_chksum( iobt%u_flux ) ; if (root) write(outunit,100) 'iobt%u_flux ', chks + chks = field_chksum( iobt%v_flux ) ; if (root) write(outunit,100) 'iobt%v_flux ', chks + chks = field_chksum( iobt%t_flux ) ; if (root) write(outunit,100) 'iobt%t_flux ', chks + chks = field_chksum( iobt%q_flux ) ; if (root) write(outunit,100) 'iobt%q_flux ', chks + chks = field_chksum( iobt%seaice_melt_heat); if (root) write(outunit,100) 'iobt%seaice_melt_heat', chks + chks = field_chksum( iobt%seaice_melt) ; if (root) write(outunit,100) 'iobt%seaice_melt ', chks + chks = field_chksum( iobt%salt_flux ) ; if (root) write(outunit,100) 'iobt%salt_flux ', chks + chks = field_chksum( iobt%lw_flux ) ; if (root) write(outunit,100) 'iobt%lw_flux ', chks + chks = field_chksum( iobt%sw_flux_vis_dir) ; if (root) write(outunit,100) 'iobt%sw_flux_vis_dir', chks + chks = field_chksum( iobt%sw_flux_vis_dif) ; if (root) write(outunit,100) 'iobt%sw_flux_vis_dif', chks + chks = field_chksum( iobt%sw_flux_nir_dir) ; if (root) write(outunit,100) 'iobt%sw_flux_nir_dir', chks + chks = field_chksum( iobt%sw_flux_nir_dif) ; if (root) write(outunit,100) 'iobt%sw_flux_nir_dif', chks + chks = field_chksum( iobt%lprec ) ; if (root) write(outunit,100) 'iobt%lprec ', chks + chks = field_chksum( iobt%fprec ) ; if (root) write(outunit,100) 'iobt%fprec ', chks + chks = field_chksum( iobt%lrunoff ) ; if (root) write(outunit,100) 'iobt%lrunoff ', chks + chks = field_chksum( iobt%frunoff ) ; if (root) write(outunit,100) 'iobt%frunoff ', chks + chks = field_chksum( iobt%p ) ; if (root) write(outunit,100) 'iobt%p ', chks + if (associated(iobt%ice_fraction)) then + chks = field_chksum( iobt%ice_fraction ) ; if (root) write(outunit,100) 'iobt%ice_fraction ', chks + endif + if (associated(iobt%u10_sqr)) then + chks = field_chksum( iobt%u10_sqr ) ; if (root) write(outunit,100) 'iobt%u10_sqr ', chks + endif + if (associated(iobt%ustar_berg)) then + chks = field_chksum( iobt%ustar_berg ) ; if (root) write(outunit,100) 'iobt%ustar_berg ', chks + endif + if (associated(iobt%area_berg)) then + chks = field_chksum( iobt%area_berg ) ; if (root) write(outunit,100) 'iobt%area_berg ', chks + endif + if (associated(iobt%mass_berg)) then + chks = field_chksum( iobt%mass_berg ) ; if (root) write(outunit,100) 'iobt%mass_berg ', chks + endif + 100 FORMAT(" CHECKSUM::",A20," = ",Z20) call coupler_type_write_chksums(iobt%fluxes, outunit, 'iobt%') diff --git a/config_src/nuopc_driver/ocn_comp_NUOPC.F90 b/config_src/drivers/nuopc_cap/ocn_comp_NUOPC.F90 similarity index 100% rename from config_src/nuopc_driver/ocn_comp_NUOPC.F90 rename to config_src/drivers/nuopc_cap/ocn_comp_NUOPC.F90 diff --git a/config_src/nuopc_driver/time_utils.F90 b/config_src/drivers/nuopc_cap/time_utils.F90 similarity index 93% rename from config_src/nuopc_driver/time_utils.F90 rename to config_src/drivers/nuopc_cap/time_utils.F90 index e995c1b697..81efcd2765 100644 --- a/config_src/nuopc_driver/time_utils.F90 +++ b/config_src/drivers/nuopc_cap/time_utils.F90 @@ -14,6 +14,7 @@ module time_utils_mod use ESMF, only: ESMF_Time, ESMF_TimeGet, ESMF_LogFoundError use ESMF, only: ESMF_LOGERR_PASSTHRU,ESMF_TimeInterval use ESMF, only: ESMF_TimeIntervalGet, ESMF_TimeSet, ESMF_SUCCESS +use MOM_cap_methods, only: ChkErr implicit none; private @@ -34,6 +35,9 @@ module time_utils_mod public fms2esmf_time public string_to_date 
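Note on the error-handling change above: the repeated ESMF_LogFoundError bail-out blocks in time_utils.F90 are collapsed into the ChkErr helper imported from MOM_cap_methods, together with the u_FILE_u file-name parameter. A minimal sketch of what such a wrapper looks like is given below; this is illustrative only, and the authoritative implementation lives in MOM_cap_methods.

  ! Illustrative sketch of a ChkErr-style wrapper (assumed behavior, not copied from MOM_cap_methods):
  ! it logs any ESMF error against the caller's file and line and reports whether one was found.
  logical function ChkErr(rc, line, file)
    integer, intent(in) :: rc             ! Return code from the preceding ESMF call
    integer, intent(in) :: line           ! Caller's source line, normally __LINE__
    character(len=*), intent(in) :: file  ! Caller's source file, normally u_FILE_u
    ChkErr = .false.
    if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=line, file=file)) &
      ChkErr = .true.
  end function ChkErr

Each call site then reduces to the single-line pattern "if (ChkErr(rc,__LINE__,u_FILE_u)) return" used throughout the renamed NUOPC cap sources.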
+character(len=*),parameter :: u_FILE_u = & + __FILE__ + contains !> Sets fms2esmf_cal_c to the corresponding ESMF calendar type @@ -90,10 +94,7 @@ function esmf2fms_time_t(time) call ESMF_TimeGet(time, yy=yy, mm=mm, dd=dd, h=h, m=m, s=s, & calkindflag=calkind, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return esmf2fms_time_t = set_date(yy, mm, dd, h, m, s) @@ -111,10 +112,7 @@ function esmf2fms_timestep(timestep) integer :: rc call ESMF_TimeIntervalGet(timestep, s=s, calkindflag=calkind, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return esmf2fms_timestep = set_time(s, 0) @@ -142,10 +140,7 @@ function fms2esmf_time(time, calkind) call ESMF_TimeSet(fms2esmf_time, yy=yy, mm=mm, d=d, h=h, m=m, s=s, & calkindflag=l_calkind, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return end function fms2esmf_time diff --git a/config_src/solo_driver/MESO_surface_forcing.F90 b/config_src/drivers/solo_driver/MESO_surface_forcing.F90 similarity index 99% rename from config_src/solo_driver/MESO_surface_forcing.F90 rename to config_src/drivers/solo_driver/MESO_surface_forcing.F90 index e2f0694b6c..7c778d9753 100644 --- a/config_src/solo_driver/MESO_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/MESO_surface_forcing.F90 @@ -243,7 +243,8 @@ subroutine MESO_surface_forcing_init(Time, G, US, param_file, diag, CS) "parameters from vertical units of m to kg m-2.", & units="kg m-3", default=1035.0, scale=US%kg_m3_to_R) call get_param(param_file, mdl, "GUST_CONST", CS%gust_const, & - "The background gustiness in the winds.", units="Pa", default=0.0) + "The background gustiness in the winds.", units="Pa", default=0.0, & + scale=US%kg_m3_to_R*US%m_s_to_L_T**2*US%L_to_Z) call get_param(param_file, mdl, "RESTOREBUOY", CS%restorebuoy, & "If true, the buoyancy fluxes drive the model back "//& @@ -274,7 +275,7 @@ subroutine MESO_surface_forcing_init(Time, G, US, param_file, diag, CS) call get_param(param_file, mdl, "INPUTDIR", CS%inputdir, default=".") CS%inputdir = slasher(CS%inputdir) - endif + endif end subroutine MESO_surface_forcing_init diff --git a/config_src/solo_driver/MOM_driver.F90 b/config_src/drivers/solo_driver/MOM_driver.F90 similarity index 79% rename from config_src/solo_driver/MOM_driver.F90 rename to config_src/drivers/solo_driver/MOM_driver.F90 index ba52d9c02a..ebf3e5a43d 100644 --- a/config_src/solo_driver/MOM_driver.F90 +++ b/config_src/drivers/solo_driver/MOM_driver.F90 @@ -32,49 +32,43 @@ program MOM_main use MOM, only : extract_surface_state, finish_MOM_initialization use MOM, only : get_MOM_state_elements, MOM_state_is_synchronized use MOM, only : step_offline - use MOM_domains, only : MOM_infra_init, MOM_infra_end + use MOM_coms, only : Set_PElist + use MOM_domains, only : MOM_infra_init, MOM_infra_end, set_MOM_thread_affinity + use MOM_ensemble_manager, only : ensemble_manager_init, get_ensemble_size + use MOM_ensemble_manager, only : ensemble_pelist_setup use MOM_error_handler, only : MOM_error, MOM_mesg, WARNING, FATAL, is_root_pe use MOM_error_handler, only : callTree_enter, callTree_leave, callTree_waypoint use MOM_file_parser, only : read_param, get_param, log_param, log_version, param_file_type use 
MOM_file_parser, only : close_param_file use MOM_forcing_type, only : forcing, mech_forcing, forcing_diagnostics use MOM_forcing_type, only : mech_forcing_diags, MOM_forcing_chksum, MOM_mech_forcing_chksum - use MOM_get_input, only : directories + use MOM_get_input, only : get_MOM_input, directories use MOM_grid, only : ocean_grid_type - use MOM_io, only : file_exists, open_file, close_file + use MOM_ice_shelf, only : initialize_ice_shelf, ice_shelf_end, ice_shelf_CS + use MOM_ice_shelf, only : shelf_calc_flux, add_shelf_forces, ice_shelf_save_restart + use MOM_ice_shelf, only : initialize_ice_shelf_fluxes, initialize_ice_shelf_forces + use MOM_interpolate, only : time_interp_external_init + use MOM_io, only : file_exists, open_ASCII_file, close_file use MOM_io, only : check_nml_error, io_infra_init, io_infra_end - use MOM_io, only : APPEND_FILE, ASCII_FILE, READONLY_FILE, SINGLE_FILE + use MOM_io, only : APPEND_FILE, READONLY_FILE use MOM_restart, only : MOM_restart_CS, save_restart use MOM_string_functions,only : uppercase use MOM_surface_forcing, only : set_forcing, forcing_save_restart use MOM_surface_forcing, only : surface_forcing_init, surface_forcing_CS - use MOM_time_manager, only : time_type, set_date, get_date - use MOM_time_manager, only : real_to_time, time_type_to_real + use MOM_time_manager, only : time_type, set_date, get_date, real_to_time, time_type_to_real use MOM_time_manager, only : operator(+), operator(-), operator(*), operator(/) use MOM_time_manager, only : operator(>), operator(<), operator(>=) use MOM_time_manager, only : increment_date, set_calendar_type, month_name - use MOM_time_manager, only : JULIAN, GREGORIAN, NOLEAP, THIRTY_DAY_MONTHS - use MOM_time_manager, only : NO_CALENDAR + use MOM_time_manager, only : JULIAN, GREGORIAN, NOLEAP, THIRTY_DAY_MONTHS, NO_CALENDAR use MOM_tracer_flow_control, only : tracer_flow_control_CS use MOM_unit_scaling, only : unit_scale_type use MOM_variables, only : surface use MOM_verticalGrid, only : verticalGrid_type + use MOM_wave_interface, only : wave_parameters_CS, MOM_wave_interface_init + use MOM_wave_interface, only : Update_Surface_Waves use MOM_write_cputime, only : write_cputime, MOM_write_cputime_init use MOM_write_cputime, only : write_cputime_start_clock, write_cputime_CS - use ensemble_manager_mod, only : ensemble_manager_init, get_ensemble_size - use ensemble_manager_mod, only : ensemble_pelist_setup - use mpp_mod, only : set_current_pelist => mpp_set_current_pelist - use time_interp_external_mod, only : time_interp_external_init - use fms_affinity_mod, only : fms_affinity_init, fms_affinity_set,fms_affinity_get - - use MOM_ice_shelf, only : initialize_ice_shelf, ice_shelf_end, ice_shelf_CS - use MOM_ice_shelf, only : shelf_calc_flux, add_shelf_forces, ice_shelf_save_restart -! , add_shelf_flux_forcing, add_shelf_flux_IOB - - use MOM_wave_interface, only: wave_parameters_CS, MOM_wave_interface_init - use MOM_wave_interface, only: MOM_wave_interface_init_lite, Update_Surface_Waves - implicit none #include @@ -84,18 +78,17 @@ program MOM_main ! A structure containing pointers to the thermodynamic forcing fields ! at the ocean surface. type(forcing) :: fluxes - ! A structure containing pointers to the ocean surface state fields. type(surface) :: sfc_state ! A pointer to a structure containing metrics and related information. - type(ocean_grid_type), pointer :: grid - type(verticalGrid_type), pointer :: GV + type(ocean_grid_type), pointer :: grid => NULL() + type(verticalGrid_type), pointer :: GV => NULL() ! 
A pointer to a structure containing dimensional unit scaling factors. - type(unit_scale_type), pointer :: US + type(unit_scale_type), pointer :: US => NULL() ! If .true., use the ice shelf model for part of the domain. - logical :: use_ice_shelf + logical :: use_ice_shelf = .false. ! If .true., use surface wave coupling logical :: use_waves = .false. @@ -103,8 +96,6 @@ program MOM_main ! This is .true. if incremental restart files may be saved. logical :: permit_incr_restart = .true. - integer :: ns - ! nmax is the number of iterations after which to stop so that the ! simulation does not exceed its CPU time limit. nmax is determined by ! evaluating the CPU time used between successive calls to write_cputime. @@ -127,6 +118,7 @@ program MOM_main type(time_type) :: Time_end ! End time for the segment or experiment. type(time_type) :: restart_time ! The next time to write restart files. type(time_type) :: Time_step_ocean ! A time_type version of dt_forcing. + logical :: segment_start_time_set ! True if segment_start_time has been set to a valid value. real :: elapsed_time = 0.0 ! Elapsed time in this run [s]. logical :: elapsed_time_master ! If true, elapsed time is used to set the @@ -143,9 +135,9 @@ program MOM_main ! chosen so that dt_forcing is an integer multiple of dt_dyn. real :: dtdia ! The diabatic timestep [s] real :: t_elapsed_seg ! The elapsed time in this run segment [s] - integer :: n, n_max, nts, n_last_thermo + integer :: n, ns, n_max, nts, n_last_thermo logical :: diabatic_first, single_step_call - type(time_type) :: Time2, time_chg + type(time_type) :: Time2, time_chg ! Temporary time variables integer :: Restart_control ! An integer that is bit-tested to determine whether ! incremental restart files are saved and whether they @@ -159,23 +151,13 @@ program MOM_main type(time_type) :: daymax ! The final day of the simulation. integer :: CPU_steps ! The number of steps between writing CPU time. - integer :: date_init(6)=0 ! The start date of the whole simulation. - integer :: date(6)=-1 ! Possibly the start date of this run segment. - integer :: years=0, months=0, days=0 ! These may determine the segment run - integer :: hours=0, minutes=0, seconds=0 ! length, if read from a namelist. - integer :: yr, mon, day, hr, mins, sec ! Temp variables for writing the date. + integer :: date(6) ! Possibly the start date of this run segment. type(param_file_type) :: param_file ! The structure indicating the file(s) ! containing all run-time parameters. - character(len=9) :: month - character(len=16) :: calendar = 'julian' - integer :: calendar_type=-1 - integer :: unit, io_status, ierr - integer :: ensemble_size, nPEs_per, ensemble_info(6) + integer :: calendar_type=-1 ! A coded integer indicating the calendar type. - integer, dimension(0) :: atm_PElist, land_PElist, ice_PElist - integer, dimension(:), allocatable :: ocean_PElist - logical :: unit_in_use + integer :: unit, io_status, ierr integer :: initClock, mainClock, termClock logical :: debug ! If true, write verbose checksums for debugging purposes. @@ -187,8 +169,8 @@ program MOM_main ! and diffusion equation are read in from files stored from ! a previous integration of the prognostic model - type(MOM_control_struct), pointer :: MOM_CSp => NULL() - !> A pointer to the tracer flow control structure. + type(MOM_control_struct) :: MOM_CSp !< The control structure with all the MOM6 internal types, + !! 
parameters and variables type(tracer_flow_control_CS), pointer :: & tracer_flow_CSp => NULL() !< A pointer to the tracer flow control structure type(surface_forcing_CS), pointer :: surface_forcing_CSp => NULL() @@ -198,19 +180,23 @@ program MOM_main type(MOM_restart_CS), pointer :: & restart_CSp => NULL() !< A pointer to the restart control structure !! that will be used for MOM restart files. - type(diag_ctrl), pointer :: & - diag => NULL() !< A pointer to the diagnostic regulatory structure + type(diag_ctrl), pointer :: & + diag => NULL() !< A pointer to the diagnostic regulatory structure !----------------------------------------------------------------------- character(len=4), parameter :: vers_num = 'v2.0' -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mod_name = "MOM_main (MOM_driver)" ! This module's name. + ! These are the variables that might be read via the namelist capability. + integer :: date_init(6)=0 ! The start date of the whole simulation. + character(len=16) :: calendar = 'julian' ! The name of the calendar type. + integer :: years=0, months=0, days=0 ! These may determine the segment run + integer :: hours=0, minutes=0, seconds=0 ! length, if read from a namelist. integer :: ocean_nthreads = 1 logical :: use_hyper_thread = .false. - integer :: omp_get_num_threads,omp_get_thread_num - namelist /ocean_solo_nml/ date_init, calendar, months, days, hours, minutes, seconds,& + namelist /ocean_solo_nml/ date_init, calendar, months, days, hours, minutes, seconds, & ocean_nthreads, use_hyper_thread !===================================================================== @@ -219,18 +205,10 @@ program MOM_main call MOM_infra_init() ; call io_infra_init() - ! Initialize the ensemble manager. If there are no settings for ensemble_size - ! in input.nml(ensemble.nml), these should not do anything. In coupled - ! configurations, this all occurs in the external driver. - call ensemble_manager_init() ; ensemble_info(:) = get_ensemble_size() - ensemble_size=ensemble_info(1) ; nPEs_per=ensemble_info(2) - if (ensemble_size > 1) then ! There are multiple ensemble members. - allocate(ocean_pelist(nPEs_per)) - call ensemble_pelist_setup(.true., 0, nPEs_per, 0, 0, atm_pelist, ocean_pelist, & - land_pelist, ice_pelist) - call set_current_pelist(ocean_pelist) - deallocate(ocean_pelist) - endif + !allocate(forces,fluxes,sfc_state) + + ! Initialize the ensemble manager based on settings in input.nml(ensemble.nml). + call initialize_ocean_only_ensembles() ! These clocks are on the global pelist. initClock = cpu_clock_id( 'Initialization' ) @@ -243,34 +221,39 @@ program MOM_main if (file_exists('input.nml')) then ! Provide for namelist specification of the run length and calendar data. - call open_file(unit, 'input.nml', form=ASCII_FILE, action=READONLY_FILE) + call open_ASCII_file(unit, 'input.nml', action=READONLY_FILE) read(unit, ocean_solo_nml, iostat=io_status) call close_file(unit) ierr = check_nml_error(io_status,'ocean_solo_nml') - if (years+months+days+hours+minutes+seconds > 0) then - if (is_root_pe()) write(*,ocean_solo_nml) - endif + if (is_root_pe() .and. 
(years+months+days+hours+minutes+seconds > 0)) write(*,ocean_solo_nml) endif -!$ call fms_affinity_init -!$ call fms_affinity_set('OCEAN', use_hyper_thread, ocean_nthreads) -!$ call omp_set_num_threads(ocean_nthreads) -!$OMP PARALLEL -!$ write(6,*) "ocean_solo OMPthreading ", fms_affinity_get(), omp_get_thread_num(), omp_get_num_threads() -!$ call flush(6) -!$OMP END PARALLEL + ! This call sets the number and affinity of threads with openMP. + !$ call set_MOM_thread_affinity(ocean_nthreads, use_hyper_thread) + + ! This call is required to initiate dirs%restart_input_dir for ocean_solo.res + ! The contents of dirs will be reread in initialize_MOM. + call get_MOM_input(dirs=dirs) + segment_start_time_set = .false. ! Read ocean_solo restart, which can override settings from the namelist. if (file_exists(trim(dirs%restart_input_dir)//'ocean_solo.res')) then - call open_file(unit,trim(dirs%restart_input_dir)//'ocean_solo.res', & - form=ASCII_FILE,action=READONLY_FILE) + date(:) = -1 + call open_ASCII_file(unit, trim(dirs%restart_input_dir)//'ocean_solo.res', action=READONLY_FILE) read(unit,*) calendar_type read(unit,*) date_init read(unit,*) date call close_file(unit) + + call set_calendar_type(calendar_type) + if (sum(date) >= 0) then + ! In this case, the segment starts at a time fixed by ocean_solo.res + segment_start_time = set_date(date(1), date(2), date(3), date(4), date(5), date(6)) + segment_start_time_set = .true. + endif else calendar = uppercase(calendar) - if (calendar(1:6) == 'JULIAN') then ; calendar_type = JULIAN + if (calendar(1:6) == 'JULIAN') then ; calendar_type = JULIAN elseif (calendar(1:9) == 'GREGORIAN') then ; calendar_type = GREGORIAN elseif (calendar(1:6) == 'NOLEAP') then ; calendar_type = NOLEAP elseif (calendar(1:10)=='THIRTY_DAY') then ; calendar_type = THIRTY_DAY_MONTHS @@ -280,37 +263,47 @@ program MOM_main else call MOM_error(FATAL,'MOM_driver: No namelist value for calendar') endif + call set_calendar_type(calendar_type) endif - call set_calendar_type(calendar_type) if (sum(date_init) > 0) then - Start_time = set_date(date_init(1),date_init(2), date_init(3), & - date_init(4),date_init(5),date_init(6)) + Start_time = set_date(date_init(1), date_init(2), date_init(3), & + date_init(4), date_init(5), date_init(6)) else Start_time = real_to_time(0.0) endif - call time_interp_external_init + call time_interp_external_init() - if (sum(date) >= 0) then + ! Call initialize MOM with an optional Ice Shelf CS which, if present triggers + ! initialization of ice shelf parameters and arrays. + if (segment_start_time_set) then ! In this case, the segment starts at a time fixed by ocean_solo.res - segment_start_time = set_date(date(1),date(2),date(3),date(4),date(5),date(6)) Time = segment_start_time call initialize_MOM(Time, Start_time, param_file, dirs, MOM_CSp, restart_CSp, & segment_start_time, offline_tracer_mode=offline_tracer_mode, & - diag_ptr=diag, tracer_flow_CSp=tracer_flow_CSp) + diag_ptr=diag, tracer_flow_CSp=tracer_flow_CSp, ice_shelf_CSp=ice_shelf_CSp) else ! In this case, the segment starts at a time read from the MOM restart file - ! or left as Start_time by MOM_initialize. + ! or is left at Start_time by MOM_initialize. 
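For reference, the ocean_solo.res file read above is a three-record ASCII file left behind by a previous run segment: a calendar code, the model start date, and the current model date, with each date written as six integers (year, month, day, hour, minute, second). With purely illustrative dates it looks like:

       2        (Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4)
       1     1     1     0     0     0        Model start time:   year, month, day, hour, minute, second
       1     2     1     0     0     0        Current model time: year, month, day, hour, minute, second

The layout matches the '(i6,8x,a)' and '(6i6,8x,a)' formats used by write_ocean_solo_res later in this patch; the numbers shown are placeholders.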
Time = Start_time call initialize_MOM(Time, Start_time, param_file, dirs, MOM_CSp, restart_CSp, & offline_tracer_mode=offline_tracer_mode, diag_ptr=diag, & - tracer_flow_CSp=tracer_flow_CSp) + tracer_flow_CSp=tracer_flow_CSp, ice_shelf_CSp=ice_shelf_CSp) endif call get_MOM_state_elements(MOM_CSp, G=grid, GV=GV, US=US, C_p_scaled=fluxes%C_p) Master_Time = Time + use_ice_shelf = associated(ice_shelf_CSp) + + if (use_ice_shelf) then + ! These arrays are not initialized in most solo cases, but are needed + ! when using an ice shelf + call initialize_ice_shelf_fluxes(ice_shelf_CSp, grid, US, fluxes) + call initialize_ice_shelf_forces(ice_shelf_CSp, grid, US, forces) + endif + call callTree_waypoint("done initialize_MOM") @@ -320,22 +313,12 @@ program MOM_main surface_forcing_CSp, tracer_flow_CSp) call callTree_waypoint("done surface_forcing_init") - call get_param(param_file, mod_name, "ICE_SHELF", use_ice_shelf, & - "If true, enables the ice shelf model.", default=.false.) - if (use_ice_shelf) then - ! These arrays are not initialized in most solo cases, but are needed - ! when using an ice shelf - call initialize_ice_shelf(param_file, grid, Time, ice_shelf_CSp, & - diag, forces, fluxes) - endif - call get_param(param_file,mod_name,"USE_WAVES",Use_Waves,& + call get_param(param_file, mod_name, "USE_WAVES", Use_Waves, & "If true, enables surface wave modules.",default=.false.) - if (use_waves) then - call MOM_wave_interface_init(Time, grid, GV, US, param_file, Waves_CSp, diag) - else - call MOM_wave_interface_init_lite(param_file) - endif + ! MOM_wave_interface_init is called regardless of the value of USE_WAVES because + ! it also initializes statistical waves. + call MOM_wave_interface_init(Time, grid, GV, US, param_file, Waves_CSp, diag) segment_start_time = Time elapsed_time = 0.0 @@ -435,17 +418,7 @@ program MOM_main call diag_mediator_close_registration(diag) ! Write out a time stamp file. - if (calendar_type /= NO_CALENDAR) then - call open_file(unit, 'time_stamp.out', form=ASCII_FILE, action=APPEND_FILE, & - threading=SINGLE_FILE) - call get_date(Time, date(1), date(2), date(3), date(4), date(5), date(6)) - month = month_name(date(2)) - if (is_root_pe()) write(unit,'(6i4,2x,a3)') date, month(1:3) - call get_date(Time_end, date(1), date(2), date(3), date(4), date(5), date(6)) - month = month_name(date(2)) - if (is_root_pe()) write(unit,'(6i4,2x,a3)') date, month(1:3) - call close_file(unit) - endif + if (is_root_pe() .and. (calendar_type /= NO_CALENDAR)) call write_time_stamp_file(Time) if (cpu_steps > 0) call write_cputime(Time, 0, write_CPU_CSp) @@ -482,7 +455,7 @@ program MOM_main if (use_ice_shelf) then call shelf_calc_flux(sfc_state, fluxes, Time, dt_forcing, ice_shelf_CSp) - call add_shelf_forces(grid, US, Ice_shelf_CSp, forces) + call add_shelf_forces(grid, US, Ice_shelf_CSp, forces, external_call=.true.) endif fluxes%fluxes_used = .false. fluxes%dt_buoy_accum = US%s_to_T*dt_forcing @@ -620,37 +593,23 @@ program MOM_main call save_restart(dirs%restart_output_dir, Time, grid, restart_CSp, GV=GV) if (use_ice_shelf) call ice_shelf_save_restart(ice_shelf_CSp, Time, & dirs%restart_output_dir) - ! Write ocean solo restart file. - call open_file(unit, trim(dirs%restart_output_dir)//'ocean_solo.res', nohdrs=.true.) 
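Similarly, the time_stamp.out file now written through write_time_stamp_file appends two records per segment, the current model time and the segment end time, each as six integers followed by a three-letter month name under the '(6i4,2x,a3)' format. With illustrative dates for a one-month segment the appended records would look like:

     1   2   1   0   0   0  Feb
     1   3   1   0   0   0  Mar

Again, the values are placeholders; only the record layout follows the FMS time-stamp convention that the subroutine reproduces.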
- if (is_root_pe())then - write(unit, '(i6,8x,a)') calendar_type, & - '(Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4)' - - call get_date(Start_time, yr, mon, day, hr, mins, sec) - write(unit, '(6i6,8x,a)') yr, mon, day, hr, mins, sec, & - 'Model start time: year, month, day, hour, minute, second' - call get_date(Time, yr, mon, day, hr, mins, sec) - write(unit, '(6i6,8x,a)') yr, mon, day, hr, mins, sec, & - 'Current model time: year, month, day, hour, minute, second' - endif - call close_file(unit) + ! Write the ocean solo restart file. + call write_ocean_solo_res(Time, Start_time, calendar_type, & + trim(dirs%restart_output_dir)//'ocean_solo.res') endif if (is_root_pe()) then - do unit=10,1967 - INQUIRE(unit,OPENED=unit_in_use) - if (.not.unit_in_use) exit - enddo - open(unit,FILE="exitcode",FORM="FORMATTED",STATUS="REPLACE",action="WRITE") + call open_ASCII_file(unit, "exitcode") if (Time < daymax) then write(unit,*) 9 else write(unit,*) 0 endif - close(unit) + call close_file(unit) endif call callTree_waypoint("End MOM_main") + if (use_ice_shelf) call ice_shelf_end(ice_shelf_CSp) call diag_mediator_end(Time, diag, end_diag_manager=.true.) if (cpu_steps > 0) call write_cputime(Time, ns-1, write_CPU_CSp, call_end=.true.) call cpu_clock_end(termClock) @@ -658,6 +617,73 @@ program MOM_main call io_infra_end ; call MOM_infra_end call MOM_end(MOM_CSp) - if (use_ice_shelf) call ice_shelf_end(ice_shelf_CSp) + +contains + +!> Write out the ocean solo restart file to the indicated path. +subroutine write_ocean_solo_res(Time, Start_time, calendar, file_path) + type(time_type), intent(in) :: Time !< The current model time. + type(time_type), intent(in) :: Start_Time !< The start time of the simulation. + integer, intent(in) :: calendar !< A coded integer indicating the calendar type. + character(len=*), intent(in) :: file_path !< The full path and name of the restart file + + ! Local variables + integer :: unit + integer :: yr, mon, day, hr, mins, sec ! Temp variables for writing the date. + + if (.not.is_root_pe()) return + + call open_ASCII_file(unit, trim(file_path)) + write(unit, '(i6,8x,a)') calendar, & + '(Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4)' + + call get_date(Start_time, yr, mon, day, hr, mins, sec) + write(unit, '(6i6,8x,a)') yr, mon, day, hr, mins, sec, & + 'Model start time: year, month, day, hour, minute, second' + call get_date(Time, yr, mon, day, hr, mins, sec) + write(unit, '(6i6,8x,a)') yr, mon, day, hr, mins, sec, & + 'Current model time: year, month, day, hour, minute, second' + call close_file(unit) +end subroutine write_ocean_solo_res + + +!> Write out an ascii time stamp file with the model time, following FMS conventions. +subroutine write_time_stamp_file(Time) + type(time_type), intent(in) :: Time !< The current model time. + ! Local variables + integer :: unit + integer :: yr, mon, day, hr, mins, sec ! Temp variables for writing the date. + character(len=9) :: month ! The name of the month + + if (.not.is_root_PE()) return + + call open_ASCII_file(unit, 'time_stamp.out', action=APPEND_FILE) + call get_date(Time, yr, mon, day, hr, mins, sec) + month = month_name(mon) + write(unit,'(6i4,2x,a3)') yr, mon, day, hr, mins, sec, month(1:3) + call get_date(Time_end, yr, mon, day, hr, mins, sec) + month = month_name(mon) + write(unit,'(6i4,2x,a3)') yr, mon, day, hr, mins, sec, month(1:3) + call close_file(unit) +end subroutine write_time_stamp_file + +!> Initialize the ensemble manager. 
If there are no settings for ensemble_size +!! in input.nml(ensemble.nml), these should not do anything. In coupled +!! configurations, this all occurs in the external driver. +subroutine initialize_ocean_only_ensembles() + integer, dimension(:), allocatable :: ocean_PElist + integer, dimension(0) :: atm_PElist, land_PElist, ice_PElist + integer :: ensemble_size, nPEs_per, ensemble_info(6) + + call ensemble_manager_init() ; ensemble_info(:) = get_ensemble_size() + ensemble_size = ensemble_info(1) ; nPEs_per = ensemble_info(2) + if (ensemble_size > 1) then ! There are multiple ensemble members. + allocate(ocean_pelist(nPEs_per)) + call ensemble_pelist_setup(.true., 0, nPEs_per, 0, 0, atm_pelist, ocean_pelist, & + land_pelist, ice_pelist) + call Set_PElist(ocean_pelist) + deallocate(ocean_pelist) + endif +end subroutine initialize_ocean_only_ensembles end program MOM_main diff --git a/config_src/solo_driver/MOM_surface_forcing.F90 b/config_src/drivers/solo_driver/MOM_surface_forcing.F90 similarity index 93% rename from config_src/solo_driver/MOM_surface_forcing.F90 rename to config_src/drivers/solo_driver/MOM_surface_forcing.F90 index 3d8b398516..85c363b897 100644 --- a/config_src/solo_driver/MOM_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/MOM_surface_forcing.F90 @@ -1,5 +1,5 @@ !> Functions that calculate the surface wind stresses and fluxes of buoyancy -!! or temperature/salinity andfresh water, in ocean-only (solo) mode. +!! or temperature/salinity and fresh water, in ocean-only (solo) mode. !! !! These functions are called every time step, even if the wind stresses !! or buoyancy fluxes are constant in time - in that case these routines @@ -12,6 +12,7 @@ module MOM_surface_forcing use MOM_constants, only : hlv, hlf use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end use MOM_cpu_clock, only : CLOCK_MODULE +use MOM_data_override, only : data_override_init, data_override use MOM_diag_mediator, only : post_data, query_averaging_enabled use MOM_diag_mediator, only : diag_ctrl, safe_alloc_ptr use MOM_domains, only : pass_var, pass_vector, AGRID, To_South, To_West, To_All @@ -54,7 +55,6 @@ module MOM_surface_forcing use BFB_surface_forcing, only : BFB_surface_forcing_init, BFB_surface_forcing_CS use dumbbell_surface_forcing, only : dumbbell_surface_forcing_init, dumbbell_surface_forcing_CS use dumbbell_surface_forcing, only : dumbbell_buoyancy_forcing -use data_override_mod, only : data_override_init, data_override implicit none ; private @@ -151,7 +151,7 @@ module MOM_surface_forcing character(len=200) :: runoff_file = '' !< The file from which the runoff is read character(len=200) :: longwaveup_file = '' !< The file from which the upward longwave heat flux is read - character(len=200) :: shortwaveup_file = '' !< The file from which the upward shorwave heat flux is read + character(len=200) :: shortwaveup_file = '' !< The file from which the upward shortwave heat flux is read character(len=200) :: SSTrestore_file = '' !< The file from which to read the sea surface !! 
temperature to restore toward @@ -161,7 +161,7 @@ module MOM_surface_forcing character(len=80) :: stress_x_var = '' !< X-windstress variable name in the input file character(len=80) :: stress_y_var = '' !< Y-windstress variable name in the input file character(len=80) :: ustar_var = '' !< ustar variable name in the input file - character(len=80) :: LW_var = '' !< lonngwave heat flux variable name in the input file + character(len=80) :: LW_var = '' !< longwave heat flux variable name in the input file character(len=80) :: SW_var = '' !< shortwave heat flux variable name in the input file character(len=80) :: latent_var = '' !< latent heat flux variable name in the input file character(len=80) :: sens_var = '' !< sensible heat flux variable name in the input file @@ -170,7 +170,7 @@ module MOM_surface_forcing character(len=80) :: snow_var = '' !< snowfall variable name in the input file character(len=80) :: lrunoff_var = '' !< liquid runoff variable name in the input file character(len=80) :: frunoff_var = '' !< frozen runoff variable name in the input file - character(len=80) :: SST_restore_var = '' !< target sea surface temeperature variable name in the input file + character(len=80) :: SST_restore_var = '' !< target sea surface temperature variable name in the input file character(len=80) :: SSS_restore_var = '' !< target sea surface salinity variable name in the input file ! These variables give the number of time levels in the various forcing files. @@ -228,7 +228,7 @@ subroutine set_forcing(sfc_state, forces, fluxes, day_start, day_interval, G, US type(time_type), intent(in) :: day_interval !< Length of time over which these fluxes applied type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables real :: dt ! length of time over which fluxes applied [s] @@ -243,7 +243,7 @@ subroutine set_forcing(sfc_state, forces, fluxes, day_start, day_interval, G, US dt = time_type_to_real(day_interval) if (CS%first_call_set_forcing) then - ! Allocate memory for the mechanical and thermodyanmic forcing fields. + ! Allocate memory for the mechanical and thermodynamic forcing fields. call allocate_mech_forcing(G, forces, stress=.true., ustar=.true., press=.true.) call allocate_forcing_type(G, fluxes, ustar=.true., fix_accum_bug=CS%fix_ustar_gustless_bug) @@ -376,7 +376,7 @@ subroutine wind_forcing_const(sfc_state, forces, tau_x0, tau_y0, day, G, US, CS) type(time_type), intent(in) :: day !< The time of the fluxes type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables real :: Pa_conversion ! 
A unit conversion factor from Pa to the internal units [R Z L T-2 Pa-1 ~> 1] @@ -421,7 +421,7 @@ subroutine wind_forcing_2gyre(sfc_state, forces, day, G, US, CS) type(time_type), intent(in) :: day !< The time of the fluxes type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables real :: PI @@ -455,7 +455,7 @@ subroutine wind_forcing_1gyre(sfc_state, forces, day, G, US, CS) type(time_type), intent(in) :: day !< The time of the fluxes type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables real :: PI @@ -488,7 +488,7 @@ subroutine wind_forcing_gyres(sfc_state, forces, day, G, US, CS) type(time_type), intent(in) :: day !< The time of the fluxes type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables real :: PI, y, I_rho @@ -541,7 +541,7 @@ subroutine Neverworld_wind_forcing(sfc_state, forces, day, G, US, CS) type(time_type), intent(in) :: day !< Time used for determining the fluxes. type(ocean_grid_type), intent(inout) :: G !< Grid structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables integer :: i, j, is, ie, js, je, Isq, Ieq, Jsq, Jeq @@ -606,7 +606,7 @@ subroutine scurve_wind_forcing(sfc_state, forces, day, G, US, CS) type(time_type), intent(in) :: day !< Time used for determining the fluxes. type(ocean_grid_type), intent(inout) :: G !< Grid structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables integer :: i, j, kseg @@ -671,16 +671,16 @@ subroutine wind_forcing_from_file(sfc_state, forces, day, G, US, CS) type(time_type), intent(in) :: day !< The time of the fluxes type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables character(len=200) :: filename ! The name of the input file. - real :: temp_x(SZI_(G),SZJ_(G)) ! Pseudo-zonal and psuedo-meridional + real :: temp_x(SZI_(G),SZJ_(G)) ! 
Pseudo-zonal and pseudo-meridional real :: temp_y(SZI_(G),SZJ_(G)) ! wind stresses at h-points [R L Z T-1 ~> Pa]. real :: Pa_conversion ! A unit conversion factor from Pa to the internal wind stress ! units [R Z L T-2 Pa-1 ~> 1] integer :: time_lev_daily ! The time levels to read for fields with - integer :: time_lev_monthly ! daily and montly cycles. + integer :: time_lev_monthly ! daily and monthly cycles. integer :: time_lev ! The time level that is used for a field. integer :: days, seconds integer :: i, j, is, ie, js, je, Isq, Ieq, Jsq, Jeq @@ -787,13 +787,13 @@ subroutine wind_forcing_from_file(sfc_state, forces, day, G, US, CS) call pass_vector(forces%taux, forces%tauy, G%Domain, To_All) if (.not.read_Ustar) then if (CS%read_gust_2d) then - do j=js, je ; do i=is, ie + do j=js,je ; do i=is,ie forces%ustar(i,j) = sqrt((CS%gust(i,j) + & sqrt(0.5*((forces%tauy(i,j-1)**2 + forces%tauy(i,j)**2) + & (forces%taux(i-1,j)**2 + forces%taux(i,j)**2))) ) * US%L_to_Z / CS%Rho0 ) enddo ; enddo else - do j=js, je ; do i=is, ie + do j=js,je ; do i=is,ie forces%ustar(i,j) = sqrt(US%L_to_Z * ( (CS%gust_const/CS%Rho0) + & sqrt(0.5*((forces%tauy(i,j-1)**2 + forces%tauy(i,j)**2) + & (forces%taux(i-1,j)**2 + forces%taux(i,j)**2)))/CS%Rho0)) @@ -826,68 +826,58 @@ subroutine wind_forcing_by_data_override(sfc_state, forces, day, G, US, CS) type(time_type), intent(in) :: day !< The time of the fluxes type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables - real :: temp_x(SZI_(G),SZJ_(G)) ! Pseudo-zonal and psuedo-meridional - real :: temp_y(SZI_(G),SZJ_(G)) ! wind stresses at h-points [Pa]. - real :: temp_ustar(SZI_(G),SZJ_(G)) ! ustar [m s-1] (not rescaled). + real :: temp_x(SZI_(G),SZJ_(G)) ! Pseudo-zonal wind stresses at h-points [R Z L T-2 ~> Pa]. + real :: temp_y(SZI_(G),SZJ_(G)) ! Pseudo-meridional wind stresses at h-points [R Z L T-2 ~> Pa]. real :: Pa_conversion ! A unit conversion factor from Pa to the internal units [R Z L T-2 Pa-1 ~> 1] - integer :: i, j, is_in, ie_in, js_in, je_in - logical :: read_uStar + integer :: i, j call callTree_enter("wind_forcing_by_data_override, MOM_surface_forcing.F90") if (.not.CS%dataOverrideIsInitialized) then call allocate_mech_forcing(G, forces, stress=.true., ustar=.true., press=.true.) - call data_override_init(Ocean_domain_in=G%Domain%mpp_domain) + call data_override_init(G%Domain) CS%dataOverrideIsInitialized = .True. endif - is_in = G%isc - G%isd + 1 ; ie_in = G%iec - G%isd + 1 - js_in = G%jsc - G%jsd + 1 ; je_in = G%jec - G%jsd + 1 Pa_conversion = US%kg_m3_to_R*US%m_s_to_L_T**2*US%L_to_Z temp_x(:,:) = 0.0 ; temp_y(:,:) = 0.0 - call data_override('OCN', 'taux', temp_x, day, is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) - call data_override('OCN', 'tauy', temp_y, day, is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) + ! CS%wind_scale is ignored here because it is not set in this mode. + call data_override(G%Domain, 'taux', temp_x, day, scale=Pa_conversion) + call data_override(G%Domain, 'tauy', temp_y, day, scale=Pa_conversion) call pass_vector(temp_x, temp_y, G%Domain, To_All, AGRID) - ! Ignore CS%wind_scale when using data_override ?????
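The changes above move this driver onto the domain-aware MOM_data_override wrappers: instead of passing compute-domain index bounds and rescaling the result by hand, each call now takes the MOM domain plus an optional scale factor that is applied as the field is read. A minimal usage sketch, mirroring the calls in this hunk and assuming that interface, is:

  ! Sketch of the updated interface as used in this patch; see MOM_data_override for the
  ! authoritative signatures.
  use MOM_data_override, only : data_override_init, data_override

  call data_override_init(G%Domain)   ! once, before the first override call
  call data_override(G%Domain, 'taux', temp_x, day, scale=Pa_conversion)
  call data_override(G%Domain, 'tauy', temp_y, day, scale=Pa_conversion)

Because data_override leaves a field untouched when the data_table has no entry for it, the same pattern can safely follow an internally computed default, as is done below for forces%ustar.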
do j=G%jsc,G%jec ; do I=G%isc-1,G%IecB - forces%taux(I,j) = Pa_conversion * 0.5 * (temp_x(i,j) + temp_x(i+1,j)) + forces%taux(I,j) = 0.5 * (temp_x(i,j) + temp_x(i+1,j)) enddo ; enddo do J=G%jsc-1,G%JecB ; do i=G%isc,G%iec - forces%tauy(i,J) = Pa_conversion * 0.5 * (temp_y(i,j) + temp_y(i,j+1)) + forces%tauy(i,J) = 0.5 * (temp_y(i,j) + temp_y(i,j+1)) enddo ; enddo - read_Ustar = (len_trim(CS%ustar_var) > 0) ! Need better control higher up ???? - if (read_Ustar) then - do j=G%jsc,G%jec ; do i=G%isc,G%iec ; temp_ustar(i,j) = US%Z_to_m*US%s_to_T*forces%ustar(i,j) ; enddo ; enddo - call data_override('OCN', 'ustar', temp_ustar, day, is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) - do j=G%jsc,G%jec ; do i=G%isc,G%iec ; forces%ustar(i,j) = US%m_to_Z*US%T_to_s*temp_ustar(i,j) ; enddo ; enddo + if (CS%read_gust_2d) then + call data_override(G%Domain, 'gust', CS%gust, day, scale=Pa_conversion) + do j=G%jsc,G%jec ; do i=G%isc,G%iec + forces%ustar(i,j) = sqrt((sqrt(temp_x(i,j)**2 + temp_y(i,j)**2) + & + CS%gust(i,j)) * US%L_to_Z / CS%Rho0) + enddo ; enddo else - if (CS%read_gust_2d) then - call data_override('OCN', 'gust', CS%gust, day, is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) - do j=G%jsc,G%jec ; do i=G%isc,G%iec - forces%ustar(i,j) = sqrt((Pa_conversion * sqrt(temp_x(i,j)*temp_x(i,j) + & - temp_y(i,j)*temp_y(i,j)) + CS%gust(i,j)) * US%L_to_Z / CS%Rho0) - enddo ; enddo - else - do j=G%jsc,G%jec ; do i=G%isc,G%iec - forces%ustar(i,j) = sqrt(US%L_to_Z * (Pa_conversion*sqrt(temp_x(i,j)*temp_x(i,j) + & - temp_y(i,j)*temp_y(i,j))/CS%Rho0 + CS%gust_const/CS%Rho0 )) - enddo ; enddo - endif + do j=G%jsc,G%jec ; do i=G%isc,G%iec + forces%ustar(i,j) = sqrt(US%L_to_Z * (sqrt(temp_x(i,j)**2 + temp_y(i,j)**2)/CS%Rho0 + & + CS%gust_const/CS%Rho0)) + enddo ; enddo endif + ! Give the data override the option to modify the newly calculated forces%ustar. + call data_override(G%Domain, 'ustar', forces%ustar, day, scale=US%m_to_Z*US%T_to_s) call pass_vector(forces%taux, forces%tauy, G%Domain, To_All) -! call pass_var(forces%ustar, G%Domain, To_All) Not needed ????? call callTree_leave("wind_forcing_by_data_override") end subroutine wind_forcing_by_data_override -!> Specifies zero surface bouyancy fluxes from input files. +!> Specifies zero surface buoyancy fluxes from input files. subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) type(surface), intent(inout) :: sfc_state !< A structure containing fields that !! describe the surface state of the ocean. @@ -897,7 +887,7 @@ subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) !! the fluxes apply [s] type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables real, dimension(SZI_(G),SZJ_(G)) :: & @@ -1165,7 +1155,7 @@ subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) call callTree_leave("buoyancy_forcing_from_files") end subroutine buoyancy_forcing_from_files -!> Specifies zero surface bouyancy fluxes from data over-ride. +!> Specifies zero surface buoyancy fluxes from data over-ride. subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US, CS) type(surface), intent(inout) :: sfc_state !< A structure containing fields that !! 
describe the surface state of the ocean. @@ -1175,7 +1165,7 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US !! the fluxes apply [s] type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables real, dimension(SZI_(G),SZJ_(G)) :: & @@ -1190,14 +1180,7 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US real :: kg_m2_s_conversion ! A combination of unit conversion factors for rescaling ! mass fluxes [R Z s m2 kg-1 T-1 ~> 1]. real :: rhoXcp ! The mean density times the heat capacity [Q R degC-1 ~> J m-3 degC-1]. - - integer :: time_lev_daily ! The time levels to read for fields with - integer :: time_lev_monthly ! daily and montly cycles. - integer :: itime_lev ! The time level that is used for a field. - - integer :: days, seconds integer :: i, j, is, ie, js, je, isd, ied, jsd, jed - integer :: is_in, ie_in, js_in, je_in call callTree_enter("buoyancy_forcing_from_data_override, MOM_surface_forcing.F90") @@ -1208,75 +1191,32 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US if (CS%use_temperature) rhoXcp = CS%Rho0 * fluxes%C_p if (.not.CS%dataOverrideIsInitialized) then - call data_override_init(Ocean_domain_in=G%Domain%mpp_domain) + call data_override_init(G%Domain) CS%dataOverrideIsInitialized = .True. endif - is_in = G%isc - G%isd + 1 - ie_in = G%iec - G%isd + 1 - js_in = G%jsc - G%jsd + 1 - je_in = G%jec - G%jsd + 1 + call data_override(G%Domain, 'lw', fluxes%lw, day, scale=US%W_m2_to_QRZ_T) + call data_override(G%Domain, 'sw', fluxes%sw, day, scale=US%W_m2_to_QRZ_T) - call data_override('OCN', 'lw', fluxes%lw(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) ! scale=US%W_m2_to_QRZ_T - if (US%QRZ_T_to_W_m2 /= 1.0) then ; do j=js,je ; do i=is,ie - fluxes%lw(i,j) = fluxes%lw(i,j) * US%W_m2_to_QRZ_T - enddo ; enddo ; endif - call data_override('OCN', 'evap', fluxes%evap(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) + ! The normal MOM6 sign conventions are that fluxes%evap and fluxes%sens are positive into the + ! ocean but evap and sens are normally positive quantities in the files. + call data_override(G%Domain, 'evap', fluxes%evap, day, scale=-US%kg_m2s_to_RZ_T) + call data_override(G%Domain, 'sens', fluxes%sens, day, scale=-US%W_m2_to_QRZ_T) - ! note the sign convention do j=js,je ; do i=is,ie - ! The normal convention is that fluxes%evap positive into the ocean - ! but evap is normally a positive quantity in the files - ! This conversion is dangerous because it is not clear whether the data files have been read! - fluxes%evap(i,j) = -kg_m2_s_conversion*fluxes%evap(i,j) fluxes%latent(i,j) = CS%latent_heat_vapor*fluxes%evap(i,j) fluxes%latent_evap_diag(i,j) = fluxes%latent(i,j) enddo ; enddo - call data_override('OCN', 'sens', fluxes%sens(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) - - ! note the sign convention - do j=js,je ; do i=is,ie - fluxes%sens(i,j) = -US%W_m2_to_QRZ_T * fluxes%sens(i,j) ! Normal convention is positive into the ocean - ! 
but sensible is normally a positive quantity in the files - enddo ; enddo - - call data_override('OCN', 'sw', fluxes%sw(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) ! scale=US%W_m2_to_QRZ_T - if (US%QRZ_T_to_W_m2 /= 1.0) then ; do j=js,je ; do i=is,ie - fluxes%sw(i,j) = fluxes%sw(i,j) * US%W_m2_to_QRZ_T - enddo ; enddo ; endif - - call data_override('OCN', 'snow', fluxes%fprec(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) ! scale=kg_m2_s_conversion - - call data_override('OCN', 'rain', fluxes%lprec(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) ! scale=kg_m2_s_conversion - - call data_override('OCN', 'runoff', fluxes%lrunoff(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) ! scale=kg_m2_s_conversion - - call data_override('OCN', 'calving', fluxes%frunoff(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) ! scale=kg_m2_s_conversion - - if (kg_m2_s_conversion /= 1.0) then ; do j=js,je ; do i=is,ie - fluxes%lprec(i,j) = fluxes%lprec(i,j) * kg_m2_s_conversion - fluxes%fprec(i,j) = fluxes%fprec(i,j) * kg_m2_s_conversion - fluxes%lrunoff(i,j) = fluxes%lrunoff(i,j) * kg_m2_s_conversion - fluxes%frunoff(i,j) = fluxes%frunoff(i,j) * kg_m2_s_conversion - enddo ; enddo ; endif + call data_override(G%Domain, 'snow', fluxes%fprec, day, scale=kg_m2_s_conversion) + call data_override(G%Domain, 'rain', fluxes%lprec, day, scale=kg_m2_s_conversion) + call data_override(G%Domain, 'runoff', fluxes%lrunoff, day, scale=kg_m2_s_conversion) + call data_override(G%Domain, 'calving', fluxes%frunoff, day, scale=kg_m2_s_conversion) ! Read the SST and SSS fields for damping. if (CS%restorebuoy) then !#CTRL# .or. associated(CS%ctrl_forcing_CSp)) then - call data_override('OCN', 'SST_restore', CS%T_restore(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) - - call data_override('OCN', 'SSS_restore', CS%S_restore(:,:), day, & - is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in) - + call data_override(G%Domain, 'SST_restore', CS%T_restore, day) + call data_override(G%Domain, 'SSS_restore', CS%S_restore, day) endif ! restoring boundary fluxes @@ -1334,7 +1274,6 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US fluxes%latent_frunoff_diag(i,j) = -fluxes%frunoff(i,j)*CS%latent_heat_fusion enddo ; enddo - !#CTRL# if (associated(CS%ctrl_forcing_CSp)) then !#CTRL# do j=js,je ; do i=is,ie !#CTRL# SST_anom(i,j) = sfc_state%SST(i,j) - CS%T_Restore(i,j) @@ -1348,7 +1287,7 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US call callTree_leave("buoyancy_forcing_from_data_override") end subroutine buoyancy_forcing_from_data_override -!> This subroutine specifies zero surface bouyancy fluxes +!> This subroutine specifies zero surface buoyancy fluxes subroutine buoyancy_forcing_zero(sfc_state, fluxes, day, dt, G, CS) type(surface), intent(inout) :: sfc_state !< A structure containing fields that !! describe the surface state of the ocean. @@ -1357,7 +1296,7 @@ subroutine buoyancy_forcing_zero(sfc_state, fluxes, day, dt, G, CS) real, intent(in) :: dt !< The amount of time over which !! the fluxes apply [s] type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! 
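The wind and buoyancy data_override hunks above all apply the same modernization: the 'OCN' component string and the explicit compute-domain index arguments are replaced by the model's domain type, and the separate unit-conversion (and sign-flip) loops are folded into the new scale argument. A condensed before/after, using the sensible-heat field from the hunks above as the representative case:

  ! Old pattern: override into the array, then rescale and flip sign in a separate loop.
  call data_override('OCN', 'sens', fluxes%sens(:,:), day, &
                     is_in=is_in, ie_in=ie_in, js_in=js_in, je_in=je_in)
  do j=js,je ; do i=is,ie
    fluxes%sens(i,j) = -US%W_m2_to_QRZ_T * fluxes%sens(i,j)
  enddo ; enddo

  ! New pattern: pass the MOM domain and do the signed rescaling inside the call itself.
  call data_override(G%Domain, 'sens', fluxes%sens, day, scale=-US%W_m2_to_QRZ_T)

Folding the scaling into the call also removes the hazard flagged in the deleted evap comment, where the rescaling loop ran whether or not the data file had actually been read.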
Local variables integer :: i, j, is, ie, js, je @@ -1401,7 +1340,7 @@ subroutine buoyancy_forcing_const(sfc_state, fluxes, day, dt, G, US, CS) !! the fluxes apply [s] type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables integer :: i, j, is, ie, js, je @@ -1444,7 +1383,7 @@ subroutine buoyancy_forcing_linear(sfc_state, fluxes, day, dt, G, US, CS) !! the fluxes apply [s] type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call ! Local variables real :: y, T_restore, S_restore @@ -1518,7 +1457,7 @@ end subroutine buoyancy_forcing_linear !> Save a restart file for the forcing fields subroutine forcing_save_restart(CS, G, Time, directory, time_stamped, & filename_suffix) - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure type(time_type), intent(in) :: Time !< model time at this call; needed for mpp_write calls @@ -1526,7 +1465,7 @@ subroutine forcing_save_restart(CS, G, Time, directory, time_stamped, & logical, optional, intent(in) :: time_stamped !< If true, the restart file names !! include a unique time stamp; the default is false. character(len=*), optional, intent(in) :: filename_suffix !< optional suffix (e.g., a time-stamp) - !! to append to the restart fname + !! to append to the restart file name if (.not.associated(CS)) return if (.not.associated(CS%restart_CSp)) return @@ -1542,7 +1481,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, tracer_flow_C type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(param_file_type), intent(in) :: param_file !< A structure to parse for run-time parameters type(diag_ctrl), target, intent(inout) :: diag !< structure used to regulate diagnostic output - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call type(tracer_flow_control_CS), pointer :: tracer_flow_CSp !< Forcing for tracers? @@ -1593,9 +1532,9 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, tracer_flow_C "initialization of the model.", default=.true.) call get_param(param_file, mdl, "BUOY_CONFIG", CS%buoy_config, & - "The character string that indicates how buoyancy forcing "//& - "is specified. Valid options include (file), (zero), "//& - "(linear), (USER), (BFB) and (NONE).", default="zero") + "The character string that indicates how buoyancy forcing is specified. 
Valid "//& + "options include (file), (data_override), (zero), (const), (linear), (MESO), "//& + "(SCM_CVmix_tests), (BFB), (dumbbell), (USER) and (NONE).", default="zero") if (trim(CS%buoy_config) == "file") then call get_param(param_file, mdl, "ARCHAIC_OMIP_FORCING_FILE", CS%archaic_OMIP_file, & "If true, use the forcing variable decomposition from "//& @@ -1735,9 +1674,10 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, tracer_flow_C units='W/m2', scale=US%W_m2_to_QRZ_T, fail_if_missing=.true.) endif call get_param(param_file, mdl, "WIND_CONFIG", CS%wind_config, & - "The character string that indicates how wind forcing "//& - "is specified. Valid options include (file), (2gyre), "//& - "(1gyre), (gyres), (zero), and (USER).", default="zero") + "The character string that indicates how wind forcing is specified. Valid "//& + "options include (file), (data_override), (2gyre), (1gyre), (gyres), (zero), "//& + "(const), (Neverworld), (scurves), (ideal_hurr), (SCM_ideal_hurr), "//& + "(SCM_CVmix_tests) and (USER).", default="zero") if (trim(CS%wind_config) == "file") then call get_param(param_file, mdl, "WIND_FILE", CS%wind_file, & "The file in which the wind stresses are found in "//& @@ -1964,7 +1904,7 @@ end subroutine surface_forcing_init !> Deallocate memory associated with the surface forcing module subroutine surface_forcing_end(CS, fluxes) - type(surface_forcing_CS), pointer :: CS !< pointer to control struct returned by + type(surface_forcing_CS), pointer :: CS !< pointer to control structure returned by !! a previous surface_forcing_init call type(forcing), optional, intent(inout) :: fluxes !< A structure containing thermodynamic forcing fields ! Arguments: CS - A pointer to the control structure returned by a previous diff --git a/config_src/solo_driver/atmos_ocean_fluxes.F90 b/config_src/drivers/solo_driver/atmos_ocean_fluxes.F90 similarity index 100% rename from config_src/solo_driver/atmos_ocean_fluxes.F90 rename to config_src/drivers/solo_driver/atmos_ocean_fluxes.F90 diff --git a/config_src/solo_driver/user_surface_forcing.F90 b/config_src/drivers/solo_driver/user_surface_forcing.F90 similarity index 99% rename from config_src/solo_driver/user_surface_forcing.F90 rename to config_src/drivers/solo_driver/user_surface_forcing.F90 index f5372e07d2..940bcd04b4 100644 --- a/config_src/solo_driver/user_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/user_surface_forcing.F90 @@ -11,7 +11,6 @@ module user_surface_forcing use MOM_forcing_type, only : forcing, mech_forcing use MOM_forcing_type, only : allocate_forcing_type, allocate_mech_forcing use MOM_grid, only : ocean_grid_type -use MOM_io, only : file_exists, read_data use MOM_time_manager, only : time_type, operator(+), operator(/) use MOM_tracer_flow_control, only : call_tracer_set_forcing use MOM_tracer_flow_control, only : tracer_flow_control_CS @@ -89,7 +88,7 @@ subroutine USER_wind_forcing(sfc_state, forces, day, G, US, CS) ! is always positive. if (associated(forces%ustar)) then ; do j=js,je ; do i=is,ie ! This expression can be changed if desired, but need not be. 
- forces%ustar(i,j) = G%mask2dT(i,j) * sqrt((CS%gust_const + & + forces%ustar(i,j) = G%mask2dT(i,j) * sqrt((CS%gust_const + & sqrt(0.5*(forces%taux(I-1,j)**2 + forces%taux(I,j)**2) + & 0.5*(forces%tauy(i,J-1)**2 + forces%tauy(i,J)**2))) * (US%L_to_Z/CS%Rho0)) enddo ; enddo ; endif diff --git a/config_src/unit_drivers/MOM_sum_driver.F90 b/config_src/drivers/unit_drivers/MOM_sum_driver.F90 similarity index 79% rename from config_src/unit_drivers/MOM_sum_driver.F90 rename to config_src/drivers/unit_drivers/MOM_sum_driver.F90 index 5673b201ee..7291eb913a 100644 --- a/config_src/unit_drivers/MOM_sum_driver.F90 +++ b/config_src/drivers/unit_drivers/MOM_sum_driver.F90 @@ -18,15 +18,14 @@ program MOM_main use MOM_coms, only : EFP_type, operator(+), operator(-), assignment(=), EFP_to_real, real_to_EFP use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end use MOM_cpu_clock, only : CLOCK_COMPONENT -! use MOM_diag_mediator, only : diag_mediator_end, diag_mediator_init -! use MOM_diag_mediator, only : diag_mediator_close_registration - use MOM_domains, only : MOM_domains_init, MOM_infra_init, MOM_infra_end + use MOM_domains, only : MOM_domain_type, MOM_domains_init, MOM_infra_init, MOM_infra_end + use MOM_dyn_horgrid, only : dyn_horgrid_type, create_dyn_horgrid, destroy_dyn_horgrid use MOM_error_handler, only : MOM_error, MOM_mesg, WARNING, FATAL, is_root_pe use MOM_error_handler, only : MOM_set_verbosity use MOM_file_parser, only : read_param, get_param, log_param, log_version, param_file_type use MOM_file_parser, only : open_param_file, close_param_file - use MOM_grid, only : MOM_grid_init, ocean_grid_type use MOM_grid_initialize, only : set_grid_metrics + use MOM_hor_index, only : hor_index_type, hor_index_init use MOM_io, only : MOM_io_init, file_exists, open_file, close_file use MOM_io, only : check_nml_error, io_infra_init, io_infra_end use MOM_io, only : APPEND_FILE, ASCII_FILE, READONLY_FILE, SINGLE_FILE @@ -35,15 +34,15 @@ program MOM_main #include - type(ocean_grid_type) :: grid ! A structure containing metrics and grid info. - - type(param_file_type) :: param_file ! The structure indicating the file(s) + type(MOM_domain_type), pointer :: Domain => NULL() !< Ocean model domain + type(dyn_horgrid_type), pointer :: grid => NULL() ! A structure containing metrics and grid info + type(hor_index_type) :: HI ! A hor_index_type for array extents + type(param_file_type) :: param_file ! The structure indicating the file(s) ! containing all run-time parameters. - real :: max_depth + real :: max_depth ! The maximum ocean depth [m] integer :: verbosity integer :: num_sums - integer :: n, i, j, is, ie, js, je, nz - integer :: isd, ied, jsd, jed, IsdB, IedB, JsdB, JedB + integer :: n, i, j, is, ie, js, je, isd, ied, jsd, jed integer :: unit, io_status, ierr logical :: unit_in_use @@ -55,8 +54,8 @@ program MOM_main !----------------------------------------------------------------------- character(len=4), parameter :: vers_num = 'v2.0' -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "MOM_main (MOM_sum_driver)" ! This module's name. character(len=200) :: mesg @@ -79,15 +78,16 @@ program MOM_main verbosity = 2 ; call read_param(param_file, "VERBOSITY", verbosity) call MOM_set_verbosity(verbosity) - call MOM_domains_init(grid%domain, param_file) + call MOM_domains_init(Domain, param_file) call MOM_io_init(param_file) ! 
call diag_mediator_init(param_file) - call MOM_grid_init(grid, param_file) + call hor_index_init(Domain, HI, param_file) + call create_dyn_horgrid(grid, HI) + grid%Domain => Domain - is = grid%isc ; ie = grid%iec ; js = grid%jsc ; je = grid%jec ; nz = grid%ke - isd = grid%isd ; ied = grid%ied ; jsd = grid%jsd ; jed = grid%jed - IsdB = grid%IsdB ; IedB = grid%IedB ; JsdB = grid%JsdB ; JedB = grid%JedB + is = HI%isc ; ie = HI%iec ; js = HI%jsc ; je = HI%jec + isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ! Read all relevant parameters and write them to the model log. call log_version(param_file, "MOM", version, "") @@ -103,7 +103,7 @@ program MOM_main allocate(depth_tot_std(num_sums)) ; depth_tot_std(:) = 0.0 allocate(depth_tot_fastR(num_sums)) ; depth_tot_fastR(:) = 0.0 -! Set up the parameters of the physical domain (i.e. the grid), G +! Set up the parameters of the physical grid call set_grid_metrics(grid, param_file) ! Set up the bottom depth, grid%bathyT either analytically or from file @@ -161,37 +161,40 @@ program MOM_main endif enddo + call destroy_dyn_horgrid(grid) call io_infra_end ; call MOM_infra_end contains +!> This subroutine sets up the benchmark test case topography for debugging subroutine benchmark_init_topog_local(D, G, param_file, max_depth) - type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure - real, dimension(SZI_(G),SZJ_(G)), intent(out) :: D !< The ocean bottom depth in m + type(dyn_horgrid_type), intent(in) :: G !< The dynamic horizontal grid type + real, dimension(G%isd:G%ied,G%jsd:G%jed), & + intent(out) :: D !< Ocean bottom depth in m or [Z ~> m] if US is present type(param_file_type), intent(in) :: param_file !< A structure to parse for run-time parameters - real, intent(in) :: max_depth !< The maximum ocean depth in m + real, intent(in) :: max_depth !< The maximum ocean depth [m] -! This subroutine sets up the benchmark test case topography real :: min_depth ! The minimum ocean depth in m. real :: PI ! 3.1415926... calculated as 4*atan(1) real :: D0 ! A constant to make the maximum ! ! basin depth MAXIMUM_DEPTH. ! + real :: m_to_Z ! A dimensional rescaling factor. real :: x, y -! This include declares and sets the variable "version". -#include "version_variable.h" - character(len=40) :: mdl = "benchmark_initialize_topography" ! This subroutine's name. + ! This include declares and sets the variable "version". +# include "version_variable.h" + character(len=40) :: mdl = "benchmark_init_topog_local" ! This subroutine's name. integer :: i, j, is, ie, js, je, isd, ied, jsd, jed is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec isd = G%isd ; ied = G%ied ; jsd = G%jsd ; jed = G%jed - call MOM_mesg(" benchmark_initialization.F90, benchmark_initialize_topography: setting topography", 5) + m_to_Z = 1.0 ! ; if (present(US)) m_to_Z = US%m_to_Z - call log_version(param_file, mdl, version) + call log_version(param_file, mdl, version, "") call get_param(param_file, mdl, "MINIMUM_DEPTH", min_depth, & - "The minimum depth of the ocean.", units="m", default=0.0) + "The minimum depth of the ocean.", units="m", default=0.0, scale=m_to_Z) PI = 4.0*atan(1.0) - D0 = max_depth / 0.5; + D0 = max_depth / 0.5 ! Calculate the depth of the bottom. 
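The MOM_sum_driver changes above replace the heavyweight ocean_grid_type with a dyn_horgrid_type built from a hor_index_type. Gathered into one place for readability, the grid lifecycle now used by the driver is, in sketch form (every call below appears in the hunks above):

  type(MOM_domain_type), pointer :: Domain => NULL()
  type(dyn_horgrid_type), pointer :: grid => NULL()
  type(hor_index_type) :: HI

  call MOM_domains_init(Domain, param_file)    ! decompose the domain across PEs
  call hor_index_init(Domain, HI, param_file)  ! set up the array-extent bookkeeping
  call create_dyn_horgrid(grid, HI)            ! allocate the horizontal grid arrays
  grid%Domain => Domain
  call set_grid_metrics(grid, param_file)      ! fill the grid metrics
  ! ... use the grid (e.g. grid%bathyT) ...
  call destroy_dyn_horgrid(grid)               ! release the grid before shutting down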
do i=is,ie ; do j=js,je diff --git a/config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 b/config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 index f3d63dd061..e50f2ccf0b 100644 --- a/config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 +++ b/config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 @@ -12,11 +12,15 @@ module FMS_coupler_util subroutine extract_coupler_values(BC_struc, BC_index, BC_element, array_out, ilb, jlb, & is, ie, js, je, conversion) real, dimension(ilb:,jlb:),intent(out) :: array_out !< The array being filled with the input values - integer, intent(in) :: ilb, jlb !< Lower bounds + integer, intent(in) :: ilb !< Lower bounds + integer, intent(in) :: jlb !< Lower bounds type(coupler_2d_bc_type), intent(in) :: BC_struc !< The type from which the data is being extracted integer, intent(in) :: BC_index !< The boundary condition number being extracted integer, intent(in) :: BC_element !< The element of the boundary condition being extracted - integer, optional, intent(in) :: is, ie, js, je !< The i- and j- limits of array_out to be filled + integer, optional, intent(in) :: is !< The i- limits of array_out to be filled + integer, optional, intent(in) :: ie !< The i- limits of array_out to be filled + integer, optional, intent(in) :: js !< The j- limits of array_out to be filled + integer, optional, intent(in) :: je !< The j- limits of array_out to be filled real, optional, intent(in) :: conversion !< A number that every element is multiplied by end subroutine extract_coupler_values @@ -24,11 +28,15 @@ end subroutine extract_coupler_values subroutine set_coupler_values(array_in, BC_struc, BC_index, BC_element, ilb, jlb,& is, ie, js, je, conversion) real, dimension(ilb:,jlb:), intent(in) :: array_in !< The array containing the values to load into the BC - integer, intent(in) :: ilb, jlb !< Lower bounds + integer, intent(in) :: ilb !< Lower bounds + integer, intent(in) :: jlb !< Lower bounds type(coupler_2d_bc_type), intent(inout) :: BC_struc !< The type into which the data is being loaded integer, intent(in) :: BC_index !< The boundary condition number being set integer, intent(in) :: BC_element !< The element of the boundary condition being set - integer, optional, intent(in) :: is, ie, js, je !< The i- and j- limits of array_out to be filled + integer, optional, intent(in) :: is !< The i- limits of array_out to be filled + integer, optional, intent(in) :: ie !< The i- limits of array_out to be filled + integer, optional, intent(in) :: js !< The j- limits of array_out to be filled + integer, optional, intent(in) :: je !< The j- limits of array_out to be filled real, optional, intent(in) :: conversion !< A number that every element is multiplied by end subroutine set_coupler_values diff --git a/config_src/external/GFDL_ocean_BGC/generic_tracer.F90 b/config_src/external/GFDL_ocean_BGC/generic_tracer.F90 index bfbc846af9..4d2e4183f7 100644 --- a/config_src/external/GFDL_ocean_BGC/generic_tracer.F90 +++ b/config_src/external/GFDL_ocean_BGC/generic_tracer.F90 @@ -33,7 +33,17 @@ end subroutine generic_tracer_register !> Initialize generic tracers subroutine generic_tracer_init(isc,iec,jsc,jec,isd,ied,jsd,jed,nk,ntau,axes,grid_tmask,grid_kmt,init_time) - integer, intent(in) :: isc,iec,jsc,jec,isd,ied,jsd,jed,nk,ntau,axes(3) !< Domain boundaries and axes + integer, intent(in) :: isc !< Computation start index in i direction + integer, intent(in) :: iec !< Computation end index in i direction + integer, intent(in) :: jsc !< Computation start index in j direction + 
integer, intent(in) :: jec !< Computation end index in j direction + integer, intent(in) :: isd !< Data start index in i direction + integer, intent(in) :: ied !< Data end index in i direction + integer, intent(in) :: jsd !< Data start index in j direction + integer, intent(in) :: jed !< Data end index in j direction + integer, intent(in) :: nk !< Number of levels in k direction + integer, intent(in) :: ntau !< Unknown + integer, intent(in) :: axes(3) !< Domain axes? type(time_type), intent(in) :: init_time !< Time real, dimension(:,:,:),target, intent(in) :: grid_tmask !< Mask integer, dimension(:,:) , intent(in) :: grid_kmt !< Number of wet cells in column @@ -61,7 +71,7 @@ subroutine generic_tracer_source(Temp,Salt,rho_dzt,dzt,hblt_depth,ilb,jlb,tau,dt frunoff,grid_ht, current_wave_stress, sosga) real, dimension(ilb:,jlb:,:), intent(in) :: Temp !< Potential temperature [deg C] real, dimension(ilb:,jlb:,:), intent(in) :: Salt !< Salinity [psu] - real, dimension(ilb:,jlb:,:), intent(in) :: rho_dzt + real, dimension(ilb:,jlb:,:), intent(in) :: rho_dzt !< Unknown real, dimension(ilb:,jlb:,:), intent(in) :: dzt !< Ocean layer thickness [m] real, dimension(ilb:,jlb:), intent(in) :: hblt_depth !< Boundary layer depth integer, intent(in) :: ilb !< Lower bounds of x extent of input arrays on data domain @@ -71,14 +81,14 @@ subroutine generic_tracer_source(Temp,Salt,rho_dzt,dzt,hblt_depth,ilb,jlb,tau,dt real, dimension(ilb:,jlb:), intent(in) :: grid_dat !< Unknown type(time_type), intent(in) :: model_time !< Time integer, intent(in) :: nbands !< Unknown - real, dimension(:), intent(in) :: max_wavelength_band + real, dimension(:), intent(in) :: max_wavelength_band !< Unknown real, dimension(:,ilb:,jlb:), intent(in) :: sw_pen_band !< Shortwave penetration real, dimension(:,ilb:,jlb:,:), intent(in) :: opacity_band !< Unknown real, dimension(ilb:,jlb:),optional, intent(in) :: internal_heat !< Unknown real, dimension(ilb:,jlb:),optional, intent(in) :: frunoff !< Unknown real, dimension(ilb:,jlb:),optional, intent(in) :: grid_ht !< Unknown real, dimension(ilb:,jlb:),optional , intent(in) :: current_wave_stress !< Unknown - real, optional , intent(in) :: sosga ! global avg. sea surface salinity + real, optional , intent(in) :: sosga !< Global average sea surface salinity end subroutine generic_tracer_source !> Update the tracers from bottom fluxes diff --git a/config_src/external/GFDL_ocean_BGC/generic_tracer_utils.F90 b/config_src/external/GFDL_ocean_BGC/generic_tracer_utils.F90 index 6937ef4710..de513a7f11 100644 --- a/config_src/external/GFDL_ocean_BGC/generic_tracer_utils.F90 +++ b/config_src/external/GFDL_ocean_BGC/generic_tracer_utils.F90 @@ -21,8 +21,10 @@ module g_tracer_utils !> Tracer concentration in river runoff real, allocatable, dimension(:,:) :: trunoff logical :: requires_restart = .true. !< Unknown - !> Tracer source: filename, type, var name, units, record, gridfile - character(len=fm_string_len) :: src_file, src_var_name, src_var_unit, src_var_gridspec + character(len=fm_string_len) :: src_file !< Tracer source filename + character(len=fm_string_len) :: src_var_name !< Tracer source variable name + character(len=fm_string_len) :: src_var_unit !< Tracer source variable units + character(len=fm_string_len) :: src_var_gridspec !< Tracer source grid file name integer :: src_var_record !< Unknown logical :: requires_src_info = .false. !< Unknown real :: src_var_unit_conversion = 1.0 !< This factor depends on the tracer. 
Ask Jasmin @@ -38,7 +40,8 @@ module g_tracer_utils type g_tracer_common ! type(g_diag_ctrl) :: diag_CS !< Unknown !> Domain extents - integer :: isd,jsd + integer :: isd !< Start index of the data domain in the i-direction + integer :: jsd !< Start index of the data domain in the j-direction end type g_tracer_common !> Unknown dangerous module data! @@ -102,7 +105,17 @@ subroutine g_tracer_set_csdiag(diag_CS) end subroutine g_tracer_set_csdiag subroutine g_tracer_set_common(isc,iec,jsc,jec,isd,ied,jsd,jed,nk,ntau,axes,grid_tmask,grid_kmt,init_time) - integer, intent(in) :: isc,iec,jsc,jec,isd,ied,jsd,jed,nk,ntau,axes(3) !< Unknown + integer, intent(in) :: isc !< Computation start index in i direction + integer, intent(in) :: iec !< Computation end index in i direction + integer, intent(in) :: jsc !< Computation start index in j direction + integer, intent(in) :: jec !< Computation end index in j direction + integer, intent(in) :: isd !< Data start index in i direction + integer, intent(in) :: ied !< Data end index in i direction + integer, intent(in) :: jsd !< Data start index in j direction + integer, intent(in) :: jed !< Data end index in j direction + integer, intent(in) :: nk !< Number of levels in k direction + integer, intent(in) :: ntau !< Unknown + integer, intent(in) :: axes(3) !< Domain axes? real, dimension(isd:,jsd:,:),intent(in) :: grid_tmask !< Unknown integer,dimension(isd:,jsd:),intent(in) :: grid_kmt !< Unknown type(time_type), intent(in) :: init_time !< Unknown @@ -110,10 +123,19 @@ end subroutine g_tracer_set_common subroutine g_tracer_get_common(isc,iec,jsc,jec,isd,ied,jsd,jed,nk,ntau,& axes,grid_tmask,grid_mask_coast,grid_kmt,init_time,diag_CS) - integer, intent(out) :: isc,iec,jsc,jec,isd,ied,jsd,jed,nk,ntau !< Unknown - integer,optional, intent(out) :: axes(3) !< Unknown + integer, intent(out) :: isc !< Computation start index in i direction + integer, intent(out) :: iec !< Computation end index in i direction + integer, intent(out) :: jsc !< Computation start index in j direction + integer, intent(out) :: jec !< Computation end index in j direction + integer, intent(out) :: isd !< Data start index in i direction + integer, intent(out) :: ied !< Data end index in i direction + integer, intent(out) :: jsd !< Data start index in j direction + integer, intent(out) :: jed !< Data end index in j direction + integer, intent(out) :: nk !< Number of levels in k direction + integer, intent(out) :: ntau !< Unknown + integer, optional, intent(out) :: axes(3) !< Unknown type(time_type), optional, intent(out) :: init_time !< Unknown - real, optional, dimension(:,:,:),pointer :: grid_tmask !< Unknown + real, optional, dimension(:,:,:), pointer :: grid_tmask !< Unknown integer, optional, dimension(:,:), pointer :: grid_mask_coast !< Unknown integer, optional, dimension(:,:), pointer :: grid_kmt !< Unknown type(g_diag_ctrl), optional, pointer :: diag_CS !< Unknown @@ -123,32 +145,33 @@ end subroutine g_tracer_get_common subroutine g_tracer_get_4D(g_tracer_list,name,member,array_ptr) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - real, dimension(:,:,:,:), pointer :: array_ptr + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + real, dimension(:,:,:,:), pointer :: array_ptr !< Unknown end subroutine g_tracer_get_4D !> Unknown subroutine g_tracer_get_3D(g_tracer_list,name,member,array_ptr) character(len=*), intent(in) :: name !< Unknown character(len=*), 
intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - real, dimension(:,:,:), pointer :: array_ptr !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + real, dimension(:,:,:), pointer :: array_ptr !< Unknown end subroutine g_tracer_get_3D !> Unknown subroutine g_tracer_get_2D(g_tracer_list,name,member,array_ptr) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - real, dimension(:,:), pointer :: array_ptr !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + real, dimension(:,:), pointer :: array_ptr !< Unknown end subroutine g_tracer_get_2D !> Unknown subroutine g_tracer_get_4D_val(g_tracer_list,name,member,array,isd,jsd) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - integer, intent(in) :: isd,jsd !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + integer, intent(in) :: isd !< Unknown + integer, intent(in) :: jsd !< Unknown real, dimension(isd:,jsd:,:,:), intent(out):: array !< Unknown end subroutine g_tracer_get_4D_val @@ -156,8 +179,9 @@ end subroutine g_tracer_get_4D_val subroutine g_tracer_get_3D_val(g_tracer_list,name,member,array,isd,jsd,ntau,positive) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - integer, intent(in) :: isd,jsd !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + integer, intent(in) :: isd !< Unknown + integer, intent(in) :: jsd !< Unknown integer, optional, intent(in) :: ntau !< Unknown logical, optional, intent(in) :: positive !< Unknown real, dimension(isd:,jsd:,:), intent(out):: array !< Unknown @@ -169,8 +193,9 @@ end subroutine g_tracer_get_3D_val subroutine g_tracer_get_2D_val(g_tracer_list,name,member,array,isd,jsd) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - integer, intent(in) :: isd,jsd !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + integer, intent(in) :: isd !< Unknown + integer, intent(in) :: jsd !< Unknown real, dimension(isd:,jsd:), intent(out):: array !< Unknown end subroutine g_tracer_get_2D_val @@ -178,15 +203,15 @@ end subroutine g_tracer_get_2D_val subroutine g_tracer_get_real(g_tracer_list,name,member,value) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - real, intent(out):: value + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + real, intent(out):: value !< Unknown end subroutine g_tracer_get_real !> Unknown subroutine g_tracer_get_string(g_tracer_list,name,member,string) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown character(len=fm_string_len), intent(out) :: string !< Unknown end subroutine g_tracer_get_string @@ -194,8 +219,9 @@ end subroutine g_tracer_get_string subroutine g_tracer_set_2D(g_tracer_list,name,member,array,isd,jsd,weight) character(len=*), intent(in) :: name !< Unknown character(len=*), 
intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - integer, intent(in) :: isd,jsd !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + integer, intent(in) :: isd !< Unknown + integer, intent(in) :: jsd !< Unknown real, dimension(isd:,jsd:),intent(in) :: array !< Unknown real, optional ,intent(in) :: weight !< Unknown end subroutine g_tracer_set_2D @@ -204,8 +230,9 @@ end subroutine g_tracer_set_2D subroutine g_tracer_set_3D(g_tracer_list,name,member,array,isd,jsd,ntau) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - integer, intent(in) :: isd,jsd !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + integer, intent(in) :: isd !< Unknown + integer, intent(in) :: jsd !< Unknown integer, optional, intent(in) :: ntau !< Unknown real, dimension(isd:,jsd:,:), intent(in) :: array !< Unknown end subroutine g_tracer_set_3D @@ -214,16 +241,17 @@ end subroutine g_tracer_set_3D subroutine g_tracer_set_4D(g_tracer_list,name,member,array,isd,jsd) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown - integer, intent(in) :: isd,jsd !< Unknown - real, dimension(isd:,jsd:,:,:), intent(in) :: array !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown + integer, intent(in) :: isd !< Unknown + integer, intent(in) :: jsd !< Unknown + real, dimension(isd:,jsd:,:,:), intent(in) :: array !< Unknown end subroutine g_tracer_set_4D !> Unknown subroutine g_tracer_set_real(g_tracer_list,name,member,value) character(len=*), intent(in) :: name !< Unknown character(len=*), intent(in) :: member !< Unknown - type(g_tracer_type), pointer :: g_tracer_list, g_tracer !< Unknown + type(g_tracer_type), pointer :: g_tracer_list !< Unknown real, intent(in) :: value !< Unknown end subroutine g_tracer_set_real @@ -265,7 +293,7 @@ end subroutine g_tracer_get_next !! Since the surface flux from the atmosphere (%stf) has the units of mol/m^2/sec the resulting !! tracer concentration has units of mol/Kg subroutine g_tracer_vertdiff_G(g_tracer, h_old, ea, eb, dt, kg_m2_to_H, m_to_H, tau, mom) - type(g_tracer_type), pointer :: g_tracer + type(g_tracer_type), pointer :: g_tracer !< Unknown !> Layer thickness before entrainment, in m or kg m-2. real, dimension(g_tracer_com%isd:,g_tracer_com%jsd:,:), intent(in) :: h_old !> The amount of fluid entrained from the layer above, in H. @@ -278,7 +306,7 @@ subroutine g_tracer_vertdiff_G(g_tracer, h_old, ea, eb, dt, kg_m2_to_H, m_to_H, real, intent(in) :: m_to_H !< A conversion factor that translates m into the units !! of h_old (H). 
integer, intent(in) :: tau !< Unknown - logical, intent(in), optional :: mom + logical, intent(in), optional :: mom !< Unknown end subroutine g_tracer_vertdiff_G end module g_tracer_utils diff --git a/config_src/external/ODA_hooks/ocean_da_types.F90 b/config_src/external/ODA_hooks/ocean_da_types.F90 index bc5af1d782..e71c76a048 100644 --- a/config_src/external/ODA_hooks/ocean_da_types.F90 +++ b/config_src/external/ODA_hooks/ocean_da_types.F90 @@ -7,10 +7,9 @@ module ocean_da_types_mod private - !> Example type for ocean ensemble DA state type, public :: OCEAN_CONTROL_STRUCT - integer :: ensemble_size + integer :: ensemble_size !< ensemble size real, pointer, dimension(:,:,:) :: SSH=>NULL() !NULL() !NULL() !NULL() !< all profiles are stored as linked list. - type(ocean_profile_type), pointer :: prev=>NULL() - type(ocean_profile_type), pointer :: cnext=>NULL() ! current profiles are stored as linked list. - type(ocean_profile_type), pointer :: cprev=>NULL() - integer :: nbr_xi, nbr_yi ! nearest neighbor model gridpoint for the profile - real :: nbr_dist ! distance to nearest neighbor model gridpoint + type(ocean_profile_type), pointer :: prev=>NULL() !< previous + type(ocean_profile_type), pointer :: cnext=>NULL() !< current profiles are stored as linked list. + type(ocean_profile_type), pointer :: cprev=>NULL() !< previous + integer :: nbr_xi !< x nearest neighbor model gridpoint for the profile + integer :: nbr_yi !< y nearest neighbor model gridpoint for the profile + real :: nbr_dist !< distance to nearest neighbor model gridpoint logical :: compute !< profile is within current compute domain real, dimension(:,:), pointer :: depth => NULL() !< depth of measurement [m] real, dimension(:,:), pointer :: data => NULL() !< data by variable type @@ -54,32 +55,36 @@ module ocean_da_types_mod real, dimension(:,:,:), pointer :: analysis => NULL() !< ensemble member analysis type(forward_operator_type), pointer :: obs_def => NULL() !< observation forward operator type(time_type) :: time !< profile time type - real :: i_index, j_index !< model longitude and latitude indices respectively + real :: i_index !< model longitude indices respectively + real :: j_index !< model latitude indices respectively real, dimension(:,:), pointer :: k_index !< model depth indices type(time_type) :: tdiff !< difference between model time and observation time - character(len=128) :: filename + character(len=128) :: filename !< a filename end type ocean_profile_type !> Example forward operator type. type, public :: forward_operator_type - integer :: num + integer :: num !< how many? 
integer, dimension(2) :: state_size !< for integer, dimension(:), pointer :: state_var_index !< for flattened data integer, dimension(:), pointer :: i_index !< i-dimension index integer, dimension(:), pointer :: j_index !< j-dimension index - real, dimension(:), pointer :: coef + real, dimension(:), pointer :: coef !< coefficient end type forward_operator_type !> Grid type for DA type, public :: grid_type - real, pointer, dimension(:,:) :: x=>NULL(), y=>NULL() - real, pointer, dimension(:,:,:) :: z=>NULL() - real, pointer, dimension(:,:,:) :: h=>NULL() - real, pointer, dimension(:,:) :: basin_mask => NULL() - real, pointer, dimension(:,:,:) :: mask => NULL() - real, pointer, dimension(:,:) :: bathyT => NULL() - logical :: tripolar_N - integer :: ni, nj, nk + real, pointer, dimension(:,:) :: x=>NULL() !< x + real, pointer, dimension(:,:) :: y=>NULL() !< y + real, pointer, dimension(:,:,:) :: z=>NULL() !< z + real, pointer, dimension(:,:,:) :: h=>NULL() !< h + real, pointer, dimension(:,:) :: basin_mask => NULL() !< basin mask + real, pointer, dimension(:,:,:) :: mask => NULL() !< land mask? + real, pointer, dimension(:,:) :: bathyT => NULL() !< bathymetry at T points + logical :: tripolar_N !< True for tripolar grids + integer :: ni !< ni + integer :: nj !< nj + integer :: nk !< nk end type grid_type end module ocean_da_types_mod diff --git a/config_src/external/ODA_hooks/write_ocean_obs.F90 b/config_src/external/ODA_hooks/write_ocean_obs.F90 index a2c41b58d6..da4a404d3d 100644 --- a/config_src/external/ODA_hooks/write_ocean_obs.F90 +++ b/config_src/external/ODA_hooks/write_ocean_obs.F90 @@ -15,13 +15,13 @@ module write_ocean_obs_mod contains !> Open a profile file -integer function open_profile_file(name, nvar, grid_lon, grid_lat,thread,fset) +integer function open_profile_file(name, nvar, grid_lon, grid_lat, thread, fset) character(len=*), intent(in) :: name !< File name integer, intent(in), optional :: nvar !< Number of variables real, dimension(:), optional, intent(in) :: grid_lon !< Longitude [degreeE] real, dimension(:), optional, intent(in) :: grid_lat !< Latitude [degreeN] - integer, intent(in), optional :: thread !< Thread - integer, intent(in), optional :: fset !< File set + integer, optional, intent(in) :: thread !< Thread number + integer, optional, intent(in) :: fset !< File set open_profile_file=-1 end function open_profile_file @@ -29,7 +29,7 @@ end function open_profile_file !> Write a profile subroutine write_profile(unit,profile) integer, intent(in) :: unit !< File unit - type(ocean_profile_type), intent(in) :: profile !< Profile + type(ocean_profile_type), intent(in) :: profile !< Profile to write return end subroutine write_profile diff --git a/config_src/external/drifters/MOM_particles.F90 b/config_src/external/drifters/MOM_particles.F90 new file mode 100644 index 0000000000..135f5d284c --- /dev/null +++ b/config_src/external/drifters/MOM_particles.F90 @@ -0,0 +1,59 @@ +!> A set of dummy interfaces for compiling the MOM6 drifters code +module MOM_particles_mod + +use MOM_grid, only : ocean_grid_type +use MOM_time_manager, only : time_type, get_date, operator(-) +use MOM_variables, only : thermo_var_ptrs + + +use particles_types_mod, only: particles, particles_gridded + +public particles_run, particles_init, particles_save_restart, particles_end + +contains + +!> Initializes particles container "parts" +subroutine particles_init(parts, Grid, Time, dt, u, v) + ! 
Arguments + type(particles), pointer, intent(out) :: parts !< Container for all types and memory + type(ocean_grid_type), target, intent(in) :: Grid !< Grid type from parent model + type(time_type), intent(in) :: Time !< Time type from parent model + real, intent(in) :: dt !< particle timestep in seconds + real, dimension(:,:,:),intent(in) :: u !< Zonal velocity field + real, dimension(:,:,:),intent(in) :: v !< Meridional velocity field + +end subroutine particles_init + +!> The main driver the steps updates particles +subroutine particles_run(parts, time, uo, vo, ho, tv, stagger) + ! Arguments + type(particles), pointer :: parts !< Container for all types and memory + type(time_type), intent(in) :: time !< Model time + real, dimension(:,:,:),intent(in) :: uo !< Ocean zonal velocity (m/s) + real, dimension(:,:,:),intent(in) :: vo !< Ocean meridional velocity (m/s) + real, dimension(:,:,:),intent(in) :: ho !< Ocean layer thickness [H ~> m or kg m-2] + type(thermo_var_ptrs), intent(in) :: tv !< structure containing pointers to available thermodynamic fields + integer, optional, intent(in) :: stagger !< Flag for whether velocities are staggered + +end subroutine particles_run + + +!>Save particle locations (and sometimes other vars) to restart file +subroutine particles_save_restart(parts,temp,salt) +! Arguments +type(particles), pointer :: parts !< Container for all types and memory +real,dimension(:,:,:),optional,intent(in) :: temp !< Optional container for temperature +real,dimension(:,:,:),optional,intent(in) :: salt !< Optional container for salinity + +end subroutine particles_save_restart + +!> Deallocate all memory and disassociated pointer +subroutine particles_end(parts,temp,salt) +! Arguments +type(particles), pointer :: parts !< Container for all types and memory +real,dimension(:,:,:),optional,intent(in) :: temp !< Optional container for temperature +real,dimension(:,:,:),optional,intent(in) :: salt !< Optional container for salinity + +end subroutine particles_end + +end module MOM_particles_mod diff --git a/config_src/external/drifters/MOM_particles_types.F90 b/config_src/external/drifters/MOM_particles_types.F90 new file mode 100644 index 0000000000..b7bc01acb9 --- /dev/null +++ b/config_src/external/drifters/MOM_particles_types.F90 @@ -0,0 +1,161 @@ +!> Dummy data structures and methods for drifters package +module particles_types_mod + +use MOM_grid, only : ocean_grid_type +use mpp_domains_mod, only: domain2D + + +!> Container for gridded fields +type :: particles_gridded + type(domain2D), pointer :: domain !< MPP parallel domain + integer :: halo !< Nominal halo width + integer :: isc !< Start i-index of computational domain + integer :: iec !< End i-index of computational domain + integer :: jsc !< Start j-index of computational domain + integer :: jec !< End j-index of computational domain + integer :: isd !< Start i-index of data domain + integer :: ied !< End i-index of data domain + integer :: jsd !< Start j-index of data domain + integer :: jed !< End j-index of data domain + integer :: isg !< Start i-index of global domain + integer :: ieg !< End i-index of global domain + integer :: jsg !< Start j-index of global domain + integer :: jeg !< End j-index of global domain + integer :: is_offset=0 !< add to i to recover global i-index + integer :: js_offset=0 !< add to j to recover global j-index + integer :: my_pe !< MPI PE index + integer :: pe_N !< MPI PE index of PE to the north + integer :: pe_S !< MPI PE index of PE to the south + integer :: pe_E !< MPI PE index of 
PE to the east + integer :: pe_W !< MPI PE index of PE to the west + logical :: grid_is_latlon !< Flag to say whether the coordinate is in lat-lon degrees, or meters + logical :: grid_is_regular !< Flag to say whether point in cell can be found assuming regular Cartesian grid + real :: Lx !< Length of the domain in x direction + real, dimension(:,:), allocatable :: lon !< Longitude of cell corners (degree E) + real, dimension(:,:), allocatable :: lat !< Latitude of cell corners (degree N) + real, dimension(:,:), allocatable :: lonc !< Longitude of cell centers (degree E) + real, dimension(:,:), allocatable :: latc !< Latitude of cell centers (degree N) + real, dimension(:,:), allocatable :: dx !< Length of cell edge (m) + real, dimension(:,:), allocatable :: dy !< Length of cell edge (m) + real, dimension(:,:), allocatable :: area !< Area of cell (m^2) + real, dimension(:,:), allocatable :: msk !< Ocean-land mask (1=ocean) + real, dimension(:,:), allocatable :: cos !< Cosine from rotation matrix to lat-lon coords + real, dimension(:,:), allocatable :: sin !< Sine from rotation matrix to lat-lon coords + real, dimension(:,:), allocatable :: ocean_depth !< Depth of ocean (m) + real, dimension(:,:), allocatable :: uo !< Ocean zonal flow (m/s) + real, dimension(:,:), allocatable :: vo !< Ocean meridional flow (m/s) + real, dimension(:,:), allocatable :: tmp !< Temporary work space + real, dimension(:,:), allocatable :: tmpc !< Temporary work space + real, dimension(:,:), allocatable :: parity_x !< X component of vector point from i,j to i+1,j+1 + real, dimension(:,:), allocatable :: parity_y !< Y component of vector point from i,j to i+1,j+1 + ! (For detecting tri-polar fold) + integer, dimension(:,:), allocatable :: particle_counter_grd !< Counts particles created for naming purposes + !>@{ + !! Diagnostic handle + integer :: id_uo=-1, id_vo=-1, id_unused=-1 + integer :: id_count=-1, id_chksum=-1 + !>@} + +end type particles_gridded + + +!>xyt is a data structure containing particle position and velocity fields. +type :: xyt + real :: lon !< Longitude of particle (degree N or unit of grid coordinate) + real :: lat !< Latitude of particle (degree N or unit of grid coordinate) + real :: day !< Day of this record (days) + real :: lat_old !< Previous latitude + real :: lon_old !< Previous longitude + real :: uvel !< Zonal velocity of particle (m/s) + real :: vvel !< Meridional velocity of particle (m/s) + real :: uvel_old !< Previous zonal velocity component (m/s) + real :: vvel_old !< Previous meridional velocity component (m/s) + integer :: year !< Year of this record + integer :: particle_num !< Current particle number + integer(kind=8) :: id = -1 !< Particle Identifier + type(xyt), pointer :: next=>null() !< Pointer to the next position in the list +end type xyt + +!>particle types are data structures describing a tracked particle +type :: particle + type(particle), pointer :: prev=>null() !< Previous link in list + type(particle), pointer :: next=>null() !< Next link in list +! 
State variables (specific to the particles, needed for restarts) + real :: lon !< Longitude of particle (degree N or unit of grid coordinate) + real :: lat !< Latitude of particle (degree E or unit of grid coordinate) + real :: depth !< Depth of particle + real :: uvel !< Zonal velocity of particle (m/s) + real :: vvel !< Meridional velocity of particle (m/s) + real :: lon_old !< previous lon (degrees) + real :: lat_old !< previous lat (degrees) + real :: uvel_old !< previous uvel + real :: vvel_old !< previous vvel + real :: start_lon !< starting longitude where particle was created + real :: start_lat !< starting latitude where particle was created + real :: start_day !< origination position (degrees) and day + integer :: start_year !< origination year + real :: halo_part !< equal to zero for particles on the computational domain, and 1 for particles on the halo + integer(kind=8) :: id !< particle identifier + integer(kind=8) :: drifter_num !< particle identifier + integer :: ine !< nearest i-index in NE direction (for convenience) + integer :: jne !< nearest j-index in NE direction (for convenience) + real :: xi !< non-dimensional x-coordinate within current cell (0..1) + real :: yj !< non-dimensional y-coordinate within current cell (0..1) + real :: uo !< zonal ocean velocity + real :: vo !< meridional ocean velocity + !< by the particle (m/s) + type(xyt), pointer :: trajectory=>null() !< Trajectory for this particle +end type particle + + +!>A buffer structure for message passing +type :: buffer + integer :: size=0 !< Size of buffer + real, dimension(:,:), pointer :: data !< Buffer memory +end type buffer + +!> A wrapper for the particle linked list (since an array of pointers is not allowed) +type :: linked_list + type(particle), pointer :: first=>null() !< Pointer to the beginning of a linked list of parts +end type linked_list + + +!> A grand data structure for the particles in the local MOM domain +type :: particles !; private + type(particles_gridded) :: grd !< Container with all gridded data + type(linked_list), dimension(:,:), allocatable :: list !< Linked list of particles + type(xyt), pointer :: trajectories=>null() !< A linked list for detached segments of trajectories + real :: dt !< Time-step between particle calls + integer :: current_year !< Current year (years) + real :: current_yearday !< Current year-day, 1.00-365.99, (days) + integer :: traj_sample_hrs !< Period between sampling for trajectories (hours) + integer :: traj_write_hrs !< Period between writing of trajectories (hours) + integer :: verbose_hrs !< Period between terminal status reports (hours) + !>@{ + !! Handles for clocks + integer :: clock, clock_mom, clock_the, clock_int, clock_cal, clock_com, clock_ini, clock_ior, clock_iow, clock_dia + integer :: clock_trw, clock_trp + !>@} + logical :: restarted=.false. !< Indicate whether we read state from a restart or not + logical :: Runge_not_Verlet=.True. !< True=Runge-Kutta, False=Verlet. + logical :: ignore_missing_restart_parts=.False. !< True allows the model to ignore particles missing in the restart. + logical :: halo_debugging=.False. !< Use for debugging halos (remove when its working) + logical :: save_short_traj=.false. !< True saves only lon,lat,time,id in particle_trajectory.nc + logical :: ignore_traj=.False. !< If true, then model does not write trajectory data at all + logical :: use_new_predictive_corrective =.False. 
!< Flag to use Bob's predictive corrective particle scheme + !Added by Alon + integer(kind=8) :: debug_particle_with_id = -1 !< If positive, monitors a part with this id + type(buffer), pointer :: obuffer_n=>null() !< Buffer for outgoing parts to the north + type(buffer), pointer :: ibuffer_n=>null() !< Buffer for incoming parts from the north + type(buffer), pointer :: obuffer_s=>null() !< Buffer for outgoing parts to the south + type(buffer), pointer :: ibuffer_s=>null() !< Buffer for incoming parts from the south + type(buffer), pointer :: obuffer_e=>null() !< Buffer for outgoing parts to the east + type(buffer), pointer :: ibuffer_e=>null() !< Buffer for incoming parts from the east + type(buffer), pointer :: obuffer_w=>null() !< Buffer for outgoing parts to the west + type(buffer), pointer :: ibuffer_w=>null() !< Buffer for incoming parts from the west + type(buffer), pointer :: obuffer_io=>null() !< Buffer for outgoing parts during i/o + type(buffer), pointer :: ibuffer_io=>null() !< Buffer for incoming parts during i/o +end type particles + + +end module particles_types_mod diff --git a/config_src/infra/FMS1/MOM_coms_infra.F90 b/config_src/infra/FMS1/MOM_coms_infra.F90 new file mode 100644 index 0000000000..555b4df119 --- /dev/null +++ b/config_src/infra/FMS1/MOM_coms_infra.F90 @@ -0,0 +1,455 @@ +!> Thin interfaces to non-domain-oriented mpp communication subroutines +module MOM_coms_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use iso_fortran_env, only : int32, int64 + +use mpp_mod, only : mpp_pe, mpp_root_pe, mpp_npes, mpp_set_root_pe +use mpp_mod, only : mpp_set_current_pelist, mpp_get_current_pelist +use mpp_mod, only : mpp_broadcast, mpp_sync, mpp_sync_self, mpp_chksum +use mpp_mod, only : mpp_sum, mpp_max, mpp_min +use memutils_mod, only : print_memuse_stats +use fms_mod, only : fms_end, fms_init + +implicit none ; private + +public :: PE_here, root_PE, num_PEs, set_rootPE, Set_PElist, Get_PElist +public :: broadcast, sum_across_PEs, min_across_PEs, max_across_PEs +public :: field_chksum, MOM_infra_init, MOM_infra_end + +! This module provides interfaces to the non-domain-oriented communication +! subroutines. + +!> Communicate an array, string or scalar from one PE to others +interface broadcast + module procedure broadcast_char, broadcast_int32_0D, broadcast_int64_0D, broadcast_int1D + module procedure broadcast_real0D, broadcast_real1D, broadcast_real2D +end interface broadcast + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +interface field_chksum + module procedure field_chksum_real_0d + module procedure field_chksum_real_1d + module procedure field_chksum_real_2d + module procedure field_chksum_real_3d + module procedure field_chksum_real_4d +end interface field_chksum + +!> Find the sum of field across PEs, and update PEs with the sums. +interface sum_across_PEs + module procedure sum_across_PEs_int4_0d + module procedure sum_across_PEs_int4_1d + module procedure sum_across_PEs_int8_0d + module procedure sum_across_PEs_int8_1d + module procedure sum_across_PEs_int8_2d + module procedure sum_across_PEs_real_0d + module procedure sum_across_PEs_real_1d + module procedure sum_across_PEs_real_2d +end interface sum_across_PEs + +!> Find the maximum value of field across PEs, and update PEs with the values. 
+interface max_across_PEs + module procedure max_across_PEs_int_0d + module procedure max_across_PEs_real_0d + module procedure max_across_PEs_real_1d +end interface max_across_PEs + +!> Find the minimum value of field across PEs, and update PEs with the values. +interface min_across_PEs + module procedure min_across_PEs_int_0d + module procedure min_across_PEs_real_0d + module procedure min_across_PEs_real_1d +end interface min_across_PEs + +contains + +!> Return the ID of the PE for the current process. +function PE_here() result(pe) + integer :: pe !< PE ID of the current process + pe = mpp_pe() +end function PE_here + +!> Return the ID of the root PE for the PE list of the current procss. +function root_PE() result(pe) + integer :: pe !< root PE ID + pe = mpp_root_pe() +end function root_PE + +!> Return the number of PEs for the current PE list. +function num_PEs() result(npes) + integer :: npes !< Number of PEs + npes = mpp_npes() +end function num_PEs + +!> Designate a PE as the root PE +subroutine set_rootPE(pe) + integer, intent(in) :: pe !< ID of the PE to be assigned as root + call mpp_set_root_pe(pe) +end subroutine + +!> Set the current PE list. If no list is provided, then the current PE list +!! is set to the list of all available PEs on the communicator. Setting the +!! list will trigger a rank synchronization unless the `no_sync` flag is set. +subroutine Set_PEList(pelist, no_sync) + integer, optional, intent(in) :: pelist(:) !< List of PEs to set for communication + logical, optional, intent(in) :: no_sync !< Do not sync after list update. + call mpp_set_current_pelist(pelist, no_sync) +end subroutine Set_PEList + +!> Retrieve the current PE list and any metadata if requested. +subroutine Get_PEList(pelist, name, commID) + integer, intent(out) :: pelist(:) !< List of PE IDs of the current PE list + character(len=*), optional, intent(out) :: name !< Name of PE list + integer, optional, intent(out) :: commID !< Communicator ID of PE list + + call mpp_get_current_pelist(pelist, name, commiD) +end subroutine Get_PEList + +!> Communicate a 1-D array of character strings from one PE to others +subroutine broadcast_char(dat, length, from_PE, PElist, blocking) + character(len=*), intent(inout) :: dat(:) !< The data to communicate and destination + integer, intent(in) :: length !< The length of each string + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_char + +!> Communicate an integer from one PE to others +subroutine broadcast_int64_0D(dat, from_PE, PElist, blocking) + integer(kind=int64), intent(inout) :: dat !< The data to communicate and destination + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. 
+ logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_int64_0D + + +!> Communicate an integer from one PE to others +subroutine broadcast_int32_0D(dat, from_PE, PElist, blocking) + integer(kind=int32), intent(inout) :: dat !< The data to communicate and destination + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_int32_0D + +!> Communicate a 1-D array of integers from one PE to others +subroutine broadcast_int1D(dat, length, from_PE, PElist, blocking) + integer, dimension(:), intent(inout) :: dat !< The data to communicate and destination + integer, intent(in) :: length !< The number of data elements + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_int1D + +!> Communicate a real number from one PE to others +subroutine broadcast_real0D(dat, from_PE, PElist, blocking) + real, intent(inout) :: dat !< The data to communicate and destination + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. 
; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real0D + +!> Communicate a 1-D array of reals from one PE to others +subroutine broadcast_real1D(dat, length, from_PE, PElist, blocking) + real, dimension(:), intent(inout) :: dat !< The data to communicate and destination + integer, intent(in) :: length !< The number of data elements + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real1D + +!> Communicate a 2-D array of reals from one PE to others +subroutine broadcast_real2D(dat, length, from_PE, PElist, blocking) + real, dimension(:,:), intent(inout) :: dat !< The data to communicate and destination + integer, intent(in) :: length !< The total number of data elements + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real2D + +! field_chksum wrappers + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_0d(field, pelist, mask_val) result(chksum) + real, intent(in) :: field !< Input scalar + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_0d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_1d(field, pelist, mask_val) result(chksum) + real, dimension(:), intent(in) :: field !< Input array + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_1d + +!> Compute a checksum for a field distributed over a PE list. 
If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_2d(field, pelist, mask_val) result(chksum) + real, dimension(:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_2d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_3d(field, pelist, mask_val) result(chksum) + real, dimension(:,:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_3d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_4d(field, pelist, mask_val) result(chksum) + real, dimension(:,:,:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_4d + +! sum_across_PEs wrappers + +!> Find the sum of field across PEs, and return this sum in field. +subroutine sum_across_PEs_int4_0d(field, pelist) + integer(kind=int32), intent(inout) :: field !< Value on this PE, and the sum across PEs upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, pelist) +end subroutine sum_across_PEs_int4_0d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_int4_1d(field, length, pelist) + integer(kind=int32), dimension(:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< Number of elements in field to add + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_int4_1d + +!> Find the sum of field across PEs, and return this sum in field. +subroutine sum_across_PEs_int8_0d(field, pelist) + integer(kind=int64), intent(inout) :: field !< Value on this PE, and the sum across PEs upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, pelist) +end subroutine sum_across_PEs_int8_0d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_int8_1d(field, length, pelist) + integer(kind=int64), dimension(:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< Number of elements in field to add + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_int8_1d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. 
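+!! As an illustrative sketch (the array name counts is hypothetical), a 2-d reduction looks like +!!   call sum_across_PEs_int8_2d(counts, size(counts)) +!! after which every participating PE holds the global sums in each position of counts.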
+subroutine sum_across_PEs_int8_2d(field, length, pelist) + integer(kind=int64), & + dimension(:,:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< The total number of positions to sum, usually + !! the product of the array sizes. + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_int8_2d + +!> Find the sum of field across PEs, and return this sum in field. +subroutine sum_across_PEs_real_0d(field, pelist) + real, intent(inout) :: field !< Value on this PE, and the sum across PEs upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, pelist) +end subroutine sum_across_PEs_real_0d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_real_1d(field, length, pelist) + real, dimension(:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< Number of elements in field to add + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_real_1d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_real_2d(field, length, pelist) + real, dimension(:,:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< The total number of positions to sum, usually + !! the product of the array sizes. + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_real_2d + +! max_across_PEs wrappers + +!> Find the maximum value of field across PEs, and store this maximum in field. +subroutine max_across_PEs_int_0d(field, pelist) + integer, intent(inout) :: field !< The values to compare, the maximum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_max(field, pelist) +end subroutine max_across_PEs_int_0d + +!> Find the maximum value of field across PEs, and store this maximum in field. +subroutine max_across_PEs_real_0d(field, pelist) + real, intent(inout) :: field !< The values to compare, the maximum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_max(field, pelist) +end subroutine max_across_PEs_real_0d + +!> Find the maximum values in each position of field across PEs, and store these maxima in field. +subroutine max_across_PEs_real_1d(field, length, pelist) + real, dimension(:), intent(inout) :: field !< The list of values being compared, with the + !! maxima in each position upon return + integer, intent(in) :: length !< Number of elements in field to compare + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_max(field, length, pelist) +end subroutine max_across_PEs_real_1d + +! min_across_PEs wrappers + +!> Find the minimum value of field across PEs, and store this minimum in field. +subroutine min_across_PEs_int_0d(field, pelist) + integer, intent(inout) :: field !< The values to compare, the minimum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_min(field, pelist) +end subroutine min_across_PEs_int_0d + +!> Find the minimum value of field across PEs, and store this minimum in field.
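+!! As an illustrative sketch (the variable name h_min is hypothetical), a scalar reduction looks like +!!   call min_across_PEs(h_min) +!! after which every participating PE holds the same global minimum.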
+subroutine min_across_PEs_real_0d(field, pelist) + real, intent(inout) :: field !< The values to compare, the minimum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + call mpp_min(field, pelist) +end subroutine min_across_PEs_real_0d + +!> Find the minimum values in each position of field across PEs, and store these minima in field. +subroutine min_across_PEs_real_1d(field, length, pelist) + real, dimension(:), intent(inout) :: field !< The list of values being compared, with the + !! minima in each position upon return + integer, intent(in) :: length !< Number of elements in field to compare + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_min(field, length, pelist) +end subroutine min_across_PEs_real_1d + +!> Initialize the model framework, including PE communication over a designated communicator. +!! If no communicator ID is provided, the framework's default communicator is used. +subroutine MOM_infra_init(localcomm) + integer, optional, intent(in) :: localcomm !< Communicator ID to initialize + call fms_init(localcomm) +end subroutine + +!> This subroutine carries out all of the calls required to close out the infrastructure cleanly. +!! This should only be called in ocean-only runs, as the coupler takes care of this in coupled runs. +subroutine MOM_infra_end + call print_memuse_stats( 'Memory HiWaterMark', always=.TRUE. ) + call fms_end() +end subroutine MOM_infra_end + +end module MOM_coms_infra diff --git a/src/framework/MOM_constants.F90 b/config_src/infra/FMS1/MOM_constants.F90 similarity index 100% rename from src/framework/MOM_constants.F90 rename to config_src/infra/FMS1/MOM_constants.F90 diff --git a/config_src/infra/FMS1/MOM_couplertype_infra.F90 b/config_src/infra/FMS1/MOM_couplertype_infra.F90 new file mode 100644 index 0000000000..3bcccc1dc7 --- /dev/null +++ b/config_src/infra/FMS1/MOM_couplertype_infra.F90 @@ -0,0 +1,503 @@ +!> This module wraps the FMS coupler types module +module MOM_couplertype_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use coupler_types_mod, only : coupler_type_spawn, coupler_type_initialized, coupler_type_destructor +use coupler_types_mod, only : coupler_type_set_diags, coupler_type_send_data, coupler_type_copy_data +use coupler_types_mod, only : coupler_type_write_chksums, coupler_type_redistribute_data +use coupler_types_mod, only : coupler_type_increment_data, coupler_type_rescale_data +use coupler_types_mod, only : coupler_type_extract_data, coupler_type_set_data +use coupler_types_mod, only : coupler_type_data_override +use coupler_types_mod, only : ind_flux, ind_alpha, ind_csurf +use coupler_types_mod, only : coupler_1d_bc_type, coupler_2d_bc_type, coupler_3d_bc_type +use atmos_ocean_fluxes_mod, only : aof_set_coupler_flux +use MOM_domain_infra, only : domain2D +use MOM_time_manager, only : time_type + +implicit none ; private + +public :: CT_spawn, CT_initialized, CT_destructor +public :: CT_set_diags, CT_send_data, CT_data_override, CT_write_chksums +public :: CT_set_data, CT_increment_data, CT_rescale_data +public :: CT_copy_data, CT_extract_data, CT_redistribute_data +public :: atmos_ocn_coupler_flux +public :: ind_flux, ind_alpha, ind_csurf +public :: coupler_1d_bc_type, coupler_2d_bc_type, coupler_3d_bc_type + +!> This is the interface to spawn one coupler_bc_type into another. 
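+!! For example (the variable and index names here are purely illustrative), a 2-d type can be +!! spawned from a 1-d template with a call like +!!   call CT_spawn(flux_template, fluxes, (/isd, isc, iec, ied/), (/jsd, jsc, jec, jed/), suffix='_new') +!! where the 4-element arrays give the data and computational extents of the two horizontal dimensions.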
+interface CT_spawn + module procedure CT_spawn_1d_2d, CT_spawn_1d_3d, CT_spawn_2d_2d, CT_spawn_2d_3d + module procedure CT_spawn_3d_2d, CT_spawn_3d_3d +end interface CT_spawn + +!> This function interface indicates whether a coupler_bc_type has been initialized. +interface CT_initialized + module procedure CT_initialized_1d, CT_initialized_2d, CT_initialized_3d +end interface CT_initialized + +!> This is the interface to deallocate any data associated with a coupler_bc_type. +interface CT_destructor + module procedure CT_destructor_1d, CT_destructor_2d +end interface CT_destructor + +!> Copy all elements of the data in either a coupler_2d_bc_type or a coupler_3d_bc_type into +!! another structure of the same or the other type. Both must have the same array sizes in common +!! dimensions, while the details of any expansion from 2d to 3d are controlled by arguments. +interface CT_copy_data + module procedure CT_copy_data_2d, CT_copy_data_3d, CT_copy_data_2d_3d +end interface CT_copy_data + +!> Increment data in all elements of one coupler_2d_bc_type or coupler_3d_bc_type with +!! the data from another. Both must have the same array sizes and rank. +interface CT_increment_data + module procedure CT_increment_data_2d, CT_increment_data_3d, CT_increment_data_2d_3d +end interface CT_increment_data + +!> Rescales the fields in the elements of a coupler_2d_bc_type or coupler_3d_bc_type by multiplying by a factor scale. +!! If scale is 0, this is a direct assignment to 0, so that NaNs will not persist. +interface CT_rescale_data + module procedure CT_rescale_data_2d, CT_rescale_data_3d +end interface CT_rescale_data + +!> Redistribute the data in all elements of one coupler_2d_bc_type or coupler_3d_bc_type into +!! another, which may be on different processors with a different decomposition. +interface CT_redistribute_data + module procedure CT_redistribute_data_2d, CT_redistribute_data_3d +end interface CT_redistribute_data + +!> Write out checksums for the elements of a coupler_2d_bc_type or coupler_3d_bc_type +interface CT_write_chksums + module procedure CT_write_chksums_2d, CT_write_chksums_3d +end interface CT_write_chksums + +contains + +!> This function sets many of the parameters for calculating an atmosphere-ocean tracer flux +!! and returns an integer index for that flux. +function atmos_ocn_coupler_flux(name, flux_type, implementation, param, mol_wt, & + ice_restart_file, ocean_restart_file, units, caller, verbosity) & + result (coupler_index) + + character(len=*), intent(in) :: name !< A name to use for the flux + character(len=*), intent(in) :: flux_type !< A string describing the type of this flux, + !! perhaps 'air_sea_gas_flux'. + character(len=*), intent(in) :: implementation !< A name describing the specific + !! implementation of this flux, such as 'ocmip2'. + real, dimension(:), optional, intent(in) :: param !< An array of parameters used for the fluxes + real, optional, intent(in) :: mol_wt !< The molecular weight of this tracer + character(len=*), optional, intent(in) :: ice_restart_file !< A sea-ice restart file to use with this flux. + character(len=*), optional, intent(in) :: ocean_restart_file !< An ocean restart file to use with this flux. + character(len=*), optional, intent(in) :: units !< The units of the flux + character(len=*), optional, intent(in) :: caller !< The name of the calling routine + integer, optional, intent(in) :: verbosity !< A 0-9 integer indicating a level of verbosity. + integer :: coupler_index !< The resulting integer handle to use for this flux in subsequent calls.
+ + coupler_index = aof_set_coupler_flux(name, flux_type, implementation, & + param=param, mol_wt=mol_wt, ice_restart_file=ice_restart_file, & + ocean_restart_file=ocean_restart_file, & + units=units, caller=caller, verbosity=verbosity) + +end function atmos_ocn_coupler_flux + +!> Generate a 2-D coupler type using a 1-D coupler type as a template. +subroutine CT_spawn_1d_2d(var_in, var, idim, jdim, suffix, as_needed) + type(coupler_1d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_2d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_1d_2d + +!> Generate a 3-D coupler type using a 1-D coupler type as a template. +subroutine CT_spawn_1d_3d(var_in, var, idim, jdim, kdim, suffix, as_needed) + type(coupler_1d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_3d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + integer, dimension(2), intent(in) :: kdim !< The array extents of the third dimension in + !! a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, kdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_1d_3d + +!> Generate one 2-D coupler type using another 2-D coupler type as a template. +subroutine CT_spawn_2d_2d(var_in, var, idim, jdim, suffix, as_needed) + type(coupler_2d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_2d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_2d_2d + +!> Generate a 3-D coupler type using a 2-D coupler type as a template. 
+subroutine CT_spawn_2d_3d(var_in, var, idim, jdim, kdim, suffix, as_needed) + type(coupler_2d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_3d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + integer, dimension(2), intent(in) :: kdim !< The array extents of the third dimension in + !! a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, kdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_2d_3d + +!> Generate a 2-D coupler type using a 3-D coupler type as a template. +subroutine CT_spawn_3d_2d(var_in, var, idim, jdim, suffix, as_needed) + type(coupler_3d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_2d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_3d_2d + +!> Generate a 3-D coupler type using another 3-D coupler type as a template. +subroutine CT_spawn_3d_3d(var_in, var, idim, jdim, kdim, suffix, as_needed) + type(coupler_3d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_3d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + integer, dimension(2), intent(in) :: kdim !< The array extents of the third dimension in + !! a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, kdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_3d_3d + +!> Copy all elements of the data in a coupler_2d_bc_type into another. Both must have the same array sizes. 
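+!! As an illustrative sketch (variable names are hypothetical), a restricted copy might look like +!!   call CT_copy_data(var_src, var_dst, only_flux_type='air_sea_gas_flux') +!! which copies only those boundary conditions whose flux type matches the given string.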
+subroutine CT_copy_data_2d(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice) + type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(coupler_2d_bc_type), intent(inout) :: var !< The recipient BC_type structure + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, optional, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, optional, intent(in) :: field_index !< The index of the field in the + !! boundary condition that is being copied + character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types of fluxes + !! to exclude from this copy. + character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types of fluxes + !! to include from this copy. + logical, optional, intent(in) :: pass_through_ice !< If true, only copy BCs whose + !! value of pass_through ice matches this + + call coupler_type_copy_data(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice) +end subroutine CT_copy_data_2d + +!> Copy all elements of the data in a coupler_3d_bc_type into another. Both must have the same array sizes. +subroutine CT_copy_data_3d(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice) + type(coupler_3d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(coupler_3d_bc_type), intent(inout) :: var !< The recipient BC_type structure + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, optional, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, optional, intent(in) :: field_index !< The index of the field in the + !! boundary condition that is being copied + character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types of fluxes + !! to exclude from this copy. + character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types of fluxes + !! to include from this copy. + logical, optional, intent(in) :: pass_through_ice !< If true, only copy BCs whose + !! value of pass_through ice matches this + + call coupler_type_copy_data(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice) +end subroutine CT_copy_data_3d + +!> Copy all elements of the data in a coupler_2d_bc_type into a coupler_3d_bc_type. +!! Both must have the same array sizes for the first two dimensions, while the extent +!! of the 3rd dimension that is being filled may be specified via optional arguments. +subroutine CT_copy_data_2d_3d(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice, ind3_start, ind3_end) + type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(coupler_3d_bc_type), intent(inout) :: var !< The recipient BC_type structure + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, optional, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, optional, intent(in) :: field_index !< The index of the field in the + !! 
boundary condition that is being copied + character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types of fluxes + !! to exclude from this copy. + character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types of fluxes + !! to include from this copy. + logical, optional, intent(in) :: pass_through_ice !< If true, only copy BCs whose + !! value of pass_through ice matches this + integer, optional, intent(in) :: ind3_start !< The starting value of the 3rd + !! index of the 3d type to fill in. + integer, optional, intent(in) :: ind3_end !< The ending value of the 3rd + !! index of the 3d type to fill in. + + call coupler_type_copy_data(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice, ind3_start, ind3_end) +end subroutine CT_copy_data_2d_3d + +!> Increment data in all elements of one coupler_2d_bc_type with the data from another. Both +!! must have the same array sizes. +subroutine CT_increment_data_2d(var_in, var, halo_size, scale_factor, scale_prev) + type(coupler_2d_bc_type), intent(in) :: var_in !< coupler_type structure with the data to add to the other type + type(coupler_2d_bc_type), intent(inout) :: var !< The coupler_type structure whose fields are being incremented + integer, optional, intent(in) :: halo_size !< The extent of the halo to increment; 0 by default + real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added + real, optional, intent(in) :: scale_prev !< A scaling factor for the data that is already here + + call coupler_type_increment_data(var_in, var, halo_size=halo_size, scale_factor=scale_factor, & + scale_prev=scale_prev) + +end subroutine CT_increment_data_2d + +!> Increment data in all elements of one coupler_3d_bc_type with the data from another. Both +!! must have the same array sizes. +subroutine CT_increment_data_3d(var_in, var, halo_size, scale_factor, scale_prev, exclude_flux_type, only_flux_type) + type(coupler_3d_bc_type), intent(in) :: var_in !< coupler_type structure with the data to add to the other type + type(coupler_3d_bc_type), intent(inout) :: var !< The coupler_type structure whose fields are being incremented + integer, optional, intent(in) :: halo_size !< The extent of the halo to increment; 0 by default + real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added + real, optional, intent(in) :: scale_prev !< A scaling factor for the data that is already here + character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types + !! of fluxes to exclude from this increment. + character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types + !! of fluxes to include from this increment. + + call coupler_type_increment_data(var_in, var, halo_size=halo_size, scale_factor=scale_factor, & + scale_prev=scale_prev, exclude_flux_type=exclude_flux_type, & + only_flux_type=only_flux_type) + +end subroutine CT_increment_data_3d + +!> Rescales the fields in the elements of a coupler_2d_bc_type by multiplying by a factor scale. +!! If scale is 0, this is a direct assignment to 0, so that NaNs will not persist. 
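+!! For example, a call like CT_rescale_data(fluxes, 0.0) (with a hypothetically named structure fluxes) +!! zeroes every field, while a nonzero scale simply multiplies each field in place.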
+subroutine CT_rescale_data_2d(var, scale) + type(coupler_2d_bc_type), intent(inout) :: var !< The BC_type structure whose fields are being rescaled + real, intent(in) :: scale !< A scaling factor to multiply fields by + + call coupler_type_rescale_data(var, scale) + +end subroutine CT_rescale_data_2d + +!> Rescales the fields in the elements of a coupler_3d_bc_type by multiplying by a factor scale. +!! If scale is 0, this is a direct assignment to 0, so that NaNs will not persist. +subroutine CT_rescale_data_3d(var, scale) + type(coupler_3d_bc_type), intent(inout) :: var !< The BC_type structure whose fields are being rescaled + real, intent(in) :: scale !< A scaling factor to multiply fields by + + call coupler_type_rescale_data(var, scale) + +end subroutine CT_rescale_data_3d + +!> Increment data in the elements of a coupler_2d_bc_type with weighted averages of elements of a +!! coupler_3d_bc_type +subroutine CT_increment_data_2d_3d(var_in, weights, var, halo_size) + type(coupler_3d_bc_type), intent(in) :: var_in !< coupler_type structure with the data to add to the other type + real, dimension(:,:,:), intent(in) :: weights !< An array of normalized weights for the 3d-data to + !! increment the 2d-data. There is no renormalization, + !! so if the weights do not sum to 1 in the 3rd dimension + !! there may be adverse consequences! + type(coupler_2d_bc_type), intent(inout) :: var !< The coupler_type structure whose fields are being incremented + integer, optional, intent(in) :: halo_size !< The extent of the halo to increment; 0 by default + + call coupler_type_increment_data(var_in, weights, var, halo_size=halo_size) + +end subroutine CT_increment_data_2d_3d + + +!> Redistribute the data in all elements of one coupler_2d_bc_type into another, which may be on +!! different processors with a different decomposition. +subroutine CT_redistribute_data_2d(var_in, domain_in, var_out, domain_out, complete) + type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(domain2D), intent(in) :: domain_in !< The FMS domain for the input structure + type(coupler_2d_bc_type), intent(inout) :: var_out !< The recipient BC_type structure (data intent out) + type(domain2D), intent(in) :: domain_out !< The FMS domain for the output structure + logical, optional, intent(in) :: complete !< If true, complete the updates + + call coupler_type_redistribute_data(var_in, domain_in, var_out, domain_out, complete) +end subroutine CT_redistribute_data_2d + +!> Redistribute the data in all elements of one coupler_3d_bc_type into another, which may be on +!! different processors with a different decomposition. +subroutine CT_redistribute_data_3d(var_in, domain_in, var_out, domain_out, complete) + type(coupler_3d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(domain2D), intent(in) :: domain_in !< The FMS domain for the input structure + type(coupler_3d_bc_type), intent(inout) :: var_out !< The recipient BC_type structure (data intent out) + type(domain2D), intent(in) :: domain_out !< The FMS domain for the output structure + logical, optional, intent(in) :: complete !< If true, complete the updates + + call coupler_type_redistribute_data(var_in, domain_in, var_out, domain_out, complete) +end subroutine CT_redistribute_data_3d + +!> Extract a 2d field from a coupler_2d_bc_type into a two-dimensional array. 
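+!! An illustrative call (the array and index names are hypothetical) is +!!   call CT_extract_data(fluxes, n, ind_flux, array2d, halo_size=0) +!! which fills array2d with the flux field of the n-th boundary condition in fluxes.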
+subroutine CT_extract_data(var_in, bc_index, field_index, array_out, & + scale_factor, halo_size, idim, jdim) + type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to extract + integer, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, intent(in) :: field_index !< The index of the field in the boundary + !! condition that is being copied, or the + !! surface flux by default. + real, dimension(1:,1:), intent(out) :: array_out !< The recipient array for the field; its size + !! must match the size of the data being copied + !! unless idim and jdim are supplied. + real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, dimension(4), optional, intent(in) :: idim !< The data and computational domain extents of + !! the first dimension of the output array + !! in a non-decreasing list + integer, dimension(4), optional, intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension of the output array + !! in a non-decreasing list + call coupler_type_extract_data(var_in, bc_index, field_index, array_out, scale_factor, halo_size, idim, jdim) + +end subroutine CT_extract_data + +!> Set single 2d field in coupler_2d_bc_type from a two-dimensional array. +subroutine CT_set_data(array_in, bc_index, field_index, var, & + scale_factor, halo_size, idim, jdim) + real, dimension(1:,1:), intent(in) :: array_in !< The source array for the field; its size + !! must match the size of the data being copied + !! unless idim and jdim are supplied. + integer, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, intent(in) :: field_index !< The index of the field in the + !! boundary condition that is being set. The + !! surface concentration is set by default. + type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure with the data to set + real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, dimension(4), optional, intent(in) :: idim !< The data and computational domain extents of + !! the first dimension of the output array + !! in a non-decreasing list + integer, dimension(4), optional, intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension of the output array + !! in a non-decreasing list + + integer :: subfield ! An integer indicating which field to set. 
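+  ! The field_index argument is typically one of the indices re-exported from this module +  ! (ind_flux, ind_alpha or ind_csurf), selecting which part of the boundary condition is set.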
+ + call coupler_type_set_data(array_in, bc_index, field_index, var, scale_factor, halo_size, idim, jdim) + +end subroutine CT_set_data + +!> Potentially override the values in a coupler_2d_bc_type +subroutine CT_data_override(gridname, var, time) + character(len=3), intent(in) :: gridname !< 3-character long model grid ID + type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure to override + type(time_type), intent(in) :: time !< The current model time + + call coupler_type_data_override(gridname, var, time) +end subroutine CT_data_override + +!> Register the diagnostics of a coupler_2d_bc_type +subroutine CT_set_diags(var, diag_name, axes, time) + type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure for which to register diagnostics + character(len=*), intent(in) :: diag_name !< name for diagnostic file, or blank not to register the fields + integer, dimension(:), intent(in) :: axes !< array of axes identifiers for diagnostic variable registration + type(time_type), intent(in) :: time !< model time variable for registering diagnostic field + + call coupler_type_set_diags(var, diag_name, axes, time) + +end subroutine CT_set_diags + +!> Write out all diagnostics of elements of a coupler_2d_bc_type +subroutine CT_send_data(var, Time) + type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure with the diagnostics to write + type(time_type), intent(in) :: time !< The current model time + + call coupler_type_send_data(var, Time) +end subroutine CT_send_data + +!> Write out checksums for the elements of a coupler_2d_bc_type +subroutine CT_write_chksums_2d(var, outunit, name_lead) + type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure whose checksums are to be written + integer, intent(in) :: outunit !< The index of an open output file + character(len=*), optional, intent(in) :: name_lead !< An optional prefix for the variable names + + call coupler_type_write_chksums(var, outunit, name_lead) + +end subroutine CT_write_chksums_2d + +!> Write out checksums for the elements of a coupler_3d_bc_type +subroutine CT_write_chksums_3d(var, outunit, name_lead) + type(coupler_3d_bc_type), intent(in) :: var !< BC_type structure whose checksums are to be written + integer, intent(in) :: outunit !< The index of an open output file + character(len=*), optional, intent(in) :: name_lead !< An optional prefix for the variable names + + call coupler_type_write_chksums(var, outunit, name_lead) + +end subroutine CT_write_chksums_3d + +!> Indicate whether a coupler_1d_bc_type has been initialized. +logical function CT_initialized_1d(var) + type(coupler_1d_bc_type), intent(in) :: var !< BC_type structure to be queried + + CT_initialized_1d = coupler_type_initialized(var) +end function CT_initialized_1d + +!> Indicate whether a coupler_2d_bc_type has been initialized. +logical function CT_initialized_2d(var) + type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure to be queried + + CT_initialized_2d = coupler_type_initialized(var) +end function CT_initialized_2d + +!> Indicate whether a coupler_3d_bc_type has been initialized.
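+!! A typical guarded use (with illustrative variable names) is +!!   if (.not. CT_initialized(var_3d)) call CT_spawn(var_2d, var_3d, idim, jdim, kdim) +!! so that a type is only spawned once.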
+logical function CT_initialized_3d(var) + type(coupler_3d_bc_type), intent(in) :: var !< BC_type structure to be queried + + CT_initialized_3d = coupler_type_initialized(var) +end function CT_initialized_3d + +!> Deallocate all data associated with a coupler_1d_bc_type +subroutine CT_destructor_1d(var) + type(coupler_1d_bc_type), intent(inout) :: var !< BC_type structure to be deconstructed + + call coupler_type_destructor(var) + +end subroutine CT_destructor_1d + +!> Deallocate all data associated with a coupler_2d_bc_type +subroutine CT_destructor_2d(var) + type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure to be deconstructed + + call coupler_type_destructor(var) + +end subroutine CT_destructor_2d + +end module MOM_couplertype_infra diff --git a/config_src/infra/FMS1/MOM_cpu_clock_infra.F90 b/config_src/infra/FMS1/MOM_cpu_clock_infra.F90 new file mode 100644 index 0000000000..62c21e5772 --- /dev/null +++ b/config_src/infra/FMS1/MOM_cpu_clock_infra.F90 @@ -0,0 +1,94 @@ +!> Wraps the MPP cpu clock functions +!! +!! The functions and constants should be accessed via mom_cpu_clock +module MOM_cpu_clock_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +! These interfaces and constants from MPP/FMS will not be directly exposed outside of this module +use fms_mod, only : clock_flag_default +use mpp_mod, only : mpp_clock_begin +use mpp_mod, only : mpp_clock_end, mpp_clock_id +use mpp_mod, only : MPP_CLOCK_COMPONENT => CLOCK_COMPONENT +use mpp_mod, only : MPP_CLOCK_SUBCOMPONENT => CLOCK_SUBCOMPONENT +use mpp_mod, only : MPP_CLOCK_MODULE_DRIVER => CLOCK_MODULE_DRIVER +use mpp_mod, only : MPP_CLOCK_MODULE => CLOCK_MODULE +use mpp_mod, only : MPP_CLOCK_ROUTINE => CLOCK_ROUTINE +use mpp_mod, only : MPP_CLOCK_LOOP => CLOCK_LOOP +use mpp_mod, only : MPP_CLOCK_INFRA => CLOCK_INFRA + +implicit none ; private + +! Public entities +public :: cpu_clock_id, cpu_clock_begin, cpu_clock_end +public :: CLOCK_COMPONENT, CLOCK_SUBCOMPONENT, CLOCK_MODULE_DRIVER, CLOCK_MODULE +public :: CLOCK_ROUTINE, CLOCK_LOOP, CLOCK_INFRA + +!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a +!! component, e.g. the entire MOM6 model +integer, parameter :: CLOCK_COMPONENT = MPP_CLOCK_COMPONENT + +!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a +!! sub-component, e.g. dynamics or thermodynamics +integer, parameter :: CLOCK_SUBCOMPONENT = MPP_CLOCK_SUBCOMPONENT + +!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a +!! module driver, e.g. a routine that calls multiple other routines +integer, parameter :: CLOCK_MODULE_DRIVER = MPP_CLOCK_MODULE_DRIVER + +!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a +!! module, e.g. the main entry routine for a module +integer, parameter :: CLOCK_MODULE = MPP_CLOCK_MODULE + +!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a +!! subroutine or function +integer, parameter :: CLOCK_ROUTINE = MPP_CLOCK_ROUTINE + +!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a +!! section within a routine, e.g. around a loop +integer, parameter :: CLOCK_LOOP = MPP_CLOCK_LOOP + +!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for an +!! infrastructure operation, e.g.
a halo update +integer, parameter :: CLOCK_INFRA = MPP_CLOCK_INFRA + +contains + +!> Turns on clock with handle "id" +subroutine cpu_clock_begin(id) + integer, intent(in) :: id !< Handle for clock + + call mpp_clock_begin(id) + +end subroutine cpu_clock_begin + +!> Turns off clock with handle "id" +subroutine cpu_clock_end(id) + integer, intent(in) :: id !< Handle for clock + + call mpp_clock_end(id) + +end subroutine cpu_clock_end + +!> Returns the integer handle for a named CPU clock. +integer function cpu_clock_id(name, sync, grain) + character(len=*), intent(in) :: name !< The unique name of the CPU clock + logical, optional, intent(in) :: sync !< A flag that controls whether the + !! PEs are synchronized before the cpu clocks start counting. + !! Synchronization occurs before the start of a clock if this + !! is enabled, while additional (expensive) statistics can + !! set for other values. + !! If absent, the default is taken from the settings for FMS. + integer, optional, intent(in) :: grain !< The timing granularity for this clock, usually set to + !! the values of CLOCK_COMPONENT, CLOCK_ROUTINE, CLOCK_LOOP, etc. + + integer :: clock_flags + + clock_flags = clock_flag_default + if (present(sync)) & + clock_flags = ibset(clock_flags, 0) + + cpu_clock_id = mpp_clock_id(name, flags=clock_flags, grain=grain) +end function cpu_clock_id + +end module MOM_cpu_clock_infra diff --git a/config_src/infra/FMS1/MOM_data_override_infra.F90 b/config_src/infra/FMS1/MOM_data_override_infra.F90 new file mode 100644 index 0000000000..1484f0c128 --- /dev/null +++ b/config_src/infra/FMS1/MOM_data_override_infra.F90 @@ -0,0 +1,105 @@ +!> These interfaces allow for ocean or sea-ice variables to be replaced with data. +module MOM_data_override_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use MOM_domain_infra, only : MOM_domain_type, domain2d +use MOM_domain_infra, only : get_simple_array_i_ind, get_simple_array_j_ind +use MOM_time_manager, only : time_type +use data_override_mod, only : data_override_init +use data_override_mod, only : data_override +use data_override_mod, only : data_override_unset_domains + +implicit none ; private + +public :: impose_data_init, impose_data, impose_data_unset_domains + +!> Potentially override the values of a field in the model with values from a dataset. +interface impose_data + module procedure data_override_MD, data_override_2d +end interface + +contains + +!> Initialize the data override capability and set the domains for the ocean and ice components. +!> There should be a call to impose_data_init before impose_data is called. +subroutine impose_data_init(MOM_domain_in, Ocean_domain_in, Ice_domain_in) + type (MOM_domain_type), intent(in), optional :: MOM_domain_in + type (domain2d), intent(in), optional :: Ocean_domain_in + type (domain2d), intent(in), optional :: Ice_domain_in + + if (present(MOM_domain_in)) then + call data_override_init(Ocean_domain_in=MOM_domain_in%mpp_domain, Ice_domain_in=Ice_domain_in) + else + call data_override_init(Ocean_domain_in=Ocean_domain_in, Ice_domain_in=Ice_domain_in) + endif +end subroutine impose_data_init + + +!> Potentially override a 2-d field on a MOM6 domain with values from a dataset. 
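+!! An illustrative call (the field name and variables are hypothetical) is +!!   call impose_data(G%Domain, 'sst_obs', sst, Time, scale=1.0, override=overridden) +!! which replaces sst with dataset values when the FMS data_table requests an override of 'sst_obs'.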
+subroutine data_override_MD(domain, fieldname, data_2D, time, scale, override, is_ice) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + character(len=*), intent(in) :: fieldname !< Name of the field to override + real, dimension(:,:), intent(inout) :: data_2D !< Data that may be modified by this call. + type(time_type), intent(in) :: time !< The model time, and the time for the data + real, optional, intent(in) :: scale !< A scaling factor that an overridden field is + !! multiplied by before it is returned. However, + !! if there is no override, there is no rescaling. + logical, optional, intent(out) :: override !< True if the field has been overridden successfully + logical, optional, intent(in) :: is_ice !< If present and true, use the ice domain. + + logical :: overridden, is_ocean + integer :: i, j, is, ie, js, je + + overridden = .false. + is_ocean = .true. ; if (present(is_ice)) is_ocean = .not.is_ice + if (is_ocean) then + call data_override('OCN', fieldname, data_2D, time, override=overridden) + else + call data_override('ICE', fieldname, data_2D, time, override=overridden) + endif + + if (overridden .and. present(scale)) then ; if (scale /= 1.0) then + ! Rescale data in the computational domain if the data override has occurred. + call get_simple_array_i_ind(domain, size(data_2D,1), is, ie) + call get_simple_array_j_ind(domain, size(data_2D,2), js, je) + do j=js,je ; do i=is,ie + data_2D(i,j) = scale*data_2D(i,j) + enddo ; enddo + endif ; endif + + if (present(override)) override = overridden + +end subroutine data_override_MD + + +!> Potentially override a 2-d field with values from a dataset. +subroutine data_override_2d(gridname, fieldname, data_2D, time, override) + character(len=3), intent(in) :: gridname !< String identifying the model component, in MOM6 + !! and SIS this may be either 'OCN' or 'ICE' + character(len=*), intent(in) :: fieldname !< Name of the field to override + real, dimension(:,:), intent(inout) :: data_2D !< Data that may be modified by this call + type(time_type), intent(in) :: time !< The model time, and the time for the data + logical, optional, intent(out) :: override !< True if the field has been overridden successfully + + call data_override(gridname, fieldname, data_2D, time, override) + +end subroutine data_override_2d + +!> Unset domains that had previously been set for use by data_override. +subroutine impose_data_unset_domains(unset_Ocean, unset_Ice, must_be_set) + logical, intent(in), optional :: unset_Ocean !< If present and true, unset the ocean domain for overrides + logical, intent(in), optional :: unset_Ice !< If present and true, unset the sea-ice domain for overrides + logical, intent(in), optional :: must_be_set !< If present and true, it is a fatal error to unset + !! a domain that is not set. + + call data_override_unset_domains(unset_Ocean=unset_Ocean, unset_Ice=unset_Ice, & + must_be_set=must_be_set) +end subroutine impose_data_unset_domains + +end module MOM_data_override_infra + +!> \namespace MOM_data_override_infra +!! +!! The routines here wrap routines from the FMS module data_override_mod, which potentially replace +!! model values with values read from a data file. diff --git a/config_src/infra/FMS1/MOM_diag_manager_infra.F90 b/config_src/infra/FMS1/MOM_diag_manager_infra.F90 new file mode 100644 index 0000000000..18c80cf24c --- /dev/null +++ b/config_src/infra/FMS1/MOM_diag_manager_infra.F90 @@ -0,0 +1,423 @@ +!> A wrapper for the FMS diag_manager routines. 
This module should be the +!! only MOM6 module which imports the FMS shared infrastructure for +!! diagnostics. Pass through interfaces are being documented +!! here and renamed in order to clearly identify these APIs as being +!! consistent with the FMS infrastructure (Any future updates to +!! those APIs would be applied here). +module MOM_diag_manager_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use diag_axis_mod, only : fms_axis_init=>diag_axis_init +use diag_axis_mod, only : fms_get_diag_axis_name => get_diag_axis_name +use diag_axis_mod, only : EAST, NORTH +use diag_data_mod, only : null_axis_id +use diag_manager_mod, only : fms_diag_manager_init => diag_manager_init +use diag_manager_mod, only : fms_diag_manager_end => diag_manager_end +use diag_manager_mod, only : send_data_fms => send_data +use diag_manager_mod, only : fms_diag_field_add_attribute => diag_field_add_attribute +use diag_manager_mod, only : DIAG_FIELD_NOT_FOUND +use diag_manager_mod, only : register_diag_field_fms => register_diag_field +use diag_manager_mod, only : register_static_field_fms => register_static_field +use diag_manager_mod, only : get_diag_field_id_fms => get_diag_field_id +use MOM_time_manager, only : time_type +use MOM_domain_infra, only : MOM_domain_type +use MOM_error_infra, only : MOM_error => MOM_err, FATAL, WARNING + +implicit none ; private + +!> transmit data for diagnostic output +interface register_diag_field_infra + module procedure register_diag_field_infra_scalar + module procedure register_diag_field_infra_array +end interface register_diag_field_infra + +!> transmit data for diagnostic output +interface send_data_infra + module procedure send_data_infra_0d, send_data_infra_1d + module procedure send_data_infra_2d, send_data_infra_3d +#ifdef OVERLOAD_R8 + module procedure send_data_infra_2d_r8, send_data_infra_3d_r8 +#endif +end interface send_data_infra + +!> Add an attribute to a diagnostic field +interface MOM_diag_field_add_attribute + module procedure MOM_diag_field_add_attribute_scalar_r + module procedure MOM_diag_field_add_attribute_scalar_i + module procedure MOM_diag_field_add_attribute_scalar_c + module procedure MOM_diag_field_add_attribute_r1d + module procedure MOM_diag_field_add_attribute_i1d +end interface MOM_diag_field_add_attribute + + +! Public interfaces +public MOM_diag_axis_init +public get_MOM_diag_axis_name +public MOM_diag_manager_init +public MOM_diag_manager_end +public send_data_infra +public MOM_diag_field_add_attribute +public register_diag_field_infra +public register_static_field_infra +public get_MOM_diag_field_id +! Public data +public null_axis_id +public DIAG_FIELD_NOT_FOUND +public EAST, NORTH + + +contains + +!> Initialize a diagnostic axis +integer function MOM_diag_axis_init(name, data, units, cart_name, long_name, MOM_domain, position, & + & direction, edges, set_name, coarsen, null_axis) + character(len=*), intent(in) :: name !< The name of this axis + real, dimension(:), intent(in) :: data !< The array of coordinate values + character(len=*), intent(in) :: units !< The units for the axis data + character(len=*), intent(in) :: cart_name !< Cartesian axis ("X", "Y", "Z", "T", or "N" for none) + character(len=*), & + optional, intent(in) :: long_name !< The long name of this axis + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + integer, optional, intent(in) :: position !< This indicates the relative position of this + !! axis. 
The default is CENTER, but EAST and NORTH + !! are common options. + integer, optional, intent(in) :: direction !< This indicates the direction along which this + !! axis increases: 1 for upward, -1 for downward, or + !! 0 for non-vertical axes (the default) + integer, optional, intent(in) :: edges !< The axis_id of the complementary axis that + !! describes the edges of this axis + character(len=*), & + optional, intent(in) :: set_name !< A name to use for this set of axes. + integer, optional, intent(in) :: coarsen !< An optional degree of coarsening for the grid, 1 + !! by default. + logical, optional, intent(in) :: null_axis !< If present and true, return the special null axis + !! id for use with scalars. + + integer :: coarsening ! The degree of grid coarsening + + if (present(null_axis)) then ; if (null_axis) then + ! Return the special null axis id for scalars + MOM_diag_axis_init = null_axis_id + return + endif ; endif + + if (present(MOM_domain)) then + coarsening = 1 ; if (present(coarsen)) coarsening = coarsen + if (coarsening == 1) then + MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges, & + domain2=MOM_domain%mpp_domain, domain_position=position) + elseif (coarsening == 2) then + MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges, & + domain2=MOM_domain%mpp_domain_d2, domain_position=position) + else + call MOM_error(FATAL, "diag_axis_init called with an invalid value of coarsen.") + endif + else + if (present(coarsen)) then ; if (coarsen /= 1) then + call MOM_error(FATAL, "diag_axis_init does not support grid coarsening without a MOM_domain.") + endif ; endif + MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges) + endif + +end function MOM_diag_axis_init + +!> Returns the short name of the axis +subroutine get_MOM_diag_axis_name(id, name) + integer, intent(in) :: id !< The axis numeric id + character(len=*), intent(out) :: name !< The short name of the axis + + call fms_get_diag_axis_name(id, name) + +end subroutine get_MOM_diag_axis_name + +!> Return a unique numeric ID field a module/field name combination. +integer function get_MOM_diag_field_id(module_name, field_name) + character(len=*), intent(in) :: module_name !< A module name string to query. + character(len=*), intent(in) :: field_name !< A field name string to query. + + + get_MOM_diag_field_id = -1 + get_MOM_diag_field_id = get_diag_field_id_fms(module_name, field_name) + +end function get_MOM_diag_field_id + +!> Initializes the diagnostic manager +subroutine MOM_diag_manager_init(diag_model_subset, time_init, err_msg) + integer, optional, intent(in) :: diag_model_subset !< An optional diagnostic subset + integer, dimension(6), optional, intent(in) :: time_init !< An optional reference time for diagnostics + !! The default uses the value contained in the + !! diag_table. Format is Y-M-D-H-M-S + character(len=*), optional, intent(out) :: err_msg !< Error message. + call FMS_diag_manager_init(diag_model_subset, time_init, err_msg) + +end subroutine MOM_diag_manager_init + +!> Close the diagnostic manager +subroutine MOM_diag_manager_end(time) + type(time_type), intent(in) :: time !< Model time at call to close. 
+ + call FMS_diag_manager_end(time) + +end subroutine MOM_diag_manager_end + +!> Register a MOM diagnostic field for scalars +integer function register_diag_field_infra_scalar(module_name, field_name, init_time, & + long_name, units, missing_value, range, standard_name, do_not_log, & + err_msg, area, volume) + character(len=*), intent(in) :: module_name !< The name of the associated module + character(len=*), intent(in) :: field_name !< The name of the field + type(time_type), optional, intent(in) :: init_time !< The registration time + character(len=*), optional, intent(in) :: long_name !< A long name for the field + character(len=*), optional, intent(in) :: units !< Field units + character(len=*), optional, intent(in) :: standard_name !< A standard name for the field + real, optional, intent(in) :: missing_value !< Missing value attribute + real, dimension(2), optional, intent(in) :: range !< A valid range of the field + logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged + character(len=*), optional, intent(out):: err_msg !< An error message to return + integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute + integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute + + register_diag_field_infra_scalar = register_diag_field_fms(module_name, field_name, init_time, & + long_name, units, missing_value, range, standard_name, do_not_log, err_msg, area, volume) + +end function register_diag_field_infra_scalar + +!> Register a MOM diagnostic field for scalars +integer function register_diag_field_infra_array(module_name, field_name, axes, init_time, & + long_name, units, missing_value, range, mask_variant, standard_name, verbose, & + do_not_log, err_msg, interp_method, tile_count, area, volume) + character(len=*), intent(in) :: module_name !< The name of the associated module + character(len=*), intent(in) :: field_name !< The name of the field + integer, dimension(:), intent(in) :: axes !< Diagnostic IDs of axis attributes for the field + type(time_type), optional, intent(in) :: init_time !< The registration time + character(len=*), optional, intent(in) :: long_name !< A long name for the field + character(len=*), optional, intent(in) :: units !< Units of the field + real, optional, intent(in) :: missing_value !< Missing value attribute + real, dimension(2), optional, intent(in) :: range !< A valid range of the field + logical, optional, intent(in) :: mask_variant !< If true, the field mask is varying in time + character(len=*), optional, intent(in) :: standard_name !< A standard name for the field + logical, optional, intent(in) :: verbose !< If true, provide additional log information + logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged + character(len=*), optional, intent(in) :: interp_method !< If 'none' indicates the field should + !! 
not be interpolated as a scalar + integer, optional, intent(in) :: tile_count !< The tile number for the current PE + character(len=*), optional, intent(out):: err_msg !< An error message to return + integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute + integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute + + register_diag_field_infra_array = register_diag_field_fms(module_name, field_name, axes, init_time, & + long_name, units, missing_value, range, mask_variant, standard_name, verbose, do_not_log, & + err_msg, interp_method, tile_count, area, volume) + +end function register_diag_field_infra_array + + +integer function register_static_field_infra(module_name, field_name, axes, long_name, units, & + missing_value, range, mask_variant, standard_name, do_not_log, interp_method, & + tile_count, area, volume) + character(len=*), intent(in) :: module_name !< The name of the associated module + character(len=*), intent(in) :: field_name !< The name of the field + integer, dimension(:), intent(in) :: axes !< Diagnostic IDs of axis attributes for the field + character(len=*), optional, intent(in) :: long_name !< A long name for the field + character(len=*), optional, intent(in) :: units !< Units of the field + real, optional, intent(in) :: missing_value !< Missing value attribute + real, dimension(2), optional, intent(in) :: range !< A valid range of the field + logical, optional, intent(in) :: mask_variant !< If true, the field mask is varying in time + character(len=*), optional, intent(in) :: standard_name !< A standard name for the field + logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged + character(len=*), optional, intent(in) :: interp_method !< If 'none' indicates the field should + !! not be interpolated as a scalar + integer, optional, intent(in) :: tile_count !< The tile number for the current PE + integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute + integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute + + register_static_field_infra = register_static_field_fms(module_name, field_name, axes, long_name, units,& + & missing_value, range, mask_variant, standard_name, dynamic=.false.,do_not_log=do_not_log, & + interp_method=interp_method,tile_count=tile_count, area=area, volume=volume) +end function register_static_field_infra + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. +logical function send_data_infra_0d(diag_field_id, field, time, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, intent(in) :: field !< The value being recorded + TYPE(time_type), optional, intent(in) :: time !< The time for the current record + CHARACTER(len=*), optional, intent(out) :: err_msg !< An optional error message + + send_data_infra_0d = send_data_fms(diag_field_id, field, time, err_msg) +end function send_data_infra_0d + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. 
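+!!
+!! As a usage sketch only (the diagnostic id "id_sst", the array "sst" and the time
+!! variable "Time" below are hypothetical and are not defined in this module), a typical
+!! post through the generic send_data_infra interface looks like
+!!
+!!   used = send_data_infra(id_sst, sst, time=Time)
+!!
+!! with the specific routine below selected by the rank of the field argument.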
+logical function send_data_infra_1d(diag_field_id, field, is_in, ie_in, time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, dimension(:), intent(in) :: field !< A 1-d array of values being recorded + integer, optional, intent(in) :: is_in !< The starting index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:), optional, intent(in) :: mask !< An optional rank 1 logical mask + real, dimension(:), optional, intent(in) :: rmask !< An optional rank 1 mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_1d = send_data_fms(diag_field_id, field, time, is_in, mask, rmask, ie_in, weight, err_msg) + +end function send_data_infra_1d + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. +logical function send_data_infra_2d(diag_field_id, field, is_in, ie_in, js_in, je_in, & + time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, dimension(:,:), intent(in) :: field !< A 2-d array of values being recorded + integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded + integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded + integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:,:), optional, intent(in) :: mask !< An optional 2-d logical mask + real, dimension(:,:), optional, intent(in) :: rmask !< An optional 2-d mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_2d = send_data_fms(diag_field_id, field, time, is_in, js_in, mask, & + rmask, ie_in, je_in, weight, err_msg) + +end function send_data_infra_2d + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. 
+logical function send_data_infra_3d(diag_field_id, field, is_in, ie_in, js_in, je_in, ks_in, ke_in, &
+                                    time, mask, rmask, weight, err_msg)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  real, dimension(:,:,:), intent(in) :: field !< A 3-d array of values being recorded
+  integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded
+  integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded
+  integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded
+  integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded
+  integer, optional, intent(in) :: ks_in !< The starting k-index for the data being recorded
+  integer, optional, intent(in) :: ke_in !< The end k-index for the data being recorded
+  type(time_type), optional, intent(in) :: time !< The time for the current record
+  logical, dimension(:,:,:), optional, intent(in) :: mask !< An optional 3-d logical mask
+  real, dimension(:,:,:), optional, intent(in) :: rmask !< An optional 3-d mask array
+  real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current
+                                       !! record if there is averaging in time
+  character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon
+                                                     !! returning to the calling routine
+
+  send_data_infra_3d = send_data_fms(diag_field_id, field, time, is_in, js_in, ks_in, mask, &
+                                     rmask, ie_in, je_in, ke_in, weight, err_msg)
+
+end function send_data_infra_3d
+
+
+#ifdef OVERLOAD_R8
+!> Returns true if the argument data are successfully passed to a diagnostic manager
+!! with the indicated unique reference id, false otherwise.
+logical function send_data_infra_2d_r8(diag_field_id, field, is_in, ie_in, js_in, je_in, &
+                                       time, mask, rmask, weight, err_msg)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  real(kind=8), dimension(:,:), intent(in) :: field !< A 2-d array of values being recorded
+  integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded
+  integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded
+  integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded
+  integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded
+  type(time_type), optional, intent(in) :: time !< The time for the current record
+  logical, dimension(:,:), optional, intent(in) :: mask !< An optional 2-d logical mask
+  real, dimension(:,:), optional, intent(in) :: rmask !< An optional 2-d mask array
+  real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current
+                                       !! record if there is averaging in time
+  character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon
+                                                     !! returning to the calling routine
+
+  send_data_infra_2d_r8 = send_data_fms(diag_field_id, field, time, is_in, js_in, mask, &
+                                        rmask, ie_in, je_in, weight, err_msg)
+
+end function send_data_infra_2d_r8
+
+!> Returns true if the argument data are successfully passed to a diagnostic manager
+!! with the indicated unique reference id, false otherwise.
+logical function send_data_infra_3d_r8(diag_field_id, field, is_in, ie_in, js_in, je_in, ks_in, ke_in, &
+                                       time, mask, rmask, weight, err_msg)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  real(kind=8), dimension(:,:,:), intent(in) :: field !< A 3-d array of values being recorded
+  integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded
+  integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded
+  integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded
+  integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded
+  integer, optional, intent(in) :: ks_in !< The starting k-index for the data being recorded
+  integer, optional, intent(in) :: ke_in !< The end k-index for the data being recorded
+  type(time_type), optional, intent(in) :: time !< The time for the current record
+  logical, dimension(:,:,:), optional, intent(in) :: mask !< An optional 3-d logical mask
+  real, dimension(:,:,:), optional, intent(in) :: rmask !< An optional 3-d mask array
+  real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current
+                                       !! record if there is averaging in time
+  character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon
+                                                     !! returning to the calling routine
+
+  send_data_infra_3d_r8 = send_data_fms(diag_field_id, field, time, is_in, js_in, ks_in, mask, rmask, &
+                                        ie_in, je_in, ke_in, weight, err_msg)
+
+end function send_data_infra_3d_r8
+#endif
+
+!> Add a real scalar attribute to a diagnostic field
+subroutine MOM_diag_field_add_attribute_scalar_r(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  real, intent(in) :: att_value !< A real scalar value
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_scalar_r
+
+!> Add an integer attribute to a diagnostic field
+subroutine MOM_diag_field_add_attribute_scalar_i(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  integer, intent(in) :: att_value !< An integer scalar value
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_scalar_i
+
+!> Add a character string attribute to a diagnostic field
+subroutine MOM_diag_field_add_attribute_scalar_c(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  character(len=*), intent(in) :: att_value !< A character string value
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_scalar_c
+
+!> Add a 1-d array of real values as an attribute to a diagnostic field
+subroutine MOM_diag_field_add_attribute_r1d(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  real, dimension(:), intent(in) :: att_value !< An array of real values
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_r1d
+
+!> Add a 1-d array of integer values as an attribute to a diagnostic field
+subroutine MOM_diag_field_add_attribute_i1d(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  integer, dimension(:), intent(in) :: att_value !< An array of integer values
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_i1d
+
+end module MOM_diag_manager_infra
diff --git a/config_src/infra/FMS1/MOM_domain_infra.F90 b/config_src/infra/FMS1/MOM_domain_infra.F90
new file mode 100644
index 0000000000..7eff4597f3
--- /dev/null
+++ b/config_src/infra/FMS1/MOM_domain_infra.F90
@@ -0,0 +1,1996 @@
+!> Describes the decomposed MOM domain and has routines for communications across PEs
+module MOM_domain_infra
+
+! This file is part of MOM6. See LICENSE.md for the license.
+
+use MOM_coms_infra, only : PE_here, root_PE, num_PEs
+use MOM_cpu_clock_infra, only : cpu_clock_begin, cpu_clock_end
+use MOM_error_infra, only : MOM_error=>MOM_err, NOTE, WARNING, FATAL
+
+use mpp_domains_mod, only : domain2D, domain1D
+use mpp_domains_mod, only : mpp_define_io_domain, mpp_define_domains, mpp_deallocate_domain
+use mpp_domains_mod, only : mpp_get_domain_components, mpp_get_domain_extents, mpp_get_layout
+use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain
+use mpp_domains_mod, only : mpp_get_boundary, mpp_update_domains
+use mpp_domains_mod, only : mpp_start_update_domains, mpp_complete_update_domains
+use mpp_domains_mod, only : mpp_create_group_update, mpp_do_group_update
+use mpp_domains_mod, only : mpp_reset_group_update_field, mpp_group_update_initialized
+use mpp_domains_mod, only : mpp_start_group_update, mpp_complete_group_update
+use mpp_domains_mod, only : mpp_compute_block_extent
+use mpp_domains_mod, only : mpp_broadcast_domain, mpp_redistribute, mpp_global_field
+use mpp_domains_mod, only : AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR, BITWISE_EXACT_SUM
+use mpp_domains_mod, only : CYCLIC_GLOBAL_DOMAIN, FOLD_NORTH_EDGE
+use mpp_domains_mod, only : To_East => WUPDATE, To_West => EUPDATE, Omit_Corners => EDGEUPDATE
+use mpp_domains_mod, only : To_North => SUPDATE, To_South => NUPDATE
+use mpp_domains_mod, only : CENTER, CORNER, NORTH_FACE => NORTH, EAST_FACE => EAST
+use fms_io_mod, only : file_exist, parse_mask_table
+use fms_affinity_mod, only : fms_affinity_init, fms_affinity_set, fms_affinity_get
+
+! This subroutine is not in MOM6/src but may be required by legacy drivers
+use mpp_domains_mod, only : global_field_sum => mpp_global_sum
+
+! The `group_pass_type` fields are never accessed, so we keep it as an FMS type
+use mpp_domains_mod, only : group_pass_type => mpp_group_update_type
+
+implicit none ; private
+
+! These types are inherited from mpp, but are treated as opaque here.
+public :: domain2D, domain1D, group_pass_type
+! These interfaces are actually implemented or have explicit interfaces in this file.
+public :: create_MOM_domain, clone_MOM_domain, get_domain_components, get_domain_extent +public :: deallocate_MOM_domain, get_global_shape, compute_block_extent +public :: pass_var, pass_vector, fill_symmetric_edges, rescale_comp_data +public :: pass_var_start, pass_var_complete, pass_vector_start, pass_vector_complete +public :: create_group_pass, do_group_pass, start_group_pass, complete_group_pass +public :: redistribute_array, broadcast_domain, same_domain, global_field +public :: get_simple_array_i_ind, get_simple_array_j_ind +public :: MOM_thread_affinity_set, set_MOM_thread_affinity +! These are encoding constant parmeters. +public :: To_East, To_West, To_North, To_South, To_All, Omit_Corners +public :: AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR +public :: CORNER, CENTER, NORTH_FACE, EAST_FACE +! These are no longer used by MOM6 because the reproducing sum works so well, but they are +! still referenced by some of the non-GFDL couplers. +public :: global_field_sum, BITWISE_EXACT_SUM + +!> Do a halo update on an array +interface pass_var + module procedure pass_var_3d, pass_var_2d +end interface pass_var + +!> Do a halo update on a pair of arrays representing the two components of a vector +interface pass_vector + module procedure pass_vector_3d, pass_vector_2d +end interface pass_vector + +!> Initiate a non-blocking halo update on an array +interface pass_var_start + module procedure pass_var_start_3d, pass_var_start_2d +end interface pass_var_start + +!> Complete a non-blocking halo update on an array +interface pass_var_complete + module procedure pass_var_complete_3d, pass_var_complete_2d +end interface pass_var_complete + +!> Initiate a halo update on a pair of arrays representing the two components of a vector +interface pass_vector_start + module procedure pass_vector_start_3d, pass_vector_start_2d +end interface pass_vector_start + +!> Complete a halo update on a pair of arrays representing the two components of a vector +interface pass_vector_complete + module procedure pass_vector_complete_3d, pass_vector_complete_2d +end interface pass_vector_complete + +!> Set up a group of halo updates +interface create_group_pass + module procedure create_var_group_pass_2d + module procedure create_var_group_pass_3d + module procedure create_vector_group_pass_2d + module procedure create_vector_group_pass_3d +end interface create_group_pass + +!> Do a set of halo updates that fill in the values at the duplicated edges +!! of a staggered symmetric memory domain +interface fill_symmetric_edges + module procedure fill_vector_symmetric_edges_2d !, fill_vector_symmetric_edges_3d +! 
module procedure fill_scalar_symmetric_edges_2d, fill_scalar_symmetric_edges_3d +end interface fill_symmetric_edges + +!> Rescale the values of an array in its computational domain by a constant factor +interface rescale_comp_data + module procedure rescale_comp_data_4d, rescale_comp_data_3d, rescale_comp_data_2d +end interface rescale_comp_data + +!> Pass an array from one MOM domain to another +interface redistribute_array + module procedure redistribute_array_2d, redistribute_array_3d, redistribute_array_4d +end interface redistribute_array + +!> Copy one MOM_domain_type into another +interface clone_MOM_domain + module procedure clone_MD_to_MD, clone_MD_to_d2D +end interface clone_MOM_domain + +!> Extract the 1-d domain components from a MOM_domain or domain2d +interface get_domain_components + module procedure get_domain_components_MD, get_domain_components_d2D +end interface get_domain_components + +!> Returns the index ranges that have been stored in a MOM_domain_type +interface get_domain_extent + module procedure get_domain_extent_MD, get_domain_extent_d2D +end interface get_domain_extent + + +!> The MOM_domain_type contains information about the domain decomposition. +type, public :: MOM_domain_type + character(len=64) :: name !< The name of this domain + type(domain2D), pointer :: mpp_domain => NULL() !< The FMS domain with halos + !! on this processor, centered at h points. + type(domain2D), pointer :: mpp_domain_d2 => NULL() !< A coarse FMS domain with halos + !! on this processor, centered at h points. + integer :: niglobal !< The total horizontal i-domain size. + integer :: njglobal !< The total horizontal j-domain size. + integer :: nihalo !< The i-halo size in memory. + integer :: njhalo !< The j-halo size in memory. + logical :: symmetric !< True if symmetric memory is used with this domain. + logical :: nonblocking_updates !< If true, non-blocking halo updates are + !! allowed. The default is .false. (for now). + logical :: thin_halo_updates !< If true, optional arguments may be used to + !! specify the width of the halos that are + !! updated with each call. + integer :: layout(2) !< This domain's processor layout. This is + !! saved to enable the construction of related + !! new domains with different resolutions or + !! other properties. + integer :: io_layout(2) !< The IO-layout used with this domain. + integer :: X_FLAGS !< Flag that specifies the properties of the + !! domain in the i-direction in a define_domain call. + integer :: Y_FLAGS !< Flag that specifies the properties of the + !! domain in the j-direction in a define_domain call. + logical, pointer :: maskmap(:,:) => NULL() !< A pointer to an array indicating + !! which logical processors are actually used for + !! the ocean code. The other logical processors + !! would be contain only land points and are not + !! assigned to actual processors. This need not be + !! assigned if all logical processors are used. + integer :: turns !< Number of quarter-turns from input to this grid. + type(MOM_domain_type), pointer :: domain_in => NULL() + !< Reference to unrotated domain (if turned) +end type MOM_domain_type + +integer, parameter :: To_All = To_East + To_West + To_North + To_South !< A flag for passing in all directions + +contains + +!> pass_var_3d does a halo update for a three-dimensional array. +subroutine pass_var_3d(array, MOM_dom, sideflag, complete, position, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. 
+ type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! sothe halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_3d + +!> pass_var_2d does a halo update for a two-dimensional array. +subroutine pass_var_2d(array, MOM_dom, sideflag, complete, position, halo, inner_halo, clock) + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full halo + !! by default. + integer, optional, intent(in) :: inner_halo !< The size of an inner halo to avoid updating, + !! or 0 to avoid updating symmetric memory + !! computational domain points. Setting this >=0 + !! also enforces that complete=.true. 
+ integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + real, allocatable, dimension(:,:) :: tmp + integer :: pos, i_halo, j_halo + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB + integer :: inner, i, j, isfw, iefw, isfe, iefe, jsfs, jefs, jsfn, jefn + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + block_til_complete = .true. ; if (present(complete)) block_til_complete = complete + pos = CENTER ; if (present(position)) pos = position + + if (present(inner_halo)) then ; if (inner_halo >= 0) then + ! Store the original values. + allocate(tmp(size(array,1), size(array,2))) + tmp(:,:) = array(:,:) + block_til_complete = .true. + endif ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position) + endif + + if (present(inner_halo)) then ; if (inner_halo >= 0) then + call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed) + ! Convert to local indices for arrays starting at 1. + isc = isc - (isd-1) ; iec = iec - (isd-1) ; ied = ied - (isd-1) ; isd = 1 + jsc = jsc - (jsd-1) ; jec = jec - (jsd-1) ; jed = jed - (jsd-1) ; jsd = 1 + i_halo = min(inner_halo, isc-1) ; j_halo = min(inner_halo, jsc-1) + + ! Figure out the array index extents of the eastern, western, northern and southern regions to copy. 
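+    ! These strips straddle the western, eastern, southern and northern edges of the
+    ! computational domain; copying the pre-update values saved in tmp back into them
+    ! ensures that points within inner_halo of the computational domain edges are not
+    ! altered by the halo update above.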
+    if (pos == CENTER) then
+      if (size(array,1) == ied) then
+        isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CENTER array.") ; endif
+      if (size(array,2) == jed) then
+        jsfs = jsc - j_halo ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CENTER array.") ; endif
+    elseif (pos == CORNER) then
+      if (size(array,1) == ied) then
+        isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo
+      elseif (size(array,1) == ied+1) then
+        isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1)
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CORNER array.") ; endif
+      if (size(array,2) == jed) then
+        jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo
+      elseif (size(array,2) == jed+1) then
+        jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1)
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CORNER array.") ; endif
+    elseif (pos == NORTH_FACE) then
+      if (size(array,1) == ied) then
+        isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for NORTH_FACE array.") ; endif
+      if (size(array,2) == jed) then
+        jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo
+      elseif (size(array,2) == jed+1) then
+        jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1)
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for NORTH_FACE array.") ; endif
+    elseif (pos == EAST_FACE) then
+      if (size(array,1) == ied) then
+        isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo
+      elseif (size(array,1) == ied+1) then
+        isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1)
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for EAST_FACE array.") ; endif
+      if (size(array,2) == jed) then
+        jsfs = jsc - j_halo ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for EAST_FACE array.") ; endif
+    else
+      call MOM_error(FATAL, "pass_var_2d: Unrecognized position")
+    endif
+
+    ! Copy back the stored inner halo points
+    do j=jsfs,jefn ; do i=isfw,iefw ; array(i,j) = tmp(i,j) ; enddo ; enddo
+    do j=jsfs,jefn ; do i=isfe,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo
+    do j=jsfs,jefs ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo
+    do j=jsfn,jefn ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo
+
+    deallocate(tmp)
+  endif ; endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine pass_var_2d
+
+!> pass_var_start_2d starts a halo update for a two-dimensional array.
+function pass_var_start_2d(array, MOM_dom, sideflag, position, complete, halo, &
+                           clock)
+  real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points
+                                               !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                               !! needed to determine where data should be
+                                               !! sent.
+  integer, optional, intent(in) :: sideflag !< An optional integer indicating which
+      !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST,
+      !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east,
+      !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted.
+  integer, optional, intent(in) :: position !< An optional argument indicating the position.
+                                            !! This is CENTER by default and is often CORNER,
+                                            !! but could also be EAST_FACE or NORTH_FACE.
+  logical, optional, intent(in) :: complete !< An optional argument indicating whether the
+                                            !! halo updates should be completed before
+                                            !! progress resumes. Omitting complete is the
+                                            !! same as setting complete to .true.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+  integer :: pass_var_start_2d !< The integer index for this update.
+
+  integer :: dirflag
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  dirflag = To_All ! 60
+  if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif
+
+  if (present(halo) .and. MOM_dom%thin_halo_updates) then
+    pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, &
+                          flags=dirflag, position=position, &
+                          whalo=halo, ehalo=halo, shalo=halo, nhalo=halo)
+  else
+    pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, &
+                          flags=dirflag, position=position)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end function pass_var_start_2d
+
+!> pass_var_start_3d starts a halo update for a three-dimensional array.
+function pass_var_start_3d(array, MOM_dom, sideflag, position, complete, halo, &
+                           clock)
+  real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points
+                                               !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                               !! needed to determine where data should be
+                                               !! sent.
+  integer, optional, intent(in) :: sideflag !< An optional integer indicating which
+      !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST,
+      !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east,
+      !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted.
+  integer, optional, intent(in) :: position !< An optional argument indicating the position.
+                                            !! This is CENTER by default and is often CORNER,
+                                            !! but could also be EAST_FACE or NORTH_FACE.
+  logical, optional, intent(in) :: complete !< An optional argument indicating whether the
+                                            !! halo updates should be completed before
+                                            !! progress resumes. Omitting complete is the
+                                            !! same as setting complete to .true.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+  integer :: pass_var_start_3d !< The integer index for this update.
+
+  integer :: dirflag
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  dirflag = To_All ! 60
+  if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif
+
+  if (present(halo) .and.
MOM_dom%thin_halo_updates) then + pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_var_start_3d + +!> pass_var_complete_2d completes a halo update for a two-dimensional array. +subroutine pass_var_complete_2d(id_update, array, MOM_dom, sideflag, position, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has + !! been returned from a previous call to + !! pass_var_start. + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_complete_2d + +!> pass_var_complete_3d completes a halo update for a three-dimensional array. +subroutine pass_var_complete_3d(id_update, array, MOM_dom, sideflag, position, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has + !! been returned from a previous call to + !! pass_var_start. + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! 
This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_complete_3d + +!> pass_vector_2d does a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. +subroutine pass_vector_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then
+    call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, &
+                            gridtype=stagger_local, complete = block_til_complete, &
+                            whalo=halo, ehalo=halo, shalo=halo, nhalo=halo)
+  else
+    call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, &
+                            gridtype=stagger_local, complete = block_til_complete)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine pass_vector_2d
+
+!> fill_vector_symmetric_edges_2d does an unusual set of halo updates that only
+!! fill in the values at the edge of a pair of symmetric memory two-dimensional
+!! arrays representing the components of a two-dimensional horizontal vector.
+!! If symmetric memory is not being used, this subroutine does nothing except to
+!! possibly turn optional cpu clocks on or off.
+subroutine fill_vector_symmetric_edges_2d(u_cmpt, v_cmpt, MOM_dom, stagger, scalar, &
+                                          clock)
+  real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector
+                                                !! pair which is having its halos points
+                                                !! exchanged.
+  real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the
+                                                !! vector pair which is having its halos points
+                                                !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                                !! needed to determine where data should be
+                                                !! sent.
+  integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID,
+      !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are
+      !! discretized. Omitting stagger is the same as setting it to CGRID_NE.
+  logical, optional, intent(in) :: scalar !< If present and true, the two arrays are treated as a
+                                          !! pair of co-located scalars rather than vector
+                                          !! components, and the SCALAR_PAIR flag is added to
+                                          !! the exchange.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+
+  ! Local variables
+  integer :: stagger_local
+  integer :: dirflag
+  integer :: i, j, isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB
+  real, allocatable, dimension(:) :: sbuff_x, sbuff_y, wbuff_x, wbuff_y
+  logical :: block_til_complete
+
+  if (.not. MOM_dom%symmetric) then
+    return
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  stagger_local = CGRID_NE ! Default value for type of grid
+  if (present(stagger)) stagger_local = stagger
+
+  if (.not.(stagger_local == CGRID_NE .or. stagger_local == BGRID_NE)) return
+
+  call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec)
+  call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed)
+
+  ! Adjust isc, etc., to account for the fact that the input array indices all
+  ! start at 1 (and are effectively on a SW grid!).
+  isc = isc - (isd-1) ; iec = iec - (isd-1)
+  jsc = jsc - (jsd-1) ; jec = jec - (jsd-1)
+  IscB = isc ; IecB = iec+1 ; JscB = jsc ; JecB = jec+1
+
+  dirflag = To_All !
60 + if (present(scalar)) then ; if (scalar) dirflag = To_All+SCALAR_PAIR ; endif + + if (stagger_local == CGRID_NE) then + allocate(wbuff_x(jsc:jec)) ; allocate(sbuff_y(isc:iec)) + wbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 + call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + wbufferx=wbuff_x, sbuffery=sbuff_y, & + gridtype=CGRID_NE) + do i=isc,iec + v_cmpt(i,JscB) = sbuff_y(i) + enddo + do j=jsc,jec + u_cmpt(IscB,j) = wbuff_x(j) + enddo + deallocate(wbuff_x) ; deallocate(sbuff_y) + elseif (stagger_local == BGRID_NE) then + allocate(wbuff_x(JscB:JecB)) ; allocate(sbuff_x(IscB:IecB)) + allocate(wbuff_y(JscB:JecB)) ; allocate(sbuff_y(IscB:IecB)) + wbuff_x(:) = 0.0 ; wbuff_y(:) = 0.0 ; sbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 + call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + wbufferx=wbuff_x, sbufferx=sbuff_x, & + wbuffery=wbuff_y, sbuffery=sbuff_y, & + gridtype=BGRID_NE) + do I=IscB,IecB + u_cmpt(I,JscB) = sbuff_x(I) ; v_cmpt(I,JscB) = sbuff_y(I) + enddo + do J=JscB,JecB + u_cmpt(IscB,J) = wbuff_x(J) ; v_cmpt(IscB,J) = wbuff_y(J) + enddo + deallocate(wbuff_x) ; deallocate(sbuff_x) + deallocate(wbuff_y) ; deallocate(sbuff_y) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine fill_vector_symmetric_edges_2d + +!> pass_vector_3d does a halo update for a pair of three-dimensional arrays +!! representing the components of a three-dimensional horizontal vector. +subroutine pass_vector_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! 
Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_3d + +!> pass_vector_start_2d starts a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. +function pass_vector_start_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_vector_start_2d !< The integer index for this + !! update. + + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_vector_start_2d + +!> pass_vector_start_3d starts a halo update for a pair of three-dimensional arrays +!! representing the components of a three-dimensional horizontal vector. +function pass_vector_start_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_vector_start_3d !< The integer index for this + !! update. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_vector_start_3d + +!> pass_vector_complete_2d completes a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. 
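+!!
+!! As an illustrative sketch only (the arrays u and v, the domain G%Domain and the clock
+!! id below are hypothetical, not defined in this module), the non-blocking pattern pairs
+!! this routine with pass_vector_start:
+!!
+!!   id_uv = pass_vector_start(u, v, G%Domain, clock=id_clock_pass)
+!!   ! ... unrelated work can proceed while the halo exchange is in flight ...
+!!   call pass_vector_complete(id_uv, u, v, G%Domain, clock=id_clock_pass)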
+subroutine pass_vector_complete_2d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has been + !! returned from a previous call to + !! pass_var_start. + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_complete_2d + +!> pass_vector_complete_3d completes a halo update for a pair of three-dimensional +!! arrays representing the components of a three-dimensional horizontal vector. +subroutine pass_vector_complete_3d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has been + !! returned from a previous call to + !! pass_var_start. + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. 
+ integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_complete_3d + +!> create_var_group_pass_2d sets up a group of two-dimensional array halo updates. +subroutine create_var_group_pass_2d(group, array, MOM_dom, sideflag, position, & + halo, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 
60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,array) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_var_group_pass_2d + +!> create_var_group_pass_3d sets up a group of three-dimensional array halo updates. +subroutine create_var_group_pass_3d(group, array, MOM_dom, sideflag, position, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,array) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_var_group_pass_3d + +!> create_vector_group_pass_2d sets up a group of two-dimensional vector halo updates. +subroutine create_vector_group_pass_2d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. 
+ + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,u_cmpt, v_cmpt) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_vector_group_pass_2d + +!> create_vector_group_pass_3d sets up a group of three-dimensional vector halo updates. +subroutine create_vector_group_pass_3d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! 
BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,u_cmpt, v_cmpt) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_vector_group_pass_3d + +!> do_group_pass carries out a group halo update. +subroutine do_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_do_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine do_group_pass + +!> start_group_pass starts out a group halo update. +subroutine start_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_start_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine start_group_pass + +!> complete_group_pass completes a group halo update. +subroutine complete_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! 
started then stopped to time this routine. + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_complete_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine complete_group_pass + + +!> Pass a 2-D array from one MOM domain to another +subroutine redistribute_array_2d(Domain1, array1, Domain2, array2, complete) + type(domain2d), & + intent(in) :: Domain1 !< The MOM domain from which to extract information. + real, dimension(:,:), intent(in) :: array1 !< The array from which to extract information. + type(domain2d), & + intent(in) :: Domain2 !< The MOM domain receiving information. + real, dimension(:,:), intent(out) :: array2 !< The array receiving information. + logical, optional, intent(in) :: complete !< If true, finish communication before proceeding. + + ! Local variables + logical :: do_complete + + do_complete=.true.;if (PRESENT(complete)) do_complete = complete + + call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete) + +end subroutine redistribute_array_2d + +!> Pass a 3-D array from one MOM domain to another +subroutine redistribute_array_3d(Domain1, array1, Domain2, array2, complete) + type(domain2d), & + intent(in) :: Domain1 !< The MOM domain from which to extract information. + real, dimension(:,:,:), intent(in) :: array1 !< The array from which to extract information. + type(domain2d), & + intent(in) :: Domain2 !< The MOM domain receiving information. + real, dimension(:,:,:), intent(out) :: array2 !< The array receiving information. + logical, optional, intent(in) :: complete !< If true, finish communication before proceeding. + + ! Local variables + logical :: do_complete + + do_complete=.true.;if (PRESENT(complete)) do_complete = complete + + call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete) + +end subroutine redistribute_array_3d + +!> Pass a 4-D array from one MOM domain to another +subroutine redistribute_array_4d(Domain1, array1, Domain2, array2, complete) + type(domain2d), & + intent(in) :: Domain1 !< The MOM domain from which to extract information. + real, dimension(:,:,:,:), intent(in) :: array1 !< The array from which to extract information. + type(domain2d), & + intent(in) :: Domain2 !< The MOM domain receiving information. + real, dimension(:,:,:,:), intent(out) :: array2 !< The array receiving information. + logical, optional, intent(in) :: complete !< If true, finish communication before proceeding. + + ! Local variables + logical :: do_complete + + do_complete=.true.;if (PRESENT(complete)) do_complete = complete + + call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete) + +end subroutine redistribute_array_4d + + +!> Rescale the values of a 4-D array in its computational domain by a constant factor +subroutine rescale_comp_data_4d(domain, array, scale) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + real, dimension(:,:,:,:), intent(inout) :: array !< The array which is having the data in its + !! computational domain rescaled + real, intent(in) :: scale !< A scaling factor by which to multiply the + !! 
values in the computational domain of array + integer :: is, ie, js, je + + if (scale == 1.0) return + + call get_simple_array_i_ind(domain, size(array,1), is, ie) + call get_simple_array_j_ind(domain, size(array,2), js, je) + array(is:ie,js:je,:,:) = scale*array(is:ie,js:je,:,:) + +end subroutine rescale_comp_data_4d + +!> Rescale the values of a 3-D array in its computational domain by a constant factor +subroutine rescale_comp_data_3d(domain, array, scale) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + real, dimension(:,:,:), intent(inout) :: array !< The array which is having the data in its + !! computational domain rescaled + real, intent(in) :: scale !< A scaling factor by which to multiply the + !! values in the computational domain of array + integer :: is, ie, js, je + + if (scale == 1.0) return + + call get_simple_array_i_ind(domain, size(array,1), is, ie) + call get_simple_array_j_ind(domain, size(array,2), js, je) + array(is:ie,js:je,:) = scale*array(is:ie,js:je,:) + +end subroutine rescale_comp_data_3d + +!> Rescale the values of a 2-D array in its computational domain by a constant factor +subroutine rescale_comp_data_2d(domain, array, scale) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + real, dimension(:,:), intent(inout) :: array !< The array which is having the data in its + !! computational domain rescaled + real, intent(in) :: scale !< A scaling factor by which to multiply the + !! values in the computational domain of array + integer :: is, ie, js, je + + if (scale == 1.0) return + + call get_simple_array_i_ind(domain, size(array,1), is, ie) + call get_simple_array_j_ind(domain, size(array,2), js, je) + array(is:ie,js:je) = scale*array(is:ie,js:je) + +end subroutine rescale_comp_data_2d + +!> create_MOM_domain creates and initializes a MOM_domain_type variables, based on the information +!! provided in arguments. +subroutine create_MOM_domain(MOM_dom, n_global, n_halo, reentrant, tripolar_N, layout, io_layout, & + domain_name, mask_table, symmetric, thin_halos, nonblocking) + type(MOM_domain_type), pointer :: MOM_dom !< A pointer to the MOM_domain_type being defined here. + integer, dimension(2), intent(in) :: n_global !< The number of points on the global grid in + !! the i- and j-directions + integer, dimension(2), intent(in) :: n_halo !< The number of halo points on each processor + logical, dimension(2), intent(in) :: reentrant !< If true the grid is periodic in the i- and j- directions + logical, intent(in) :: tripolar_N !< If true the grid uses northern tripolar connectivity + integer, dimension(2), intent(in) :: layout !< The layout of logical PEs in the i- and j-directions. + integer, dimension(2), optional, intent(in) :: io_layout !< The layout for parallel input and output. + character(len=*), optional, intent(in) :: domain_name !< A name for this domain, "MOM" if missing. + character(len=*), optional, intent(in) :: mask_table !< The full relative or absolute path to the mask table. + logical, optional, intent(in) :: symmetric !< If present, this specifies whether this domain + !! uses symmetric memory, or true if missing. + logical, optional, intent(in) :: thin_halos !< If present, this specifies whether to permit the use of + !! thin halo updates, or true if missing. + logical, optional, intent(in) :: nonblocking !< If present, this specifies whether to permit the use of + !! nonblocking halo updates, or false if missing. + + ! 
local variables + integer, dimension(4) :: global_indices ! The lower and upper global i- and j-index bounds + integer :: X_FLAGS ! A combination of integers encoding the x-direction grid connectivity. + integer :: Y_FLAGS ! A combination of integers encoding the y-direction grid connectivity. + integer :: xhalo_d2, yhalo_d2 + character(len=200) :: mesg ! A string for use in error messages + logical :: mask_table_exists ! Mask_table is present and the file it points to exists + + if (.not.associated(MOM_dom)) then + allocate(MOM_dom) + allocate(MOM_dom%mpp_domain) + allocate(MOM_dom%mpp_domain_d2) + endif + + MOM_dom%name = "MOM" ; if (present(domain_name)) MOM_dom%name = trim(domain_name) + + X_FLAGS = 0 ; Y_FLAGS = 0 + if (reentrant(1)) X_FLAGS = CYCLIC_GLOBAL_DOMAIN + if (reentrant(2)) Y_FLAGS = CYCLIC_GLOBAL_DOMAIN + if (tripolar_N) then + Y_FLAGS = FOLD_NORTH_EDGE + if (reentrant(2)) call MOM_error(FATAL,"MOM_domains: "// & + "TRIPOLAR_N and REENTRANT_Y may not be used together.") + endif + + MOM_dom%nonblocking_updates = nonblocking + MOM_dom%thin_halo_updates = thin_halos + MOM_dom%symmetric = .true. ; if (present(symmetric)) MOM_dom%symmetric = symmetric + MOM_dom%niglobal = n_global(1) ; MOM_dom%njglobal = n_global(2) + MOM_dom%nihalo = n_halo(1) ; MOM_dom%njhalo = n_halo(2) + + ! Save the extra data for creating other domains of different resolution that overlay this domain. + MOM_dom%X_FLAGS = X_FLAGS + MOM_dom%Y_FLAGS = Y_FLAGS + MOM_dom%layout(:) = layout(:) + + ! Set up the io_layout, with error handling. + MOM_dom%io_layout(:) = (/ 1, 1 /) + if (present(io_layout)) then + if (io_layout(1) == 0) then + MOM_dom%io_layout(1) = layout(1) + elseif (io_layout(1) > 1) then + MOM_dom%io_layout(1) = io_layout(1) + if (modulo(layout(1), io_layout(1)) /= 0) then + write(mesg,'("MOM_domains_init: The i-direction I/O-layout, IO_LAYOUT(1)=",i4, & + &", does not evenly divide the i-direction layout, NIPROC=,",i4,".")') io_layout(1), layout(1) + call MOM_error(FATAL, mesg) + endif + endif + + if (io_layout(2) == 0) then + MOM_dom%io_layout(2) = layout(2) + elseif (io_layout(2) > 1) then + MOM_dom%io_layout(2) = io_layout(2) + if (modulo(layout(2), io_layout(2)) /= 0) then + write(mesg,'("MOM_domains_init: The j-direction I/O-layout, IO_LAYOUT(2)=",i4, & + &", does not evenly divide the j-direction layout, NJPROC=,",i4,".")') io_layout(2), layout(2) + call MOM_error(FATAL, mesg) + endif + endif + endif + + if (present(mask_table)) then + mask_table_exists = file_exist(mask_table) + if (mask_table_exists) then + allocate(MOM_dom%maskmap(layout(1), layout(2))) + call parse_mask_table(mask_table, MOM_dom%maskmap, MOM_dom%name) + endif + else + mask_table_exists = .false. + endif + + ! Initialize as an unrotated domain + MOM_dom%turns = 0 + + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain) + + !For downsampled domain, recommend a halo of 1 (or 0?) since we're not doing wide-stencil computations. + !But that does not work because the downsampled field would not have the correct size to pass the checks, e.g., we get + !error: downsample_diag_indices_get: peculiar size 28 in i-direction\ndoes not match one of 24 25 26 27 + ! call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, halo_size=(MOM_dom%nihalo/2), coarsen=2) + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, coarsen=2) + +end subroutine create_MOM_domain + +!> dealloc_MOM_domain deallocates memory associated with a pointer to a MOM_domain_type +!! 
and potentially all of its contents
+subroutine deallocate_MOM_domain(MOM_domain, cursory)
+  type(MOM_domain_type), pointer :: MOM_domain !< A pointer to the MOM_domain_type being deallocated
+  logical, optional, intent(in) :: cursory !< If true, do not deallocate fields associated
+                                           !! with the underlying infrastructure
+  logical :: invasive  ! If true, deallocate fields associated with the underlying infrastructure
+
+  invasive = .true. ; if (present(cursory)) invasive = .not.cursory
+
+  if (associated(MOM_domain)) then
+    if (associated(MOM_domain%mpp_domain)) then
+      if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain)
+      deallocate(MOM_domain%mpp_domain)
+    endif
+    if (associated(MOM_domain%mpp_domain_d2)) then
+      if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain_d2)
+      deallocate(MOM_domain%mpp_domain_d2)
+    endif
+    if (associated(MOM_domain%maskmap)) deallocate(MOM_domain%maskmap)
+    deallocate(MOM_domain)
+  endif
+
+end subroutine deallocate_MOM_domain
+
+!> MOM_thread_affinity_set returns true if the number of OpenMP threads has been set to a value greater than 1.
+function MOM_thread_affinity_set()
+  ! Local variables
+  !$ integer :: ocean_nthreads       ! Number of OpenMP threads
+  !$ integer :: omp_get_num_threads  ! An OpenMP function that returns the number of threads
+  logical :: MOM_thread_affinity_set
+
+  MOM_thread_affinity_set = .false.
+  !$ call fms_affinity_init()
+  !$OMP PARALLEL
+  !$OMP MASTER
+  !$ ocean_nthreads = omp_get_num_threads()
+  !$OMP END MASTER
+  !$OMP END PARALLEL
+  !$ MOM_thread_affinity_set = (ocean_nthreads > 1)
+end function MOM_thread_affinity_set
+
+!> set_MOM_thread_affinity sets the number of OpenMP threads to use with the ocean.
+subroutine set_MOM_thread_affinity(ocean_nthreads, ocean_hyper_thread)
+  integer, intent(in) :: ocean_nthreads !< Number of OpenMP threads to use for the ocean model
+  logical, intent(in) :: ocean_hyper_thread !< If true, use hyper-threading
+
+  ! Local variables
+  !$ integer :: omp_get_thread_num, omp_get_num_threads ! These OpenMP functions are used below
+
+  !$ call fms_affinity_init() ! fms_affinity_init can be safely called more than once.
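+  ! The lines below are only compiled when OpenMP is active: they pin the 'OCEAN' component's
+  ! PEs via the FMS affinity interface, set the requested OpenMP thread count, and then have
+  ! every thread report the affinity and thread numbering that it actually ended up with.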
+ !$ call fms_affinity_set('OCEAN', ocean_hyper_thread, ocean_nthreads) + !$ call omp_set_num_threads(ocean_nthreads) + !$OMP PARALLEL + !$ write(6,*) "MOM_domains_mod OMPthreading ", fms_affinity_get(), omp_get_thread_num(), omp_get_num_threads() + !$ flush(6) + !$OMP END PARALLEL +end subroutine set_MOM_thread_affinity + +!> This subroutine retrieves the 1-d domains that make up the 2d-domain in a MOM_domain +subroutine get_domain_components_MD(MOM_dom, x_domain, y_domain) + type(MOM_domain_type), intent(in) :: MOM_dom !< The MOM_domain whose contents are being extracted + type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain + type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain + + call mpp_get_domain_components(MOM_dom%mpp_domain, x_domain, y_domain) +end subroutine get_domain_components_MD + +!> This subroutine retrieves the 1-d domains that make up a 2d-domain +subroutine get_domain_components_d2D(domain, x_domain, y_domain) + type(domain2D), intent(in) :: domain !< The 2D domain whose contents are being extracted + type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain + type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain + + call mpp_get_domain_components(domain, x_domain, y_domain) +end subroutine get_domain_components_d2D + +!> clone_MD_to_MD copies one MOM_domain_type into another, while allowing +!! some properties of the new type to differ from the original one. +subroutine clone_MD_to_MD(MD_in, MOM_dom, min_halo, halo_size, symmetric, domain_name, & + turns, refine, extra_halo) + type(MOM_domain_type), target, intent(in) :: MD_in !< An existing MOM_domain + type(MOM_domain_type), pointer :: MOM_dom + !< A pointer to a MOM_domain that will be + !! allocated if it is unassociated, and will have data + !! copied from MD_in + integer, dimension(2), & + optional, intent(inout) :: min_halo !< If present, this sets the + !! minimum halo size for this domain in the i- and j- + !! directions, and returns the actual halo size used. + integer, optional, intent(in) :: halo_size !< If present, this sets the halo + !! size for the domain in the i- and j-directions. + !! min_halo and halo_size can not both be present. + logical, optional, intent(in) :: symmetric !< If present, this specifies + !! whether the new domain is symmetric, regardless of + !! whether the macro SYMMETRIC_MEMORY_ is defined. + character(len=*), & + optional, intent(in) :: domain_name !< A name for the new domain, copied + !! from MD_in if missing. + integer, optional, intent(in) :: turns !< Number of quarter turns + integer, optional, intent(in) :: refine !< A factor by which to enhance the grid resolution. + integer, optional, intent(in) :: extra_halo !< An extra number of points in the halos + !! compared with MD_in + + integer :: global_indices(4) + logical :: mask_table_exists + integer, dimension(:), allocatable :: exni ! The extents of the grid for each i-row of the layout. + ! The sum of exni must equal MOM_dom%niglobal. + integer, dimension(:), allocatable :: exnj ! The extents of the grid for each j-row of the layout. + ! The sum of exni must equal MOM_dom%niglobal. + integer :: qturns ! The number of quarter turns, restricted to the range of 0 to 3. + integer :: i, j, nl1, nl2 + + qturns = 0 + if (present(turns)) qturns = modulo(turns, 4) + + if (.not.associated(MOM_dom)) then + allocate(MOM_dom) + allocate(MOM_dom%mpp_domain) + allocate(MOM_dom%mpp_domain_d2) + endif + +! 
Save the extra data for creating other domains of different resolution that overlay this domain + MOM_dom%symmetric = MD_in%symmetric + MOM_dom%nonblocking_updates = MD_in%nonblocking_updates + MOM_dom%thin_halo_updates = MD_in%thin_halo_updates + + if (modulo(qturns, 2) /= 0) then + MOM_dom%niglobal = MD_in%njglobal ; MOM_dom%njglobal = MD_in%niglobal + MOM_dom%nihalo = MD_in%njhalo ; MOM_dom%njhalo = MD_in%nihalo + call get_layout_extents(MD_in, exnj, exni) + + MOM_dom%X_FLAGS = MD_in%Y_FLAGS ; MOM_dom%Y_FLAGS = MD_in%X_FLAGS + MOM_dom%layout(:) = MD_in%layout(2:1:-1) + MOM_dom%io_layout(:) = MD_in%io_layout(2:1:-1) + else + MOM_dom%niglobal = MD_in%niglobal ; MOM_dom%njglobal = MD_in%njglobal + MOM_dom%nihalo = MD_in%nihalo ; MOM_dom%njhalo = MD_in%njhalo + call get_layout_extents(MD_in, exni, exnj) + + MOM_dom%X_FLAGS = MD_in%X_FLAGS ; MOM_dom%Y_FLAGS = MD_in%Y_FLAGS + MOM_dom%layout(:) = MD_in%layout(:) + MOM_dom%io_layout(:) = MD_in%io_layout(:) + endif + + ! Ensure that the points per processor are the same on the source and densitation grids. + select case (qturns) + case (1) ; call invert(exni) + case (2) ; call invert(exni) ; call invert(exnj) + case (3) ; call invert(exnj) + end select + + if (associated(MD_in%maskmap)) then + mask_table_exists = .true. + allocate(MOM_dom%maskmap(MOM_dom%layout(1), MOM_dom%layout(2))) + + nl1 = MOM_dom%layout(1) ; nl2 = MOM_dom%layout(2) + select case (qturns) + case (0) + do j=1,nl2 ; do i=1,nl1 + MOM_dom%maskmap(i,j) = MD_in%maskmap(i, j) + enddo ; enddo + case (1) + do j=1,nl2 ; do i=1,nl1 + MOM_dom%maskmap(i,j) = MD_in%maskmap(j, nl1+1-i) + enddo ; enddo + case (2) + do j=1,nl2 ; do i=1,nl1 + MOM_dom%maskmap(i,j) = MD_in%maskmap(nl1+1-i, nl2+1-j) + enddo ; enddo + case (3) + do j=1,nl2 ; do i=1,nl1 + MOM_dom%maskmap(i,j) = MD_in%maskmap(nl2+1-j, i) + enddo ; enddo + end select + else + mask_table_exists = .false. + endif + + ! Optionally enhance the grid resolution. + if (present(refine)) then ; if (refine > 1) then + MOM_dom%niglobal = refine*MOM_dom%niglobal ; MOM_dom%njglobal = refine*MOM_dom%njglobal + MOM_dom%nihalo = refine*MOM_dom%nihalo ; MOM_dom%njhalo = refine*MOM_dom%njhalo + do i=1,MOM_dom%layout(1) ; exni(i) = refine*exni(i) ; enddo + do j=1,MOM_dom%layout(2) ; exnj(j) = refine*exnj(j) ; enddo + endif ; endif + + ! Optionally enhance the grid resolution. + if (present(extra_halo)) then ; if (extra_halo > 0) then + MOM_dom%nihalo = MOM_dom%nihalo + extra_halo ; MOM_dom%njhalo = MOM_dom%njhalo + extra_halo + endif ; endif + + if (present(halo_size) .and. present(min_halo)) call MOM_error(FATAL, & + "clone_MOM_domain can not have both halo_size and min_halo present.") + + if (present(min_halo)) then + MOM_dom%nihalo = max(MOM_dom%nihalo, min_halo(1)) + min_halo(1) = MOM_dom%nihalo + MOM_dom%njhalo = max(MOM_dom%njhalo, min_halo(2)) + min_halo(2) = MOM_dom%njhalo + endif + + if (present(halo_size)) then + MOM_dom%nihalo = halo_size ; MOM_dom%njhalo = halo_size + endif + + if (present(symmetric)) then ; MOM_dom%symmetric = symmetric ; endif + + if (present(domain_name)) then + MOM_dom%name = trim(domain_name) + else + MOM_dom%name = MD_in%name + endif + + MOM_dom%turns = qturns + if (qturns /= 0) then + MOM_dom%domain_in => MD_in + endif + + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain, xextent=exni, yextent=exnj) + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, domain_name=MOM_dom%name, coarsen=2) + +end subroutine clone_MD_to_MD + + +!> clone_MD_to_d2D uses information from a MOM_domain_type to create a new +!! 
domain2d type, while allowing some properties of the new type to differ from +!! the original one. +subroutine clone_MD_to_d2D(MD_in, mpp_domain, min_halo, halo_size, symmetric, & + domain_name, turns, xextent, yextent, coarsen) + type(MOM_domain_type), intent(in) :: MD_in !< An existing MOM_domain to be cloned + type(domain2d), intent(inout) :: mpp_domain !< The new mpp_domain to be set up + integer, dimension(2), & + optional, intent(inout) :: min_halo !< If present, this sets the + !! minimum halo size for this domain in the i- and j- + !! directions, and returns the actual halo size used. + integer, optional, intent(in) :: halo_size !< If present, this sets the halo + !! size for the domain in the i- and j-directions. + !! min_halo and halo_size can not both be present. + logical, optional, intent(in) :: symmetric !< If present, this specifies + !! whether the new domain is symmetric, regardless of + !! whether the macro SYMMETRIC_MEMORY_ is defined or + !! whether MD_in is symmetric. + character(len=*), & + optional, intent(in) :: domain_name !< A name for the new domain, "MOM" + !! if missing. + integer, optional, intent(in) :: turns !< Number of quarter turns - not implemented here. + integer, optional, intent(in) :: coarsen !< A factor by which to coarsen this grid. + !! The default of 1 is for no coarsening. + integer, dimension(:), optional, intent(in) :: xextent !< The number of grid points in the + !! tracer computational domain for division of the x-layout. + integer, dimension(:), optional, intent(in) :: yextent !< The number of grid points in the + !! tracer computational domain for division of the y-layout. + + integer :: global_indices(4) + integer :: nihalo, njhalo + logical :: symmetric_dom, do_coarsen + character(len=64) :: dom_name + + if (present(turns)) & + call MOM_error(FATAL, "Rotation not supported for MOM_domain to domain2d") + + if (present(halo_size) .and. present(min_halo)) call MOM_error(FATAL, & + "clone_MOM_domain can not have both halo_size and min_halo present.") + + do_coarsen = .false. 
; if (present(coarsen)) then ; do_coarsen = (coarsen > 1) ; endif + + nihalo = MD_in%nihalo ; njhalo = MD_in%njhalo + if (do_coarsen) then + nihalo = int(MD_in%nihalo / coarsen) ; njhalo = int(MD_in%njhalo / coarsen) + endif + + if (present(min_halo)) then + nihalo = max(nihalo, min_halo(1)) + njhalo = max(njhalo, min_halo(2)) + min_halo(1) = nihalo ; min_halo(2) = njhalo + endif + if (present(halo_size)) then + nihalo = halo_size ; njhalo = halo_size + endif + + symmetric_dom = MD_in%symmetric + if (present(symmetric)) then ; symmetric_dom = symmetric ; endif + + dom_name = MD_in%name + if (do_coarsen) dom_name = trim(MD_in%name)//"c" + if (present(domain_name)) dom_name = trim(domain_name) + + global_indices(1:4) = (/ 1, MD_in%niglobal, 1, MD_in%njglobal /) + if (do_coarsen) then + global_indices(1:4) = (/ 1, (MD_in%niglobal/coarsen), 1, (MD_in%njglobal/coarsen) /) + endif + + if (associated(MD_in%maskmap)) then + call mpp_define_domains( global_indices, MD_in%layout, mpp_domain, & + xflags=MD_in%X_FLAGS, yflags=MD_in%Y_FLAGS, xhalo=nihalo, yhalo=njhalo, & + xextent=xextent, yextent=yextent, symmetry=symmetric_dom, name=dom_name, & + maskmap=MD_in%maskmap ) + else + call mpp_define_domains( global_indices, MD_in%layout, mpp_domain, & + xflags=MD_in%X_FLAGS, yflags=MD_in%Y_FLAGS, xhalo=nihalo, yhalo=njhalo, & + symmetry=symmetric_dom, xextent=xextent, yextent=yextent, name=dom_name) + endif + + if (MD_in%layout(1) * MD_in%layout(2) > 1) then + if ((MD_in%io_layout(1) + MD_in%io_layout(2) > 0)) then + call mpp_define_io_domain(mpp_domain, MD_in%io_layout) + else + call mpp_define_io_domain(mpp_domain, [1, 1] ) + endif + endif +end subroutine clone_MD_to_d2D + +!> Returns the index ranges that have been stored in a MOM_domain_type +subroutine get_domain_extent_MD(Domain, isc, iec, jsc, jec, isd, ied, jsd, jed, & + isg, ieg, jsg, jeg, idg_offset, jdg_offset, & + symmetric, local_indexing, index_offset, coarsen) + type(MOM_domain_type), & + intent(in) :: Domain !< The MOM domain from which to extract information + integer, intent(out) :: isc !< The start i-index of the computational domain + integer, intent(out) :: iec !< The end i-index of the computational domain + integer, intent(out) :: jsc !< The start j-index of the computational domain + integer, intent(out) :: jec !< The end j-index of the computational domain + integer, intent(out) :: isd !< The start i-index of the data domain + integer, intent(out) :: ied !< The end i-index of the data domain + integer, intent(out) :: jsd !< The start j-index of the data domain + integer, intent(out) :: jed !< The end j-index of the data domain + integer, optional, intent(out) :: isg !< The start i-index of the global domain + integer, optional, intent(out) :: ieg !< The end i-index of the global domain + integer, optional, intent(out) :: jsg !< The start j-index of the global domain + integer, optional, intent(out) :: jeg !< The end j-index of the global domain + integer, optional, intent(out) :: idg_offset !< The offset between the corresponding global and + !! data i-index spaces. + integer, optional, intent(out) :: jdg_offset !< The offset between the corresponding global and + !! data j-index spaces. + logical, optional, intent(out) :: symmetric !< True if symmetric memory is used. + logical, optional, intent(in) :: local_indexing !< If true, local tracer array indices start at 1, + !! as in most MOM6 code. The default is true. + integer, optional, intent(in) :: index_offset !< A fixed additional offset to all indices. This + !! 
can be useful for some types of debugging with + !! dynamic memory allocation. The default is 0. + integer, optional, intent(in) :: coarsen !< A factor by which the grid is coarsened. + !! The default is 1, for no coarsening. + + ! Local variables + integer :: isg_, ieg_, jsg_, jeg_ + integer :: ind_off, idg_off, jdg_off, coarsen_lev + logical :: local + + local = .true. ; if (present(local_indexing)) local = local_indexing + ind_off = 0 ; if (present(index_offset)) ind_off = index_offset + + coarsen_lev = 1 ; if (present(coarsen)) coarsen_lev = coarsen + + if (coarsen_lev == 1) then + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + call mpp_get_global_domain(Domain%mpp_domain, isg_, ieg_, jsg_, jeg_) + elseif (coarsen_lev == 2) then + if (.not.associated(Domain%mpp_domain_d2)) call MOM_error(FATAL, & + "get_domain_extent called with coarsen=2, but Domain%mpp_domain_d2 is not associated.") + call mpp_get_compute_domain(Domain%mpp_domain_d2, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain_d2, isd, ied, jsd, jed) + call mpp_get_global_domain(Domain%mpp_domain_d2, isg_, ieg_, jsg_, jeg_) + else + call MOM_error(FATAL, "get_domain_extent called with an unsupported level of coarsening.") + endif + + if (local) then + ! This code institutes the MOM convention that local array indices start at 1. + idg_off = isd - 1 ; jdg_off = jsd - 1 + isc = isc - isd + 1 ; iec = iec - isd + 1 ; jsc = jsc - jsd + 1 ; jec = jec - jsd + 1 + ied = ied - isd + 1 ; jed = jed - jsd + 1 + isd = 1 ; jsd = 1 + else + idg_off = 0 ; jdg_off = 0 + endif + if (ind_off /= 0) then + idg_off = idg_off + ind_off ; jdg_off = jdg_off + ind_off + isc = isc + ind_off ; iec = iec + ind_off + jsc = jsc + ind_off ; jec = jec + ind_off + isd = isd + ind_off ; ied = ied + ind_off + jsd = jsd + ind_off ; jed = jed + ind_off + endif + if (present(isg)) isg = isg_ + if (present(ieg)) ieg = ieg_ + if (present(jsg)) jsg = jsg_ + if (present(jeg)) jeg = jeg_ + if (present(idg_offset)) idg_offset = idg_off + if (present(jdg_offset)) jdg_offset = jdg_off + if (present(symmetric)) symmetric = Domain%symmetric + +end subroutine get_domain_extent_MD + +!> Returns the index ranges that have been stored in a domain2D type +subroutine get_domain_extent_d2D(Domain, isc, iec, jsc, jec, isd, ied, jsd, jed) + type(domain2d), intent(in) :: Domain !< The MOM domain from which to extract information + integer, intent(out) :: isc !< The start i-index of the computational domain + integer, intent(out) :: iec !< The end i-index of the computational domain + integer, intent(out) :: jsc !< The start j-index of the computational domain + integer, intent(out) :: jec !< The end j-index of the computational domain + integer, optional, intent(out) :: isd !< The start i-index of the data domain + integer, optional, intent(out) :: ied !< The end i-index of the data domain + integer, optional, intent(out) :: jsd !< The start j-index of the data domain + integer, optional, intent(out) :: jed !< The end j-index of the data domain + + ! 
Local variables + integer :: isd_, ied_, jsd_, jed_, jsg_, jeg_, isg_, ieg_ + + call mpp_get_compute_domain(Domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain, isd_, ied_, jsd_, jed_) + + if (present(isd)) isd = isd_ + if (present(ied)) ied = ied_ + if (present(jsd)) jsd = jsd_ + if (present(jed)) jed = jed_ + +end subroutine get_domain_extent_d2D + +!> Return the (potentially symmetric) computational domain i-bounds for an array +!! passed without index specifications (i.e. indices start at 1) based on an array size. +subroutine get_simple_array_i_ind(domain, size, is, ie, symmetric) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, intent(in) :: size !< The i-array size + integer, intent(out) :: is !< The computational domain starting i-index. + integer, intent(out) :: ie !< The computational domain ending i-index. + logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes + !! can be considered. + ! Local variables + logical :: sym + character(len=120) :: mesg, mesg2 + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed + + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + + isc = isc-isd+1 ; iec = iec-isd+1 ; ied = ied-isd+1 ; isd = 1 + sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric + + if (size == ied) then ; is = isc ; ie = iec + elseif (size == 1+iec-isc) then ; is = 1 ; ie = size + elseif (sym .and. (size == 1+ied)) then ; is = isc ; ie = iec+1 + elseif (sym .and. (size == 2+iec-isc)) then ; is = 1 ; ie = size+1 + else + write(mesg,'("Unrecognized size ", i6, "in call to get_simple_array_i_ind. \")') size + if (sym) then + write(mesg2,'("Valid sizes are : ", 2i7)') ied, 1+iec-isc + else + write(mesg2,'("Valid sizes are : ", 4i7)') ied, 1+iec-isc, 1+ied, 2+iec-isc + endif + call MOM_error(FATAL, trim(mesg)//trim(mesg2)) + endif + +end subroutine get_simple_array_i_ind + + +!> Return the (potentially symmetric) computational domain j-bounds for an array +!! passed without index specifications (i.e. indices start at 1) based on an array size. +subroutine get_simple_array_j_ind(domain, size, js, je, symmetric) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, intent(in) :: size !< The j-array size + integer, intent(out) :: js !< The computational domain starting j-index. + integer, intent(out) :: je !< The computational domain ending j-index. + logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes + !! can be considered. + ! Local variables + logical :: sym + character(len=120) :: mesg, mesg2 + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed + + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + + jsc = jsc-jsd+1 ; jec = jec-jsd+1 ; jed = jed-jsd+1 ; jsd = 1 + sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric + + if (size == jed) then ; js = jsc ; je = jec + elseif (size == 1+jec-jsc) then ; js = 1 ; je = size + elseif (sym .and. (size == 1+jed)) then ; js = jsc ; je = jec+1 + elseif (sym .and. (size == 2+jec-jsc)) then ; js = 1 ; je = size+1 + else + write(mesg,'("Unrecognized size ", i6, "in call to get_simple_array_j_ind. 
\")') size
+    if (sym) then
+      write(mesg2,'("Valid sizes are : ", 2i7)') jed, 1+jec-jsc
+    else
+      write(mesg2,'("Valid sizes are : ", 4i7)') jed, 1+jec-jsc, 1+jed, 2+jec-jsc
+    endif
+    call MOM_error(FATAL, trim(mesg)//trim(mesg2))
+  endif
+
+end subroutine get_simple_array_j_ind
+
+!> Invert the contents of a 1-d array
+subroutine invert(array)
+  integer, dimension(:), intent(inout) :: array !< The 1-d array to invert
+  integer :: i, ni, swap
+  ni = size(array)
+  ! Only the first half of the elements are swapped with their mirror images; looping over
+  ! the full array would swap every pair twice and leave the array in its original order.
+  do i=1,ni/2
+    swap = array(i)
+    array(i) = array(ni+1-i)
+    array(ni+1-i) = swap
+  enddo
+end subroutine invert
+
+!> Returns the global shape of h-point arrays
+subroutine get_global_shape(domain, niglobal, njglobal)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  integer, intent(out) :: niglobal !< i-index global size of h-point arrays
+  integer, intent(out) :: njglobal !< j-index global size of h-point arrays
+
+  niglobal = domain%niglobal
+  njglobal = domain%njglobal
+end subroutine get_global_shape
+
+!> Get the array ranges in one dimension for the divisions of a global index space
+subroutine compute_block_extent(isg, ieg, ndivs, ibegin, iend)
+  integer, intent(in) :: isg !< The starting index of the global index space
+  integer, intent(in) :: ieg !< The ending index of the global index space
+  integer, intent(in) :: ndivs !< The number of divisions
+  integer, dimension(:), intent(out) :: ibegin !< The starting index of each division
+  integer, dimension(:), intent(out) :: iend !< The ending index of each division
+
+  call mpp_compute_block_extent(isg, ieg, ndivs, ibegin, iend)
+end subroutine compute_block_extent
+
+!> Broadcast a 2-d domain from the root PE to the other PEs
+subroutine broadcast_domain(domain)
+  type(domain2d), intent(inout) :: domain !< The domain2d type that will be shared across PEs.
+
+  call mpp_broadcast_domain(domain)
+end subroutine broadcast_domain
+
+!> Broadcast an entire 2-d array from the root processor to all others.
+subroutine global_field(domain, local, global)
+  type(domain2d), intent(inout) :: domain !< The domain2d type that describes the decomposition
+  real, dimension(:,:), intent(in) :: local !< The portion of the array on the local PE
+  real, dimension(:,:), intent(out) :: global !< The whole global array
+
+  call mpp_global_field(domain, local, global)
+end subroutine global_field
+
+!> same_domain returns true if two domains use the same list of PEs and layouts and have the same
+!! size computational domains, and false if the domains do not conform with each other.
+!! Different halo sizes or indexing conventions do not alter the results.
+logical function same_domain(domain_a, domain_b)
+  type(domain2D), intent(in) :: domain_a !< The first domain in the comparison
+  type(domain2D), intent(in) :: domain_b !< The second domain in the comparison
+
+  ! Local variables
+  integer :: isc_a, iec_a, jsc_a, jec_a, isc_b, iec_b, jsc_b, jec_b
+  integer :: layout_a(2), layout_b(2)
+
+  ! This routine currently does a few checks for consistent domains; more could be added.
+  call mpp_get_layout(domain_a, layout_a)
+  call mpp_get_layout(domain_b, layout_b)
+
+  call get_domain_extent(domain_a, isc_a, iec_a, jsc_a, jec_a)
+  call get_domain_extent(domain_b, isc_b, iec_b, jsc_b, jec_b)
+
+  same_domain = (layout_a(1) == layout_b(1)) .and. (layout_a(2) == layout_b(2)) .and. &
+    (iec_a - isc_a == iec_b - isc_b) .and. 
(jec_a - jsc_a == jec_b - jsc_b) + +end function same_domain + +!> Returns arrays of the i- and j- sizes of the h-point computational domains for each +!! element of the grid layout. Any input values in the extent arrays are discarded, so +!! they are effectively intent out despite their declared intent of inout. +subroutine get_layout_extents(Domain, extent_i, extent_j) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, dimension(:), allocatable, intent(inout) :: extent_i !< The number of points in the + !! i-direction in each i-row of the layout + integer, dimension(:), allocatable, intent(inout) :: extent_j !< The number of points in the + !! j-direction in each j-row of the layout + + if (allocated(extent_i)) deallocate(extent_i) + if (allocated(extent_j)) deallocate(extent_j) + allocate(extent_i(domain%layout(1))) ; extent_i(:) = 0 + allocate(extent_j(domain%layout(2))) ; extent_j(:) = 0 + call mpp_get_domain_extents(domain%mpp_domain, extent_i, extent_j) +end subroutine get_layout_extents + +end module MOM_domain_infra diff --git a/config_src/infra/FMS1/MOM_ensemble_manager_infra.F90 b/config_src/infra/FMS1/MOM_ensemble_manager_infra.F90 new file mode 100644 index 0000000000..66bbb86e2f --- /dev/null +++ b/config_src/infra/FMS1/MOM_ensemble_manager_infra.F90 @@ -0,0 +1,95 @@ +!> A simple (very thin) wrapper for managing ensemble member layout information +module MOM_ensemble_manager_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use ensemble_manager_mod, only : FMS_ensemble_manager_init => ensemble_manager_init +use ensemble_manager_mod, only : FMS_ensemble_pelist_setup => ensemble_pelist_setup +use ensemble_manager_mod, only : FMS_get_ensemble_id => get_ensemble_id +use ensemble_manager_mod, only : FMS_get_ensemble_size => get_ensemble_size +use ensemble_manager_mod, only : FMS_get_ensemble_pelist => get_ensemble_pelist +use ensemble_manager_mod, only : FMS_get_ensemble_filter_pelist => get_ensemble_filter_pelist + +implicit none ; private + +public :: ensemble_manager_init, ensemble_pelist_setup +public :: get_ensemble_id, get_ensemble_size +public :: get_ensemble_pelist, get_ensemble_filter_pelist + +contains + +!> Initializes the ensemble manager which divides available resources +!! in order to concurrently execute an ensemble of model realizations. +subroutine ensemble_manager_init() + + call FMS_ensemble_manager_init() + +end subroutine ensemble_manager_init + +!> Create a list of processing elements (PEs) across components +!! associated with the current ensemble member. +subroutine ensemble_pelist_setup(concurrent, atmos_npes, ocean_npes, land_npes, ice_npes, & + Atm_pelist, Ocean_pelist, Land_pelist, Ice_pelist) + logical, intent(in) :: concurrent !< A logical flag, if True, then ocean fast + !! PEs are run concurrently with + !! slow PEs within the coupler. 
+ integer, intent(in) :: atmos_npes !< The number of atmospheric (fast) PEs + integer, intent(in) :: ocean_npes !< The number of ocean (slow) PEs + integer, intent(in) :: land_npes !< The number of land PEs (fast) + integer, intent(in) :: ice_npes !< The number of ice (fast) PEs + integer, dimension(:), intent(inout) :: Atm_pelist !< A list of Atm PEs + integer, dimension(:), intent(inout) :: Ocean_pelist !< A list of Ocean PEs + integer, dimension(:), intent(inout) :: Land_pelist !< A list of Land PEs + integer, dimension(:), intent(inout) :: Ice_pelist !< A list of Ice PEs + + + call FMS_ensemble_pelist_setup(concurrent, atmos_npes, ocean_npes, land_npes, ice_npes, & + Atm_pelist, Ocean_pelist, Land_pelist, Ice_pelist) + +end subroutine ensemble_pelist_setup + +!> Returns the numeric id for the current ensemble member +function get_ensemble_id() + integer :: get_ensemble_id + + get_ensemble_id = FMS_get_ensemble_id() + +end function get_ensemble_id + +!> Returns ensemble information as follows, +!! index (1) :: ensemble size +!! index (2) :: Number of PEs per ensemble member +!! index (3) :: Number of ocean PEs per ensemble member +!! index (4) :: Number of atmos PEs per ensemble member +!! index (5) :: Number of land PEs per ensemble member +!! index (6) :: Number of ice PEs per ensemble member +function get_ensemble_size() + integer, dimension(6) :: get_ensemble_size + + get_ensemble_size = FMS_get_ensemble_size() + +end function get_ensemble_size + +!> Returns the list of PEs associated with all ensemble members +!! Results are stored in the argument array which must be large +!! enough to contain the list. If the optional name argument is present, +!! the returned processor list are for a particular component (atmos, ocean ,land, ice) +subroutine get_ensemble_pelist(pelist, name) + integer, intent(inout) :: pelist(:,:) !< A processor list for all ensemble members + character(len=*), optional, intent(in) :: name !< An optional component name (atmos, ocean, land, ice) + + call FMS_get_ensemble_pelist(pelist, name) + +end subroutine get_ensemble_pelist + +!> Returns the list of PEs associated with the named ensemble filter application. +!! Valid component names include ('atmos', 'ocean', 'land', and 'ice') +subroutine get_ensemble_filter_pelist(pelist, name) + integer, intent(inout) :: pelist(:) !< A processor list for the ensemble filter + character(len=*), intent(in) :: name !< The component name (atmos, ocean, land, ice) + + call FMS_get_Ensemble_filter_pelist(pelist, name) + +end subroutine get_ensemble_filter_pelist + +end module MOM_ensemble_manager_infra diff --git a/config_src/infra/FMS1/MOM_error_infra.F90 b/config_src/infra/FMS1/MOM_error_infra.F90 new file mode 100644 index 0000000000..e5a8b8dc68 --- /dev/null +++ b/config_src/infra/FMS1/MOM_error_infra.F90 @@ -0,0 +1,42 @@ +!> Routines for error handling and I/O management +module MOM_error_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use mpp_mod, only : mpp_error, mpp_pe, mpp_root_pe, mpp_stdlog=>stdlog, mpp_stdout=>stdout +use mpp_mod, only : NOTE, WARNING, FATAL + +implicit none ; private + +public :: MOM_err, is_root_pe, stdlog, stdout +!> Integer parameters encoding the severity of an error message +public :: NOTE, WARNING, FATAL + +contains + +!> MOM_err writes an error message, and may cause the run to stop depending on the +!! severity of the error. 
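+!! For example (illustrative only; the message text is hypothetical):
+!!
+!!   call MOM_err(WARNING, "MOM_example: vertical viscosity exceeds its expected range.")
+!!   call MOM_err(FATAL, "MOM_example: negative layer thickness detected.")
+!!
+!! A NOTE or WARNING returns to the caller, while a FATAL severity ends the run via mpp_error.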
+subroutine MOM_err(severity, message) + integer, intent(in) :: severity !< The severity level of this error + character(len=*), intent(in) :: message !< A message to write out + + call mpp_error(severity, message) +end subroutine MOM_err + +!> stdout returns the standard Fortran unit number for output +integer function stdout() + stdout = mpp_stdout() +end function stdout + +!> stdlog returns the standard Fortran unit number to use to log messages +integer function stdlog() + stdlog = mpp_stdlog() +end function stdlog + +!> is_root_pe returns .true. if the current PE is the root PE. +logical function is_root_pe() + is_root_pe = .false. + if (mpp_pe() == mpp_root_pe()) is_root_pe = .true. +end function is_root_pe + +end module MOM_error_infra diff --git a/config_src/infra/FMS1/MOM_interp_infra.F90 b/config_src/infra/FMS1/MOM_interp_infra.F90 new file mode 100644 index 0000000000..774f6a67d2 --- /dev/null +++ b/config_src/infra/FMS1/MOM_interp_infra.F90 @@ -0,0 +1,267 @@ +!> This module wraps the FMS temporal and spatial interpolation routines +module MOM_interp_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use MOM_domain_infra, only : MOM_domain_type, domain2d +use MOM_time_manager, only : time_type +use horiz_interp_mod, only : horiz_interp_new, horiz_interp, horiz_interp_init, horiz_interp_type +use mpp_io_mod, only : axistype, mpp_get_axis_data +use time_interp_external_mod, only : time_interp_external +use time_interp_external_mod, only : init_external_field, time_interp_external_init +use time_interp_external_mod, only : get_external_field_size +use time_interp_external_mod, only : get_external_field_axes, get_external_field_missing + +implicit none ; private + +public :: horiz_interp_type, horiz_interp_init +public :: time_interp_extern, init_extern_field, time_interp_external_init +public :: get_external_field_info, axistype, get_axis_data +public :: run_horiz_interp, build_horiz_interp_weights + +!> Read a field based on model time, and rotate to the model domain. +interface time_interp_extern + module procedure time_interp_extern_0d + module procedure time_interp_extern_2d + module procedure time_interp_extern_3d +end interface time_interp_extern + +!> perform horizontal interpolation of field +interface run_horiz_interp + module procedure horiz_interp_from_weights_field2d + module procedure horiz_interp_from_weights_field3d +end interface + +!> build weights for horizontal interpolation of field +interface build_horiz_interp_weights + module procedure build_horiz_interp_weights_2d_to_2d +end interface build_horiz_interp_weights + +contains + +!> perform horizontal interpolation of a 2d field using pre-computed weights +!! source and destination coordinates are 2d +subroutine horiz_interp_from_weights_field2d(Interp, data_in, data_out, verbose, mask_in, mask_out, & + missing_value, missing_permit, err_msg) + + type(horiz_interp_type), intent(in) :: Interp !< type containing interpolation options and weights + real, dimension(:,:), intent(in) :: data_in !< input data + real, dimension(:,:), intent(out) :: data_out !< output data + integer, optional, intent(in) :: verbose !< verbosity level + real, dimension(:,:), optional, intent(in) :: mask_in !< mask for input data + real, dimension(:,:), optional, intent(out) :: mask_out !< mask for output data + real, optional, intent(in) :: missing_value !< A value indicating missing data + integer, optional, intent(in) :: missing_permit !< number of allowed points with + !! 
missing value for interpolation (0-3) + character(len=*), optional, intent(out) :: err_msg !< error message + + call horiz_interp(Interp, data_in, data_out, verbose, & + mask_in, mask_out, missing_value, missing_permit, & + err_msg, new_missing_handle=.true. ) + +end subroutine horiz_interp_from_weights_field2d + + +!> perform horizontal interpolation of a 3d field using pre-computed weights +!! source and destination coordinates are 2d +subroutine horiz_interp_from_weights_field3d(Interp, data_in, data_out, verbose, mask_in, mask_out, & + missing_value, missing_permit, err_msg) + + type(horiz_interp_type), intent(in) :: Interp !< type containing interpolation options and weights + real, dimension(:,:,:), intent(in) :: data_in !< input data + real, dimension(:,:,:), intent(out) :: data_out !< output data + integer, optional, intent(in) :: verbose !< verbosity level + real, dimension(:,:,:), optional, intent(in) :: mask_in !< mask for input data + real, dimension(:,:,:), optional, intent(out) :: mask_out !< mask for output data + real, optional, intent(in) :: missing_value !< A value indicating missing data + integer, optional, intent(in) :: missing_permit !< number of allowed points with + !! missing value for interpolation (0-3) + character(len=*), optional, intent(out) :: err_msg !< error message + + call horiz_interp(Interp, data_in, data_out, verbose, mask_in, mask_out, & + missing_value, missing_permit, err_msg) + +end subroutine horiz_interp_from_weights_field3d + + +!> build horizontal interpolation weights from source grid defined by 2d lon/lat to destination grid +!! defined by 2d lon/lat +subroutine build_horiz_interp_weights_2d_to_2d(Interp, lon_in, lat_in, lon_out, lat_out, & + verbose, interp_method, num_nbrs, max_dist, & + src_modulo, mask_in, mask_out, & + is_latlon_in, is_latlon_out) + + type(horiz_interp_type), intent(inout) :: Interp !< type containing interpolation options and weights + real, dimension(:,:), intent(in) :: lon_in !< input longitude 2d + real, dimension(:,:), intent(in) :: lat_in !< input latitude 2d + real, dimension(:,:), intent(in) :: lon_out !< output longitude 2d + real, dimension(:,:), intent(in) :: lat_out !< output latitude 2d + integer, optional, intent(in) :: verbose !< verbosity level + character(len=*), optional, intent(in) :: interp_method !< interpolation method + integer, optional, intent(in) :: num_nbrs !< number of nearest neighbors + real, optional, intent(in) :: max_dist !< maximum region of influence + logical, optional, intent(in) :: src_modulo !< periodicity of E-W boundary + real, dimension(:,:), optional, intent(in) :: mask_in !< mask for input data + real, dimension(:,:), optional, intent(inout) :: mask_out !< mask for output data + logical, optional, intent(in) :: is_latlon_in !< input grid is regular lat/lon grid + logical, optional, intent(in) :: is_latlon_out !< output grid is regular lat/lon grid + + call horiz_interp_new(Interp, lon_in, lat_in, lon_out, lat_out, & + verbose, interp_method, num_nbrs, max_dist, & + src_modulo, mask_in, mask_out, & + is_latlon_in, is_latlon_out) + +end subroutine build_horiz_interp_weights_2d_to_2d + + +!> Extracts and returns the axis data stored in an axistype. 
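A usage sketch for the two wrappers above: the weights are built once for a pair of 2-d grids and then reused for each field interpolated between them. The subroutine name, the assumed-shape declarations, and the choice of the "bilinear" method are illustrative only.

  subroutine example_horiz_interp(lon_src, lat_src, lon_dst, lat_dst, data_src, data_dst)
    use MOM_interp_infra, only : horiz_interp_type, build_horiz_interp_weights, run_horiz_interp
    real, dimension(:,:), intent(in)  :: lon_src, lat_src  ! Source grid coordinates
    real, dimension(:,:), intent(in)  :: lon_dst, lat_dst  ! Destination grid coordinates
    real, dimension(:,:), intent(in)  :: data_src          ! Field on the source grid
    real, dimension(:,:), intent(out) :: data_dst          ! Field on the destination grid
    type(horiz_interp_type) :: Interp

    ! Build the weights once, then apply them to each field on the same grid pair.
    call build_horiz_interp_weights(Interp, lon_src, lat_src, lon_dst, lat_dst, &
                                    interp_method="bilinear")
    call run_horiz_interp(Interp, data_src, data_dst)
  end subroutine example_horiz_interp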
+subroutine get_axis_data( axis, dat ) + type(axistype), intent(in) :: axis !< An axis type + real, dimension(:), intent(out) :: dat !< The data in the axis variable + + call mpp_get_axis_data( axis, dat ) +end subroutine get_axis_data + + +!> get size of an external field from field index +function get_extern_field_size(index) + + integer, intent(in) :: index !< field index + integer :: get_extern_field_size(4) !< field size + + get_extern_field_size = get_external_field_size(index) + +end function get_extern_field_size + + +!> get axes of an external field from field index +function get_extern_field_axes(index) + + integer, intent(in) :: index !< field index + type(axistype), dimension(4) :: get_extern_field_axes !< field axes + + get_extern_field_axes = get_external_field_axes(index) + +end function get_extern_field_axes + + +!> get missing value of an external field from field index +function get_extern_field_missing(index) + + integer, intent(in) :: index !< field index + real :: get_extern_field_missing !< field missing value + + get_extern_field_missing = get_external_field_missing(index) + +end function get_extern_field_missing + + +!> Get information about the external fields. +subroutine get_external_field_info(field_id, size, axes, missing) + integer, intent(in) :: field_id !< The integer index of the external + !! field returned from a previous + !! call to init_external_field() + integer, dimension(4), optional, intent(inout) :: size !< Dimension sizes for the input data + type(axistype), dimension(4), optional, intent(inout) :: axes !< Axis types for the input data + real, optional, intent(inout) :: missing !< Missing value for the input data + + if (present(size)) then + size(1:4) = get_extern_field_size(field_id) + endif + + if (present(axes)) then + axes(1:4) = get_extern_field_axes(field_id) + endif + + if (present(missing)) then + missing = get_extern_field_missing(field_id) + endif + +end subroutine get_external_field_info + + +!> Read a scalar field based on model time. +subroutine time_interp_extern_0d(field_id, time, data_in, verbose) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, intent(inout) :: data_in !< The interpolated value + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + + call time_interp_external(field_id, time, data_in, verbose=verbose) +end subroutine time_interp_extern_0d + + +!> Read a 2d field from an external based on model time, potentially including horizontal +!! interpolation and rotation of the data +subroutine time_interp_extern_2d(field_id, time, data_in, interp, verbose, horz_interp, mask_out) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! 
from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, dimension(:,:), intent(inout) :: data_in !< The array in which to store the interpolated values + integer, optional, intent(in) :: interp !< A flag indicating the temporal interpolation method + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(horiz_interp_type), & + optional, intent(in) :: horz_interp !< A structure to control horizontal interpolation + logical, dimension(:,:), & + optional, intent(out) :: mask_out !< An array that is true where there is valid data + + call time_interp_external(field_id, time, data_in, interp=interp, verbose=verbose, & + horz_interp=horz_interp, mask_out=mask_out) +end subroutine time_interp_extern_2d + + +!> Read a 3d field based on model time, and rotate to the model grid +subroutine time_interp_extern_3d(field_id, time, data_in, interp, verbose, horz_interp, mask_out) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, dimension(:,:,:), intent(inout) :: data_in !< The array in which to store the interpolated values + integer, optional, intent(in) :: interp !< A flag indicating the temporal interpolation method + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(horiz_interp_type), & + optional, intent(in) :: horz_interp !< A structure to control horizontal interpolation + logical, dimension(:,:,:), & + optional, intent(out) :: mask_out !< An array that is true where there is valid data + + call time_interp_external(field_id, time, data_in, interp=interp, verbose=verbose, & + horz_interp=horz_interp, mask_out=mask_out) +end subroutine time_interp_extern_3d + + +!> initialize an external field +integer function init_extern_field(file, fieldname, MOM_domain, domain, verbose, & + threading, ierr, ignore_axis_atts, correct_leap_year_inconsistency ) + + character(len=*), intent(in) :: file !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The name of the field in the file + integer, optional, intent(in) :: threading !< A flag specifying whether the root PE reads + !! the data and broadcasts it (SINGLE_FILE) or all + !! processors read (MULTIPLE, the default). + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(domain2d), optional, intent(in) :: domain !< A domain2d type that describes the decomposition + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + integer, optional, intent(out) :: ierr !< Returns a non-zero error code in case of failure + logical, optional, intent(in) :: ignore_axis_atts !< If present and true, do not issue a + !! fatal error if the axis Cartesian attribute is + !! not set to a recognized value. + logical, optional, intent(in) :: correct_leap_year_inconsistency !< If present and true, + !! then if, (1) a calendar containing leap years + !! is in use, and (2) the modulo time period of the + !! data is an integer number of years, then map + !! a model date of Feb 29. onto a common year on Feb. 28. 
+ + if (present(MOM_Domain)) then + init_extern_field = init_external_field(file, fieldname, domain=MOM_domain%mpp_domain, & + verbose=verbose, threading=threading, ierr=ierr, ignore_axis_atts=ignore_axis_atts, & + correct_leap_year_inconsistency=correct_leap_year_inconsistency) + else + init_extern_field = init_external_field(file, fieldname, domain=domain, & + verbose=verbose, threading=threading, ierr=ierr, ignore_axis_atts=ignore_axis_atts, & + correct_leap_year_inconsistency=correct_leap_year_inconsistency) + endif + +end function init_extern_field + +end module MOM_interp_infra diff --git a/config_src/infra/FMS1/MOM_io_infra.F90 b/config_src/infra/FMS1/MOM_io_infra.F90 new file mode 100644 index 0000000000..f956f9fa51 --- /dev/null +++ b/config_src/infra/FMS1/MOM_io_infra.F90 @@ -0,0 +1,989 @@ +!> This module contains a thin inteface to mpp and fms I/O code +module MOM_io_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use MOM_domain_infra, only : MOM_domain_type, rescale_comp_data, AGRID, BGRID_NE, CGRID_NE +use MOM_domain_infra, only : domain2d, domain1d, CENTER, CORNER, NORTH_FACE, EAST_FACE +use MOM_error_infra, only : MOM_error=>MOM_err, NOTE, FATAL, WARNING + +use fms_mod, only : write_version_number, open_namelist_file, check_nml_error +use fms_io_mod, only : file_exist, field_exist, field_size, read_data +use fms_io_mod, only : fms_io_exit, get_filename_appendix +use mpp_io_mod, only : mpp_open, mpp_close, mpp_flush +use mpp_io_mod, only : mpp_write_meta, mpp_write, mpp_read +use mpp_io_mod, only : mpp_get_atts, mpp_attribute_exist +use mpp_io_mod, only : mpp_get_axes, axistype, mpp_get_axis_data +use mpp_io_mod, only : mpp_get_fields, fieldtype +use mpp_io_mod, only : mpp_get_info, mpp_get_times +use mpp_io_mod, only : mpp_io_init +use mpp_mod, only : stdout_if_root=>stdout +! These are encoding constants. +use mpp_io_mod, only : APPEND_FILE=>MPP_APPEND, WRITEONLY_FILE=>MPP_WRONLY +use mpp_io_mod, only : OVERWRITE_FILE=>MPP_OVERWR, READONLY_FILE=>MPP_RDONLY +use mpp_io_mod, only : NETCDF_FILE=>MPP_NETCDF, ASCII_FILE=>MPP_ASCII +use mpp_io_mod, only : MULTIPLE=>MPP_MULTI, SINGLE_FILE=>MPP_SINGLE +use mpp_mod, only : lowercase +use iso_fortran_env, only : int64 + +implicit none ; private + +! These interfaces are actually implemented or have explicit interfaces in this file. +public :: open_file, open_ASCII_file, file_is_open, close_file, flush_file, file_exists +public :: get_file_info, get_file_fields, get_file_times, get_filename_suffix +public :: read_field, read_vector, write_metadata, write_field +public :: field_exists, get_field_atts, get_field_size, get_axis_data, read_field_chksum +public :: io_infra_init, io_infra_end, MOM_namelist_file, check_namelist_error, write_version +public :: stdout_if_root +! These types are inherited from underlying infrastructure code, to act as containers for +! information about fields and axes, respectively, and are opaque to this module. +public :: fieldtype, axistype +! These are encoding constant parmeters. +public :: ASCII_FILE, NETCDF_FILE, SINGLE_FILE, MULTIPLE +public :: APPEND_FILE, READONLY_FILE, OVERWRITE_FILE, WRITEONLY_FILE +public :: CENTER, CORNER, NORTH_FACE, EAST_FACE + +!> Indicate whether a file exists, perhaps with domain decomposition +interface file_exists + module procedure FMS_file_exists + module procedure MOM_file_exists +end interface + +!> Open a file (or fileset) for parallel or single-file I/). 
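As a usage sketch for the external-field interface of MOM_interp_infra above: a field is registered once with init_extern_field and then interpolated to the model time on each call. The subroutine, file name, variable name, and array shape below are hypothetical.

  subroutine example_time_interp(ocean_domain, Time, sst)
    use MOM_interp_infra, only : init_extern_field, time_interp_extern
    use MOM_domain_infra, only : MOM_domain_type
    use MOM_time_manager, only : time_type
    type(MOM_domain_type), intent(in)    :: ocean_domain  ! The model's domain decomposition
    type(time_type),       intent(in)    :: Time          ! The current model time
    real, dimension(:,:),  intent(inout) :: sst           ! SST interpolated to the model time
    integer, save :: sst_id = -1  ! Handle returned by init_extern_field

    if (sst_id < 0) sst_id = init_extern_field("INPUT/sst_monthly.nc", "SST", &
                                               MOM_domain=ocean_domain)
    call time_interp_extern(sst_id, Time, sst)
  end subroutine example_time_interp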
+interface open_file + module procedure open_file_type, open_file_unit +end interface open_file + +!> Read a data field from a file +interface read_field + module procedure read_field_4d + module procedure read_field_3d + module procedure read_field_2d, read_field_2d_region + module procedure read_field_1d, read_field_1d_int + module procedure read_field_0d, read_field_0d_int +end interface read_field + +!> Write a registered field to an output file +interface write_field + module procedure write_field_4d + module procedure write_field_3d + module procedure write_field_2d + module procedure write_field_1d + module procedure write_field_0d + module procedure MOM_write_axis +end interface write_field + +!> Read a pair of data fields representing the two components of a vector from a file +interface read_vector + module procedure MOM_read_vector_3d + module procedure MOM_read_vector_2d +end interface read_vector + +!> Write metadata about a variable or axis to a file and store it for later reuse +interface write_metadata + module procedure write_metadata_axis, write_metadata_field, write_metadata_global +end interface write_metadata + +!> Close a file (or fileset). If the file handle does not point to an open file, +!! close_file simply returns without doing anything. +interface close_file + module procedure close_file_type, close_file_unit +end interface close_file + +!> Ensure that the output stream associated with a file handle is fully sent to disk +interface flush_file + module procedure flush_file_type, flush_file_unit +end interface flush_file + +!> Type for holding a handle to an open file and related information +type, public :: file_type ; private + integer :: unit = -1 !< The framework identfier or netCDF unit number of an output file + character(len=:), allocatable :: filename !< The path to this file, if it is open + logical :: open_to_read = .false. !< If true, this file or fileset can be read + logical :: open_to_write = .false. !< If true, this file or fileset can be written to +end type file_type + +contains + +!> Reads the checksum value for a field that was recorded in a file, along with a flag indicating +!! whether the file contained a valid checksum for this field. +subroutine read_field_chksum(field, chksum, valid_chksum) + type(fieldtype), intent(in) :: field !< The field whose checksum attribute is to be read. + integer(kind=int64), intent(out) :: chksum !< The checksum for the field. + logical, intent(out) :: valid_chksum !< If true, chksum has been successfully read. + ! Local variables + integer(kind=int64), dimension(3) :: checksum_file + + checksum_file(:) = -1 + valid_chksum = mpp_attribute_exist(field, "checksum") + if (valid_chksum) then + call get_field_atts(field, checksum=checksum_file) + chksum = checksum_file(1) + else + chksum = -1 + endif +end subroutine read_field_chksum + +!> Returns true if the named file or its domain-decomposed variant exists. +logical function MOM_file_exists(filename, MOM_Domain) + character(len=*), intent(in) :: filename !< The name of the file being inquired about + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + +! This function uses the fms_io function file_exist to determine whether +! a named file (or its decomposed variant) exists. + + MOM_file_exists = file_exist(filename, MOM_Domain%mpp_domain) + +end function MOM_file_exists + +!> Returns true if the named file or its domain-decomposed variant exists. 
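For instance, a caller might guard a read with this interface roughly as follows (the wrapping subroutine and the error message are illustrative):

  subroutine example_require_file(filename)
    use MOM_io_infra, only : file_exists
    use MOM_error_infra, only : MOM_err, FATAL
    character(len=*), intent(in) :: filename  ! A file that must be present

    if (.not. file_exists(filename)) &
      call MOM_err(FATAL, "example_require_file: "//trim(filename)//" was not found.")
  end subroutine example_require_file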
+logical function FMS_file_exists(filename) + character(len=*), intent(in) :: filename !< The name of the file being inquired about + ! This function uses the fms_io function file_exist to determine whether + ! a named file (or its decomposed variant) exists. + + FMS_file_exists = file_exist(filename) +end function FMS_file_exists + +!> indicates whether an I/O handle is attached to an open file +logical function file_is_open(IO_handle) + type(file_type), intent(in) :: IO_handle !< Handle to a file to inquire about + + file_is_open = (IO_handle%unit >= 0) +end function file_is_open + +!> closes a file (or fileset). If the file handle does not point to an open file, +!! close_file_type simply returns without doing anything. +subroutine close_file_type(IO_handle) + type(file_type), intent(inout) :: IO_handle !< The I/O handle for the file to be closed + + call mpp_close(IO_handle%unit) + if (allocated(IO_handle%filename)) deallocate(IO_handle%filename) + IO_handle%open_to_read = .false. ; IO_handle%open_to_write = .false. +end subroutine close_file_type + +!> closes a file. If the unit does not point to an open file, +!! close_file_unit simply returns without doing anything. +subroutine close_file_unit(unit) + integer, intent(inout) :: unit !< The I/O unit for the file to be closed + + call mpp_close(unit) +end subroutine close_file_unit + +!> Ensure that the output stream associated with a file handle is fully sent to disk. +subroutine flush_file_type(file) + type(file_type), intent(in) :: file !< The I/O handle for the file to flush + + call mpp_flush(file%unit) +end subroutine flush_file_type + +!> Ensure that the output stream associated with a unit is fully sent to disk. +subroutine flush_file_unit(unit) + integer, intent(in) :: unit !< The I/O unit for the file to flush + + call mpp_flush(unit) +end subroutine flush_file_unit + +!> Initialize the underlying I/O infrastructure +subroutine io_infra_init(maxunits) + integer, optional, intent(in) :: maxunits !< An optional maximum number of file + !! unit numbers that can be used. + call mpp_io_init(maxunit=maxunits) +end subroutine io_infra_init + +!> Gracefully close out and terminate the underlying I/O infrastructure +subroutine io_infra_end() + call fms_io_exit() +end subroutine io_infra_end + +!> Open a single namelist file that is potentially readable by all PEs. +function MOM_namelist_file(file) result(unit) + character(len=*), optional, intent(in) :: file !< The file to open, by default "input.nml". + integer :: unit !< The opened unit number of the namelist file + unit = open_namelist_file(file) +end function MOM_namelist_file + +!> Checks the iostat argument that is returned after reading a namelist variable and writes a +!! message if there is an error. 
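A typical namelist read built from these two wrappers might look like the sketch below; the namelist group name and its single variable are hypothetical.

  subroutine example_read_namelist()
    use MOM_io_infra, only : MOM_namelist_file, check_namelist_error
    integer :: unit, io_status
    logical :: debug = .false.   ! A hypothetical namelist-controlled flag
    namelist /example_nml/ debug

    unit = MOM_namelist_file()   ! Opens "input.nml" by default
    read(unit, nml=example_nml, iostat=io_status)
    call check_namelist_error(io_status, "example_nml")
  end subroutine example_read_namelist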
+subroutine check_namelist_error(IOstat, nml_name) + integer, intent(in) :: IOstat !< An I/O status field from a namelist read call + character(len=*), intent(in) :: nml_name !< The name of the namelist + integer :: ierr + ierr = check_nml_error(IOstat, nml_name) +end subroutine check_namelist_error + +!> Write a file version number to the log file or other output file +subroutine write_version(version, tag, unit) + character(len=*), intent(in) :: version !< A string that contains the routine name and version + character(len=*), optional, intent(in) :: tag !< A tag name to add to the message + integer, optional, intent(in) :: unit !< An alternate unit number for output + + call write_version_number(version, tag, unit) +end subroutine write_version + +!> open_file opens a file for parallel or single-file I/O. +subroutine open_file_unit(unit, filename, action, form, threading, fileset, nohdrs, domain, MOM_domain) + integer, intent(out) :: unit !< The I/O unit for the opened file + character(len=*), intent(in) :: filename !< The name of the file being opened + integer, optional, intent(in) :: action !< A flag indicating whether the file can be read + !! or written to and how to handle existing files. + integer, optional, intent(in) :: form !< A flag indicating the format of a new file. The + !! default is ASCII_FILE, but NETCDF_FILE is also common. + integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE) + !! or multiple PEs (MULTIPLE) participate in I/O. + !! With the default, the root PE does I/O. + integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due + !! to threading=MULTIPLE write to the same file (SINGLE_FILE) + !! or to one file per PE (MULTIPLE, the default). + logical, optional, intent(in) :: nohdrs !< If nohdrs is .TRUE., headers are not written to + !! ASCII files. The default is .false. + type(domain2d), optional, intent(in) :: domain !< A domain2d type that describes the decomposition + type(MOM_domain_type), optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + + if (present(MOM_Domain)) then + call mpp_open(unit, filename, action=action, form=form, threading=threading, fileset=fileset, & + nohdrs=nohdrs, domain=MOM_Domain%mpp_domain) + else + call mpp_open(unit, filename, action=action, form=form, threading=threading, fileset=fileset, & + nohdrs=nohdrs, domain=domain) + endif +end subroutine open_file_unit + +!> open_file opens a file for parallel or single-file I/O. +subroutine open_file_type(IO_handle, filename, action, MOM_domain, threading, fileset) + type(file_type), intent(inout) :: IO_handle !< The handle for the opened file + character(len=*), intent(in) :: filename !< The path name of the file being opened + integer, optional, intent(in) :: action !< A flag indicating whether the file can be read + !! or written to and how to handle existing files. + !! The default is WRITE_ONLY. + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE) + !! or multiple PEs (MULTIPLE) participate in I/O. + !! With the default, the root PE does I/O. + integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due + !! to threading=MULTIPLE write to the same file (SINGLE_FILE) + !! or to one file per PE (MULTIPLE, the default). 
+ + if (present(MOM_Domain)) then + call mpp_open(IO_handle%unit, filename, action=action, form=NETCDF_FILE, threading=threading, & + fileset=fileset, domain=MOM_Domain%mpp_domain) + else + call mpp_open(IO_handle%unit, filename, action=action, form=NETCDF_FILE, threading=threading, & + fileset=fileset) + endif + IO_handle%filename = trim(filename) + if (present(action)) then + if (action == READONLY_FILE) then + IO_handle%open_to_read = .true. ; IO_handle%open_to_write = .false. + else + IO_handle%open_to_read = .false. ; IO_handle%open_to_write = .true. + endif + else + IO_handle%open_to_read = .false. ; IO_handle%open_to_write = .true. + endif + +end subroutine open_file_type + +!> open_file opens an ascii file for parallel or single-file I/O using Fortran read and write calls. +subroutine open_ASCII_file(unit, file, action, threading, fileset) + integer, intent(out) :: unit !< The I/O unit for the opened file + character(len=*), intent(in) :: file !< The name of the file being opened + integer, optional, intent(in) :: action !< A flag indicating whether the file can be read + !! or written to and how to handle existing files. + integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE) + !! or multiple PEs (MULTIPLE) participate in I/O. + !! With the default, the root PE does I/O. + integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due + !! to threading=MULTIPLE write to the same file (SINGLE_FILE) + !! or to one file per PE (MULTIPLE, the default). + + call mpp_open(unit, file, action=action, form=ASCII_FILE, threading=threading, fileset=fileset, & + nohdrs=.true.) + +end subroutine open_ASCII_file + + +!> Provide a string to append to filenames, to differentiate ensemble members, for example. +subroutine get_filename_suffix(suffix) + character(len=*), intent(out) :: suffix !< A string to append to filenames + + call get_filename_appendix(suffix) +end subroutine get_filename_suffix + + +!> Get information about the number of dimensions, variables and time levels +!! in the file associated with an open file unit +subroutine get_file_info(IO_handle, ndim, nvar, ntime) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O + integer, optional, intent(out) :: ndim !< The number of dimensions in the file + integer, optional, intent(out) :: nvar !< The number of variables in the file + integer, optional, intent(out) :: ntime !< The number of time levels in the file + + ! Local variables + integer :: ndims, nvars, natts, ntimes + + call mpp_get_info(IO_handle%unit, ndims, nvars, natts, ntimes ) + + if (present(ndim)) ndim = ndims + if (present(nvar)) nvar = nvars + if (present(ntime)) ntime = ntimes + +end subroutine get_file_info + + +!> Get the times of records from a file + !### Modify this to also convert to time_type, using information about the dimensions? +subroutine get_file_times(IO_handle, time_values, ntime) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O + real, allocatable, dimension(:), intent(inout) :: time_values !< The real times for the records in file. 
+ integer, optional, intent(out) :: ntime !< The number of time levels in the file + + integer :: ntimes + + if (allocated(time_values)) deallocate(time_values) + call get_file_info(IO_handle, ntime=ntimes) + if (present(ntime)) ntime = ntimes + if (ntimes > 0) then + allocate(time_values(ntimes)) + call mpp_get_times(IO_handle%unit, time_values) + endif +end subroutine get_file_times + +!> Set up the field information (e.g., names and metadata) for all of the variables in a file. The +!! argument fields must be allocated with a size that matches the number of variables in a file. +subroutine get_file_fields(IO_handle, fields) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O + type(fieldtype), dimension(:), intent(inout) :: fields !< Field-type descriptions of all of + !! the variables in a file. + call mpp_get_fields(IO_handle%unit, fields) +end subroutine get_file_fields + +!> Extract information from a field type, as stored or as found in a file +subroutine get_field_atts(field, name, units, longname, checksum) + type(fieldtype), intent(in) :: field !< The field to extract information from + character(len=*), optional, intent(out) :: name !< The variable name + character(len=*), optional, intent(out) :: units !< The units of the variable + character(len=*), optional, intent(out) :: longname !< The long name of the variable + integer(kind=int64), dimension(:), & + optional, intent(out) :: checksum !< The checksums of the variable in a file + call mpp_get_atts(field, name=name, units=units, longname=longname, checksum=checksum) +end subroutine get_field_atts + +!> Field_exists returns true if the field indicated by field_name is present in the +!! file file_name. If file_name does not exist, it returns false. +function field_exists(filename, field_name, domain, no_domain, MOM_domain) + character(len=*), intent(in) :: filename !< The name of the file being inquired about + character(len=*), intent(in) :: field_name !< The name of the field being sought + type(domain2d), target, optional, intent(in) :: domain !< A domain2d type that describes the decomposition + logical, optional, intent(in) :: no_domain !< This file does not use domain decomposition + type(MOM_domain_type), optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + logical :: field_exists !< True if filename exists and field_name is in filename + + if (present(MOM_domain)) then + field_exists = field_exist(filename, field_name, domain=MOM_domain%mpp_domain, no_domain=no_domain) + else + field_exists = field_exist(filename, field_name, domain=domain, no_domain=no_domain) + endif + +end function field_exists + +!> Given filename and fieldname, this subroutine returns the size of the field in the file +subroutine get_field_size(filename, fieldname, sizes, field_found, no_domain) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The name of the variable whose sizes are returned + integer, dimension(:), intent(inout) :: sizes !< The sizes of the variable in each dimension + logical, optional, intent(out) :: field_found !< This indicates whether the field was found in + !! the input file. Without this argument, there + !! is a fatal error if the field is not found. + logical, optional, intent(in) :: no_domain !< If present and true, do not check for file + !! 
names with an appended tile number + + call field_size(filename, fieldname, sizes, field_found=field_found, no_domain=no_domain) + +end subroutine get_field_size + +!> Extracts and returns the axis data stored in an axistype. +subroutine get_axis_data( axis, dat ) + type(axistype), intent(in) :: axis !< An axis type + real, dimension(:), intent(out) :: dat !< The data in the axis variable + + call mpp_get_axis_data( axis, dat ) +end subroutine get_axis_data + +!> This routine uses the fms_io subroutine read_data to read a scalar named +!! "fieldname" from a single or domain-decomposed file "filename". +subroutine read_field_0d(filename, fieldname, data, timelevel, scale, MOM_Domain, & + global_file, file_may_be_4d) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, intent(inout) :: data !< The 1-dimensional array into which the data + integer, optional, intent(in) :: timelevel !< The time level in the file to read + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + logical, optional, intent(in) :: global_file !< If true, read from a single global file + logical, optional, intent(in) :: file_may_be_4d !< If true, this file may have 4-d arrays, + !! in which case a more elaborate set of calls + !! is needed to read it due to FMS limitations. + + ! Local variables + character(len=80) :: varname ! The name of a variable in the file + type(fieldtype), allocatable :: fields(:) ! An array of types describing all the variables in the file + logical :: use_fms_read_data, file_is_global + integer :: n, unit, ndim, nvar, natt, ntime + + use_fms_read_data = .true. ; if (present(file_may_be_4d)) use_fms_read_data = .not.file_may_be_4d + file_is_global = .true. ; if (present(global_file)) file_is_global = global_file + + if (.not.use_fms_read_data) then + if (file_is_global) then + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=SINGLE_FILE) !, domain=MOM_Domain%mpp_domain ) + else + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=MULTIPLE, domain=MOM_Domain%mpp_domain ) + endif + call mpp_get_info(unit, ndim, nvar, natt, ntime) + allocate(fields(nvar)) + call mpp_get_fields(unit, fields(1:nvar)) + do n=1, nvar + call mpp_get_atts(fields(n), name=varname) + if (lowercase(trim(varname)) == lowercase(trim(fieldname))) then + ! Maybe something should be done depending on the value of ntime. + call mpp_read(unit, fields(n), data, timelevel) + exit + endif + enddo + + deallocate(fields) + call mpp_close(unit) + elseif (present(MOM_Domain)) then + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, timelevel=timelevel) + else + call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + data = scale*data + endif ; endif +end subroutine read_field_0d + +!> This routine uses the fms_io subroutine read_data to read a 1-D data field named +!! "fieldname" from a single or domain-decomposed file "filename". 
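A sketch of the generic read_field interface in use, reading a global 1-d record and a domain-decomposed 2-d field; the subroutine, file names, variable names, and assumed-shape arrays are illustrative.

  subroutine example_read_fields(ocean_domain, dz, depth)
    use MOM_io_infra, only : read_field
    use MOM_domain_infra, only : MOM_domain_type
    type(MOM_domain_type), intent(in)    :: ocean_domain  ! The model's domain decomposition
    real, dimension(:),    intent(inout) :: dz     ! Layer thicknesses read from a global file
    real, dimension(:,:),  intent(inout) :: depth  ! Bathymetry on the local computational domain

    call read_field("INPUT/vgrid.nc", "dz", dz)
    call read_field("INPUT/topog.nc", "depth", depth, ocean_domain)
  end subroutine example_read_fields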
+subroutine read_field_1d(filename, fieldname, data, timelevel, scale, MOM_Domain, & + global_file, file_may_be_4d) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:), intent(inout) :: data !< The 1-dimensional array into which the data + integer, optional, intent(in) :: timelevel !< The time level in the file to read + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before they are returned. + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + logical, optional, intent(in) :: global_file !< If true, read from a single global file + logical, optional, intent(in) :: file_may_be_4d !< If true, this file may have 4-d arrays, + !! in which case a more elaborate set of calls + !! is needed to read it due to FMS limitations. + + ! Local variables + character(len=80) :: varname ! The name of a variable in the file + type(fieldtype), allocatable :: fields(:) ! An array of types describing all the variables in the file + logical :: use_fms_read_data, file_is_global + integer :: n, unit, ndim, nvar, natt, ntime + + use_fms_read_data = .true. ; if (present(file_may_be_4d)) use_fms_read_data = .not.file_may_be_4d + file_is_global = .true. ; if (present(global_file)) file_is_global = global_file + + if (.not.use_fms_read_data) then + if (file_is_global) then + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=SINGLE_FILE) !, domain=MOM_Domain%mpp_domain ) + else + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=MULTIPLE, domain=MOM_Domain%mpp_domain ) + endif + call mpp_get_info(unit, ndim, nvar, natt, ntime) + allocate(fields(nvar)) + call mpp_get_fields(unit, fields(1:nvar)) + do n=1, nvar + call mpp_get_atts(fields(n), name=varname) + if (lowercase(trim(varname)) == lowercase(trim(fieldname))) then + call MOM_error(NOTE, "Reading 1-d variable "//trim(fieldname)//" from file "//trim(filename)) + ! Maybe something should be done depending on the value of ntime. + call mpp_read(unit, fields(n), data, timelevel) + exit + endif + enddo + if ((n == nvar+1) .or. (nvar < 1)) call MOM_error(WARNING, & + "read_field apparently did not find 1-d variable "//trim(fieldname)//" in file "//trim(filename)) + + deallocate(fields) + call mpp_close(unit) + elseif (present(MOM_Domain)) then + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, timelevel=timelevel) + else + call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + data(:) = scale*data(:) + endif ; endif +end subroutine read_field_1d + +!> This routine uses the fms_io subroutine read_data to read a distributed +!! 2-D data field named "fieldname" from file "filename". Valid values for +!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE. +subroutine read_field_2d(filename, fieldname, data, MOM_Domain, & + timelevel, position, scale, global_file, file_may_be_4d) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:), intent(inout) :: data !< The 2-dimensional array into which the data + !! 
should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is located + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + logical, optional, intent(in) :: global_file !< If true, read from a single global file + logical, optional, intent(in) :: file_may_be_4d !< If true, this file may have 4-d arrays, + !! in which case a more elaborate set of calls + !! is needed to read it due to FMS limitations. + + ! Local variables + character(len=80) :: varname ! The name of a variable in the file + type(fieldtype), allocatable :: fields(:) ! An array of types describing all the variables in the file + logical :: use_fms_read_data, file_is_global + integer :: n, unit, ndim, nvar, natt, ntime + + use_fms_read_data = .true. ; if (present(file_may_be_4d)) use_fms_read_data = .not.file_may_be_4d + file_is_global = .true. ; if (present(global_file)) file_is_global = global_file + + if (use_fms_read_data) then + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=position) + else + if (file_is_global) then + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=SINGLE_FILE) !, domain=MOM_Domain%mpp_domain ) + else + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=MULTIPLE, domain=MOM_Domain%mpp_domain ) + endif + call mpp_get_info(unit, ndim, nvar, natt, ntime) + allocate(fields(nvar)) + call mpp_get_fields(unit, fields(1:nvar)) + do n=1, nvar + call mpp_get_atts(fields(n), name=varname) + if (lowercase(trim(varname)) == lowercase(trim(fieldname))) then + call MOM_error(NOTE, "Reading 2-d variable "//trim(fieldname)//" from file "//trim(filename)) + ! Maybe something should be done depending on the value of ntime. + call mpp_read(unit, fields(n), MOM_Domain%mpp_domain, data, timelevel) + exit + endif + enddo + if ((n == nvar+1) .or. (nvar < 1)) call MOM_error(WARNING, & + "read_field apparently did not find 2-d variable "//trim(fieldname)//" in file "//trim(filename)) + + deallocate(fields) + call mpp_close(unit) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, data, scale) + endif ; endif +end subroutine read_field_2d + +!> This routine uses the fms_io subroutine read_data to read a region from a distributed or +!! global 2-D data field named "fieldname" from file "filename". +subroutine read_field_2d_region(filename, fieldname, data, start, nread, MOM_domain, & + no_domain, scale) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:), intent(inout) :: data !< The 2-dimensional array into which the data + !! should be read + integer, dimension(:), intent(in) :: start !< The starting index to read in each of 4 + !! dimensions. For this 2-d read, the 3rd + !! and 4th values are always 1. + integer, dimension(:), intent(in) :: nread !< The number of points to read in each of 4 + !! dimensions. For this 2-d read, the 3rd + !! and 4th values are always 1. 
+ type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + logical, optional, intent(in) :: no_domain !< If present and true, this variable does not + !! use domain decomposion. + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + + if (present(MOM_Domain)) then + call read_data(filename, fieldname, data, start, nread, domain=MOM_Domain%mpp_domain, & + no_domain=no_domain) + else + call read_data(filename, fieldname, data, start, nread, no_domain=no_domain) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + if (present(MOM_Domain)) then + call rescale_comp_data(MOM_Domain, data, scale) + else + ! Dangerously rescale the whole array + data(:,:) = scale*data(:,:) + endif + endif ; endif +end subroutine read_field_2d_region + +!> This routine uses the fms_io subroutine read_data to read a distributed +!! 3-D data field named "fieldname" from file "filename". Valid values for +!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE. +subroutine read_field_3d(filename, fieldname, data, MOM_Domain, & + timelevel, position, scale, global_file, file_may_be_4d) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:,:), intent(inout) :: data !< The 3-dimensional array into which the data + !! should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is located + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + logical, optional, intent(in) :: global_file !< If true, read from a single global file + logical, optional, intent(in) :: file_may_be_4d !< If true, this file may have 4-d arrays, + !! in which case a more elaborate set of calls + !! is needed to read it due to FMS limitations. + + ! Local variables + character(len=80) :: varname ! The name of a variable in the file + type(fieldtype), allocatable :: fields(:) ! An array of types describing all the variables in the file + logical :: use_fms_read_data, file_is_global + integer :: n, unit, ndim, nvar, natt, ntime + + use_fms_read_data = .true. ; if (present(file_may_be_4d)) use_fms_read_data = .not.file_may_be_4d + file_is_global = .true. ; if (present(global_file)) file_is_global = global_file + + if (use_fms_read_data) then + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=position) + else + if (file_is_global) then + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=SINGLE_FILE) !, domain=MOM_Domain%mpp_domain ) + else + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=MULTIPLE, domain=MOM_Domain%mpp_domain ) + endif + call mpp_get_info(unit, ndim, nvar, natt, ntime) + allocate(fields(nvar)) + call mpp_get_fields(unit, fields(1:nvar)) + do n=1, nvar + call mpp_get_atts(fields(n), name=varname) + if (lowercase(trim(varname)) == lowercase(trim(fieldname))) then + call MOM_error(NOTE, "Reading 3-d variable "//trim(fieldname)//" from file "//trim(filename)) + ! Maybe something should be done depending on the value of ntime. 
+ call mpp_read(unit, fields(n), MOM_Domain%mpp_domain, data, timelevel) + exit + endif + enddo + if ((n == nvar+1) .or. (nvar < 1)) call MOM_error(WARNING, & + "read_field apparently did not find 3-d variable "//trim(fieldname)//" in file "//trim(filename)) + + deallocate(fields) + call mpp_close(unit) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, data, scale) + endif ; endif +end subroutine read_field_3d + +!> This routine uses the fms_io subroutine read_data to read a distributed +!! 4-D data field named "fieldname" from file "filename". Valid values for +!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE. +subroutine read_field_4d(filename, fieldname, data, MOM_Domain, & + timelevel, position, scale, global_file) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:,:,:), intent(inout) :: data !< The 4-dimensional array into which the data + !! should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is located + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + logical, optional, intent(in) :: global_file !< If true, read from a single global file + + ! Local variables + character(len=80) :: varname ! The name of a variable in the file + type(fieldtype), allocatable :: fields(:) ! An array of types describing all the variables in the file + logical :: use_fms_read_data, file_is_global + integer :: n, unit, ndim, nvar, natt, ntime + integer :: is, ie, js, je + + ! This single call does not work for a 4-d array due to FMS limitations, so multiple calls are + ! needed. + ! call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, & + ! timelevel=timelevel, position=position) + + file_is_global = .true. ; if (present(global_file)) file_is_global = global_file + + if (file_is_global) then + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=SINGLE_FILE) !, domain=MOM_Domain%mpp_domain ) + else + call mpp_open(unit, trim(filename), form=NETCDF_FILE, action=READONLY_FILE, & + threading=MULTIPLE, fileset=MULTIPLE, domain=MOM_Domain%mpp_domain ) + endif + call mpp_get_info(unit, ndim, nvar, natt, ntime) + allocate(fields(nvar)) + call mpp_get_fields(unit, fields(1:nvar)) + do n=1, nvar + call mpp_get_atts(fields(n), name=varname) + if (lowercase(trim(varname)) == lowercase(trim(fieldname))) then + call MOM_error(NOTE, "Reading 4-d variable "//trim(fieldname)//" from file "//trim(filename)) + ! Maybe something should be done depending on the value of ntime. + call mpp_read(unit, fields(n), MOM_Domain%mpp_domain, data, timelevel) + exit + endif + enddo + if ((n == nvar+1) .or. (nvar < 1)) call MOM_error(WARNING, & + "read_field apparently did not find 4-d variable "//trim(fieldname)//" in file "//trim(filename)) + + deallocate(fields) + call mpp_close(unit) + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, data, scale) + endif ; endif +end subroutine read_field_4d + +!> This routine uses the fms_io subroutine read_data to read a scalar integer +!! data field named "fieldname" from file "filename". 
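The 3-d variant follows the same pattern, with optional time-level selection and rescaling on the computational domain; the file, variable name, and scale factor below are illustrative.

  subroutine example_read_3d(ocean_domain, temp)
    use MOM_io_infra, only : read_field
    use MOM_domain_infra, only : MOM_domain_type
    type(MOM_domain_type),   intent(in)    :: ocean_domain  ! The model's domain decomposition
    real, dimension(:,:,:),  intent(inout) :: temp          ! A tracer field on the local domain

    ! Read the second time record and rescale the data on the computational domain.
    call read_field("INPUT/temp_ic.nc", "ptemp", temp, ocean_domain, timelevel=2, scale=0.001)
  end subroutine example_read_3d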
+subroutine read_field_0d_int(filename, fieldname, data, timelevel) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + integer, intent(inout) :: data !< The 1-dimensional array into which the data + integer, optional, intent(in) :: timelevel !< The time level in the file to read + + call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.) +end subroutine read_field_0d_int + +!> This routine uses the fms_io subroutine read_data to read a 1-D integer +!! data field named "fieldname" from file "filename". +subroutine read_field_1d_int(filename, fieldname, data, timelevel) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + integer, dimension(:), intent(inout) :: data !< The 1-dimensional array into which the data + integer, optional, intent(in) :: timelevel !< The time level in the file to read + + call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.) +end subroutine read_field_1d_int + + +!> This routine uses the fms_io subroutine read_data to read a pair of distributed +!! 2-D data fields with names given by "[uv]_fieldname" from file "filename". Valid values for +!! "stagger" include CGRID_NE, BGRID_NE, and AGRID. +subroutine MOM_read_vector_2d(filename, u_fieldname, v_fieldname, u_data, v_data, MOM_Domain, & + timelevel, stagger, scalar_pair, scale) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: u_fieldname !< The variable name of the u data in the file + character(len=*), intent(in) :: v_fieldname !< The variable name of the v data in the file + real, dimension(:,:), intent(inout) :: u_data !< The 2 dimensional array into which the + !! u-component of the data should be read + real, dimension(:,:), intent(inout) :: v_data !< The 2 dimensional array into which the + !! v-component of the data should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: stagger !< A flag indicating where this vector is discretized + logical, optional, intent(in) :: scalar_pair !< If true, a pair of scalars are to be read + real, optional, intent(in) :: scale !< A scaling factor that the fields are multiplied + !! by before they are returned. + integer :: u_pos, v_pos + + u_pos = EAST_FACE ; v_pos = NORTH_FACE + if (present(stagger)) then + if (stagger == CGRID_NE) then ; u_pos = EAST_FACE ; v_pos = NORTH_FACE + elseif (stagger == BGRID_NE) then ; u_pos = CORNER ; v_pos = CORNER + elseif (stagger == AGRID) then ; u_pos = CENTER ; v_pos = CENTER ; endif + endif + + call read_data(filename, u_fieldname, u_data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=u_pos) + call read_data(filename, v_fieldname, v_data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=v_pos) + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, u_data, scale) + call rescale_comp_data(MOM_Domain, v_data, scale) + endif ; endif + +end subroutine MOM_read_vector_2d + +!> This routine uses the fms_io subroutine read_data to read a pair of distributed +!! 3-D data fields with names given by "[uv]_fieldname" from file "filename". Valid values for +!! 
"stagger" include CGRID_NE, BGRID_NE, and AGRID. +subroutine MOM_read_vector_3d(filename, u_fieldname, v_fieldname, u_data, v_data, MOM_Domain, & + timelevel, stagger, scalar_pair, scale) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: u_fieldname !< The variable name of the u data in the file + character(len=*), intent(in) :: v_fieldname !< The variable name of the v data in the file + real, dimension(:,:,:), intent(inout) :: u_data !< The 3 dimensional array into which the + !! u-component of the data should be read + real, dimension(:,:,:), intent(inout) :: v_data !< The 3 dimensional array into which the + !! v-component of the data should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: stagger !< A flag indicating where this vector is discretized + logical, optional, intent(in) :: scalar_pair !< If true, a pair of scalars are to be read.cretized + real, optional, intent(in) :: scale !< A scaling factor that the fields are multiplied + !! by before they are returned. + + integer :: u_pos, v_pos + + u_pos = EAST_FACE ; v_pos = NORTH_FACE + if (present(stagger)) then + if (stagger == CGRID_NE) then ; u_pos = EAST_FACE ; v_pos = NORTH_FACE + elseif (stagger == BGRID_NE) then ; u_pos = CORNER ; v_pos = CORNER + elseif (stagger == AGRID) then ; u_pos = CENTER ; v_pos = CENTER ; endif + endif + + call read_data(filename, u_fieldname, u_data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=u_pos) + call read_data(filename, v_fieldname, v_data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=v_pos) + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, u_data, scale) + call rescale_comp_data(MOM_Domain, v_data, scale) + endif ; endif + +end subroutine MOM_read_vector_3d + + +!> Write a 4d field to an output file. +subroutine write_field_4d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:,:,:), intent(inout) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + + call mpp_write(IO_handle%unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) +end subroutine write_field_4d + +!> Write a 3d field to an output file. 
+subroutine write_field_3d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:,:), intent(inout) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + + call mpp_write(IO_handle%unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) +end subroutine write_field_3d + +!> Write a 2d field to an output file. +subroutine write_field_2d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:), intent(inout) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + + call mpp_write(IO_handle%unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) +end subroutine write_field_2d + +!> Write a 1d field to an output file. +subroutine write_field_1d(IO_handle, field_md, field, tstamp) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + real, dimension(:), intent(in) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + + call mpp_write(IO_handle%unit, field_md, field, tstamp=tstamp) +end subroutine write_field_1d + +!> Write a 0d field to an output file. +subroutine write_field_0d(IO_handle, field_md, field, tstamp) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + real, intent(in) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + + call mpp_write(IO_handle%unit, field_md, field, tstamp=tstamp) +end subroutine write_field_0d + +!> Write the data for an axis +subroutine MOM_write_axis(IO_handle, axis) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(axistype), intent(in) :: axis !< An axis type variable with information to write + + call mpp_write(IO_handle%unit, axis) + +end subroutine MOM_write_axis + +!> Store information about an axis in a previously defined axistype and write this +!! information to the file indicated by unit. +subroutine write_metadata_axis(IO_handle, axis, name, units, longname, cartesian, sense, domain, & + data, edge_axis, calendar) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(axistype), intent(inout) :: axis !< The axistype where this information is stored. 
+ character(len=*), intent(in) :: name !< The name in the file of this axis + character(len=*), intent(in) :: units !< The units of this axis + character(len=*), intent(in) :: longname !< The long description of this axis + character(len=*), optional, intent(in) :: cartesian !< A variable indicating which direction + !! this axis corresponds with. Valid values + !! include 'X', 'Y', 'Z', 'T', and 'N' for none. + integer, optional, intent(in) :: sense !< This is 1 for axes whose values increase upward, or + !! -1 if they increase downward. + type(domain1D), optional, intent(in) :: domain !< The domain decomposion for this axis + real, dimension(:), optional, intent(in) :: data !< The coordinate values of the points on this axis + logical, optional, intent(in) :: edge_axis !< If true, this axis marks an edge of the tracer cells + character(len=*), optional, intent(in) :: calendar !< The name of the calendar used with a time axis + + call mpp_write_meta(IO_handle%unit, axis, name, units, longname, cartesian=cartesian, sense=sense, & + domain=domain, data=data, calendar=calendar) +end subroutine write_metadata_axis + +!> Store information about an output variable in a previously defined fieldtype and write this +!! information to the file indicated by unit. +subroutine write_metadata_field(IO_handle, field, axes, name, units, longname, & + pack, standard_name, checksum) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(inout) :: field !< The fieldtype where this information is stored + type(axistype), dimension(:), intent(in) :: axes !< Handles for the axis used for this variable + character(len=*), intent(in) :: name !< The name in the file of this variable + character(len=*), intent(in) :: units !< The units of this variable + character(len=*), intent(in) :: longname !< The long description of this variable + integer, optional, intent(in) :: pack !< A precision reduction factor with which the + !! variable. The default, 1, has no reduction, + !! but 2 is not uncommon. + character(len=*), optional, intent(in) :: standard_name !< The standard (e.g., CMOR) name for this variable + integer(kind=int64), dimension(:), & + optional, intent(in) :: checksum !< Checksum values that can be used to verify reads. + + + call mpp_write_meta(IO_handle%unit, field, axes, name, units, longname, & + pack=pack, standard_name=standard_name, checksum=checksum) + ! unused opt. args: min=min, max=max, fill=fill, scale=scale, add=add, & + +end subroutine write_metadata_field + +!> Write a global text attribute to a file. 
+subroutine write_metadata_global(IO_handle, name, attribute) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + character(len=*), intent(in) :: name !< The name in the file of this global attribute + character(len=*), intent(in) :: attribute !< The value of this attribute + + call mpp_write_meta(IO_handle%unit, name, cval=attribute) +end subroutine write_metadata_global + +end module MOM_io_infra diff --git a/src/framework/MOM_time_manager.F90 b/config_src/infra/FMS1/MOM_time_manager.F90 similarity index 62% rename from src/framework/MOM_time_manager.F90 rename to config_src/infra/FMS1/MOM_time_manager.F90 index 229c3ded3a..5f3279b713 100644 --- a/src/framework/MOM_time_manager.F90 +++ b/config_src/infra/FMS1/MOM_time_manager.F90 @@ -14,9 +14,6 @@ module MOM_time_manager use time_manager_mod, only : set_calendar_type, get_calendar_type use time_manager_mod, only : JULIAN, NOLEAP, THIRTY_DAY_MONTHS, GREGORIAN use time_manager_mod, only : NO_CALENDAR -use time_interp_external_mod, only : init_external_field, time_interp_external, time_interp_external_init -use time_interp_external_mod, only : get_external_field_size -use time_interp_external_mod, only : get_external_field_axes, get_external_field_missing implicit none ; private @@ -29,24 +26,18 @@ module MOM_time_manager public :: get_date, set_date, increment_date, month_name, days_in_month public :: JULIAN, NOLEAP, THIRTY_DAY_MONTHS, GREGORIAN, NO_CALENDAR public :: set_calendar_type, get_calendar_type -public :: init_external_field -public :: time_interp_external -public :: time_interp_external_init -public :: get_external_field_size -public :: get_external_field_axes -public :: get_external_field_missing contains -!> This is an alternate implementation of the FMS function real_to_time_type that is accurate over -!! a larger range of input values. With 32 bit signed integers, this version should work over the -!! entire valid range (2^31 days or ~5.8835 million years) of time_types, whereas the standard -!! version in the FMS time_manager stops working for conversions of times greater than 2^31 seconds, -!! or ~68.1 years. -function real_to_time(x, err_msg) - type(time_type) :: real_to_time !< The output time as a time_type - real, intent(in) :: x !< The input time in real seconds. - character(len=*), intent(out), optional :: err_msg !< An optional returned error message. +!> Returns a time_type version of a real time in seconds, using an alternate implementation to the +!! FMS function real_to_time_type that is accurate over a larger range of input values. With 32 bit +!! signed integers, this version should work over the entire valid range (2^31 days or ~5.8835 +!! million years) of time_types, whereas the standard version in the FMS time_manager stops working +!! for conversions of times greater than 2^31 seconds, or ~68.1 years. +type(time_type) function real_to_time(x, err_msg) +! type(time_type) :: real_to_time !< The output time as a time_type + real, intent(in) :: x !< The input time in real seconds. + character(len=*), optional, intent(out) :: err_msg !< An optional returned error message. ! 
Local variables integer :: seconds, days, ticks @@ -60,5 +51,4 @@ function real_to_time(x, err_msg) real_to_time = set_time(seconds=seconds, days=days, ticks=ticks, err_msg=err_msg) end function real_to_time - end module MOM_time_manager diff --git a/config_src/infra/FMS2/MOM_coms_infra.F90 b/config_src/infra/FMS2/MOM_coms_infra.F90 new file mode 100644 index 0000000000..555b4df119 --- /dev/null +++ b/config_src/infra/FMS2/MOM_coms_infra.F90 @@ -0,0 +1,455 @@ +!> Thin interfaces to non-domain-oriented mpp communication subroutines +module MOM_coms_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use iso_fortran_env, only : int32, int64 + +use mpp_mod, only : mpp_pe, mpp_root_pe, mpp_npes, mpp_set_root_pe +use mpp_mod, only : mpp_set_current_pelist, mpp_get_current_pelist +use mpp_mod, only : mpp_broadcast, mpp_sync, mpp_sync_self, mpp_chksum +use mpp_mod, only : mpp_sum, mpp_max, mpp_min +use memutils_mod, only : print_memuse_stats +use fms_mod, only : fms_end, fms_init + +implicit none ; private + +public :: PE_here, root_PE, num_PEs, set_rootPE, Set_PElist, Get_PElist +public :: broadcast, sum_across_PEs, min_across_PEs, max_across_PEs +public :: field_chksum, MOM_infra_init, MOM_infra_end + +! This module provides interfaces to the non-domain-oriented communication +! subroutines. + +!> Communicate an array, string or scalar from one PE to others +interface broadcast + module procedure broadcast_char, broadcast_int32_0D, broadcast_int64_0D, broadcast_int1D + module procedure broadcast_real0D, broadcast_real1D, broadcast_real2D +end interface broadcast + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +interface field_chksum + module procedure field_chksum_real_0d + module procedure field_chksum_real_1d + module procedure field_chksum_real_2d + module procedure field_chksum_real_3d + module procedure field_chksum_real_4d +end interface field_chksum + +!> Find the sum of field across PEs, and update PEs with the sums. +interface sum_across_PEs + module procedure sum_across_PEs_int4_0d + module procedure sum_across_PEs_int4_1d + module procedure sum_across_PEs_int8_0d + module procedure sum_across_PEs_int8_1d + module procedure sum_across_PEs_int8_2d + module procedure sum_across_PEs_real_0d + module procedure sum_across_PEs_real_1d + module procedure sum_across_PEs_real_2d +end interface sum_across_PEs + +!> Find the maximum value of field across PEs, and update PEs with the values. +interface max_across_PEs + module procedure max_across_PEs_int_0d + module procedure max_across_PEs_real_0d + module procedure max_across_PEs_real_1d +end interface max_across_PEs + +!> Find the minimum value of field across PEs, and update PEs with the values. +interface min_across_PEs + module procedure min_across_PEs_int_0d + module procedure min_across_PEs_real_0d + module procedure min_across_PEs_real_1d +end interface min_across_PEs + +contains + +!> Return the ID of the PE for the current process. +function PE_here() result(pe) + integer :: pe !< PE ID of the current process + pe = mpp_pe() +end function PE_here + +!> Return the ID of the root PE for the PE list of the current procss. +function root_PE() result(pe) + integer :: pe !< root PE ID + pe = mpp_root_pe() +end function root_PE + +!> Return the number of PEs for the current PE list. 
+function num_PEs() result(npes) + integer :: npes !< Number of PEs + npes = mpp_npes() +end function num_PEs + +!> Designate a PE as the root PE +subroutine set_rootPE(pe) + integer, intent(in) :: pe !< ID of the PE to be assigned as root + call mpp_set_root_pe(pe) +end subroutine + +!> Set the current PE list. If no list is provided, then the current PE list +!! is set to the list of all available PEs on the communicator. Setting the +!! list will trigger a rank synchronization unless the `no_sync` flag is set. +subroutine Set_PEList(pelist, no_sync) + integer, optional, intent(in) :: pelist(:) !< List of PEs to set for communication + logical, optional, intent(in) :: no_sync !< Do not sync after list update. + call mpp_set_current_pelist(pelist, no_sync) +end subroutine Set_PEList + +!> Retrieve the current PE list and any metadata if requested. +subroutine Get_PEList(pelist, name, commID) + integer, intent(out) :: pelist(:) !< List of PE IDs of the current PE list + character(len=*), optional, intent(out) :: name !< Name of PE list + integer, optional, intent(out) :: commID !< Communicator ID of PE list + + call mpp_get_current_pelist(pelist, name, commiD) +end subroutine Get_PEList + +!> Communicate a 1-D array of character strings from one PE to others +subroutine broadcast_char(dat, length, from_PE, PElist, blocking) + character(len=*), intent(inout) :: dat(:) !< The data to communicate and destination + integer, intent(in) :: length !< The length of each string + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_char + +!> Communicate an integer from one PE to others +subroutine broadcast_int64_0D(dat, from_PE, PElist, blocking) + integer(kind=int64), intent(inout) :: dat !< The data to communicate and destination + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. 
; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_int64_0D + + +!> Communicate an integer from one PE to others +subroutine broadcast_int32_0D(dat, from_PE, PElist, blocking) + integer(kind=int32), intent(inout) :: dat !< The data to communicate and destination + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_int32_0D + +!> Communicate a 1-D array of integers from one PE to others +subroutine broadcast_int1D(dat, length, from_PE, PElist, blocking) + integer, dimension(:), intent(inout) :: dat !< The data to communicate and destination + integer, intent(in) :: length !< The number of data elements + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_int1D + +!> Communicate a real number from one PE to others +subroutine broadcast_real0D(dat, from_PE, PElist, blocking) + real, intent(inout) :: dat !< The data to communicate and destination + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. 
; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real0D + +!> Communicate a 1-D array of reals from one PE to others +subroutine broadcast_real1D(dat, length, from_PE, PElist, blocking) + real, dimension(:), intent(inout) :: dat !< The data to communicate and destination + integer, intent(in) :: length !< The number of data elements + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real1D + +!> Communicate a 2-D array of reals from one PE to others +subroutine broadcast_real2D(dat, length, from_PE, PElist, blocking) + real, dimension(:,:), intent(inout) :: dat !< The data to communicate and destination + integer, intent(in) :: length !< The total number of data elements + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real2D + +! field_chksum wrappers + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_0d(field, pelist, mask_val) result(chksum) + real, intent(in) :: field !< Input scalar + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_0d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_1d(field, pelist, mask_val) result(chksum) + real, dimension(:), intent(in) :: field !< Input array + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_1d + +!> Compute a checksum for a field distributed over a PE list. 
If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_2d(field, pelist, mask_val) result(chksum) + real, dimension(:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_2d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_3d(field, pelist, mask_val) result(chksum) + real, dimension(:,:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_3d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_4d(field, pelist, mask_val) result(chksum) + real, dimension(:,:,:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_4d + +! sum_across_PEs wrappers + +!> Find the sum of field across PEs, and return this sum in field. +subroutine sum_across_PEs_int4_0d(field, pelist) + integer(kind=int32), intent(inout) :: field !< Value on this PE, and the sum across PEs upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, pelist) +end subroutine sum_across_PEs_int4_0d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_int4_1d(field, length, pelist) + integer(kind=int32), dimension(:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< Number of elements in field to add + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_int4_1d + +!> Find the sum of field across PEs, and return this sum in field. +subroutine sum_across_PEs_int8_0d(field, pelist) + integer(kind=int64), intent(inout) :: field !< Value on this PE, and the sum across PEs upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, pelist) +end subroutine sum_across_PEs_int8_0d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_int8_1d(field, length, pelist) + integer(kind=int64), dimension(:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< Number of elements in field to add + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_int8_1d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. 
+subroutine sum_across_PEs_int8_2d(field, length, pelist) + integer(kind=int64), & + dimension(:,:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< The total number of positions to sum, usually + !! the product of the array sizes. + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_int8_2d + +!> Find the sum of field across PEs, and return this sum in field. +subroutine sum_across_PEs_real_0d(field, pelist) + real, intent(inout) :: field !< Value on this PE, and the sum across PEs upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, pelist) +end subroutine sum_across_PEs_real_0d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_real_1d(field, length, pelist) + real, dimension(:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< Number of elements in field to add + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_real_1d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_real_2d(field, length, pelist) + real, dimension(:,:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< The total number of positions to sum, usually + !! the product of the array sizes. + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_real_2d + +! max_across_PEs wrappers + +!> Find the maximum value of field across PEs, and store this maximum in field. +subroutine max_across_PEs_int_0d(field, pelist) + integer, intent(inout) :: field !< The values to compare, the maximum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_max(field, pelist) +end subroutine max_across_PEs_int_0d + +!> Find the maximum value of field across PEs, and store this maximum in field. +subroutine max_across_PEs_real_0d(field, pelist) + real, intent(inout) :: field !< The values to compare, the maximum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_max(field, pelist) +end subroutine max_across_PEs_real_0d + +!> Find the maximum values in each position of field across PEs, and store these minima in field. +subroutine max_across_PEs_real_1d(field, length, pelist) + real, dimension(:), intent(inout) :: field !< The list of values being compared, with the + !! maxima in each position upon return + integer, intent(in) :: length !< Number of elements in field to compare + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_max(field, length, pelist) +end subroutine max_across_PEs_real_1d + +! min_across_PEs wrappers + +!> Find the minimum value of field across PEs, and store this minimum in field. +subroutine min_across_PEs_int_0d(field, pelist) + integer, intent(inout) :: field !< The values to compare, the minimum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_min(field, pelist) +end subroutine min_across_PEs_int_0d + +!> Find the minimum value of field across PEs, and store this minimum in field. 
+subroutine min_across_PEs_real_0d(field, pelist) + real, intent(inout) :: field !< The values to compare, the minimum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + call mpp_min(field, pelist) +end subroutine min_across_PEs_real_0d + +!> Find the minimum values in each position of field across PEs, and store these minima in field. +subroutine min_across_PEs_real_1d(field, length, pelist) + real, dimension(:), intent(inout) :: field !< The list of values being compared, with the + !! minima in each position upon return + integer, intent(in) :: length !< Number of elements in field to compare + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_min(field, length, pelist) +end subroutine min_across_PEs_real_1d + +!> Initialize the model framework, including PE communication over a designated communicator. +!! If no communicator ID is provided, the framework's default communicator is used. +subroutine MOM_infra_init(localcomm) + integer, optional, intent(in) :: localcomm !< Communicator ID to initialize + call fms_init(localcomm) +end subroutine + +!> This subroutine carries out all of the calls required to close out the infrastructure cleanly. +!! This should only be called in ocean-only runs, as the coupler takes care of this in coupled runs. +subroutine MOM_infra_end + call print_memuse_stats( 'Memory HiWaterMark', always=.TRUE. ) + call fms_end() +end subroutine MOM_infra_end + +end module MOM_coms_infra diff --git a/config_src/infra/FMS2/MOM_constants.F90 b/config_src/infra/FMS2/MOM_constants.F90 new file mode 100644 index 0000000000..2db177e08c --- /dev/null +++ b/config_src/infra/FMS2/MOM_constants.F90 @@ -0,0 +1,14 @@ +!> Provides a few physical constants +module MOM_constants + +! This file is part of MOM6. See LICENSE.md for the license. + +use constants_mod, only : HLV, HLF + +implicit none ; private + +!> The constant offset for converting temperatures in Kelvin to Celsius +real, public, parameter :: CELSIUS_KELVIN_OFFSET = 273.15 +public :: HLV, HLF + +end module MOM_constants diff --git a/config_src/infra/FMS2/MOM_couplertype_infra.F90 b/config_src/infra/FMS2/MOM_couplertype_infra.F90 new file mode 100644 index 0000000000..3bcccc1dc7 --- /dev/null +++ b/config_src/infra/FMS2/MOM_couplertype_infra.F90 @@ -0,0 +1,503 @@ +!> This module wraps the FMS coupler types module +module MOM_couplertype_infra + +! This file is part of MOM6. See LICENSE.md for the license. 
+ +use coupler_types_mod, only : coupler_type_spawn, coupler_type_initialized, coupler_type_destructor +use coupler_types_mod, only : coupler_type_set_diags, coupler_type_send_data, coupler_type_copy_data +use coupler_types_mod, only : coupler_type_write_chksums, coupler_type_redistribute_data +use coupler_types_mod, only : coupler_type_increment_data, coupler_type_rescale_data +use coupler_types_mod, only : coupler_type_extract_data, coupler_type_set_data +use coupler_types_mod, only : coupler_type_data_override +use coupler_types_mod, only : ind_flux, ind_alpha, ind_csurf +use coupler_types_mod, only : coupler_1d_bc_type, coupler_2d_bc_type, coupler_3d_bc_type +use atmos_ocean_fluxes_mod, only : aof_set_coupler_flux +use MOM_domain_infra, only : domain2D +use MOM_time_manager, only : time_type + +implicit none ; private + +public :: CT_spawn, CT_initialized, CT_destructor +public :: CT_set_diags, CT_send_data, CT_data_override, CT_write_chksums +public :: CT_set_data, CT_increment_data, CT_rescale_data +public :: CT_copy_data, CT_extract_data, CT_redistribute_data +public :: atmos_ocn_coupler_flux +public :: ind_flux, ind_alpha, ind_csurf +public :: coupler_1d_bc_type, coupler_2d_bc_type, coupler_3d_bc_type + +!> This is the interface to spawn one coupler_bc_type into another. +interface CT_spawn + module procedure CT_spawn_1d_2d, CT_spawn_1d_3d, CT_spawn_2d_2d, CT_spawn_2d_3d + module procedure CT_spawn_3d_2d, CT_spawn_3d_3d +end interface CT_spawn + +!> This function interface indicates whether a coupler_bc_type has been initialized. +interface CT_initialized + module procedure CT_initialized_1d, CT_initialized_2d, CT_initialized_3d +end interface CT_initialized + +!> This is the interface to deallocate any data associated with a coupler_bc_type. +interface CT_destructor + module procedure CT_destructor_1d, CT_destructor_2d +end interface CT_destructor + +!> Copy all elements of the data in either a coupler_2d_bc_type or a coupler_3d_bc_type into +!! another structure of the same or the other type. Both must have the same array sizes in common +!! dimensions, while the details of any expansion from 2d to 3d are controlled by arguments. +interface CT_copy_data + module procedure CT_copy_data_2d, CT_copy_data_3d, CT_copy_data_2d_3d +end interface CT_copy_data + +!> Increment data in all elements of one coupler_2d_bc_type or coupler_3d_bc_type with +!! the data from another. Both must have the same array sizes and rank. +interface CT_increment_data + module procedure CT_increment_data_2d, CT_increment_data_3d, CT_increment_data_2d_3d +end interface CT_increment_data + +!> Rescales the fields in the elements of a coupler_2d_bc_type by multiplying by a factor scale. +!! If scale is 0, this is a direct assignment to 0, so that NaNs will not persist. +interface CT_rescale_data + module procedure CT_rescale_data_2d, CT_rescale_data_3d +end interface CT_rescale_data + +!> Redistribute the data in all elements of one coupler_2d_bc_type or coupler_3d_bc_type into +!! another, which may be on different processors with a different decomposition. +interface CT_redistribute_data + module procedure CT_redistribute_data_2d, CT_redistribute_data_3d +end interface CT_redistribute_data + +!> Write out checksums for the elements of a coupler_2d_bc_type or coupler_3d_bc_type +interface CT_write_chksums + module procedure CT_write_chksums_2d, CT_write_chksums_3d +end interface CT_write_chksums + +contains + +!> This subroutine sets many of the parameters for calculating an atmosphere-ocean tracer flux +!! 
and returns an integer index for that flux.
+function atmos_ocn_coupler_flux(name, flux_type, implementation, param, mol_wt, &
+                                ice_restart_file, ocean_restart_file, units, caller, verbosity) &
+                                result (coupler_index)
+
+  character(len=*),              intent(in) :: name      !< A name to use for the flux
+  character(len=*),              intent(in) :: flux_type !< A string describing the type of this flux,
+                                                         !! perhaps 'air_sea_gas_flux'.
+  character(len=*),              intent(in) :: implementation !< A name describing the specific
+                                                         !! implementation of this flux, such as 'ocmip2'.
+  real, dimension(:),  optional, intent(in) :: param     !< An array of parameters used for the fluxes
+  real,                optional, intent(in) :: mol_wt    !< The molecular weight of this tracer
+  character(len=*),    optional, intent(in) :: ice_restart_file !< A sea-ice restart file to use with this flux.
+  character(len=*),    optional, intent(in) :: ocean_restart_file !< An ocean restart file to use with this flux.
+  character(len=*),    optional, intent(in) :: units     !< The units of the flux
+  character(len=*),    optional, intent(in) :: caller    !< The name of the calling routine
+  integer,             optional, intent(in) :: verbosity !< A 0-9 integer indicating a level of verbosity.
+  integer :: coupler_index !< The resulting integer handle to use for this flux in subsequent calls.
+
+  coupler_index = aof_set_coupler_flux(name, flux_type, implementation, &
+                                       param=param, mol_wt=mol_wt, ice_restart_file=ice_restart_file, &
+                                       ocean_restart_file=ocean_restart_file, &
+                                       units=units, caller=caller, verbosity=verbosity)
+
+end function atmos_ocn_coupler_flux
+
+!> Generate a 2-D coupler type using a 1-D coupler type as a template.
+subroutine CT_spawn_1d_2d(var_in, var, idim, jdim, suffix, as_needed)
+  type(coupler_1d_bc_type),   intent(in)    :: var_in !< structure from which to copy information
+  type(coupler_2d_bc_type),   intent(inout) :: var    !< structure into which to copy information
+  integer, dimension(4),      intent(in)    :: idim   !< The data and computational domain extents of
+                                                      !! the first dimension in a non-decreasing list
+  integer, dimension(4),      intent(in)    :: jdim   !< The data and computational domain extents of
+                                                      !! the second dimension in a non-decreasing list
+  character(len=*), optional, intent(in)    :: suffix !< optional suffix to make the name identifier unique
+  logical,          optional, intent(in)    :: as_needed !< Only do the spawn if the target type (var)
+                                                      !! is not set and the parent type (var_in) is set.
+
+  call coupler_type_spawn(var_in, var, idim, jdim, suffix=suffix, as_needed=as_needed)
+
+end subroutine CT_spawn_1d_2d
+
+!> Generate a 3-D coupler type using a 1-D coupler type as a template.
+subroutine CT_spawn_1d_3d(var_in, var, idim, jdim, kdim, suffix, as_needed)
+  type(coupler_1d_bc_type),   intent(in)    :: var_in !< structure from which to copy information
+  type(coupler_3d_bc_type),   intent(inout) :: var    !< structure into which to copy information
+  integer, dimension(4),      intent(in)    :: idim   !< The data and computational domain extents of
+                                                      !! the first dimension in a non-decreasing list
+  integer, dimension(4),      intent(in)    :: jdim   !< The data and computational domain extents of
+                                                      !! the second dimension in a non-decreasing list
+  integer, dimension(2),      intent(in)    :: kdim   !< The array extents of the third dimension in
+                                                      !! a non-decreasing list
+  character(len=*), optional, intent(in)    :: suffix !< optional suffix to make the name identifier unique
+  logical,          optional, intent(in)    :: as_needed !< Only do the spawn if the target type (var)
+                                                      !! is not set and the parent type (var_in) is set.
+ + call coupler_type_spawn(var_in, var, idim, jdim, kdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_1d_3d + +!> Generate one 2-D coupler type using another 2-D coupler type as a template. +subroutine CT_spawn_2d_2d(var_in, var, idim, jdim, suffix, as_needed) + type(coupler_2d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_2d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_2d_2d + +!> Generate a 3-D coupler type using a 2-D coupler type as a template. +subroutine CT_spawn_2d_3d(var_in, var, idim, jdim, kdim, suffix, as_needed) + type(coupler_2d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_3d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + integer, dimension(2), intent(in) :: kdim !< The array extents of the third dimension in + !! a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, kdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_2d_3d + +!> Generate a 2-D coupler type using a 3-D coupler type as a template. +subroutine CT_spawn_3d_2d(var_in, var, idim, jdim, suffix, as_needed) + type(coupler_3d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_2d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_3d_2d + +!> Generate a 3-D coupler type using another 3-D coupler type as a template. 
+subroutine CT_spawn_3d_3d(var_in, var, idim, jdim, kdim, suffix, as_needed) + type(coupler_3d_bc_type), intent(in) :: var_in !< structure from which to copy information + type(coupler_3d_bc_type), intent(inout) :: var !< structure into which to copy information + integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of + !! the first dimension in a non-decreasing list + integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension in a non-decreasing list + integer, dimension(2), intent(in) :: kdim !< The array extents of the third dimension in + !! a non-decreasing list + character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique + logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var) + !! is not set and the parent type (var_in) is set. + + call coupler_type_spawn(var_in, var, idim, jdim, kdim, suffix=suffix, as_needed=as_needed) + +end subroutine CT_spawn_3d_3d + +!> Copy all elements of the data in a coupler_2d_bc_type into another. Both must have the same array sizes. +subroutine CT_copy_data_2d(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice) + type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(coupler_2d_bc_type), intent(inout) :: var !< The recipient BC_type structure + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, optional, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, optional, intent(in) :: field_index !< The index of the field in the + !! boundary condition that is being copied + character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types of fluxes + !! to exclude from this copy. + character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types of fluxes + !! to include from this copy. + logical, optional, intent(in) :: pass_through_ice !< If true, only copy BCs whose + !! value of pass_through ice matches this + + call coupler_type_copy_data(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice) +end subroutine CT_copy_data_2d + +!> Copy all elements of the data in a coupler_3d_bc_type into another. Both must have the same array sizes. +subroutine CT_copy_data_3d(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice) + type(coupler_3d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(coupler_3d_bc_type), intent(inout) :: var !< The recipient BC_type structure + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, optional, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, optional, intent(in) :: field_index !< The index of the field in the + !! boundary condition that is being copied + character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types of fluxes + !! to exclude from this copy. + character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types of fluxes + !! to include from this copy. + logical, optional, intent(in) :: pass_through_ice !< If true, only copy BCs whose + !! 
value of pass_through ice matches this + + call coupler_type_copy_data(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice) +end subroutine CT_copy_data_3d + +!> Copy all elements of the data in a coupler_2d_bc_type into a coupler_3d_bc_type. +!! Both must have the same array sizes for the first two dimensions, while the extent +!! of the 3rd dimension that is being filled may be specified via optional arguments. +subroutine CT_copy_data_2d_3d(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice, ind3_start, ind3_end) + type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(coupler_3d_bc_type), intent(inout) :: var !< The recipient BC_type structure + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, optional, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, optional, intent(in) :: field_index !< The index of the field in the + !! boundary condition that is being copied + character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types of fluxes + !! to exclude from this copy. + character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types of fluxes + !! to include from this copy. + logical, optional, intent(in) :: pass_through_ice !< If true, only copy BCs whose + !! value of pass_through ice matches this + integer, optional, intent(in) :: ind3_start !< The starting value of the 3rd + !! index of the 3d type to fill in. + integer, optional, intent(in) :: ind3_end !< The ending value of the 3rd + !! index of the 3d type to fill in. + + call coupler_type_copy_data(var_in, var, halo_size, bc_index, field_index, & + exclude_flux_type, only_flux_type, pass_through_ice, ind3_start, ind3_end) +end subroutine CT_copy_data_2d_3d + +!> Increment data in all elements of one coupler_2d_bc_type with the data from another. Both +!! must have the same array sizes. +subroutine CT_increment_data_2d(var_in, var, halo_size, scale_factor, scale_prev) + type(coupler_2d_bc_type), intent(in) :: var_in !< coupler_type structure with the data to add to the other type + type(coupler_2d_bc_type), intent(inout) :: var !< The coupler_type structure whose fields are being incremented + integer, optional, intent(in) :: halo_size !< The extent of the halo to increment; 0 by default + real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added + real, optional, intent(in) :: scale_prev !< A scaling factor for the data that is already here + + call coupler_type_increment_data(var_in, var, halo_size=halo_size, scale_factor=scale_factor, & + scale_prev=scale_prev) + +end subroutine CT_increment_data_2d + +!> Increment data in all elements of one coupler_3d_bc_type with the data from another. Both +!! must have the same array sizes. 
+subroutine CT_increment_data_3d(var_in, var, halo_size, scale_factor, scale_prev, exclude_flux_type, only_flux_type) + type(coupler_3d_bc_type), intent(in) :: var_in !< coupler_type structure with the data to add to the other type + type(coupler_3d_bc_type), intent(inout) :: var !< The coupler_type structure whose fields are being incremented + integer, optional, intent(in) :: halo_size !< The extent of the halo to increment; 0 by default + real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added + real, optional, intent(in) :: scale_prev !< A scaling factor for the data that is already here + character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types + !! of fluxes to exclude from this increment. + character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types + !! of fluxes to include from this increment. + + call coupler_type_increment_data(var_in, var, halo_size=halo_size, scale_factor=scale_factor, & + scale_prev=scale_prev, exclude_flux_type=exclude_flux_type, & + only_flux_type=only_flux_type) + +end subroutine CT_increment_data_3d + +!> Rescales the fields in the elements of a coupler_2d_bc_type by multiplying by a factor scale. +!! If scale is 0, this is a direct assignment to 0, so that NaNs will not persist. +subroutine CT_rescale_data_2d(var, scale) + type(coupler_2d_bc_type), intent(inout) :: var !< The BC_type structure whose fields are being rescaled + real, intent(in) :: scale !< A scaling factor to multiply fields by + + call coupler_type_rescale_data(var, scale) + +end subroutine CT_rescale_data_2d + +!> Rescales the fields in the elements of a coupler_3d_bc_type by multiplying by a factor scale. +!! If scale is 0, this is a direct assignment to 0, so that NaNs will not persist. +subroutine CT_rescale_data_3d(var, scale) + type(coupler_3d_bc_type), intent(inout) :: var !< The BC_type structure whose fields are being rescaled + real, intent(in) :: scale !< A scaling factor to multiply fields by + + call coupler_type_rescale_data(var, scale) + +end subroutine CT_rescale_data_3d + +!> Increment data in the elements of a coupler_2d_bc_type with weighted averages of elements of a +!! coupler_3d_bc_type +subroutine CT_increment_data_2d_3d(var_in, weights, var, halo_size) + type(coupler_3d_bc_type), intent(in) :: var_in !< coupler_type structure with the data to add to the other type + real, dimension(:,:,:), intent(in) :: weights !< An array of normalized weights for the 3d-data to + !! increment the 2d-data. There is no renormalization, + !! so if the weights do not sum to 1 in the 3rd dimension + !! there may be adverse consequences! + type(coupler_2d_bc_type), intent(inout) :: var !< The coupler_type structure whose fields are being incremented + integer, optional, intent(in) :: halo_size !< The extent of the halo to increment; 0 by default + + call coupler_type_increment_data(var_in, weights, var, halo_size=halo_size) + +end subroutine CT_increment_data_2d_3d + + +!> Redistribute the data in all elements of one coupler_2d_bc_type into another, which may be on +!! different processors with a different decomposition. 
+subroutine CT_redistribute_data_2d(var_in, domain_in, var_out, domain_out, complete) + type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(domain2D), intent(in) :: domain_in !< The FMS domain for the input structure + type(coupler_2d_bc_type), intent(inout) :: var_out !< The recipient BC_type structure (data intent out) + type(domain2D), intent(in) :: domain_out !< The FMS domain for the output structure + logical, optional, intent(in) :: complete !< If true, complete the updates + + call coupler_type_redistribute_data(var_in, domain_in, var_out, domain_out, complete) +end subroutine CT_redistribute_data_2d + +!> Redistribute the data in all elements of one coupler_3d_bc_type into another, which may be on +!! different processors with a different decomposition. +subroutine CT_redistribute_data_3d(var_in, domain_in, var_out, domain_out, complete) + type(coupler_3d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy + type(domain2D), intent(in) :: domain_in !< The FMS domain for the input structure + type(coupler_3d_bc_type), intent(inout) :: var_out !< The recipient BC_type structure (data intent out) + type(domain2D), intent(in) :: domain_out !< The FMS domain for the output structure + logical, optional, intent(in) :: complete !< If true, complete the updates + + call coupler_type_redistribute_data(var_in, domain_in, var_out, domain_out, complete) +end subroutine CT_redistribute_data_3d + +!> Extract a 2d field from a coupler_2d_bc_type into a two-dimensional array. +subroutine CT_extract_data(var_in, bc_index, field_index, array_out, & + scale_factor, halo_size, idim, jdim) + type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to extract + integer, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, intent(in) :: field_index !< The index of the field in the boundary + !! condition that is being copied, or the + !! surface flux by default. + real, dimension(1:,1:), intent(out) :: array_out !< The recipient array for the field; its size + !! must match the size of the data being copied + !! unless idim and jdim are supplied. + real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, dimension(4), optional, intent(in) :: idim !< The data and computational domain extents of + !! the first dimension of the output array + !! in a non-decreasing list + integer, dimension(4), optional, intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension of the output array + !! in a non-decreasing list + call coupler_type_extract_data(var_in, bc_index, field_index, array_out, scale_factor, halo_size, idim, jdim) + +end subroutine CT_extract_data + +!> Set single 2d field in coupler_2d_bc_type from a two-dimensional array. +subroutine CT_set_data(array_in, bc_index, field_index, var, & + scale_factor, halo_size, idim, jdim) + real, dimension(1:,1:), intent(in) :: array_in !< The source array for the field; its size + !! must match the size of the data being copied + !! unless idim and jdim are supplied. + integer, intent(in) :: bc_index !< The index of the boundary condition + !! that is being copied + integer, intent(in) :: field_index !< The index of the field in the + !! boundary condition that is being set. The + !! surface concentration is set by default. 
+ type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure with the data to set + real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added + integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default + integer, dimension(4), optional, intent(in) :: idim !< The data and computational domain extents of + !! the first dimension of the output array + !! in a non-decreasing list + integer, dimension(4), optional, intent(in) :: jdim !< The data and computational domain extents of + !! the second dimension of the output array + !! in a non-decreasing list + + integer :: subfield ! An integer indicating which field to set. + + call coupler_type_set_data(array_in, bc_index, field_index, var, scale_factor, halo_size, idim, jdim) + +end subroutine CT_set_data + +!> Potentially override the values in a coupler_2d_bc_type +subroutine CT_data_override(gridname, var, time) + character(len=3), intent(in) :: gridname !< 3-character long model grid ID + type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure to override + type(time_type), intent(in) :: time !< The current model time + + call coupler_type_data_override(gridname, var, time) +end subroutine CT_data_override + +!> Register the diagnostics of a coupler_2d_bc_type +subroutine CT_set_diags(var, diag_name, axes, time) + type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure for which to register diagnostics + character(len=*), intent(in) :: diag_name !< name for diagnostic file, or blank not to register the fields + integer, dimension(:), intent(in) :: axes !< array of axes identifiers for diagnostic variable registration + type(time_type), intent(in) :: time !< model time variable for registering diagnostic field + + call coupler_type_set_diags(var, diag_name, axes, time) + +end subroutine CT_set_diags + +!> Write out all diagnostics of elements of a coupler_2d_bc_type +subroutine CT_send_data(var, Time) + type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure with the diagnostics to write + type(time_type), intent(in) :: time !< The current model time + + call coupler_type_send_data(var, Time) +end subroutine CT_send_data + +!> Write out checksums for the elements of a coupler_2d_bc_type +subroutine CT_write_chksums_2d(var, outunit, name_lead) + type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure for which to register diagnostics + integer, intent(in) :: outunit !< The index of a open output file + character(len=*), optional, intent(in) :: name_lead !< An optional prefix for the variable names + + call coupler_type_write_chksums(var, outunit, name_lead) + +end subroutine CT_write_chksums_2d + +!> Write out checksums for the elements of a coupler_3d_bc_type +subroutine CT_write_chksums_3d(var, outunit, name_lead) + type(coupler_3d_bc_type), intent(in) :: var !< BC_type structure for which to register diagnostics + integer, intent(in) :: outunit !< The index of a open output file + character(len=*), optional, intent(in) :: name_lead !< An optional prefix for the variable names + + call coupler_type_write_chksums(var, outunit, name_lead) + +end subroutine CT_write_chksums_3d + +!> Indicate whether a coupler_1d_bc_type has been initialized. +logical function CT_initialized_1d(var) + type(coupler_1d_bc_type), intent(in) :: var !< BC_type structure to be deconstructed + + CT_initialized_1d = coupler_type_initialized(var) +end function CT_initialized_1d + +!> Indicate whether a coupler_2d_bc_type has been initialized. 
+logical function CT_initialized_2d(var) + type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure to be deconstructed + + CT_initialized_2d = coupler_type_initialized(var) +end function CT_initialized_2d + +!> Indicate whether a coupler_3d_bc_type has been initialized. +logical function CT_initialized_3d(var) + type(coupler_3d_bc_type), intent(in) :: var !< BC_type structure to be deconstructed + + CT_initialized_3d = coupler_type_initialized(var) +end function CT_initialized_3d + +!> Deallocate all data associated with a coupler_1d_bc_type +subroutine CT_destructor_1d(var) + type(coupler_1d_bc_type), intent(inout) :: var !< BC_type structure to be deconstructed + + call coupler_type_destructor(var) + +end subroutine CT_destructor_1d + +!> Deallocate all data associated with a coupler_2d_bc_type +subroutine CT_destructor_2d(var) + type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure to be deconstructed + + call coupler_type_destructor(var) + +end subroutine CT_destructor_2d + +end module MOM_couplertype_infra diff --git a/config_src/infra/FMS2/MOM_cpu_clock_infra.F90 b/config_src/infra/FMS2/MOM_cpu_clock_infra.F90 new file mode 100644 index 0000000000..62c21e5772 --- /dev/null +++ b/config_src/infra/FMS2/MOM_cpu_clock_infra.F90 @@ -0,0 +1,94 @@ +!> Wraps the MPP cpu clock functions +!! +!! The functions and constants should be accessed via mom_cpu_clock +module MOM_cpu_clock_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +! These interfaces and constants from MPP/FMS will not be directly exposed outside of this module +use fms_mod, only : clock_flag_default +use mpp_mod, only : mpp_clock_begin +use mpp_mod, only : mpp_clock_end, mpp_clock_id +use mpp_mod, only : MPP_CLOCK_COMPONENT => CLOCK_COMPONENT +use mpp_mod, only : MPP_CLOCK_SUBCOMPONENT => CLOCK_SUBCOMPONENT +use mpp_mod, only : MPP_CLOCK_MODULE_DRIVER => CLOCK_MODULE_DRIVER +use mpp_mod, only : MPP_CLOCK_MODULE => CLOCK_MODULE +use mpp_mod, only : MPP_CLOCK_ROUTINE => CLOCK_ROUTINE +use mpp_mod, only : MPP_CLOCK_LOOP => CLOCK_LOOP +use mpp_mod, only : MPP_CLOCK_INFRA => CLOCK_INFRA + +implicit none ; private + +! Public entities +public :: cpu_clock_id, cpu_clock_begin, cpu_clock_end +public :: CLOCK_COMPONENT, CLOCK_SUBCOMPONENT, CLOCK_MODULE_DRIVER, CLOCK_MODULE +public :: CLOCK_ROUTINE, CLOCK_LOOP, CLOCK_INFRA + +!> A granularity value to passed to cpu_clock_id() to indicate the clock is for a +!! component, e.g. the entire MOM6 model +integer, parameter :: CLOCK_COMPONENT = MPP_CLOCK_COMPONENT + +!> A granularity value to passed to cpu_clock_id() to indicate the clock is for a +!! sub-component, e.g. dynamics or thermodynamics +integer, parameter :: CLOCK_SUBCOMPONENT = MPP_CLOCK_SUBCOMPONENT + +!> A granularity value to passed to cpu_clock_id() to indicate the clock is for a +!! module driver, e.g. a routine that calls multiple other routines +integer, parameter :: CLOCK_MODULE_DRIVER = MPP_CLOCK_MODULE_DRIVER + +!> A granularity value to passed to cpu_clock_id() to indicate the clock is for a +!! module, e.g. the main entry routine for a module +integer, parameter :: CLOCK_MODULE = MPP_CLOCK_MODULE + +!> A granularity value to passed to cpu_clock_id() to indicate the clock is for a +!! subroutine or function +integer, parameter :: CLOCK_ROUTINE = MPP_CLOCK_ROUTINE + +!> A granularity value to passed to cpu_clock_id() to indicate the clock is for a +!! section with in a routine, e.g. 
around a loop +integer, parameter :: CLOCK_LOOP = MPP_CLOCK_LOOP + +!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for an +!! infrastructure operation, e.g. a halo update +integer, parameter :: CLOCK_INFRA = MPP_CLOCK_INFRA + +contains + +!> Turns on clock with handle "id" +subroutine cpu_clock_begin(id) + integer, intent(in) :: id !< Handle for clock + + call mpp_clock_begin(id) + +end subroutine cpu_clock_begin + +!> Turns off clock with handle "id" +subroutine cpu_clock_end(id) + integer, intent(in) :: id !< Handle for clock + + call mpp_clock_end(id) + +end subroutine cpu_clock_end + +!> Returns the integer handle for a named CPU clock. +integer function cpu_clock_id(name, sync, grain) + character(len=*), intent(in) :: name !< The unique name of the CPU clock + logical, optional, intent(in) :: sync !< A flag that controls whether the + !! PEs are synchronized before the cpu clocks start counting. + !! Synchronization occurs before the start of a clock if this + !! is enabled, while additional (expensive) statistics can + !! be set for other values. + !! If absent, the default is taken from the settings for FMS. + integer, optional, intent(in) :: grain !< The timing granularity for this clock, usually set to + !! the values of CLOCK_COMPONENT, CLOCK_ROUTINE, CLOCK_LOOP, etc. + + integer :: clock_flags + + clock_flags = clock_flag_default + if (present(sync)) & + clock_flags = ibset(clock_flags, 0) + + cpu_clock_id = mpp_clock_id(name, flags=clock_flags, grain=grain) +end function cpu_clock_id + +end module MOM_cpu_clock_infra diff --git a/config_src/infra/FMS2/MOM_data_override_infra.F90 b/config_src/infra/FMS2/MOM_data_override_infra.F90 new file mode 100644 index 0000000000..1484f0c128 --- /dev/null +++ b/config_src/infra/FMS2/MOM_data_override_infra.F90 @@ -0,0 +1,105 @@ +!> These interfaces allow for ocean or sea-ice variables to be replaced with data. +module MOM_data_override_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use MOM_domain_infra, only : MOM_domain_type, domain2d +use MOM_domain_infra, only : get_simple_array_i_ind, get_simple_array_j_ind +use MOM_time_manager, only : time_type +use data_override_mod, only : data_override_init +use data_override_mod, only : data_override +use data_override_mod, only : data_override_unset_domains + +implicit none ; private + +public :: impose_data_init, impose_data, impose_data_unset_domains + +!> Potentially override the values of a field in the model with values from a dataset. +interface impose_data + module procedure data_override_MD, data_override_2d +end interface + +contains + +!> Initialize the data override capability and set the domains for the ocean and ice components. +!> There should be a call to impose_data_init before impose_data is called. +subroutine impose_data_init(MOM_domain_in, Ocean_domain_in, Ice_domain_in) + type (MOM_domain_type), intent(in), optional :: MOM_domain_in !< A MOM domain whose mpp domain is used as the ocean domain for overrides + type (domain2d), intent(in), optional :: Ocean_domain_in !< The ocean domain to use for overrides + type (domain2d), intent(in), optional :: Ice_domain_in !< The sea-ice domain to use for overrides + + if (present(MOM_domain_in)) then + call data_override_init(Ocean_domain_in=MOM_domain_in%mpp_domain, Ice_domain_in=Ice_domain_in) + else + call data_override_init(Ocean_domain_in=Ocean_domain_in, Ice_domain_in=Ice_domain_in) + endif +end subroutine impose_data_init + + +!> Potentially override a 2-d field on a MOM6 domain with values from a dataset.
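+!! An illustrative usage sketch (hedged; G, sst, Time and overrode are hypothetical names from the +!! calling code, and impose_data_init must already have been called): +!! call impose_data(G%Domain, 'SST', sst, Time, override=overrode)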
+subroutine data_override_MD(domain, fieldname, data_2D, time, scale, override, is_ice) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + character(len=*), intent(in) :: fieldname !< Name of the field to override + real, dimension(:,:), intent(inout) :: data_2D !< Data that may be modified by this call. + type(time_type), intent(in) :: time !< The model time, and the time for the data + real, optional, intent(in) :: scale !< A scaling factor that an overridden field is + !! multiplied by before it is returned. However, + !! if there is no override, there is no rescaling. + logical, optional, intent(out) :: override !< True if the field has been overridden successfully + logical, optional, intent(in) :: is_ice !< If present and true, use the ice domain. + + logical :: overridden, is_ocean + integer :: i, j, is, ie, js, je + + overridden = .false. + is_ocean = .true. ; if (present(is_ice)) is_ocean = .not.is_ice + if (is_ocean) then + call data_override('OCN', fieldname, data_2D, time, override=overridden) + else + call data_override('ICE', fieldname, data_2D, time, override=overridden) + endif + + if (overridden .and. present(scale)) then ; if (scale /= 1.0) then + ! Rescale data in the computational domain if the data override has occurred. + call get_simple_array_i_ind(domain, size(data_2D,1), is, ie) + call get_simple_array_j_ind(domain, size(data_2D,2), js, je) + do j=js,je ; do i=is,ie + data_2D(i,j) = scale*data_2D(i,j) + enddo ; enddo + endif ; endif + + if (present(override)) override = overridden + +end subroutine data_override_MD + + +!> Potentially override a 2-d field with values from a dataset. +subroutine data_override_2d(gridname, fieldname, data_2D, time, override) + character(len=3), intent(in) :: gridname !< String identifying the model component, in MOM6 + !! and SIS this may be either 'OCN' or 'ICE' + character(len=*), intent(in) :: fieldname !< Name of the field to override + real, dimension(:,:), intent(inout) :: data_2D !< Data that may be modified by this call + type(time_type), intent(in) :: time !< The model time, and the time for the data + logical, optional, intent(out) :: override !< True if the field has been overridden successfully + + call data_override(gridname, fieldname, data_2D, time, override) + +end subroutine data_override_2d + +!> Unset domains that had previously been set for use by data_override. +subroutine impose_data_unset_domains(unset_Ocean, unset_Ice, must_be_set) + logical, intent(in), optional :: unset_Ocean !< If present and true, unset the ocean domain for overrides + logical, intent(in), optional :: unset_Ice !< If present and true, unset the sea-ice domain for overrides + logical, intent(in), optional :: must_be_set !< If present and true, it is a fatal error to unset + !! a domain that is not set. + + call data_override_unset_domains(unset_Ocean=unset_Ocean, unset_Ice=unset_Ice, & + must_be_set=must_be_set) +end subroutine impose_data_unset_domains + +end module MOM_data_override_infra + +!> \namespace MOM_data_override_infra +!! +!! The routines here wrap routines from the FMS module data_override_mod, which potentially replace +!! model values with values read from a data file. diff --git a/config_src/infra/FMS2/MOM_diag_manager_infra.F90 b/config_src/infra/FMS2/MOM_diag_manager_infra.F90 new file mode 100644 index 0000000000..18c80cf24c --- /dev/null +++ b/config_src/infra/FMS2/MOM_diag_manager_infra.F90 @@ -0,0 +1,423 @@ +!> A wrapper for the FMS diag_manager routines. 
This module should be the +!! only MOM6 module which imports the FMS shared infrastructure for +!! diagnostics. Pass through interfaces are being documented +!! here and renamed in order to clearly identify these APIs as being +!! consistent with the FMS infrastructure (Any future updates to +!! those APIs would be applied here). +module MOM_diag_manager_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use diag_axis_mod, only : fms_axis_init=>diag_axis_init +use diag_axis_mod, only : fms_get_diag_axis_name => get_diag_axis_name +use diag_axis_mod, only : EAST, NORTH +use diag_data_mod, only : null_axis_id +use diag_manager_mod, only : fms_diag_manager_init => diag_manager_init +use diag_manager_mod, only : fms_diag_manager_end => diag_manager_end +use diag_manager_mod, only : send_data_fms => send_data +use diag_manager_mod, only : fms_diag_field_add_attribute => diag_field_add_attribute +use diag_manager_mod, only : DIAG_FIELD_NOT_FOUND +use diag_manager_mod, only : register_diag_field_fms => register_diag_field +use diag_manager_mod, only : register_static_field_fms => register_static_field +use diag_manager_mod, only : get_diag_field_id_fms => get_diag_field_id +use MOM_time_manager, only : time_type +use MOM_domain_infra, only : MOM_domain_type +use MOM_error_infra, only : MOM_error => MOM_err, FATAL, WARNING + +implicit none ; private + +!> transmit data for diagnostic output +interface register_diag_field_infra + module procedure register_diag_field_infra_scalar + module procedure register_diag_field_infra_array +end interface register_diag_field_infra + +!> transmit data for diagnostic output +interface send_data_infra + module procedure send_data_infra_0d, send_data_infra_1d + module procedure send_data_infra_2d, send_data_infra_3d +#ifdef OVERLOAD_R8 + module procedure send_data_infra_2d_r8, send_data_infra_3d_r8 +#endif +end interface send_data_infra + +!> Add an attribute to a diagnostic field +interface MOM_diag_field_add_attribute + module procedure MOM_diag_field_add_attribute_scalar_r + module procedure MOM_diag_field_add_attribute_scalar_i + module procedure MOM_diag_field_add_attribute_scalar_c + module procedure MOM_diag_field_add_attribute_r1d + module procedure MOM_diag_field_add_attribute_i1d +end interface MOM_diag_field_add_attribute + + +! Public interfaces +public MOM_diag_axis_init +public get_MOM_diag_axis_name +public MOM_diag_manager_init +public MOM_diag_manager_end +public send_data_infra +public MOM_diag_field_add_attribute +public register_diag_field_infra +public register_static_field_infra +public get_MOM_diag_field_id +! Public data +public null_axis_id +public DIAG_FIELD_NOT_FOUND +public EAST, NORTH + + +contains + +!> Initialize a diagnostic axis +integer function MOM_diag_axis_init(name, data, units, cart_name, long_name, MOM_domain, position, & + & direction, edges, set_name, coarsen, null_axis) + character(len=*), intent(in) :: name !< The name of this axis + real, dimension(:), intent(in) :: data !< The array of coordinate values + character(len=*), intent(in) :: units !< The units for the axis data + character(len=*), intent(in) :: cart_name !< Cartesian axis ("X", "Y", "Z", "T", or "N" for none) + character(len=*), & + optional, intent(in) :: long_name !< The long name of this axis + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + integer, optional, intent(in) :: position !< This indicates the relative position of this + !! axis. 
The default is CENTER, but EAST and NORTH + !! are common options. + integer, optional, intent(in) :: direction !< This indicates the direction along which this + !! axis increases: 1 for upward, -1 for downward, or + !! 0 for non-vertical axes (the default) + integer, optional, intent(in) :: edges !< The axis_id of the complementary axis that + !! describes the edges of this axis + character(len=*), & + optional, intent(in) :: set_name !< A name to use for this set of axes. + integer, optional, intent(in) :: coarsen !< An optional degree of coarsening for the grid, 1 + !! by default. + logical, optional, intent(in) :: null_axis !< If present and true, return the special null axis + !! id for use with scalars. + + integer :: coarsening ! The degree of grid coarsening + + if (present(null_axis)) then ; if (null_axis) then + ! Return the special null axis id for scalars + MOM_diag_axis_init = null_axis_id + return + endif ; endif + + if (present(MOM_domain)) then + coarsening = 1 ; if (present(coarsen)) coarsening = coarsen + if (coarsening == 1) then + MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges, & + domain2=MOM_domain%mpp_domain, domain_position=position) + elseif (coarsening == 2) then + MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges, & + domain2=MOM_domain%mpp_domain_d2, domain_position=position) + else + call MOM_error(FATAL, "diag_axis_init called with an invalid value of coarsen.") + endif + else + if (present(coarsen)) then ; if (coarsen /= 1) then + call MOM_error(FATAL, "diag_axis_init does not support grid coarsening without a MOM_domain.") + endif ; endif + MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges) + endif + +end function MOM_diag_axis_init + +!> Returns the short name of the axis +subroutine get_MOM_diag_axis_name(id, name) + integer, intent(in) :: id !< The axis numeric id + character(len=*), intent(out) :: name !< The short name of the axis + + call fms_get_diag_axis_name(id, name) + +end subroutine get_MOM_diag_axis_name + +!> Return a unique numeric ID field a module/field name combination. +integer function get_MOM_diag_field_id(module_name, field_name) + character(len=*), intent(in) :: module_name !< A module name string to query. + character(len=*), intent(in) :: field_name !< A field name string to query. + + + get_MOM_diag_field_id = -1 + get_MOM_diag_field_id = get_diag_field_id_fms(module_name, field_name) + +end function get_MOM_diag_field_id + +!> Initializes the diagnostic manager +subroutine MOM_diag_manager_init(diag_model_subset, time_init, err_msg) + integer, optional, intent(in) :: diag_model_subset !< An optional diagnostic subset + integer, dimension(6), optional, intent(in) :: time_init !< An optional reference time for diagnostics + !! The default uses the value contained in the + !! diag_table. Format is Y-M-D-H-M-S + character(len=*), optional, intent(out) :: err_msg !< Error message. + call FMS_diag_manager_init(diag_model_subset, time_init, err_msg) + +end subroutine MOM_diag_manager_init + +!> Close the diagnostic manager +subroutine MOM_diag_manager_end(time) + type(time_type), intent(in) :: time !< Model time at call to close. 
+ + call FMS_diag_manager_end(time) + +end subroutine MOM_diag_manager_end + +!> Register a MOM diagnostic field for scalars +integer function register_diag_field_infra_scalar(module_name, field_name, init_time, & + long_name, units, missing_value, range, standard_name, do_not_log, & + err_msg, area, volume) + character(len=*), intent(in) :: module_name !< The name of the associated module + character(len=*), intent(in) :: field_name !< The name of the field + type(time_type), optional, intent(in) :: init_time !< The registration time + character(len=*), optional, intent(in) :: long_name !< A long name for the field + character(len=*), optional, intent(in) :: units !< Field units + character(len=*), optional, intent(in) :: standard_name !< A standard name for the field + real, optional, intent(in) :: missing_value !< Missing value attribute + real, dimension(2), optional, intent(in) :: range !< A valid range of the field + logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged + character(len=*), optional, intent(out):: err_msg !< An error message to return + integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute + integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute + + register_diag_field_infra_scalar = register_diag_field_fms(module_name, field_name, init_time, & + long_name, units, missing_value, range, standard_name, do_not_log, err_msg, area, volume) + +end function register_diag_field_infra_scalar + +!> Register a MOM diagnostic field for an array of values +integer function register_diag_field_infra_array(module_name, field_name, axes, init_time, & + long_name, units, missing_value, range, mask_variant, standard_name, verbose, & + do_not_log, err_msg, interp_method, tile_count, area, volume) + character(len=*), intent(in) :: module_name !< The name of the associated module + character(len=*), intent(in) :: field_name !< The name of the field + integer, dimension(:), intent(in) :: axes !< Diagnostic IDs of axis attributes for the field + type(time_type), optional, intent(in) :: init_time !< The registration time + character(len=*), optional, intent(in) :: long_name !< A long name for the field + character(len=*), optional, intent(in) :: units !< Units of the field + real, optional, intent(in) :: missing_value !< Missing value attribute + real, dimension(2), optional, intent(in) :: range !< A valid range of the field + logical, optional, intent(in) :: mask_variant !< If true, the field mask is varying in time + character(len=*), optional, intent(in) :: standard_name !< A standard name for the field + logical, optional, intent(in) :: verbose !< If true, provide additional log information + logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged + character(len=*), optional, intent(in) :: interp_method !< If 'none' indicates the field should + !!
not be interpolated as a scalar + integer, optional, intent(in) :: tile_count !< The tile number for the current PE + character(len=*), optional, intent(out):: err_msg !< An error message to return + integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute + integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute + + register_diag_field_infra_array = register_diag_field_fms(module_name, field_name, axes, init_time, & + long_name, units, missing_value, range, mask_variant, standard_name, verbose, do_not_log, & + err_msg, interp_method, tile_count, area, volume) + +end function register_diag_field_infra_array + + +integer function register_static_field_infra(module_name, field_name, axes, long_name, units, & + missing_value, range, mask_variant, standard_name, do_not_log, interp_method, & + tile_count, area, volume) + character(len=*), intent(in) :: module_name !< The name of the associated module + character(len=*), intent(in) :: field_name !< The name of the field + integer, dimension(:), intent(in) :: axes !< Diagnostic IDs of axis attributes for the field + character(len=*), optional, intent(in) :: long_name !< A long name for the field + character(len=*), optional, intent(in) :: units !< Units of the field + real, optional, intent(in) :: missing_value !< Missing value attribute + real, dimension(2), optional, intent(in) :: range !< A valid range of the field + logical, optional, intent(in) :: mask_variant !< If true, the field mask is varying in time + character(len=*), optional, intent(in) :: standard_name !< A standard name for the field + logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged + character(len=*), optional, intent(in) :: interp_method !< If 'none' indicates the field should + !! not be interpolated as a scalar + integer, optional, intent(in) :: tile_count !< The tile number for the current PE + integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute + integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute + + register_static_field_infra = register_static_field_fms(module_name, field_name, axes, long_name, units,& + & missing_value, range, mask_variant, standard_name, dynamic=.false.,do_not_log=do_not_log, & + interp_method=interp_method,tile_count=tile_count, area=area, volume=volume) +end function register_static_field_infra + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. +logical function send_data_infra_0d(diag_field_id, field, time, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, intent(in) :: field !< The value being recorded + TYPE(time_type), optional, intent(in) :: time !< The time for the current record + CHARACTER(len=*), optional, intent(out) :: err_msg !< An optional error message + + send_data_infra_0d = send_data_fms(diag_field_id, field, time, err_msg) +end function send_data_infra_0d + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. 
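+!! For illustration only, a typical post through the generic send_data_infra might look like the +!! following sketch, where id_sst, sst, used and Time are hypothetical names from the calling code: +!! if (id_sst > 0) used = send_data_infra(id_sst, sst, time=Time)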
+logical function send_data_infra_1d(diag_field_id, field, is_in, ie_in, time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, dimension(:), intent(in) :: field !< A 1-d array of values being recorded + integer, optional, intent(in) :: is_in !< The starting index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:), optional, intent(in) :: mask !< An optional rank 1 logical mask + real, dimension(:), optional, intent(in) :: rmask !< An optional rank 1 mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_1d = send_data_fms(diag_field_id, field, time, is_in, mask, rmask, ie_in, weight, err_msg) + +end function send_data_infra_1d + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. +logical function send_data_infra_2d(diag_field_id, field, is_in, ie_in, js_in, je_in, & + time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, dimension(:,:), intent(in) :: field !< A 2-d array of values being recorded + integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded + integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded + integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:,:), optional, intent(in) :: mask !< An optional 2-d logical mask + real, dimension(:,:), optional, intent(in) :: rmask !< An optional 2-d mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_2d = send_data_fms(diag_field_id, field, time, is_in, js_in, mask, & + rmask, ie_in, je_in, weight, err_msg) + +end function send_data_infra_2d + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. 
+logical function send_data_infra_3d(diag_field_id, field, is_in, ie_in, js_in, je_in, ks_in, ke_in, & + time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, dimension(:,:,:), intent(in) :: field !< A 3-d array of floating point values being recorded + integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded + integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded + integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded + integer, optional, intent(in) :: ks_in !< The starting k-index for the data being recorded + integer, optional, intent(in) :: ke_in !< The end k-index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:,:,:), optional, intent(in) :: mask !< An optional 3-d logical mask + real, dimension(:,:,:), optional, intent(in) :: rmask !< An optional 3-d mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_3d = send_data_fms(diag_field_id, field, time, is_in, js_in, ks_in, mask, & + rmask, ie_in, je_in, ke_in, weight, err_msg) + +end function send_data_infra_3d + + +#ifdef OVERLOAD_R8 +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. +logical function send_data_infra_2d_r8(diag_field_id, field, is_in, ie_in, js_in, je_in, & + time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real(kind=8), dimension(:,:), intent(in) :: field !< A 2-d array of values being recorded + integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded + integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded + integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:,:), optional, intent(in) :: mask !< An optional 2-d logical mask + real, dimension(:,:), optional, intent(in) :: rmask !< An optional 2-d mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_2d_r8 = send_data_fms(diag_field_id, field, time, is_in, js_in, mask, & + rmask, ie_in, je_in, weight, err_msg) + +end function send_data_infra_2d_r8 + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise.
+logical function send_data_infra_3d_r8(diag_field_id, field, is_in, ie_in, js_in, je_in, ks_in, ke_in, & + time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real(kind=8), dimension(:,:,:), intent(in) :: field !< A 3-d array of floating point values being recorded + integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded + integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded + integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded + integer, optional, intent(in) :: ks_in !< The starting k-index for the data being recorded + integer, optional, intent(in) :: ke_in !< The end k-index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:,:,:), optional, intent(in) :: mask !< An optional 3-d logical mask + real, dimension(:,:,:), optional, intent(in) :: rmask !< An optional 3-d mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_3d_r8 = send_data_fms(diag_field_id, field, time, is_in, js_in, ks_in, mask, rmask, & + ie_in, je_in, ke_in, weight, err_msg) + +end function send_data_infra_3d_r8 +#endif + +!> Add a real scalar attribute to a diagnostic field +subroutine MOM_diag_field_add_attribute_scalar_r(diag_field_id, att_name, att_value) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + character(len=*), intent(in) :: att_name !< The name of the attribute + real, intent(in) :: att_value !< A real scalar value + + call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value) + +end subroutine MOM_diag_field_add_attribute_scalar_r + +!> Add an integer attribute to a diagnostic field +subroutine MOM_diag_field_add_attribute_scalar_i(diag_field_id, att_name, att_value) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + character(len=*), intent(in) :: att_name !< The name of the attribute + integer, intent(in) :: att_value !< An integer scalar value + + call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value) + +end subroutine MOM_diag_field_add_attribute_scalar_i + +!> Add a character string attribute to a diagnostic field +subroutine MOM_diag_field_add_attribute_scalar_c(diag_field_id, att_name, att_value) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + character(len=*), intent(in) :: att_name !< The name of the attribute + character(len=*), intent(in) :: att_value !< A character string value + + call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value) + +end subroutine MOM_diag_field_add_attribute_scalar_c + +!> Add a list of real attributes to a diagnostic field +subroutine MOM_diag_field_add_attribute_r1d(diag_field_id, att_name, att_value) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + character(len=*), intent(in) :: att_name !< The name of the attribute + real, dimension(:), intent(in) :: att_value !< An array of real values + + call FMS_diag_field_add_attribute(diag_field_id, att_name,
att_value) + +end subroutine MOM_diag_field_add_attribute_r1d + +!> Add a integer list of attributes attribute to a diagnostic field +subroutine MOM_diag_field_add_attribute_i1d(diag_field_id, att_name, att_value) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + character(len=*), intent(in) :: att_name !< The name of the attribute + integer, dimension(:), intent(in) :: att_value !< An array of integer values + + call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value) + +end subroutine MOM_diag_field_add_attribute_i1d + +end module MOM_diag_manager_infra diff --git a/config_src/infra/FMS2/MOM_domain_infra.F90 b/config_src/infra/FMS2/MOM_domain_infra.F90 new file mode 100644 index 0000000000..5f8d5fb20b --- /dev/null +++ b/config_src/infra/FMS2/MOM_domain_infra.F90 @@ -0,0 +1,1995 @@ +!> Describes the decomposed MOM domain and has routines for communications across PEs +module MOM_domain_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use MOM_coms_infra, only : PE_here, root_PE, num_PEs +use MOM_cpu_clock_infra, only : cpu_clock_begin, cpu_clock_end +use MOM_error_infra, only : MOM_error=>MOM_err, NOTE, WARNING, FATAL + +use mpp_domains_mod, only : domain2D, domain1D +use mpp_domains_mod, only : mpp_define_io_domain, mpp_define_domains, mpp_deallocate_domain +use mpp_domains_mod, only : mpp_get_domain_components, mpp_get_domain_extents, mpp_get_layout +use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain +use mpp_domains_mod, only : mpp_get_boundary, mpp_update_domains +use mpp_domains_mod, only : mpp_start_update_domains, mpp_complete_update_domains +use mpp_domains_mod, only : mpp_create_group_update, mpp_do_group_update +use mpp_domains_mod, only : mpp_reset_group_update_field, mpp_group_update_initialized +use mpp_domains_mod, only : mpp_start_group_update, mpp_complete_group_update +use mpp_domains_mod, only : mpp_compute_block_extent +use mpp_domains_mod, only : mpp_broadcast_domain, mpp_redistribute, mpp_global_field +use mpp_domains_mod, only : AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR, BITWISE_EXACT_SUM +use mpp_domains_mod, only : CYCLIC_GLOBAL_DOMAIN, FOLD_NORTH_EDGE +use mpp_domains_mod, only : To_East => WUPDATE, To_West => EUPDATE, Omit_Corners => EDGEUPDATE +use mpp_domains_mod, only : To_North => SUPDATE, To_South => NUPDATE +use mpp_domains_mod, only : CENTER, CORNER, NORTH_FACE => NORTH, EAST_FACE => EAST +use fms_io_mod, only : file_exist, parse_mask_table +use fms_affinity_mod, only : fms_affinity_init, fms_affinity_set, fms_affinity_get + +! This subroutine is not in MOM6/src but may be required by legacy drivers +use mpp_domains_mod, only : global_field_sum => mpp_global_sum + +! The `group_pass_type` fields are never accessed, so we keep it as an FMS type +use mpp_domains_mod, only : group_pass_type => mpp_group_update_type + +implicit none ; private + +! These types are inherited from mpp, but are treated as opaque here. +public :: domain2D, domain1D, group_pass_type +! These interfaces are actually implemented or have explicit interfaces in this file. 
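+! (For orientation only, a hedged sketch of the halo-update wrappers declared below, with hypothetical +! names Domain, h, u and v supplied by the calling code: +! call pass_var(h, Domain) ; call pass_vector(u, v, Domain, stagger=CGRID_NE) +! This is illustrative and not part of the interface definitions that follow.)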
+public :: create_MOM_domain, clone_MOM_domain, get_domain_components, get_domain_extent +public :: deallocate_MOM_domain, get_global_shape, compute_block_extent +public :: pass_var, pass_vector, fill_symmetric_edges, rescale_comp_data +public :: pass_var_start, pass_var_complete, pass_vector_start, pass_vector_complete +public :: create_group_pass, do_group_pass, start_group_pass, complete_group_pass +public :: redistribute_array, broadcast_domain, same_domain, global_field +public :: get_simple_array_i_ind, get_simple_array_j_ind +public :: MOM_thread_affinity_set, set_MOM_thread_affinity +! These are encoding constant parameters. +public :: To_East, To_West, To_North, To_South, To_All, Omit_Corners +public :: AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR +public :: CORNER, CENTER, NORTH_FACE, EAST_FACE +! These are no longer used by MOM6 because the reproducing sum works so well, but they are +! still referenced by some of the non-GFDL couplers. +public :: global_field_sum, BITWISE_EXACT_SUM + +!> Do a halo update on an array +interface pass_var + module procedure pass_var_3d, pass_var_2d +end interface pass_var + +!> Do a halo update on a pair of arrays representing the two components of a vector +interface pass_vector + module procedure pass_vector_3d, pass_vector_2d +end interface pass_vector + +!> Initiate a non-blocking halo update on an array +interface pass_var_start + module procedure pass_var_start_3d, pass_var_start_2d +end interface pass_var_start + +!> Complete a non-blocking halo update on an array +interface pass_var_complete + module procedure pass_var_complete_3d, pass_var_complete_2d +end interface pass_var_complete + +!> Initiate a halo update on a pair of arrays representing the two components of a vector +interface pass_vector_start + module procedure pass_vector_start_3d, pass_vector_start_2d +end interface pass_vector_start + +!> Complete a halo update on a pair of arrays representing the two components of a vector +interface pass_vector_complete + module procedure pass_vector_complete_3d, pass_vector_complete_2d +end interface pass_vector_complete + +!> Set up a group of halo updates +interface create_group_pass + module procedure create_var_group_pass_2d + module procedure create_var_group_pass_3d + module procedure create_vector_group_pass_2d + module procedure create_vector_group_pass_3d +end interface create_group_pass + +!> Do a set of halo updates that fill in the values at the duplicated edges +!! of a staggered symmetric memory domain +interface fill_symmetric_edges + module procedure fill_vector_symmetric_edges_2d !, fill_vector_symmetric_edges_3d +!
module procedure fill_scalar_symmetric_edges_2d, fill_scalar_symmetric_edges_3d +end interface fill_symmetric_edges + +!> Rescale the values of an array in its computational domain by a constant factor +interface rescale_comp_data + module procedure rescale_comp_data_4d, rescale_comp_data_3d, rescale_comp_data_2d +end interface rescale_comp_data + +!> Pass an array from one MOM domain to another +interface redistribute_array + module procedure redistribute_array_2d, redistribute_array_3d, redistribute_array_4d +end interface redistribute_array + +!> Copy one MOM_domain_type into another +interface clone_MOM_domain + module procedure clone_MD_to_MD, clone_MD_to_d2D +end interface clone_MOM_domain + +!> Extract the 1-d domain components from a MOM_domain or domain2d +interface get_domain_components + module procedure get_domain_components_MD, get_domain_components_d2D +end interface get_domain_components + +!> Returns the index ranges that have been stored in a MOM_domain_type +interface get_domain_extent + module procedure get_domain_extent_MD, get_domain_extent_d2D +end interface get_domain_extent + + +!> The MOM_domain_type contains information about the domain decomposition. +type, public :: MOM_domain_type + character(len=64) :: name !< The name of this domain + type(domain2D), pointer :: mpp_domain => NULL() !< The FMS domain with halos + !! on this processor, centered at h points. + type(domain2D), pointer :: mpp_domain_d2 => NULL() !< A coarse FMS domain with halos + !! on this processor, centered at h points. + integer :: niglobal !< The total horizontal i-domain size. + integer :: njglobal !< The total horizontal j-domain size. + integer :: nihalo !< The i-halo size in memory. + integer :: njhalo !< The j-halo size in memory. + logical :: symmetric !< True if symmetric memory is used with this domain. + logical :: nonblocking_updates !< If true, non-blocking halo updates are + !! allowed. The default is .false. (for now). + logical :: thin_halo_updates !< If true, optional arguments may be used to + !! specify the width of the halos that are + !! updated with each call. + integer :: layout(2) !< This domain's processor layout. This is + !! saved to enable the construction of related + !! new domains with different resolutions or + !! other properties. + integer :: io_layout(2) !< The IO-layout used with this domain. + integer :: X_FLAGS !< Flag that specifies the properties of the + !! domain in the i-direction in a define_domain call. + integer :: Y_FLAGS !< Flag that specifies the properties of the + !! domain in the j-direction in a define_domain call. + logical, pointer :: maskmap(:,:) => NULL() !< A pointer to an array indicating + !! which logical processors are actually used for + !! the ocean code. The other logical processors + !! would be contain only land points and are not + !! assigned to actual processors. This need not be + !! assigned if all logical processors are used. + integer :: turns !< Number of quarter-turns from input to this grid. + type(MOM_domain_type), pointer :: domain_in => NULL() + !< Reference to unrotated domain (if turned) +end type MOM_domain_type + +integer, parameter :: To_All = To_East + To_West + To_North + To_South !< A flag for passing in all directions + +contains + +!> pass_var_3d does a halo update for a three-dimensional array. +subroutine pass_var_3d(array, MOM_dom, sideflag, complete, position, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. 
+ type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! sothe halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_3d + +!> pass_var_2d does a halo update for a two-dimensional array. +subroutine pass_var_2d(array, MOM_dom, sideflag, complete, position, halo, inner_halo, clock) + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full halo + !! by default. + integer, optional, intent(in) :: inner_halo !< The size of an inner halo to avoid updating, + !! or 0 to avoid updating symmetric memory + !! computational domain points. Setting this >=0 + !! also enforces that complete=.true. 
+ integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + real, allocatable, dimension(:,:) :: tmp + integer :: pos, i_halo, j_halo + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB + integer :: inner, i, j, isfw, iefw, isfe, iefe, jsfs, jefs, jsfn, jefn + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + block_til_complete = .true. ; if (present(complete)) block_til_complete = complete + pos = CENTER ; if (present(position)) pos = position + + if (present(inner_halo)) then ; if (inner_halo >= 0) then + ! Store the original values. + allocate(tmp(size(array,1), size(array,2))) + tmp(:,:) = array(:,:) + block_til_complete = .true. + endif ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position) + endif + + if (present(inner_halo)) then ; if (inner_halo >= 0) then + call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed) + ! Convert to local indices for arrays starting at 1. + isc = isc - (isd-1) ; iec = iec - (isd-1) ; ied = ied - (isd-1) ; isd = 1 + jsc = jsc - (jsd-1) ; jec = jec - (jsd-1) ; jed = jed - (jsd-1) ; jsd = 1 + i_halo = min(inner_halo, isc-1) ; j_halo = min(inner_halo, jsc-1) + + ! Figure out the array index extents of the eastern, western, northern and southern regions to copy. 
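+ ! As a worked illustration with hypothetical numbers (not taken from this code): for a CENTER + ! array with isc=5, iec=104 and i_halo=2, the western band spans i=3:5 and the eastern band + ! spans i=104:106, and the pre-update values saved in tmp are restored over these bands below.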
+ if (pos == CENTER) then + if (size(array,1) == ied) then + isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CENTER array.") ; endif + if (size(array,2) == jed) then + jsfs = jsc - j_halo ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo + else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CENTER array.") ; endif + elseif (pos == CORNER) then + if (size(array,1) == ied) then + isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + elseif (size(array,1) == ied+1) then + isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1) + else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CORNER array.") ; endif + if (size(array,2) == jed) then + jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo + elseif (size(array,2) == jed+1) then + jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1) + else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CORNER array.") ; endif + elseif (pos == NORTH_FACE) then + if (size(array,1) == ied) then + isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for NORTH_FACE array.") ; endif + if (size(array,2) == jed) then + jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo + elseif (size(array,2) == jed+1) then + jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1) + else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for NORTH_FACE array.") ; endif + elseif (pos == EAST_FACE) then + if (size(array,1) == ied) then + isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + elseif (size(array,1) == ied+1) then + isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1) + else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for EAST_FACE array.") ; endif + if (size(array,2) == jed) then + jsfs = jsc - j_halo ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo + else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for EAST_FACE array.") ; endif + else + call MOM_error(FATAL, "pass_var_2d: Unrecognized position") + endif + + ! Copy back the stored inner halo points + do j=jsfs,jefn ; do i=isfw,iefw ; array(i,j) = tmp(i,j) ; enddo ; enddo + do j=jsfs,jefn ; do i=isfe,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo + do j=jsfs,jefs ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo + do j=jsfn,jefn ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo + + deallocate(tmp) + endif ; endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_2d + +!> pass_var_start_2d starts a halo update for a two-dimensional array. +function pass_var_start_2d(array, MOM_dom, sideflag, position, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled.
TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_var_start_2d !< The integer index for this update. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_var_start_2d + +!> pass_var_start_3d starts a halo update for a three-dimensional array. +function pass_var_start_3d(array, MOM_dom, sideflag, position, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_var_start_3d !< The integer index for this update. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and.
MOM_dom%thin_halo_updates) then + pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_var_start_3d + +!> pass_var_complete_2d completes a halo update for a two-dimensional array. +subroutine pass_var_complete_2d(id_update, array, MOM_dom, sideflag, position, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has + !! been returned from a previous call to + !! pass_var_start. + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_complete_2d + +!> pass_var_complete_3d completes a halo update for a three-dimensional array. +subroutine pass_var_complete_3d(id_update, array, MOM_dom, sideflag, position, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has + !! been returned from a previous call to + !! pass_var_start. + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! 
This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_complete_3d + +!> pass_vector_2d does a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. +subroutine pass_vector_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then
+    call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, &
+                   gridtype=stagger_local, complete = block_til_complete, &
+                   whalo=halo, ehalo=halo, shalo=halo, nhalo=halo)
+  else
+    call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, &
+                   gridtype=stagger_local, complete = block_til_complete)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine pass_vector_2d
+
+!> fill_vector_symmetric_edges_2d does an unusual set of halo updates that only
+!! fill in the values at the edge of a pair of symmetric memory two-dimensional
+!! arrays representing the components of a two-dimensional horizontal vector.
+!! If symmetric memory is not being used, this subroutine does nothing except to
+!! possibly turn optional cpu clocks on or off.
+subroutine fill_vector_symmetric_edges_2d(u_cmpt, v_cmpt, MOM_dom, stagger, scalar, &
+                                          clock)
+  real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector
+                                                !! pair which is having its halo points
+                                                !! exchanged.
+  real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the
+                                                !! vector pair which is having its halo points
+                                                !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                                !! needed to determine where data should be
+                                                !! sent.
+  integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID,
+                          !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are
+                          !! discretized. Omitting stagger is the same as setting it to CGRID_NE.
+  logical, optional, intent(in) :: scalar !< An optional argument indicating whether these are
+                          !! paired non-directional scalars discretized at the typical vector
+                          !! component locations; if true, SCALAR_PAIR is added to the update flags.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                          !! started then stopped to time this routine.
+
+  ! Local variables
+  integer :: stagger_local
+  integer :: dirflag
+  integer :: i, j, isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB
+  real, allocatable, dimension(:) :: sbuff_x, sbuff_y, wbuff_x, wbuff_y
+  logical :: block_til_complete
+
+  if (.not. MOM_dom%symmetric) then
+    return
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  stagger_local = CGRID_NE ! Default value for type of grid
+  if (present(stagger)) stagger_local = stagger
+
+  if (.not.(stagger_local == CGRID_NE .or. stagger_local == BGRID_NE)) return
+
+  call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec)
+  call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed)
+
+  ! Adjust isc, etc., to account for the fact that the input array indices all
+  ! start at 1 (and are effectively on a SW grid!).
+  isc = isc - (isd-1) ; iec = iec - (isd-1)
+  jsc = jsc - (jsd-1) ; jec = jec - (jsd-1)
+  IscB = isc ; IecB = iec+1 ; JscB = jsc ; JecB = jec+1
+
+  dirflag = To_All !
60 + if (present(scalar)) then ; if (scalar) dirflag = To_All+SCALAR_PAIR ; endif + + if (stagger_local == CGRID_NE) then + allocate(wbuff_x(jsc:jec)) ; allocate(sbuff_y(isc:iec)) + wbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 + call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + wbufferx=wbuff_x, sbuffery=sbuff_y, & + gridtype=CGRID_NE) + do i=isc,iec + v_cmpt(i,JscB) = sbuff_y(i) + enddo + do j=jsc,jec + u_cmpt(IscB,j) = wbuff_x(j) + enddo + deallocate(wbuff_x) ; deallocate(sbuff_y) + elseif (stagger_local == BGRID_NE) then + allocate(wbuff_x(JscB:JecB)) ; allocate(sbuff_x(IscB:IecB)) + allocate(wbuff_y(JscB:JecB)) ; allocate(sbuff_y(IscB:IecB)) + wbuff_x(:) = 0.0 ; wbuff_y(:) = 0.0 ; sbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 + call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + wbufferx=wbuff_x, sbufferx=sbuff_x, & + wbuffery=wbuff_y, sbuffery=sbuff_y, & + gridtype=BGRID_NE) + do I=IscB,IecB + u_cmpt(I,JscB) = sbuff_x(I) ; v_cmpt(I,JscB) = sbuff_y(I) + enddo + do J=JscB,JecB + u_cmpt(IscB,J) = wbuff_x(J) ; v_cmpt(IscB,J) = wbuff_y(J) + enddo + deallocate(wbuff_x) ; deallocate(sbuff_x) + deallocate(wbuff_y) ; deallocate(sbuff_y) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine fill_vector_symmetric_edges_2d + +!> pass_vector_3d does a halo update for a pair of three-dimensional arrays +!! representing the components of a three-dimensional horizontal vector. +subroutine pass_vector_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! 
Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_3d + +!> pass_vector_start_2d starts a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. +function pass_vector_start_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_vector_start_2d !< The integer index for this + !! update. + + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_vector_start_2d + +!> pass_vector_start_3d starts a halo update for a pair of three-dimensional arrays +!! representing the components of a three-dimensional horizontal vector. +function pass_vector_start_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_vector_start_3d !< The integer index for this + !! update. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_vector_start_3d + +!> pass_vector_complete_2d completes a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. 
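+!!
+!! Editorial usage sketch (not part of the original patch): the start/complete pair is intended
+!! to overlap communication with computation, assuming the generic interfaces pass_vector_start
+!! and pass_vector_complete declared earlier in this module resolve to these routines.  The
+!! names u, v, G%Domain and id_clock_pass below are hypothetical.
+!!
+!!     id = pass_vector_start(u, v, G%Domain, stagger=CGRID_NE, clock=id_clock_pass)
+!!     ! ... do work that does not touch the halo points of u or v ...
+!!     call pass_vector_complete(id, u, v, G%Domain, clock=id_clock_pass)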
+subroutine pass_vector_complete_2d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has been + !! returned from a previous call to + !! pass_var_start. + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_complete_2d + +!> pass_vector_complete_3d completes a halo update for a pair of three-dimensional +!! arrays representing the components of a three-dimensional horizontal vector. +subroutine pass_vector_complete_3d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has been + !! returned from a previous call to + !! pass_var_start. + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. 
+ integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_complete_3d + +!> create_var_group_pass_2d sets up a group of two-dimensional array halo updates. +subroutine create_var_group_pass_2d(group, array, MOM_dom, sideflag, position, & + halo, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 
60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,array) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_var_group_pass_2d + +!> create_var_group_pass_3d sets up a group of three-dimensional array halo updates. +subroutine create_var_group_pass_3d(group, array, MOM_dom, sideflag, position, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,array) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_var_group_pass_3d + +!> create_vector_group_pass_2d sets up a group of two-dimensional vector halo updates. +subroutine create_vector_group_pass_2d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. 
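+  ! Editorial note (not from the original patch): a hedged sketch of how the grouped halo
+  ! updates defined here are typically used, assuming the generic interfaces create_group_pass
+  ! and do_group_pass declared earlier in this module; pass_uv, u, v and G%Domain are
+  ! hypothetical names.
+  !   call create_group_pass(pass_uv, u, v, G%Domain, stagger=CGRID_NE)
+  !   call do_group_pass(pass_uv, G%Domain)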
+ + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,u_cmpt, v_cmpt) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_vector_group_pass_2d + +!> create_vector_group_pass_3d sets up a group of three-dimensional vector halo updates. +subroutine create_vector_group_pass_3d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! 
BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,u_cmpt, v_cmpt) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_vector_group_pass_3d + +!> do_group_pass carries out a group halo update. +subroutine do_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_do_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine do_group_pass + +!> start_group_pass starts out a group halo update. +subroutine start_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_start_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine start_group_pass + +!> complete_group_pass completes a group halo update. +subroutine complete_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! 
started then stopped to time this routine. + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_complete_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine complete_group_pass + + +!> Pass a 2-D array from one MOM domain to another +subroutine redistribute_array_2d(Domain1, array1, Domain2, array2, complete) + type(domain2d), & + intent(in) :: Domain1 !< The MOM domain from which to extract information. + real, dimension(:,:), intent(in) :: array1 !< The array from which to extract information. + type(domain2d), & + intent(in) :: Domain2 !< The MOM domain receiving information. + real, dimension(:,:), intent(out) :: array2 !< The array receiving information. + logical, optional, intent(in) :: complete !< If true, finish communication before proceeding. + + ! Local variables + logical :: do_complete + + do_complete=.true.;if (PRESENT(complete)) do_complete = complete + + call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete) + +end subroutine redistribute_array_2d + +!> Pass a 3-D array from one MOM domain to another +subroutine redistribute_array_3d(Domain1, array1, Domain2, array2, complete) + type(domain2d), & + intent(in) :: Domain1 !< The MOM domain from which to extract information. + real, dimension(:,:,:), intent(in) :: array1 !< The array from which to extract information. + type(domain2d), & + intent(in) :: Domain2 !< The MOM domain receiving information. + real, dimension(:,:,:), intent(out) :: array2 !< The array receiving information. + logical, optional, intent(in) :: complete !< If true, finish communication before proceeding. + + ! Local variables + logical :: do_complete + + do_complete=.true.;if (PRESENT(complete)) do_complete = complete + + call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete) + +end subroutine redistribute_array_3d + +!> Pass a 4-D array from one MOM domain to another +subroutine redistribute_array_4d(Domain1, array1, Domain2, array2, complete) + type(domain2d), & + intent(in) :: Domain1 !< The MOM domain from which to extract information. + real, dimension(:,:,:,:), intent(in) :: array1 !< The array from which to extract information. + type(domain2d), & + intent(in) :: Domain2 !< The MOM domain receiving information. + real, dimension(:,:,:,:), intent(out) :: array2 !< The array receiving information. + logical, optional, intent(in) :: complete !< If true, finish communication before proceeding. + + ! Local variables + logical :: do_complete + + do_complete=.true.;if (PRESENT(complete)) do_complete = complete + + call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete) + +end subroutine redistribute_array_4d + + +!> Rescale the values of a 4-D array in its computational domain by a constant factor +subroutine rescale_comp_data_4d(domain, array, scale) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + real, dimension(:,:,:,:), intent(inout) :: array !< The array which is having the data in its + !! computational domain rescaled + real, intent(in) :: scale !< A scaling factor by which to multiply the + !! 
values in the computational domain of array + integer :: is, ie, js, je + + if (scale == 1.0) return + + call get_simple_array_i_ind(domain, size(array,1), is, ie) + call get_simple_array_j_ind(domain, size(array,2), js, je) + array(is:ie,js:je,:,:) = scale*array(is:ie,js:je,:,:) + +end subroutine rescale_comp_data_4d + +!> Rescale the values of a 3-D array in its computational domain by a constant factor +subroutine rescale_comp_data_3d(domain, array, scale) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + real, dimension(:,:,:), intent(inout) :: array !< The array which is having the data in its + !! computational domain rescaled + real, intent(in) :: scale !< A scaling factor by which to multiply the + !! values in the computational domain of array + integer :: is, ie, js, je + + if (scale == 1.0) return + + call get_simple_array_i_ind(domain, size(array,1), is, ie) + call get_simple_array_j_ind(domain, size(array,2), js, je) + array(is:ie,js:je,:) = scale*array(is:ie,js:je,:) + +end subroutine rescale_comp_data_3d + +!> Rescale the values of a 2-D array in its computational domain by a constant factor +subroutine rescale_comp_data_2d(domain, array, scale) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + real, dimension(:,:), intent(inout) :: array !< The array which is having the data in its + !! computational domain rescaled + real, intent(in) :: scale !< A scaling factor by which to multiply the + !! values in the computational domain of array + integer :: is, ie, js, je + + if (scale == 1.0) return + + call get_simple_array_i_ind(domain, size(array,1), is, ie) + call get_simple_array_j_ind(domain, size(array,2), js, je) + array(is:ie,js:je) = scale*array(is:ie,js:je) + +end subroutine rescale_comp_data_2d + +!> create_MOM_domain creates and initializes a MOM_domain_type variables, based on the information +!! provided in arguments. +subroutine create_MOM_domain(MOM_dom, n_global, n_halo, reentrant, tripolar_N, layout, io_layout, & + domain_name, mask_table, symmetric, thin_halos, nonblocking) + type(MOM_domain_type), pointer :: MOM_dom !< A pointer to the MOM_domain_type being defined here. + integer, dimension(2), intent(in) :: n_global !< The number of points on the global grid in + !! the i- and j-directions + integer, dimension(2), intent(in) :: n_halo !< The number of halo points on each processor + logical, dimension(2), intent(in) :: reentrant !< If true the grid is periodic in the i- and j- directions + logical, intent(in) :: tripolar_N !< If true the grid uses northern tripolar connectivity + integer, dimension(2), intent(in) :: layout !< The layout of logical PEs in the i- and j-directions. + integer, dimension(2), optional, intent(in) :: io_layout !< The layout for parallel input and output. + character(len=*), optional, intent(in) :: domain_name !< A name for this domain, "MOM" if missing. + character(len=*), optional, intent(in) :: mask_table !< The full relative or absolute path to the mask table. + logical, optional, intent(in) :: symmetric !< If present, this specifies whether this domain + !! uses symmetric memory, or true if missing. + logical, optional, intent(in) :: thin_halos !< If present, this specifies whether to permit the use of + !! thin halo updates, or true if missing. + logical, optional, intent(in) :: nonblocking !< If present, this specifies whether to permit the use of + !! nonblocking halo updates, or false if missing. + + ! 
local variables + integer, dimension(4) :: global_indices ! The lower and upper global i- and j-index bounds + integer :: X_FLAGS ! A combination of integers encoding the x-direction grid connectivity. + integer :: Y_FLAGS ! A combination of integers encoding the y-direction grid connectivity. + integer :: xhalo_d2, yhalo_d2 + character(len=200) :: mesg ! A string for use in error messages + logical :: mask_table_exists ! Mask_table is present and the file it points to exists + + if (.not.associated(MOM_dom)) then + allocate(MOM_dom) + allocate(MOM_dom%mpp_domain) + allocate(MOM_dom%mpp_domain_d2) + endif + + MOM_dom%name = "MOM" ; if (present(domain_name)) MOM_dom%name = trim(domain_name) + + X_FLAGS = 0 ; Y_FLAGS = 0 + if (reentrant(1)) X_FLAGS = CYCLIC_GLOBAL_DOMAIN + if (reentrant(2)) Y_FLAGS = CYCLIC_GLOBAL_DOMAIN + if (tripolar_N) then + Y_FLAGS = FOLD_NORTH_EDGE + if (reentrant(2)) call MOM_error(FATAL,"MOM_domains: "// & + "TRIPOLAR_N and REENTRANT_Y may not be used together.") + endif + + MOM_dom%nonblocking_updates = nonblocking + MOM_dom%thin_halo_updates = thin_halos + MOM_dom%symmetric = .true. ; if (present(symmetric)) MOM_dom%symmetric = symmetric + MOM_dom%niglobal = n_global(1) ; MOM_dom%njglobal = n_global(2) + MOM_dom%nihalo = n_halo(1) ; MOM_dom%njhalo = n_halo(2) + + ! Save the extra data for creating other domains of different resolution that overlay this domain. + MOM_dom%X_FLAGS = X_FLAGS + MOM_dom%Y_FLAGS = Y_FLAGS + MOM_dom%layout(:) = layout(:) + + ! Set up the io_layout, with error handling. + MOM_dom%io_layout(:) = (/ 1, 1 /) + if (present(io_layout)) then + if (io_layout(1) == 0) then + MOM_dom%io_layout(1) = layout(1) + elseif (io_layout(1) > 1) then + MOM_dom%io_layout(1) = io_layout(1) + if (modulo(layout(1), io_layout(1)) /= 0) then + write(mesg,'("MOM_domains_init: The i-direction I/O-layout, IO_LAYOUT(1)=",i4, & + &", does not evenly divide the i-direction layout, NIPROC=,",i4,".")') io_layout(1), layout(1) + call MOM_error(FATAL, mesg) + endif + endif + + if (io_layout(2) == 0) then + MOM_dom%io_layout(2) = layout(2) + elseif (io_layout(2) > 1) then + MOM_dom%io_layout(2) = io_layout(2) + if (modulo(layout(2), io_layout(2)) /= 0) then + write(mesg,'("MOM_domains_init: The j-direction I/O-layout, IO_LAYOUT(2)=",i4, & + &", does not evenly divide the j-direction layout, NJPROC=,",i4,".")') io_layout(2), layout(2) + call MOM_error(FATAL, mesg) + endif + endif + endif + + if (present(mask_table)) then + mask_table_exists = file_exist(mask_table) + if (mask_table_exists) then + allocate(MOM_dom%maskmap(layout(1), layout(2))) + call parse_mask_table(mask_table, MOM_dom%maskmap, MOM_dom%name) + endif + else + mask_table_exists = .false. + endif + + ! Initialize as an unrotated domain + MOM_dom%turns = 0 + + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain) + + !For downsampled domain, recommend a halo of 1 (or 0?) since we're not doing wide-stencil computations. + !But that does not work because the downsampled field would not have the correct size to pass the checks, e.g., we get + !error: downsample_diag_indices_get: peculiar size 28 in i-direction\ndoes not match one of 24 25 26 27 + ! call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, halo_size=(MOM_dom%nihalo/2), coarsen=2) + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, coarsen=2) +end subroutine create_MOM_domain + +!> dealloc_MOM_domain deallocates memory associated with a pointer to a MOM_domain_type +!! 
and potentially all of its contents +subroutine deallocate_MOM_domain(MOM_domain, cursory) + type(MOM_domain_type), pointer :: MOM_domain !< A pointer to the MOM_domain_type being deallocated + logical, optional, intent(in) :: cursory !< If true do not deallocate fields associated + !! with the underlying infrastructure + logical :: invasive ! If true, deallocate fields associated with the underlying infrastructure + + invasive = .true. ; if (present(cursory)) invasive = .not.cursory + + if (associated(MOM_domain)) then + if (associated(MOM_domain%mpp_domain)) then + if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain) + deallocate(MOM_domain%mpp_domain) + endif + if (associated(MOM_domain%mpp_domain_d2)) then + if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain_d2) + deallocate(MOM_domain%mpp_domain_d2) + endif + if (associated(MOM_domain%maskmap)) deallocate(MOM_domain%maskmap) + deallocate(MOM_domain) + endif + +end subroutine deallocate_MOM_domain + +!> MOM_thread_affinity_set returns true if the number of openMP threads have been set to a value greater than 1. +function MOM_thread_affinity_set() + ! Local variables + !$ integer :: ocean_nthreads ! Number of openMP threads + !$ integer :: omp_get_num_threads ! An openMP function that returns the number of threads + logical :: MOM_thread_affinity_set + + MOM_thread_affinity_set = .false. + !$ call fms_affinity_init() + !$OMP PARALLEL + !$OMP MASTER + !$ ocean_nthreads = omp_get_num_threads() + !$OMP END MASTER + !$OMP END PARALLEL + !$ MOM_thread_affinity_set = (ocean_nthreads > 1 ) +end function MOM_thread_affinity_set + +!> set_MOM_thread_affinity sets the number of openMP threads to use with the ocean. +subroutine set_MOM_thread_affinity(ocean_nthreads, ocean_hyper_thread) + integer, intent(in) :: ocean_nthreads !< Number of openMP threads to use for the ocean model + logical, intent(in) :: ocean_hyper_thread !< If true, use hyper threading + + ! Local variables + !$ integer :: omp_get_thread_num, omp_get_num_threads !< These are the results of openMP functions + + !$ call fms_affinity_init() ! fms_affinity_init can be safely called more than once. 
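+  ! Editorial note (assumption, not from the original patch): a driver would normally call
+  ! this routine only when MOM_thread_affinity_set() above returns .false., for example:
+  !   if (.not.MOM_thread_affinity_set()) call set_MOM_thread_affinity(nthreads, hyper_thread)
+  ! where nthreads and hyper_thread are hypothetical values read from the model's parameters.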
+ !$ call fms_affinity_set('OCEAN', ocean_hyper_thread, ocean_nthreads) + !$ call omp_set_num_threads(ocean_nthreads) + !$OMP PARALLEL + !$ write(6,*) "MOM_domains_mod OMPthreading ", fms_affinity_get(), omp_get_thread_num(), omp_get_num_threads() + !$ flush(6) + !$OMP END PARALLEL +end subroutine set_MOM_thread_affinity + +!> This subroutine retrieves the 1-d domains that make up the 2d-domain in a MOM_domain +subroutine get_domain_components_MD(MOM_dom, x_domain, y_domain) + type(MOM_domain_type), intent(in) :: MOM_dom !< The MOM_domain whose contents are being extracted + type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain + type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain + + call mpp_get_domain_components(MOM_dom%mpp_domain, x_domain, y_domain) +end subroutine get_domain_components_MD + +!> This subroutine retrieves the 1-d domains that make up a 2d-domain +subroutine get_domain_components_d2D(domain, x_domain, y_domain) + type(domain2D), intent(in) :: domain !< The 2D domain whose contents are being extracted + type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain + type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain + + call mpp_get_domain_components(domain, x_domain, y_domain) +end subroutine get_domain_components_d2D + +!> clone_MD_to_MD copies one MOM_domain_type into another, while allowing +!! some properties of the new type to differ from the original one. +subroutine clone_MD_to_MD(MD_in, MOM_dom, min_halo, halo_size, symmetric, domain_name, & + turns, refine, extra_halo) + type(MOM_domain_type), target, intent(in) :: MD_in !< An existing MOM_domain + type(MOM_domain_type), pointer :: MOM_dom + !< A pointer to a MOM_domain that will be + !! allocated if it is unassociated, and will have data + !! copied from MD_in + integer, dimension(2), & + optional, intent(inout) :: min_halo !< If present, this sets the + !! minimum halo size for this domain in the i- and j- + !! directions, and returns the actual halo size used. + integer, optional, intent(in) :: halo_size !< If present, this sets the halo + !! size for the domain in the i- and j-directions. + !! min_halo and halo_size can not both be present. + logical, optional, intent(in) :: symmetric !< If present, this specifies + !! whether the new domain is symmetric, regardless of + !! whether the macro SYMMETRIC_MEMORY_ is defined. + character(len=*), & + optional, intent(in) :: domain_name !< A name for the new domain, copied + !! from MD_in if missing. + integer, optional, intent(in) :: turns !< Number of quarter turns + integer, optional, intent(in) :: refine !< A factor by which to enhance the grid resolution. + integer, optional, intent(in) :: extra_halo !< An extra number of points in the halos + !! compared with MD_in + + integer :: global_indices(4) + logical :: mask_table_exists + integer, dimension(:), allocatable :: exni ! The extents of the grid for each i-row of the layout. + ! The sum of exni must equal MOM_dom%niglobal. + integer, dimension(:), allocatable :: exnj ! The extents of the grid for each j-row of the layout. + ! The sum of exni must equal MOM_dom%niglobal. + integer :: qturns ! The number of quarter turns, restricted to the range of 0 to 3. + integer :: i, j, nl1, nl2 + + qturns = 0 + if (present(turns)) qturns = modulo(turns, 4) + + if (.not.associated(MOM_dom)) then + allocate(MOM_dom) + allocate(MOM_dom%mpp_domain) + allocate(MOM_dom%mpp_domain_d2) + endif + +! 
Save the extra data for creating other domains of different resolution that overlay this domain.
+  MOM_dom%symmetric = MD_in%symmetric
+  MOM_dom%nonblocking_updates = MD_in%nonblocking_updates
+  MOM_dom%thin_halo_updates = MD_in%thin_halo_updates
+
+  if (modulo(qturns, 2) /= 0) then
+    MOM_dom%niglobal = MD_in%njglobal ; MOM_dom%njglobal = MD_in%niglobal
+    MOM_dom%nihalo = MD_in%njhalo ; MOM_dom%njhalo = MD_in%nihalo
+    call get_layout_extents(MD_in, exnj, exni)
+
+    MOM_dom%X_FLAGS = MD_in%Y_FLAGS ; MOM_dom%Y_FLAGS = MD_in%X_FLAGS
+    MOM_dom%layout(:) = MD_in%layout(2:1:-1)
+    MOM_dom%io_layout(:) = MD_in%io_layout(2:1:-1)
+  else
+    MOM_dom%niglobal = MD_in%niglobal ; MOM_dom%njglobal = MD_in%njglobal
+    MOM_dom%nihalo = MD_in%nihalo ; MOM_dom%njhalo = MD_in%njhalo
+    call get_layout_extents(MD_in, exni, exnj)
+
+    MOM_dom%X_FLAGS = MD_in%X_FLAGS ; MOM_dom%Y_FLAGS = MD_in%Y_FLAGS
+    MOM_dom%layout(:) = MD_in%layout(:)
+    MOM_dom%io_layout(:) = MD_in%io_layout(:)
+  endif
+
+  ! Ensure that the points per processor are the same on the source and destination grids.
+  select case (qturns)
+    case (1) ; call invert(exni)
+    case (2) ; call invert(exni) ; call invert(exnj)
+    case (3) ; call invert(exnj)
+  end select
+
+  if (associated(MD_in%maskmap)) then
+    mask_table_exists = .true.
+    allocate(MOM_dom%maskmap(MOM_dom%layout(1), MOM_dom%layout(2)))
+
+    nl1 = MOM_dom%layout(1) ; nl2 = MOM_dom%layout(2)
+    select case (qturns)
+      case (0)
+        do j=1,nl2 ; do i=1,nl1
+          MOM_dom%maskmap(i,j) = MD_in%maskmap(i, j)
+        enddo ; enddo
+      case (1)
+        do j=1,nl2 ; do i=1,nl1
+          MOM_dom%maskmap(i,j) = MD_in%maskmap(j, nl1+1-i)
+        enddo ; enddo
+      case (2)
+        do j=1,nl2 ; do i=1,nl1
+          MOM_dom%maskmap(i,j) = MD_in%maskmap(nl1+1-i, nl2+1-j)
+        enddo ; enddo
+      case (3)
+        do j=1,nl2 ; do i=1,nl1
+          MOM_dom%maskmap(i,j) = MD_in%maskmap(nl2+1-j, i)
+        enddo ; enddo
+    end select
+  else
+    mask_table_exists = .false.
+  endif
+
+  ! Optionally enhance the grid resolution.
+  if (present(refine)) then ; if (refine > 1) then
+    MOM_dom%niglobal = refine*MOM_dom%niglobal ; MOM_dom%njglobal = refine*MOM_dom%njglobal
+    MOM_dom%nihalo = refine*MOM_dom%nihalo ; MOM_dom%njhalo = refine*MOM_dom%njhalo
+    do i=1,MOM_dom%layout(1) ; exni(i) = refine*exni(i) ; enddo
+    do j=1,MOM_dom%layout(2) ; exnj(j) = refine*exnj(j) ; enddo
+  endif ; endif
+
+  ! Optionally add extra points to the halos.
+  if (present(extra_halo)) then ; if (extra_halo > 0) then
+    MOM_dom%nihalo = MOM_dom%nihalo + extra_halo ; MOM_dom%njhalo = MOM_dom%njhalo + extra_halo
+  endif ; endif
+
+  if (present(halo_size) .and. present(min_halo)) call MOM_error(FATAL, &
+      "clone_MOM_domain can not have both halo_size and min_halo present.")
+
+  if (present(min_halo)) then
+    MOM_dom%nihalo = max(MOM_dom%nihalo, min_halo(1))
+    min_halo(1) = MOM_dom%nihalo
+    MOM_dom%njhalo = max(MOM_dom%njhalo, min_halo(2))
+    min_halo(2) = MOM_dom%njhalo
+  endif
+
+  if (present(halo_size)) then
+    MOM_dom%nihalo = halo_size ; MOM_dom%njhalo = halo_size
+  endif
+
+  if (present(symmetric)) then ; MOM_dom%symmetric = symmetric ; endif
+
+  if (present(domain_name)) then
+    MOM_dom%name = trim(domain_name)
+  else
+    MOM_dom%name = MD_in%name
+  endif
+
+  MOM_dom%turns = qturns
+  if (qturns /= 0) then
+    MOM_dom%domain_in => MD_in
+  endif
+
+  call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain, xextent=exni, yextent=exnj)
+  call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, domain_name=MOM_dom%name, coarsen=2)
+
+end subroutine clone_MD_to_MD
+
+
+!> clone_MD_to_d2D uses information from a MOM_domain_type to create a new
+!!
domain2d type, while allowing some properties of the new type to differ from +!! the original one. +subroutine clone_MD_to_d2D(MD_in, mpp_domain, min_halo, halo_size, symmetric, & + domain_name, turns, xextent, yextent, coarsen) + type(MOM_domain_type), intent(in) :: MD_in !< An existing MOM_domain to be cloned + type(domain2d), intent(inout) :: mpp_domain !< The new mpp_domain to be set up + integer, dimension(2), & + optional, intent(inout) :: min_halo !< If present, this sets the + !! minimum halo size for this domain in the i- and j- + !! directions, and returns the actual halo size used. + integer, optional, intent(in) :: halo_size !< If present, this sets the halo + !! size for the domain in the i- and j-directions. + !! min_halo and halo_size can not both be present. + logical, optional, intent(in) :: symmetric !< If present, this specifies + !! whether the new domain is symmetric, regardless of + !! whether the macro SYMMETRIC_MEMORY_ is defined or + !! whether MD_in is symmetric. + character(len=*), & + optional, intent(in) :: domain_name !< A name for the new domain, "MOM" + !! if missing. + integer, optional, intent(in) :: turns !< Number of quarter turns - not implemented here. + integer, optional, intent(in) :: coarsen !< A factor by which to coarsen this grid. + !! The default of 1 is for no coarsening. + integer, dimension(:), optional, intent(in) :: xextent !< The number of grid points in the + !! tracer computational domain for division of the x-layout. + integer, dimension(:), optional, intent(in) :: yextent !< The number of grid points in the + !! tracer computational domain for division of the y-layout. + + integer :: global_indices(4) + integer :: nihalo, njhalo + logical :: symmetric_dom, do_coarsen + character(len=64) :: dom_name + + if (present(turns)) & + call MOM_error(FATAL, "Rotation not supported for MOM_domain to domain2d") + + if (present(halo_size) .and. present(min_halo)) call MOM_error(FATAL, & + "clone_MOM_domain can not have both halo_size and min_halo present.") + + do_coarsen = .false. ; if (present(coarsen)) then ; do_coarsen = (coarsen > 1) ; endif + + nihalo = MD_in%nihalo ; njhalo = MD_in%njhalo + if (do_coarsen) then + nihalo = int(MD_in%nihalo / coarsen) ; njhalo = int(MD_in%njhalo / coarsen) + endif + + if (present(min_halo)) then + nihalo = max(nihalo, min_halo(1)) + njhalo = max(njhalo, min_halo(2)) + min_halo(1) = nihalo ; min_halo(2) = njhalo + endif + if (present(halo_size)) then + nihalo = halo_size ; njhalo = halo_size + endif + + symmetric_dom = MD_in%symmetric + if (present(symmetric)) then ; symmetric_dom = symmetric ; endif + + dom_name = MD_in%name + if (do_coarsen) dom_name = trim(MD_in%name)//"c" + if (present(domain_name)) dom_name = trim(domain_name) + + global_indices(1:4) = (/ 1, MD_in%niglobal, 1, MD_in%njglobal /) + if (do_coarsen) then + global_indices(1:4) = (/ 1, (MD_in%niglobal/coarsen), 1, (MD_in%njglobal/coarsen) /) + endif + + if (associated(MD_in%maskmap)) then + call mpp_define_domains( global_indices, MD_in%layout, mpp_domain, & + xflags=MD_in%X_FLAGS, yflags=MD_in%Y_FLAGS, xhalo=nihalo, yhalo=njhalo, & + xextent=xextent, yextent=yextent, symmetry=symmetric_dom, name=dom_name, & + maskmap=MD_in%maskmap ) + else + call mpp_define_domains( global_indices, MD_in%layout, mpp_domain, & + xflags=MD_in%X_FLAGS, yflags=MD_in%Y_FLAGS, xhalo=nihalo, yhalo=njhalo, & + symmetry=symmetric_dom, xextent=xextent, yextent=yextent, name=dom_name) + endif + + if ((MD_in%io_layout(1) + MD_in%io_layout(2) > 0) .and. 
& + (MD_in%layout(1)*MD_in%layout(2) > 1)) then + call mpp_define_io_domain(mpp_domain, MD_in%io_layout) + else + call mpp_define_io_domain(mpp_domain, (/ 1, 1 /) ) + endif + +end subroutine clone_MD_to_d2D + +!> Returns the index ranges that have been stored in a MOM_domain_type +subroutine get_domain_extent_MD(Domain, isc, iec, jsc, jec, isd, ied, jsd, jed, & + isg, ieg, jsg, jeg, idg_offset, jdg_offset, & + symmetric, local_indexing, index_offset, coarsen) + type(MOM_domain_type), & + intent(in) :: Domain !< The MOM domain from which to extract information + integer, intent(out) :: isc !< The start i-index of the computational domain + integer, intent(out) :: iec !< The end i-index of the computational domain + integer, intent(out) :: jsc !< The start j-index of the computational domain + integer, intent(out) :: jec !< The end j-index of the computational domain + integer, intent(out) :: isd !< The start i-index of the data domain + integer, intent(out) :: ied !< The end i-index of the data domain + integer, intent(out) :: jsd !< The start j-index of the data domain + integer, intent(out) :: jed !< The end j-index of the data domain + integer, optional, intent(out) :: isg !< The start i-index of the global domain + integer, optional, intent(out) :: ieg !< The end i-index of the global domain + integer, optional, intent(out) :: jsg !< The start j-index of the global domain + integer, optional, intent(out) :: jeg !< The end j-index of the global domain + integer, optional, intent(out) :: idg_offset !< The offset between the corresponding global and + !! data i-index spaces. + integer, optional, intent(out) :: jdg_offset !< The offset between the corresponding global and + !! data j-index spaces. + logical, optional, intent(out) :: symmetric !< True if symmetric memory is used. + logical, optional, intent(in) :: local_indexing !< If true, local tracer array indices start at 1, + !! as in most MOM6 code. The default is true. + integer, optional, intent(in) :: index_offset !< A fixed additional offset to all indices. This + !! can be useful for some types of debugging with + !! dynamic memory allocation. The default is 0. + integer, optional, intent(in) :: coarsen !< A factor by which the grid is coarsened. + !! The default is 1, for no coarsening. + + ! Local variables + integer :: isg_, ieg_, jsg_, jeg_ + integer :: ind_off, idg_off, jdg_off, coarsen_lev + logical :: local + + local = .true. ; if (present(local_indexing)) local = local_indexing + ind_off = 0 ; if (present(index_offset)) ind_off = index_offset + + coarsen_lev = 1 ; if (present(coarsen)) coarsen_lev = coarsen + + if (coarsen_lev == 1) then + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + call mpp_get_global_domain(Domain%mpp_domain, isg_, ieg_, jsg_, jeg_) + elseif (coarsen_lev == 2) then + if (.not.associated(Domain%mpp_domain_d2)) call MOM_error(FATAL, & + "get_domain_extent called with coarsen=2, but Domain%mpp_domain_d2 is not associated.") + call mpp_get_compute_domain(Domain%mpp_domain_d2, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain_d2, isd, ied, jsd, jed) + call mpp_get_global_domain(Domain%mpp_domain_d2, isg_, ieg_, jsg_, jeg_) + else + call MOM_error(FATAL, "get_domain_extent called with an unsupported level of coarsening.") + endif + + if (local) then + ! This code institutes the MOM convention that local array indices start at 1. 
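+    ! Worked example (editorial): if mpp returns isd=-1, isc=3, iec=26, ied=30 on a PE, the
+    ! lines below give isd=1, isc=5, iec=28, ied=32 and idg_offset=-2, so that adding
+    ! idg_offset to a local i-index recovers the index in the original mpp numbering.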
+ idg_off = isd - 1 ; jdg_off = jsd - 1 + isc = isc - isd + 1 ; iec = iec - isd + 1 ; jsc = jsc - jsd + 1 ; jec = jec - jsd + 1 + ied = ied - isd + 1 ; jed = jed - jsd + 1 + isd = 1 ; jsd = 1 + else + idg_off = 0 ; jdg_off = 0 + endif + if (ind_off /= 0) then + idg_off = idg_off + ind_off ; jdg_off = jdg_off + ind_off + isc = isc + ind_off ; iec = iec + ind_off + jsc = jsc + ind_off ; jec = jec + ind_off + isd = isd + ind_off ; ied = ied + ind_off + jsd = jsd + ind_off ; jed = jed + ind_off + endif + if (present(isg)) isg = isg_ + if (present(ieg)) ieg = ieg_ + if (present(jsg)) jsg = jsg_ + if (present(jeg)) jeg = jeg_ + if (present(idg_offset)) idg_offset = idg_off + if (present(jdg_offset)) jdg_offset = jdg_off + if (present(symmetric)) symmetric = Domain%symmetric + +end subroutine get_domain_extent_MD + +!> Returns the index ranges that have been stored in a domain2D type +subroutine get_domain_extent_d2D(Domain, isc, iec, jsc, jec, isd, ied, jsd, jed) + type(domain2d), intent(in) :: Domain !< The MOM domain from which to extract information + integer, intent(out) :: isc !< The start i-index of the computational domain + integer, intent(out) :: iec !< The end i-index of the computational domain + integer, intent(out) :: jsc !< The start j-index of the computational domain + integer, intent(out) :: jec !< The end j-index of the computational domain + integer, optional, intent(out) :: isd !< The start i-index of the data domain + integer, optional, intent(out) :: ied !< The end i-index of the data domain + integer, optional, intent(out) :: jsd !< The start j-index of the data domain + integer, optional, intent(out) :: jed !< The end j-index of the data domain + + ! Local variables + integer :: isd_, ied_, jsd_, jed_, jsg_, jeg_, isg_, ieg_ + + call mpp_get_compute_domain(Domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain, isd_, ied_, jsd_, jed_) + + if (present(isd)) isd = isd_ + if (present(ied)) ied = ied_ + if (present(jsd)) jsd = jsd_ + if (present(jed)) jed = jed_ + +end subroutine get_domain_extent_d2D + +!> Return the (potentially symmetric) computational domain i-bounds for an array +!! passed without index specifications (i.e. indices start at 1) based on an array size. +subroutine get_simple_array_i_ind(domain, size, is, ie, symmetric) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, intent(in) :: size !< The i-array size + integer, intent(out) :: is !< The computational domain starting i-index. + integer, intent(out) :: ie !< The computational domain ending i-index. + logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes + !! can be considered. + ! Local variables + logical :: sym + character(len=120) :: mesg, mesg2 + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed + + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + + isc = isc-isd+1 ; iec = iec-isd+1 ; ied = ied-isd+1 ; isd = 1 + sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric + + if (size == ied) then ; is = isc ; ie = iec + elseif (size == 1+iec-isc) then ; is = 1 ; ie = size + elseif (sym .and. (size == 1+ied)) then ; is = isc ; ie = iec+1 + elseif (sym .and. (size == 2+iec-isc)) then ; is = 1 ; ie = size+1 + else + write(mesg,'("Unrecognized size ", i6, "in call to get_simple_array_i_ind. 
")') size + if (sym) then + write(mesg2,'("Valid sizes are : ", 4i7)') ied, 1+iec-isc, 1+ied, 2+iec-isc + else + write(mesg2,'("Valid sizes are : ", 2i7)') ied, 1+iec-isc + endif + call MOM_error(FATAL, trim(mesg)//trim(mesg2)) + endif + +end subroutine get_simple_array_i_ind + + +!> Return the (potentially symmetric) computational domain j-bounds for an array +!! passed without index specifications (i.e. indices start at 1) based on an array size. +subroutine get_simple_array_j_ind(domain, size, js, je, symmetric) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, intent(in) :: size !< The j-array size + integer, intent(out) :: js !< The computational domain starting j-index. + integer, intent(out) :: je !< The computational domain ending j-index. + logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes + !! can be considered. + ! Local variables + logical :: sym + character(len=120) :: mesg, mesg2 + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed + + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + + jsc = jsc-jsd+1 ; jec = jec-jsd+1 ; jed = jed-jsd+1 ; jsd = 1 + sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric + + if (size == jed) then ; js = jsc ; je = jec + elseif (size == 1+jec-jsc) then ; js = 1 ; je = size + elseif (sym .and. (size == 1+jed)) then ; js = jsc ; je = jec+1 + elseif (sym .and. (size == 2+jec-jsc)) then ; js = 1 ; je = size+1 + else + write(mesg,'("Unrecognized size ", i6, "in call to get_simple_array_j_ind.")') size + if (sym) then + write(mesg2,'("Valid sizes are : ", 4i7)') jed, 1+jec-jsc, 1+jed, 2+jec-jsc + else + write(mesg2,'("Valid sizes are : ", 2i7)') jed, 1+jec-jsc + endif + call MOM_error(FATAL, trim(mesg)//trim(mesg2)) + endif + +end subroutine get_simple_array_j_ind + +!> Invert the contents of a 1-d array +subroutine invert(array) + integer, dimension(:), intent(inout) :: array !< The 1-d array to invert + integer :: i, ni, swap + ni = size(array) + do i=1,ni/2 + swap = array(i) + array(i) = array(ni+1-i) + array(ni+1-i) = swap + enddo +end subroutine invert + +!> Returns the global shape of h-point arrays +subroutine get_global_shape(domain, niglobal, njglobal) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, intent(out) :: niglobal !< i-index global size of h-point arrays + integer, intent(out) :: njglobal !< j-index global size of h-point arrays + + niglobal = domain%niglobal + njglobal = domain%njglobal +end subroutine get_global_shape + +!> Get the array ranges in one dimension for the divisions of a global index space +subroutine compute_block_extent(isg, ieg, ndivs, ibegin, iend) + integer, intent(in) :: isg !< The starting index of the global index space + integer, intent(in) :: ieg !< The ending index of the global index space + integer, intent(in) :: ndivs !< The number of divisions + integer, dimension(:), intent(out) :: ibegin !< The starting index of each division + integer, dimension(:), intent(out) :: iend !< The ending index of each division + + call mpp_compute_block_extent(isg, ieg, ndivs, ibegin, iend) +end subroutine compute_block_extent + +!> Broadcast a 2-d domain from the root PE to the other PEs +subroutine broadcast_domain(domain) + type(domain2d), intent(inout) :: domain !< The domain2d type that will be shared across PEs.
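    ! This wrapper is a thin pass-through to mpp_broadcast_domain; it is typically used when only
    ! some of the PEs (for example, those that defined an ocean domain) hold the decomposition and
    ! the other PEs in the current pelist need a copy of that domain metadata.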
+ + call mpp_broadcast_domain(domain) +end subroutine broadcast_domain + +!> Broadcast an entire 2-d array from the root processor to all others. +subroutine global_field(domain, local, global) + type(domain2d), intent(inout) :: domain !< The domain2d type that describes the decomposition + real, dimension(:,:), intent(in) :: local !< The portion of the array on the local PE + real, dimension(:,:), intent(out) :: global !< The whole global array + + call mpp_global_field(domain, local, global) +end subroutine global_field + +!> same_domain returns true if two domains use the same list of PEs and layouts and have the same +!! size computational domains, and false if the domains do not conform with each other. +!! Different halo sizes or indexing conventions do not alter the results. +logical function same_domain(domain_a, domain_b) + type(domain2D), intent(in) :: domain_a !< The first domain in the comparison + type(domain2D), intent(in) :: domain_b !< The second domain in the comparison + + ! Local variables + integer :: isc_a, iec_a, jsc_a, jec_a, isc_b, iec_b, jsc_b, jec_b + integer :: layout_a(2), layout_b(2) + + ! This routine currently does a few checks for consistent domains; more could be added. + call mpp_get_layout(domain_a, layout_a) + call mpp_get_layout(domain_b, layout_b) + + call get_domain_extent(domain_a, isc_a, iec_a, jsc_a, jec_a) + call get_domain_extent(domain_b, isc_b, iec_b, jsc_b, jec_b) + + same_domain = (layout_a(1) == layout_b(1)) .and. (layout_a(2) == layout_b(2)) .and. & + (iec_a - isc_a == iec_b - isc_b) .and. (jec_a - jsc_a == jec_b - jsc_b) + +end function same_domain + +!> Returns arrays of the i- and j- sizes of the h-point computational domains for each +!! element of the grid layout. Any input values in the extent arrays are discarded, so +!! they are effectively intent out despite their declared intent of inout. +subroutine get_layout_extents(Domain, extent_i, extent_j) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, dimension(:), allocatable, intent(inout) :: extent_i !< The number of points in the + !! i-direction in each i-row of the layout + integer, dimension(:), allocatable, intent(inout) :: extent_j !< The number of points in the + !! j-direction in each j-row of the layout + + if (allocated(extent_i)) deallocate(extent_i) + if (allocated(extent_j)) deallocate(extent_j) + allocate(extent_i(domain%layout(1))) ; extent_i(:) = 0 + allocate(extent_j(domain%layout(2))) ; extent_j(:) = 0 + call mpp_get_domain_extents(domain%mpp_domain, extent_i, extent_j) +end subroutine get_layout_extents + +end module MOM_domain_infra diff --git a/config_src/infra/FMS2/MOM_ensemble_manager_infra.F90 b/config_src/infra/FMS2/MOM_ensemble_manager_infra.F90 new file mode 100644 index 0000000000..66bbb86e2f --- /dev/null +++ b/config_src/infra/FMS2/MOM_ensemble_manager_infra.F90 @@ -0,0 +1,95 @@ +!> A simple (very thin) wrapper for managing ensemble member layout information +module MOM_ensemble_manager_infra + +! This file is part of MOM6. See LICENSE.md for the license. 
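Every routine in this module is a one-line pass-through to FMS's ensemble_manager_mod, so a brief sketch of the calling pattern may be more useful than further commentary. The driver fragment below is hypothetical: it assumes FMS/MPP has already been initialized, that an ensemble namelist is available, and that the pelist is shaped (number of members, PEs per member).

    program ensemble_demo
      use MOM_ensemble_manager_infra, only : ensemble_manager_init, get_ensemble_id
      use MOM_ensemble_manager_infra, only : get_ensemble_size, get_ensemble_pelist
      implicit none
      integer :: ens_info(6)   ! (/ ensemble size, PEs per member, ocean, atmos, land, ice PEs /)
      integer :: id            ! The id of the ensemble member this PE belongs to
      integer, allocatable :: pelist(:,:)

      call ensemble_manager_init()
      ens_info = get_ensemble_size()
      id = get_ensemble_id()
      allocate(pelist(ens_info(1), ens_info(2)))  ! Assumed shape: one row per ensemble member
      call get_ensemble_pelist(pelist)
    end program ensemble_demo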
+ +use ensemble_manager_mod, only : FMS_ensemble_manager_init => ensemble_manager_init +use ensemble_manager_mod, only : FMS_ensemble_pelist_setup => ensemble_pelist_setup +use ensemble_manager_mod, only : FMS_get_ensemble_id => get_ensemble_id +use ensemble_manager_mod, only : FMS_get_ensemble_size => get_ensemble_size +use ensemble_manager_mod, only : FMS_get_ensemble_pelist => get_ensemble_pelist +use ensemble_manager_mod, only : FMS_get_ensemble_filter_pelist => get_ensemble_filter_pelist + +implicit none ; private + +public :: ensemble_manager_init, ensemble_pelist_setup +public :: get_ensemble_id, get_ensemble_size +public :: get_ensemble_pelist, get_ensemble_filter_pelist + +contains + +!> Initializes the ensemble manager which divides available resources +!! in order to concurrently execute an ensemble of model realizations. +subroutine ensemble_manager_init() + + call FMS_ensemble_manager_init() + +end subroutine ensemble_manager_init + +!> Create a list of processing elements (PEs) across components +!! associated with the current ensemble member. +subroutine ensemble_pelist_setup(concurrent, atmos_npes, ocean_npes, land_npes, ice_npes, & + Atm_pelist, Ocean_pelist, Land_pelist, Ice_pelist) + logical, intent(in) :: concurrent !< A logical flag, if True, then ocean fast + !! PEs are run concurrently with + !! slow PEs within the coupler. + integer, intent(in) :: atmos_npes !< The number of atmospheric (fast) PEs + integer, intent(in) :: ocean_npes !< The number of ocean (slow) PEs + integer, intent(in) :: land_npes !< The number of land PEs (fast) + integer, intent(in) :: ice_npes !< The number of ice (fast) PEs + integer, dimension(:), intent(inout) :: Atm_pelist !< A list of Atm PEs + integer, dimension(:), intent(inout) :: Ocean_pelist !< A list of Ocean PEs + integer, dimension(:), intent(inout) :: Land_pelist !< A list of Land PEs + integer, dimension(:), intent(inout) :: Ice_pelist !< A list of Ice PEs + + + call FMS_ensemble_pelist_setup(concurrent, atmos_npes, ocean_npes, land_npes, ice_npes, & + Atm_pelist, Ocean_pelist, Land_pelist, Ice_pelist) + +end subroutine ensemble_pelist_setup + +!> Returns the numeric id for the current ensemble member +function get_ensemble_id() + integer :: get_ensemble_id + + get_ensemble_id = FMS_get_ensemble_id() + +end function get_ensemble_id + +!> Returns ensemble information as follows, +!! index (1) :: ensemble size +!! index (2) :: Number of PEs per ensemble member +!! index (3) :: Number of ocean PEs per ensemble member +!! index (4) :: Number of atmos PEs per ensemble member +!! index (5) :: Number of land PEs per ensemble member +!! index (6) :: Number of ice PEs per ensemble member +function get_ensemble_size() + integer, dimension(6) :: get_ensemble_size + + get_ensemble_size = FMS_get_ensemble_size() + +end function get_ensemble_size + +!> Returns the list of PEs associated with all ensemble members +!! Results are stored in the argument array which must be large +!! enough to contain the list. If the optional name argument is present, +!! the returned processor list are for a particular component (atmos, ocean ,land, ice) +subroutine get_ensemble_pelist(pelist, name) + integer, intent(inout) :: pelist(:,:) !< A processor list for all ensemble members + character(len=*), optional, intent(in) :: name !< An optional component name (atmos, ocean, land, ice) + + call FMS_get_ensemble_pelist(pelist, name) + +end subroutine get_ensemble_pelist + +!> Returns the list of PEs associated with the named ensemble filter application. +!! 
Valid component names include ('atmos', 'ocean', 'land', and 'ice') +subroutine get_ensemble_filter_pelist(pelist, name) + integer, intent(inout) :: pelist(:) !< A processor list for the ensemble filter + character(len=*), intent(in) :: name !< The component name (atmos, ocean, land, ice) + + call FMS_get_Ensemble_filter_pelist(pelist, name) + +end subroutine get_ensemble_filter_pelist + +end module MOM_ensemble_manager_infra diff --git a/config_src/infra/FMS2/MOM_error_infra.F90 b/config_src/infra/FMS2/MOM_error_infra.F90 new file mode 100644 index 0000000000..e5a8b8dc68 --- /dev/null +++ b/config_src/infra/FMS2/MOM_error_infra.F90 @@ -0,0 +1,42 @@ +!> Routines for error handling and I/O management +module MOM_error_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use mpp_mod, only : mpp_error, mpp_pe, mpp_root_pe, mpp_stdlog=>stdlog, mpp_stdout=>stdout +use mpp_mod, only : NOTE, WARNING, FATAL + +implicit none ; private + +public :: MOM_err, is_root_pe, stdlog, stdout +!> Integer parameters encoding the severity of an error message +public :: NOTE, WARNING, FATAL + +contains + +!> MOM_err writes an error message, and may cause the run to stop depending on the +!! severity of the error. +subroutine MOM_err(severity, message) + integer, intent(in) :: severity !< The severity level of this error + character(len=*), intent(in) :: message !< A message to write out + + call mpp_error(severity, message) +end subroutine MOM_err + +!> stdout returns the standard Fortran unit number for output +integer function stdout() + stdout = mpp_stdout() +end function stdout + +!> stdlog returns the standard Fortran unit number to use to log messages +integer function stdlog() + stdlog = mpp_stdlog() +end function stdlog + +!> is_root_pe returns .true. if the current PE is the root PE. +logical function is_root_pe() + is_root_pe = .false. + if (mpp_pe() == mpp_root_pe()) is_root_pe = .true. +end function is_root_pe + +end module MOM_error_infra diff --git a/config_src/infra/FMS2/MOM_interp_infra.F90 b/config_src/infra/FMS2/MOM_interp_infra.F90 new file mode 100644 index 0000000000..b02beca313 --- /dev/null +++ b/config_src/infra/FMS2/MOM_interp_infra.F90 @@ -0,0 +1,269 @@ +!> This module wraps the FMS temporal and spatial interpolation routines +module MOM_interp_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use MOM_domain_infra, only : MOM_domain_type, domain2d +use MOM_time_manager, only : time_type +use horiz_interp_mod, only : horiz_interp_new, horiz_interp, horiz_interp_init, horiz_interp_type +use mpp_io_mod, only : axistype, mpp_get_axis_data +use time_interp_external_mod, only : time_interp_external +use time_interp_external_mod, only : init_external_field, time_interp_external_init +use time_interp_external_mod, only : get_external_field_size +use time_interp_external_mod, only : get_external_field_axes, get_external_field_missing + +implicit none ; private + +public :: horiz_interp_type, horiz_interp_init +public :: time_interp_extern, init_extern_field, time_interp_external_init +public :: get_external_field_info, axistype, get_axis_data +public :: run_horiz_interp, build_horiz_interp_weights + +!> Read a field based on model time, and rotate to the model domain. 
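The time_interp_extern interface declared next gathers the scalar, 2-d and 3-d variants. As a minimal sketch of the intended calling pattern (the file name, variable name, grid container G and model time Time are placeholders, and a valid time axis in the input file is assumed):

    integer :: sst_id
    real, dimension(:,:), allocatable :: sst

    call time_interp_external_init()
    sst_id = init_extern_field("INPUT/sst_monthly.nc", "SST", MOM_domain=G%Domain)
    allocate(sst(G%isd:G%ied, G%jsd:G%jed)) ; sst(:,:) = 0.0
    call time_interp_extern(sst_id, Time, sst)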
+interface time_interp_extern + module procedure time_interp_extern_0d + module procedure time_interp_extern_2d + module procedure time_interp_extern_3d +end interface time_interp_extern + +!> perform horizontal interpolation of field +interface run_horiz_interp + module procedure horiz_interp_from_weights_field2d + module procedure horiz_interp_from_weights_field3d +end interface + +!> build weights for horizontal interpolation of field +interface build_horiz_interp_weights + module procedure build_horiz_interp_weights_2d_to_2d +end interface build_horiz_interp_weights + +contains + +!> perform horizontal interpolation of a 2d field using pre-computed weights +!! source and destination coordinates are 2d +subroutine horiz_interp_from_weights_field2d(Interp, data_in, data_out, verbose, mask_in, mask_out, & + missing_value, missing_permit, err_msg) + + type(horiz_interp_type), intent(in) :: Interp !< type containing interpolation options and weights + real, dimension(:,:), intent(in) :: data_in !< input data + real, dimension(:,:), intent(out) :: data_out !< output data + integer, optional, intent(in) :: verbose !< verbosity level + real, dimension(:,:), optional, intent(in) :: mask_in !< mask for input data + real, dimension(:,:), optional, intent(out) :: mask_out !< mask for output data + real, optional, intent(in) :: missing_value !< A value indicating missing data + integer, optional, intent(in) :: missing_permit !< number of allowed points with + !! missing value for interpolation (0-3) + character(len=*), optional, intent(out) :: err_msg !< error message + + call horiz_interp(Interp, data_in, data_out, verbose, & + mask_in, mask_out, missing_value, missing_permit, & + err_msg, new_missing_handle=.true. ) + +end subroutine horiz_interp_from_weights_field2d + + +!> perform horizontal interpolation of a 3d field using pre-computed weights +!! source and destination coordinates are 2d +subroutine horiz_interp_from_weights_field3d(Interp, data_in, data_out, verbose, mask_in, mask_out, & + missing_value, missing_permit, err_msg) + + type(horiz_interp_type), intent(in) :: Interp !< type containing interpolation options and weights + real, dimension(:,:,:), intent(in) :: data_in !< input data + real, dimension(:,:,:), intent(out) :: data_out !< output data + integer, optional, intent(in) :: verbose !< verbosity level + real, dimension(:,:,:), optional, intent(in) :: mask_in !< mask for input data + real, dimension(:,:,:), optional, intent(out) :: mask_out !< mask for output data + real, optional, intent(in) :: missing_value !< A value indicating missing data + integer, optional, intent(in) :: missing_permit !< number of allowed points with + !! missing value for interpolation (0-3) + character(len=*), optional, intent(out) :: err_msg !< error message + + call horiz_interp(Interp, data_in, data_out, verbose, mask_in, mask_out, & + missing_value, missing_permit, err_msg) + +end subroutine horiz_interp_from_weights_field3d + + +!> build horizontal interpolation weights from source grid defined by 2d lon/lat to destination grid +!! 
defined by 2d lon/lat +subroutine build_horiz_interp_weights_2d_to_2d(Interp, lon_in, lat_in, lon_out, lat_out, & + verbose, interp_method, num_nbrs, max_dist, & + src_modulo, mask_in, mask_out, & + is_latlon_in, is_latlon_out) + + type(horiz_interp_type), intent(inout) :: Interp !< type containing interpolation options and weights + real, dimension(:,:), intent(in) :: lon_in !< input longitude 2d + real, dimension(:,:), intent(in) :: lat_in !< input latitude 2d + real, dimension(:,:), intent(in) :: lon_out !< output longitude 2d + real, dimension(:,:), intent(in) :: lat_out !< output latitude 2d + integer, optional, intent(in) :: verbose !< verbosity level + character(len=*), optional, intent(in) :: interp_method !< interpolation method + integer, optional, intent(in) :: num_nbrs !< number of nearest neighbors + real, optional, intent(in) :: max_dist !< maximum region of influence + logical, optional, intent(in) :: src_modulo !< periodicity of E-W boundary + real, dimension(:,:), optional, intent(in) :: mask_in !< mask for input data + real, dimension(:,:), optional, intent(inout) :: mask_out !< mask for output data + logical, optional, intent(in) :: is_latlon_in !< input grid is regular lat/lon grid + logical, optional, intent(in) :: is_latlon_out !< output grid is regular lat/lon grid + + call horiz_interp_new(Interp, lon_in, lat_in, lon_out, lat_out, & + verbose, interp_method, num_nbrs, max_dist, & + src_modulo, mask_in, mask_out, & + is_latlon_in, is_latlon_out) + +end subroutine build_horiz_interp_weights_2d_to_2d + + +!> Extracts and returns the axis data stored in an axistype. +subroutine get_axis_data( axis, dat ) + type(axistype), intent(in) :: axis !< An axis type + real, dimension(:), intent(out) :: dat !< The data in the axis variable + + call mpp_get_axis_data( axis, dat ) +end subroutine get_axis_data + + +!> get size of an external field from field index +function get_extern_field_size(index) + + integer, intent(in) :: index !< field index + integer :: get_extern_field_size(4) !< field size + + get_extern_field_size = get_external_field_size(index) + +end function get_extern_field_size + + +!> get axes of an external field from field index +function get_extern_field_axes(index) + + integer, intent(in) :: index !< field index + type(axistype), dimension(4) :: get_extern_field_axes !< field axes + + get_extern_field_axes = get_external_field_axes(index) + +end function get_extern_field_axes + + +!> get missing value of an external field from field index +function get_extern_field_missing(index) + + integer, intent(in) :: index !< field index + real :: get_extern_field_missing !< field missing value + + get_extern_field_missing = get_external_field_missing(index) + +end function get_extern_field_missing + + +!> Get information about the external fields. +subroutine get_external_field_info(field_id, size, axes, missing) + integer, intent(in) :: field_id !< The integer index of the external + !! field returned from a previous + !! 
call to init_external_field() + integer, dimension(4), optional, intent(inout) :: size !< Dimension sizes for the input data + type(axistype), dimension(4), optional, intent(inout) :: axes !< Axis types for the input data + real, optional, intent(inout) :: missing !< Missing value for the input data + + if (present(size)) then + size(1:4) = get_extern_field_size(field_id) + endif + + if (present(axes)) then + axes(1:4) = get_extern_field_axes(field_id) + endif + + if (present(missing)) then + missing = get_extern_field_missing(field_id) + endif + +end subroutine get_external_field_info + + +!> Read a scalar field based on model time. +subroutine time_interp_extern_0d(field_id, time, data_in, verbose) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, intent(inout) :: data_in !< The interpolated value + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + + call time_interp_external(field_id, time, data_in, verbose=verbose) +end subroutine time_interp_extern_0d + + +!> Read a 2d field from an external based on model time, potentially including horizontal +!! interpolation and rotation of the data +subroutine time_interp_extern_2d(field_id, time, data_in, interp, verbose, horz_interp, mask_out) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, dimension(:,:), intent(inout) :: data_in !< The array in which to store the interpolated values + integer, optional, intent(in) :: interp !< A flag indicating the temporal interpolation method + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(horiz_interp_type), & + optional, intent(in) :: horz_interp !< A structure to control horizontal interpolation + logical, dimension(:,:), & + optional, intent(out) :: mask_out !< An array that is true where there is valid data + + call time_interp_external(field_id, time, data_in, interp=interp, verbose=verbose, & + horz_interp=horz_interp, mask_out=mask_out) +end subroutine time_interp_extern_2d + + +!> Read a 3d field based on model time, and rotate to the model grid +subroutine time_interp_extern_3d(field_id, time, data_in, interp, verbose, horz_interp, mask_out) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! 
from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, dimension(:,:,:), intent(inout) :: data_in !< The array in which to store the interpolated values + integer, optional, intent(in) :: interp !< A flag indicating the temporal interpolation method + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(horiz_interp_type), & + optional, intent(in) :: horz_interp !< A structure to control horizontal interpolation + logical, dimension(:,:,:), & + optional, intent(out) :: mask_out !< An array that is true where there is valid data + + call time_interp_external(field_id, time, data_in, interp=interp, verbose=verbose, & + horz_interp=horz_interp, mask_out=mask_out) +end subroutine time_interp_extern_3d + + +!> initialize an external field +integer function init_extern_field(file, fieldname, MOM_domain, domain, verbose, & + threading, ierr, ignore_axis_atts, correct_leap_year_inconsistency ) + + character(len=*), intent(in) :: file !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The name of the field in the file + integer, optional, intent(in) :: threading !< A flag specifying whether the root PE reads + !! the data and broadcasts it (SINGLE_FILE) or all + !! processors read (MULTIPLE, the default). + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(domain2d), optional, intent(in) :: domain !< A domain2d type that describes the decomposition + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + integer, optional, intent(out) :: ierr !< Returns a non-zero error code in case of failure + logical, optional, intent(in) :: ignore_axis_atts !< If present and true, do not issue a + !! fatal error if the axis Cartesian attribute is + !! not set to a recognized value. + logical, optional, intent(in) :: correct_leap_year_inconsistency !< If present and true, + !! then if, (1) a calendar containing leap years + !! is in use, and (2) the modulo time period of the + !! data is an integer number of years, then map + !! a model date of Feb 29. onto a common year on Feb. 28. + + + + if (present(MOM_Domain)) then + init_extern_field = init_external_field(file, fieldname, domain=MOM_domain%mpp_domain, & + verbose=verbose, threading=threading, ierr=ierr, ignore_axis_atts=ignore_axis_atts, & + correct_leap_year_inconsistency=correct_leap_year_inconsistency) + else + init_extern_field = init_external_field(file, fieldname, domain=domain, & + verbose=verbose, threading=threading, ierr=ierr, ignore_axis_atts=ignore_axis_atts, & + correct_leap_year_inconsistency=correct_leap_year_inconsistency) + endif + +end function init_extern_field + +end module MOM_interp_infra diff --git a/config_src/infra/FMS2/MOM_io_infra.F90 b/config_src/infra/FMS2/MOM_io_infra.F90 new file mode 100644 index 0000000000..62a43ab99b --- /dev/null +++ b/config_src/infra/FMS2/MOM_io_infra.F90 @@ -0,0 +1,1954 @@ +!> This module contains a thin inteface to mpp and fms I/O code +module MOM_io_infra + +! This file is part of MOM6. See LICENSE.md for the license. 
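Most MOM6 code reaches this module through file_exists and the read_field interface defined below. A minimal sketch of that read path follows; the file and variable names, the grid container G and the vertical size nz are placeholders, and it is assumed that the 3-d variant of read_field accepts the same optional MOM_Domain and timelevel arguments as the 1-d variant shown later in this module.

    real, dimension(:,:,:), allocatable :: temp

    if (file_exists("INPUT/MOM_IC.nc", G%Domain)) then
      allocate(temp(G%isd:G%ied, G%jsd:G%jed, nz)) ; temp(:,:,:) = 0.0
      call read_field("INPUT/MOM_IC.nc", "Temp", temp, MOM_Domain=G%Domain, timelevel=1)
    endif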
+ +use MOM_domain_infra, only : MOM_domain_type, rescale_comp_data, AGRID, BGRID_NE, CGRID_NE +use MOM_domain_infra, only : domain2d, domain1d, CENTER, CORNER, NORTH_FACE, EAST_FACE +use MOM_error_infra, only : MOM_error=>MOM_err, NOTE, FATAL, WARNING, is_root_PE +use MOM_string_functions, only : lowercase + +use fms2_io_mod, only : fms2_open_file => open_file, check_if_open, fms2_close_file => close_file +use fms2_io_mod, only : FmsNetcdfDomainFile_t, FmsNetcdfFile_t, fms2_read_data => read_data +use fms2_io_mod, only : get_unlimited_dimension_name, get_num_dimensions, get_num_variables +use fms2_io_mod, only : get_variable_names, variable_exists, get_variable_size, get_variable_units +use fms2_io_mod, only : register_field, write_data, register_variable_attribute, register_global_attribute +use fms2_io_mod, only : variable_att_exists, get_variable_attribute, get_variable_num_dimensions +use fms2_io_mod, only : get_variable_dimension_names, is_dimension_registered, get_dimension_size +use fms2_io_mod, only : is_dimension_unlimited, register_axis, unlimited +use fms2_io_mod, only : get_global_io_domain_indices +use fms_io_utils_mod, only : fms2_file_exist => file_exists + +use fms_mod, only : write_version_number, open_namelist_file, check_nml_error +use fms_io_mod, only : file_exist, field_exist, field_size, read_data +use fms_io_mod, only : fms_io_exit, get_filename_appendix +use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_global_domain +use mpp_io_mod, only : mpp_open, mpp_close, mpp_flush +use mpp_io_mod, only : mpp_write_meta, mpp_write +use mpp_io_mod, only : mpp_get_atts, mpp_attribute_exist +use mpp_io_mod, only : mpp_get_axes, mpp_axistype=>axistype, mpp_get_axis_data +use mpp_io_mod, only : mpp_get_fields, mpp_fieldtype=>fieldtype +use mpp_io_mod, only : mpp_get_info, mpp_get_times +use mpp_io_mod, only : mpp_io_init +use mpp_mod, only : stdout_if_root=>stdout +! These are encoding constants. +use mpp_io_mod, only : APPEND_FILE=>MPP_APPEND, WRITEONLY_FILE=>MPP_WRONLY +use mpp_io_mod, only : OVERWRITE_FILE=>MPP_OVERWR, READONLY_FILE=>MPP_RDONLY +use mpp_io_mod, only : NETCDF_FILE=>MPP_NETCDF, ASCII_FILE=>MPP_ASCII +use mpp_io_mod, only : MULTIPLE=>MPP_MULTI, SINGLE_FILE=>MPP_SINGLE +use iso_fortran_env, only : int64 + +implicit none ; private + +! These interfaces are actually implemented or have explicit interfaces in this file. +public :: open_file, open_ASCII_file, file_is_open, close_file, flush_file, file_exists +public :: get_file_info, get_file_fields, get_file_times, get_filename_suffix +public :: read_field, read_vector, write_metadata, write_field +public :: field_exists, get_field_atts, get_field_size, get_axis_data, read_field_chksum +public :: io_infra_init, io_infra_end, MOM_namelist_file, check_namelist_error, write_version +public :: stdout_if_root +! These types act as containers for information about files, fields and axes, respectively, +! and may also wrap opaque types from the underlying infrastructure. +public :: file_type, fieldtype, axistype +! These are encoding constant parmeters. +public :: ASCII_FILE, NETCDF_FILE, SINGLE_FILE, MULTIPLE +public :: APPEND_FILE, READONLY_FILE, OVERWRITE_FILE, WRITEONLY_FILE +public :: CENTER, CORNER, NORTH_FACE, EAST_FACE + +!> Indicate whether a file exists, perhaps with domain decomposition +interface file_exists + module procedure FMS_file_exists + module procedure MOM_file_exists +end interface + +!> Open a file (or fileset) for parallel or single-file I/O. 
+interface open_file + module procedure open_file_type, open_file_unit +end interface open_file + +!> Read a data field from a file +interface read_field + module procedure read_field_4d + module procedure read_field_3d + module procedure read_field_2d, read_field_2d_region + module procedure read_field_1d, read_field_1d_int + module procedure read_field_0d, read_field_0d_int +end interface + +!> Write a registered field to an output file +interface write_field + module procedure write_field_4d + module procedure write_field_3d + module procedure write_field_2d + module procedure write_field_1d + module procedure write_field_0d + module procedure MOM_write_axis +end interface write_field + +!> Read a pair of data fields representing the two components of a vector from a file +interface read_vector + module procedure read_vector_3d + module procedure read_vector_2d +end interface read_vector + +!> Write metadata about a variable or axis to a file and store it for later reuse +interface write_metadata + module procedure write_metadata_axis, write_metadata_field, write_metadata_global +end interface write_metadata + +!> Close a file (or fileset). If the file handle does not point to an open file, +!! close_file simply returns without doing anything. +interface close_file + module procedure close_file_type, close_file_unit +end interface close_file + +!> Ensure that the output stream associated with a file handle is fully sent to disk +interface flush_file + module procedure flush_file_type, flush_file_unit +end interface flush_file + +!> Type for holding a handle to an open file and related information +type :: file_type ; private + integer :: unit = -1 !< The framework identfier or netCDF unit number of an output file + type(FmsNetcdfDomainFile_t), pointer :: fileobj => NULL() !< A domain-decomposed + !! file object that is open for writing + character(len=:), allocatable :: filename !< The path to this file, if it is open + logical :: open_to_read = .false. !< If true, this file or fileset can be read + logical :: open_to_write = .false. !< If true, this file or fileset can be written to + integer :: num_times !< The number of time levels in this file + real :: file_time !< The time of the latest entry in the file. + logical :: FMS2_file !< If true, this file-type is to be used with FMS2 interfaces. +end type file_type + +!> This type is a container for information about a variable in a file. +type :: fieldtype ; private + character(len=256) :: name !< The name of this field in the files. + type(mpp_fieldtype) :: FT !< The FMS1 field-type that this type wraps + character(len=:), allocatable :: longname !< The long name for this field + character(len=:), allocatable :: units !< The units for this field + integer(kind=int64) :: chksum_read !< A checksum that has been read from a file + logical :: valid_chksum !< If true, this field has a valid checksum value. + logical :: FMS2_field !< If true, this field-type should be used with FMS2 interfaces. +end type fieldtype + +!> This type is a container for information about an axis in a file. +type :: axistype ; private + character(len=256) :: name !< The name of this axis in the files. + type(mpp_axistype) :: AT !< The FMS1 axis-type that this type wraps + real, allocatable, dimension(:) :: ax_data !< The values of the data on the axis. + logical :: domain_decomposed = .false. !< True if axis is domain-decomposed +end type axistype + +!> For now, these module-variables are hard-coded to exercise the new FMS2 interfaces. 
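! Setting either of the following flags to .false. would route the corresponding reads or writes
! back through the older mpp_io/fms_io code paths that are retained in the branches below.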
+logical :: FMS2_reads = .true. +logical :: FMS2_writes = .true. + +contains + +!> Reads the checksum value for a field that was recorded in a file, along with a flag indicating +!! whether the file contained a valid checksum for this field. +subroutine read_field_chksum(field, chksum, valid_chksum) + type(fieldtype), intent(in) :: field !< The field whose checksum attribute is to be read. + integer(kind=int64), intent(out) :: chksum !< The checksum for the field. + logical, intent(out) :: valid_chksum !< If true, chksum has been successfully read. + + chksum = -1 + valid_chksum = field%valid_chksum + if (valid_chksum) chksum = field%chksum_read + +end subroutine read_field_chksum + +!> Returns true if the named file or its domain-decomposed variant exists. +logical function MOM_file_exists(filename, MOM_Domain) + character(len=*), intent(in) :: filename !< The name of the file being inquired about + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + +! This function uses the fms_io function file_exist to determine whether +! a named file (or its decomposed variant) exists. + + MOM_file_exists = file_exist(filename, MOM_Domain%mpp_domain) + +end function MOM_file_exists + +!> Returns true if the named file or its domain-decomposed variant exists. +logical function FMS_file_exists(filename) + character(len=*), intent(in) :: filename !< The name of the file being inquired about + ! This function uses the fms_io function file_exist to determine whether + ! a named file (or its decomposed variant) exists. + + FMS_file_exists = fms2_file_exist(filename) +end function FMS_file_exists + +!> indicates whether an I/O handle is attached to an open file +logical function file_is_open(IO_handle) + type(file_type), intent(in) :: IO_handle !< Handle to a file to inquire about + + file_is_open = ((IO_handle%unit >= 0) .or. associated(IO_handle%fileobj)) +end function file_is_open + +!> closes a file (or fileset). If the file handle does not point to an open file, +!! close_file_type simply returns without doing anything. +subroutine close_file_type(IO_handle) + type(file_type), intent(inout) :: IO_handle !< The I/O handle for the file to be closed + + if (associated(IO_handle%fileobj)) then + call fms2_close_file(IO_handle%fileobj) + deallocate(IO_handle%fileobj) + else + call mpp_close(IO_handle%unit) + endif + if (allocated(IO_handle%filename)) deallocate(IO_handle%filename) + IO_handle%open_to_read = .false. ; IO_handle%open_to_write = .false. + IO_handle%num_times = 0 ; IO_handle%file_time = 0.0 + IO_handle%FMS2_file = .false. +end subroutine close_file_type + +!> closes a file. If the unit does not point to an open file, +!! close_file_unit simply returns without doing anything. +subroutine close_file_unit(unit) + integer, intent(inout) :: unit !< The I/O unit for the file to be closed + + call mpp_close(unit) +end subroutine close_file_unit + +!> Ensure that the output stream associated with a file handle is fully sent to disk. +subroutine flush_file_type(IO_handle) + type(file_type), intent(in) :: IO_handle !< The I/O handle for the file to flush + + if (associated(IO_handle%fileobj)) then + ! There does not appear to be an fms2 flush call. + else + call mpp_flush(IO_handle%unit) + endif +end subroutine flush_file_type + +!> Ensure that the output stream associated with a unit is fully sent to disk. 
+subroutine flush_file_unit(unit) + integer, intent(in) :: unit !< The I/O unit for the file to flush + + call mpp_flush(unit) +end subroutine flush_file_unit + +!> Initialize the underlying I/O infrastructure +subroutine io_infra_init(maxunits) + integer, optional, intent(in) :: maxunits !< An optional maximum number of file + !! unit numbers that can be used. + call mpp_io_init(maxunit=maxunits) +end subroutine io_infra_init + +!> Gracefully close out and terminate the underlying I/O infrastructure +subroutine io_infra_end() + call fms_io_exit() +end subroutine io_infra_end + +!> Open a single namelist file that is potentially readable by all PEs. +function MOM_namelist_file(file) result(unit) + character(len=*), optional, intent(in) :: file !< The file to open, by default "input.nml". + integer :: unit !< The opened unit number of the namelist file + unit = open_namelist_file(file) +end function MOM_namelist_file + +!> Checks the iostat argument that is returned after reading a namelist variable and writes a +!! message if there is an error. +subroutine check_namelist_error(IOstat, nml_name) + integer, intent(in) :: IOstat !< An I/O status field from a namelist read call + character(len=*), intent(in) :: nml_name !< The name of the namelist + integer :: ierr + ierr = check_nml_error(IOstat, nml_name) +end subroutine check_namelist_error + +!> Write a file version number to the log file or other output file +subroutine write_version(version, tag, unit) + character(len=*), intent(in) :: version !< A string that contains the routine name and version + character(len=*), optional, intent(in) :: tag !< A tag name to add to the message + integer, optional, intent(in) :: unit !< An alternate unit number for output + + call write_version_number(version, tag, unit) +end subroutine write_version + +!> open_file opens a file for parallel or single-file I/O. +subroutine open_file_unit(unit, filename, action, form, threading, fileset, nohdrs, domain, MOM_domain) + integer, intent(out) :: unit !< The I/O unit for the opened file + character(len=*), intent(in) :: filename !< The name of the file being opened + integer, optional, intent(in) :: action !< A flag indicating whether the file can be read + !! or written to and how to handle existing files. + integer, optional, intent(in) :: form !< A flag indicating the format of a new file. The + !! default is ASCII_FILE, but NETCDF_FILE is also common. + integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE) + !! or multiple PEs (MULTIPLE) participate in I/O. + !! With the default, the root PE does I/O. + integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due + !! to threading=MULTIPLE write to the same file (SINGLE_FILE) + !! or to one file per PE (MULTIPLE, the default). + logical, optional, intent(in) :: nohdrs !< If nohdrs is .TRUE., headers are not written to + !! ASCII files. The default is .false. 
+ type(domain2d), optional, intent(in) :: domain !< A domain2d type that describes the decomposition + type(MOM_domain_type), optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + + if (present(MOM_Domain)) then + call mpp_open(unit, filename, action=action, form=form, threading=threading, fileset=fileset, & + nohdrs=nohdrs, domain=MOM_Domain%mpp_domain) + else + call mpp_open(unit, filename, action=action, form=form, threading=threading, fileset=fileset, & + nohdrs=nohdrs, domain=domain) + endif +end subroutine open_file_unit + +!> open_file opens a file for parallel or single-file I/O. +subroutine open_file_type(IO_handle, filename, action, MOM_domain, threading, fileset) + type(file_type), intent(inout) :: IO_handle !< The handle for the opened file + character(len=*), intent(in) :: filename !< The path name of the file being opened + integer, optional, intent(in) :: action !< A flag indicating whether the file can be read + !! or written to and how to handle existing files. + !! The default is WRITEONLY_FILE. + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE) + !! or multiple PEs (MULTIPLE) participate in I/O. + !! With the default, the root PE does I/O. + integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due + !! to threading=MULTIPLE write to the same file (SINGLE_FILE) + !! or to one file per PE (MULTIPLE, the default). + + ! Local variables + type(FmsNetcdfDomainFile_t) :: fileobj_read ! A handle to a domain-decomposed file for obtaining information + ! about the existing time axis entries in append mode. + logical :: success ! If true, the file was opened successfully + integer :: file_mode ! An integer that encodes whether the file is to be opened for + ! reading, writing or appending + character(len=40) :: mode ! A character string that encodes whether the file is to be opened for + ! reading, writing or appending + character(len=:), allocatable :: filename_tmp ! A copy of filename with .nc appended if necessary. + character(len=256) :: dim_unlim_name ! name of the unlimited dimension in the file + integer :: index_nc + + if (IO_handle%open_to_write) then + call MOM_error(WARNING, "open_file_type called for file "//trim(filename)//& + " with an IO_handle that is already open to write.") + return + endif + if (IO_handle%open_to_read) then + call MOM_error(FATAL, "open_file_type called for file "//trim(filename)//& + " with an IO_handle that is already open to read.") + endif + + file_mode = WRITEONLY_FILE ; if (present(action)) file_mode = action + + if (FMS2_writes .and. present(MOM_Domain)) then + if (.not.associated(IO_handle%fileobj)) allocate (IO_handle%fileobj) + + ! The FMS1 interface automatically appends .nc if necessary, but the FMS2 interface does not.
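    ! For example, "ocean_geometry" is opened as "ocean_geometry.nc" (with a warning), while a name
    ! that already contains ".nc", such as "MOM.res.nc", is left unchanged.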
+ index_nc = index(trim(filename), ".nc") + if (index_nc > 0) then + filename_tmp = trim(filename) + else + filename_tmp = trim(filename)//".nc" + if (is_root_PE()) call MOM_error(WARNING, "Open_file is appending .nc to the filename "//trim(filename)) + endif + + if (file_mode == WRITEONLY_FILE) then ; mode = "write" + elseif (file_mode == APPEND_FILE) then ; mode = "append" + elseif (file_mode == OVERWRITE_FILE) then ; mode = "overwrite" + elseif (file_mode == READONLY_FILE) then ; mode = "read" + else + call MOM_error(FATAL, "open_file_type called with unrecognized action.") + endif + + IO_handle%num_times = 0 + IO_handle%file_time = 0.0 + if ((file_mode == APPEND_FILE) .and. file_exists(filename_tmp, MOM_Domain)) then + ! Determine the latest file time and number of records so far. + success = fms2_open_file(fileObj_read, trim(filename_tmp), "read", MOM_domain%mpp_domain) + call get_unlimited_dimension_name(fileObj_read, dim_unlim_name) + if (len_trim(dim_unlim_name) > 0) & + call get_dimension_size(fileObj_read, trim(dim_unlim_name), IO_handle%num_times) + if (IO_handle%num_times > 0) & + call fms2_read_data(fileObj_read, trim(dim_unlim_name), IO_handle%file_time, & + unlim_dim_level=IO_handle%num_times) + call fms2_close_file(fileObj_read) + endif + + success = fms2_open_file(IO_handle%fileobj, trim(filename_tmp), trim(mode), MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Unable to open file "//trim(filename_tmp)) + IO_handle%FMS2_file = .true. + elseif (present(MOM_Domain)) then + call mpp_open(IO_handle%unit, filename, action=file_mode, form=NETCDF_FILE, threading=threading, & + fileset=fileset, domain=MOM_Domain%mpp_domain) + IO_handle%FMS2_file = .false. + else + call mpp_open(IO_handle%unit, filename, action=file_mode, form=NETCDF_FILE, threading=threading, & + fileset=fileset) + IO_handle%FMS2_file = .false. + endif + IO_handle%filename = trim(filename) + + if (file_mode == READONLY_FILE) then + IO_handle%open_to_read = .true. ; IO_handle%open_to_write = .false. + else + IO_handle%open_to_read = .false. ; IO_handle%open_to_write = .true. + endif + +end subroutine open_file_type + +!> open_file opens an ascii file for parallel or single-file I/O using Fortran read and write calls. +subroutine open_ASCII_file(unit, file, action, threading, fileset) + integer, intent(out) :: unit !< The I/O unit for the opened file + character(len=*), intent(in) :: file !< The name of the file being opened + integer, optional, intent(in) :: action !< A flag indicating whether the file can be read + !! or written to and how to handle existing files. + integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE) + !! or multiple PEs (MULTIPLE) participate in I/O. + !! With the default, the root PE does I/O. + integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due + !! to threading=MULTIPLE write to the same file (SINGLE_FILE) + !! or to one file per PE (MULTIPLE, the default). + + call mpp_open(unit, file, action=action, form=ASCII_FILE, threading=threading, fileset=fileset, & + nohdrs=.true.) + +end subroutine open_ASCII_file + + +!> Provide a string to append to filenames, to differentiate ensemble members, for example. +subroutine get_filename_suffix(suffix) + character(len=*), intent(out) :: suffix !< A string to append to filenames + + call get_filename_appendix(suffix) +end subroutine get_filename_suffix + + +!> Get information about the number of dimensions, variables and time levels +!! 
in the file associated with an open file unit +subroutine get_file_info(IO_handle, ndim, nvar, ntime) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O + integer, optional, intent(out) :: ndim !< The number of dimensions in the file + integer, optional, intent(out) :: nvar !< The number of variables in the file + integer, optional, intent(out) :: ntime !< The number of time levels in the file + + ! Local variables + character(len=256) :: dim_unlim_name ! name of the unlimited dimension in the file + integer :: ndims, nvars, natts, ntimes + + if (IO_handle%FMS2_file) then + if (present(ndim)) ndim = get_num_dimensions(IO_handle%fileobj) + if (present(nvar)) nvar = get_num_variables(IO_handle%fileobj) + if (present(ntime)) then + ntime = 0 + call get_unlimited_dimension_name(IO_handle%fileobj, dim_unlim_name) + if (len_trim(dim_unlim_name) > 0) & + call get_dimension_size(IO_handle%fileobj, trim(dim_unlim_name), ntime) + endif + else + call mpp_get_info(IO_handle%unit, ndims, nvars, natts, ntimes ) + + if (present(ndim)) ndim = ndims + if (present(nvar)) nvar = nvars + if (present(ntime)) ntime = ntimes + endif + +end subroutine get_file_info + + +!> Get the times of records from a file +subroutine get_file_times(IO_handle, time_values, ntime) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O + real, allocatable, dimension(:), intent(inout) :: time_values !< The real times for the records in file. + integer, optional, intent(out) :: ntime !< The number of time levels in the file + + character(len=256) :: dim_unlim_name ! name of the unlimited dimension in the file + integer :: ntimes ! The number of time levels in the file + + !### Modify this routine to optionally convert to time_type, using information about the dimensions? + + if (allocated(time_values)) deallocate(time_values) + call get_file_info(IO_handle, ntime=ntimes) + if (present(ntime)) ntime = ntimes + if (ntimes > 0) then + allocate(time_values(ntimes)) + if (IO_handle%FMS2_file) then + call get_unlimited_dimension_name(IO_handle%fileobj, dim_unlim_name) + call fms2_read_data(IO_handle%fileobj, trim(dim_unlim_name), time_values) + else + call mpp_get_times(IO_handle%unit, time_values) + endif + endif +end subroutine get_file_times + +!> Set up the field information (e.g., names and metadata) for all of the variables in a file. The +!! argument fields must be allocated with a size that matches the number of variables in a file. +subroutine get_file_fields(IO_handle, fields) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O + type(fieldtype), dimension(:), intent(inout) :: fields !< Field-type descriptions of all of + !! the variables in a file. + type(mpp_fieldtype), dimension(size(fields)) :: mpp_fields ! Fieldtype structures for the variables + character(len=256), dimension(size(fields)) :: var_names ! The names of all variables + character(len=256) :: units ! The units of a variable as recorded in the file + character(len=2048) :: longname ! The long-name of a variable as recorded in the file + character(len=64) :: checksum_char ! The hexadecimal checksum read from the file + integer(kind=int64), dimension(3) :: checksum_file ! The checksums for a variable in the file + integer :: nvar ! The number of variables in the file + integer :: i + + nvar = size(fields) + ! 
Local variables + if (IO_handle%FMS2_file) then + call get_variable_names(IO_handle%fileobj, var_names) + do i=1,nvar + fields(i)%name = trim(var_names(i)) + longname = "" + if (variable_att_exists(IO_handle%fileobj, var_names(i), "long_name")) & + call get_variable_attribute(IO_handle%fileobj, var_names(i), "long_name", longname) + fields(i)%longname = trim(longname) + units = "" + if (variable_att_exists(IO_handle%fileobj, var_names(i), "units")) & + call get_variable_attribute(IO_handle%fileobj, var_names(i), "units", units) + fields(i)%units = trim(units) + + fields(i)%valid_chksum = variable_att_exists(IO_handle%fileobj, var_names(i), "checksum") + if (fields(i)%valid_chksum) then + call get_variable_attribute(IO_handle%fileobj, var_names(i), 'checksum', checksum_char) + ! If there are problems, there might need to be code added to handle commas. + read (checksum_char(1:16), '(Z16)') fields(i)%chksum_read + endif + enddo + else + call mpp_get_fields(IO_handle%unit, mpp_fields) + do i=1,nvar + fields(i)%FT = mpp_fields(i) + call mpp_get_atts(fields(i)%FT, name=fields(i)%name, units=units, longname=longname, & + checksum=checksum_file) + fields(i)%longname = trim(longname) + fields(i)%units = trim(units) + fields(i)%valid_chksum = mpp_attribute_exist(fields(i)%FT, "checksum") + if (fields(i)%valid_chksum) fields(i)%chksum_read = checksum_file(1) + enddo + endif + +end subroutine get_file_fields + +!> Extract information from a field type, as stored or as found in a file +subroutine get_field_atts(field, name, units, longname, checksum) + type(fieldtype), intent(in) :: field !< The field to extract information from + character(len=*), optional, intent(out) :: name !< The variable name + character(len=*), optional, intent(out) :: units !< The units of the variable + character(len=*), optional, intent(out) :: longname !< The long name of the variable + integer(kind=int64), dimension(:), & + optional, intent(out) :: checksum !< The checksums of the variable in a file + + if (present(name)) name = trim(field%name) + if (present(units)) units = trim(field%units) + if (present(longname)) longname = trim(field%longname) + if (present(checksum)) checksum = field%chksum_read + +end subroutine get_field_atts + +!> Field_exists returns true if the field indicated by field_name is present in the +!! file file_name. If file_name does not exist, it returns false. +function field_exists(filename, field_name, domain, no_domain, MOM_domain) + character(len=*), intent(in) :: filename !< The name of the file being inquired about + character(len=*), intent(in) :: field_name !< The name of the field being sought + type(domain2d), target, optional, intent(in) :: domain !< A domain2d type that describes the decomposition + logical, optional, intent(in) :: no_domain !< This file does not use domain decomposition + type(MOM_domain_type), optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + logical :: field_exists !< True if filename exists and field_name is in filename + + ! Local variables + type(FmsNetcdfDomainFile_t) :: fileObj_dd ! A handle to a domain-decomposed file for obtaining information + ! about the exiting time axis entries in append mode. + type(FmsNetcdfFile_t) :: fileObj_simple ! A handle to a non-domain-decomposed file for obtaining information + ! about the exiting time axis entries in append mode. + logical :: success ! If true, the file was opened successfully + logical :: domainless ! If true, this file does not use a domain-decomposed file. 
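    ! Typical use (hypothetical names): initialization code can guard an optional read with
    !   if (field_exists(filename, "tideamp", MOM_domain=G%Domain)) call read_field(...)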
+ + domainless = .not.(present(MOM_domain) .or. present(domain)) + if (present(no_domain)) then + if (domainless .and. .not.no_domain) call MOM_error(FATAL, & + "field_exists: When no_domain is present and false, a domain must be supplied in query about "//& + trim(field_name)//" in file "//trim(filename)) + domainless = no_domain + endif + + if (FMS2_reads) then + field_exists = .false. + if (file_exists(filename)) then + if (domainless) then + success = fms2_open_file(fileObj_simple, trim(filename), "read") + if (success) then + field_exists = variable_exists(fileObj_simple, field_name) + call fms2_close_file(fileObj_simple) + endif + else + if (present(MOM_domain)) then + success = fms2_open_file(fileObj_dd, trim(filename), "read", MOM_domain%mpp_domain) + else + success = fms2_open_file(fileObj_dd, trim(filename), "read", domain) + endif + if (success) then + field_exists = variable_exists(fileobj_dd, field_name) + call fms2_close_file(fileObj_dd) + endif + endif + endif + elseif (present(MOM_domain)) then + field_exists = field_exist(filename, field_name, domain=MOM_domain%mpp_domain, no_domain=no_domain) + else + field_exists = field_exist(filename, field_name, domain=domain, no_domain=no_domain) + endif + +end function field_exists + +!> Given filename and fieldname, this subroutine returns the size of the field in the file +subroutine get_field_size(filename, fieldname, sizes, field_found, no_domain) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The name of the variable whose sizes are returned + integer, dimension(:), intent(inout) :: sizes !< The sizes of the variable in each dimension + logical, optional, intent(out) :: field_found !< This indicates whether the field was found in + !! the input file. Without this argument, there + !! is a fatal error if the field is not found. + logical, optional, intent(in) :: no_domain !< If present and true, do not check for file + !! names with an appended tile number + + ! Local variables + type(FmsNetcdfFile_t) :: fileobj_read ! A handle to a non-domain-decomposed file for obtaining information + ! about the exiting time axis entries in append mode. + logical :: success ! If true, the file was opened successfully + logical :: field_exists ! True if filename exists and field_name is in filename + integer :: i, ndims + + if (FMS2_reads) then + field_exists = .false. + if (file_exists(filename)) then + success = fms2_open_file(fileObj_read, trim(filename), "read") + if (success) then + field_exists = variable_exists(fileobj_read, fieldname) + if (field_exists) then + ndims = get_variable_num_dimensions(fileobj_read, fieldname) + if (ndims > size(sizes)) call MOM_error(FATAL, & + "get_field_size called with too few sizes for "//trim(fieldname)//" in "//trim(filename)) + call get_variable_size(fileobj_read, fieldname, sizes(1:ndims)) + do i=ndims+1,size(sizes) ; sizes(i) = 0 ; enddo + endif + endif + endif + if (present(field_found)) field_found = field_exists + else + call field_size(filename, fieldname, sizes, field_found=field_found, no_domain=no_domain) + endif + +end subroutine get_field_size + +!> Extracts and returns the axis data stored in an axistype. +subroutine get_axis_data( axis, dat ) + type(axistype), intent(in) :: axis !< An axis type + real, dimension(:), intent(out) :: dat !< The data in the axis variable + + integer :: i + + ! This routine might not be needed for MOM6. 
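  ! When an axis was written through the FMS2 path its values are expected to already be cached in
  ! ax_data (presumably filled when the axis metadata was registered), so no file access is needed
  ! here; the mpp_get_axis_data branch below covers axes that came from the FMS1 path.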
+ if (allocated(axis%ax_data)) then + if (size(axis%ax_data) > size(dat)) call MOM_error(FATAL, & + "get_axis_data called with too small of an output data array for "//trim(axis%name)) + do i=1,size(axis%ax_data) ; dat(i) = axis%ax_data(i) ; enddo + elseif (.not.FMS2_writes) then + call mpp_get_axis_data( axis%AT, dat ) + endif + +end subroutine get_axis_data + +!> This routine uses the fms_io subroutine read_data to read a scalar named +!! "fieldname" from a single or domain-decomposed file "filename". +subroutine read_field_0d(filename, fieldname, data, timelevel, scale, MOM_Domain, & + global_file, file_may_be_4d) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, intent(inout) :: data !< The scalar into which the data should be read + integer, optional, intent(in) :: timelevel !< The time level in the file to read + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + logical, optional, intent(in) :: global_file !< If true, read from a single global file + logical, optional, intent(in) :: file_may_be_4d !< If true, this file may have 4-d arrays, but + !! with the FMS2 I/O interfaces this does not matter. + + ! Local variables + type(FmsNetcdfFile_t) :: fileObj ! A handle to a non-domain-decomposed file + type(FmsNetcdfDomainFile_t) :: fileobj_DD ! A handle to a domain-decomposed file object + character(len=96) :: var_to_read ! Name of variable to read from the netcdf file + logical :: has_time_dim ! True if the variable has an unlimited time axis. + logical :: success ! True if the file was successfully opened + + if (present(MOM_Domain) .and. FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileobj_DD, filename, "read", MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file and prepare to read it. + call prepare_to_read_var(fileobj_DD, fieldname, "read_field_0d: ", filename, & + var_to_read, has_time_dim, timelevel) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj_DD, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj_DD, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj_DD)) call fms2_close_file(fileobj_DD) + elseif (FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileObj, trim(filename), "read") + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file, and determine whether it + ! has a time dimension. + call find_varname_in_file(fileObj, fieldname, "read_field_0d: ", filename, & + var_to_read, has_time_dim, timelevel) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + elseif (present(MOM_Domain)) then ! Read the variable using the FMS-1 interface.
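+ ! The FMS-1 read_data interface takes the mpp domain and the optional time level directly.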
+ call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, timelevel=timelevel) + else + call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + data = scale*data + endif ; endif + +end subroutine read_field_0d + +!> This routine uses the fms_io subroutine read_data to read a 1-D data field named +!! "fieldname" from a single or domain-decomposed file "filename". +subroutine read_field_1d(filename, fieldname, data, timelevel, scale, MOM_Domain, & + global_file, file_may_be_4d) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:), intent(inout) :: data !< The 1-dimensional array into which the data + integer, optional, intent(in) :: timelevel !< The time level in the file to read + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before they are returned. + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + logical, optional, intent(in) :: global_file !< If true, read from a single global file + logical, optional, intent(in) :: file_may_be_4d !< If true, this file may have 4-d arrays, but + !! with the FMS2 I/O interfaces this does not matter. + + ! Local variables + type(FmsNetcdfFile_t) :: fileObj ! A handle to a non-domain-decomposed file + type(FmsNetcdfDomainFile_t) :: fileobj_DD ! A handle to a domain-decomposed file object + character(len=96) :: var_to_read ! Name of variable to read from the netcdf file + logical :: has_time_dim ! True if the variable has an unlimited time axis. + logical :: success ! True if the file was successfully opened + + if (present(MOM_Domain) .and. FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileobj_DD, filename, "read", MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file and prepare to read it. + call prepare_to_read_var(fileobj_DD, fieldname, "read_field_1d: ", filename, & + var_to_read, has_time_dim, timelevel) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj_DD, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj_DD, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj_DD)) call fms2_close_file(fileobj_DD) + elseif (FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileObj, trim(filename), "read") + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file, and determine whether it + ! has a time dimension. + call find_varname_in_file(fileObj, fieldname, "read_field_1d: ", filename, & + var_to_read, has_time_dim, timelevel) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + elseif (present(MOM_Domain)) then ! Read the variable using the FMS-1 interface. 
+ call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, timelevel=timelevel) + else + call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + data(:) = scale*data(:) + endif ; endif + +end subroutine read_field_1d + +!> This routine uses the fms_io subroutine read_data to read a distributed +!! 2-D data field named "fieldname" from file "filename". Valid values for +!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE. +subroutine read_field_2d(filename, fieldname, data, MOM_Domain, & + timelevel, position, scale, global_file, file_may_be_4d) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:), intent(inout) :: data !< The 2-dimensional array into which the data + !! should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is located + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + logical, optional, intent(in) :: global_file !< If true, read from a single global file + logical, optional, intent(in) :: file_may_be_4d !< If true, this file may have 4-d arrays, but + !! with the FMS2 I/O interfaces this does not matter. + + ! Local variables + type(FmsNetcdfDomainFile_t) :: fileobj ! A handle to a domain-decomposed file object + character(len=96) :: var_to_read ! Name of variable to read from the netcdf file + logical :: has_time_dim ! True if the variable has an unlimited time axis. + logical :: success ! True if the file was successfully opened + + if (FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileobj, filename, "read", MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file and prepare to read it. + call prepare_to_read_var(fileobj, fieldname, "read_field_2d: ", filename, & + var_to_read, has_time_dim, timelevel, position) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + else ! Read the variable using the FMS-1 interface. + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=position) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, data, scale) + endif ; endif + +end subroutine read_field_2d + +!> This routine uses the fms_io subroutine read_data to read a region from a distributed or +!! global 2-D data field named "fieldname" from file "filename". +subroutine read_field_2d_region(filename, fieldname, data, start, nread, MOM_domain, & + no_domain, scale) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:), intent(inout) :: data !< The 2-dimensional array into which the data + !! 
should be read + integer, dimension(:), intent(in) :: start !< The starting index to read in each of 4 + !! dimensions. For this 2-d read, the 3rd + !! and 4th values are always 1. + integer, dimension(:), intent(in) :: nread !< The number of points to read in each of 4 + !! dimensions. For this 2-d read, the 3rd + !! and 4th values are always 1. + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + logical, optional, intent(in) :: no_domain !< If present and true, this variable does not + !! use domain decomposion. + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + + ! Local variables + type(FmsNetcdfFile_t) :: fileObj ! A handle to a non-domain-decomposed file + type(FmsNetcdfDomainFile_t) :: fileobj_DD ! A handle to a domain-decomposed file object + character(len=96) :: var_to_read ! Name of variable to read from the netcdf file + logical :: success ! True if the file was successfully opened + + if (present(MOM_Domain) .and. FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileobj_DD, filename, "read", MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file and prepare to read it. + call prepare_to_read_var(fileobj_DD, fieldname, "read_field_2d_region: ", & + filename, var_to_read) + + ! Read the data. + call fms2_read_data(fileobj_DD, var_to_read, data, corner=start(1:2), edge_lengths=nread(1:2)) + + ! Close the file-set. + if (check_if_open(fileobj_DD)) call fms2_close_file(fileobj_DD) + elseif (FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileObj, trim(filename), "read") + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file, and determine whether it + ! has a time dimension. + call find_varname_in_file(fileObj, fieldname, "read_field_2d_region: ", filename, var_to_read) + + ! Read the data. + call fms2_read_data(fileobj, var_to_read, data, corner=start(1:2), edge_lengths=nread(1:2)) + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + elseif (present(MOM_Domain)) then ! Read the variable using the FMS-1 interface. + call read_data(filename, fieldname, data, start, nread, domain=MOM_Domain%mpp_domain, & + no_domain=no_domain) + else + call read_data(filename, fieldname, data, start, nread, no_domain=no_domain) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + if (present(MOM_Domain)) then + call rescale_comp_data(MOM_Domain, data, scale) + else + ! Dangerously rescale the whole array + data(:,:) = scale*data(:,:) + endif + endif ; endif + +end subroutine read_field_2d_region + +!> This routine uses the fms_io subroutine read_data to read a distributed +!! 3-D data field named "fieldname" from file "filename". Valid values for +!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE. +subroutine read_field_3d(filename, fieldname, data, MOM_Domain, & + timelevel, position, scale, global_file, file_may_be_4d) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:,:), intent(inout) :: data !< The 3-dimensional array into which the data + !! 
should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is located + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + logical, optional, intent(in) :: global_file !< If true, read from a single global file + logical, optional, intent(in) :: file_may_be_4d !< If true, this file may have 4-d arrays, but + !! with the FMS2 I/O interfaces this does not matter. + + ! Local variables + type(FmsNetcdfDomainFile_t) :: fileobj ! A handle to a domain-decomposed file object + character(len=96) :: var_to_read ! Name of variable to read from the netcdf file + logical :: has_time_dim ! True if the variable has an unlimited time axis. + logical :: success ! True if the file was successfully opened + + if (FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileobj, filename, "read", MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file and prepare to read it. + call prepare_to_read_var(fileobj, fieldname, "read_field_3d: ", filename, & + var_to_read, has_time_dim, timelevel, position) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + else ! Read the variable using the FMS-1 interface. + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=position) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, data, scale) + endif ; endif + +end subroutine read_field_3d + +!> This routine uses the fms_io subroutine read_data to read a distributed +!! 4-D data field named "fieldname" from file "filename". Valid values for +!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE. +subroutine read_field_4d(filename, fieldname, data, MOM_Domain, & + timelevel, position, scale, global_file) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:,:,:), intent(inout) :: data !< The 4-dimensional array into which the data + !! should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is located + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + logical, optional, intent(in) :: global_file !< If true, read from a single global file + + + ! Local variables + type(FmsNetcdfDomainFile_t) :: fileobj ! A handle to a domain-decomposed file object + logical :: has_time_dim ! True if the variable has an unlimited time axis. + character(len=96) :: var_to_read ! Name of variable to read from the netcdf file + logical :: success ! True if the file was successfully opened + + if (FMS2_reads) then + ! Open the FMS2 file-set. 
+ success = fms2_open_file(fileobj, filename, "read", MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file and prepare to read it. + call prepare_to_read_var(fileobj, fieldname, "read_field_4d: ", filename, & + var_to_read, has_time_dim, timelevel, position) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + else ! Read the variable using the FMS-1 interface. + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=position) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, data, scale) + endif ; endif + +end subroutine read_field_4d + +!> This routine uses the fms_io subroutine read_data to read a scalar integer +!! data field named "fieldname" from file "filename". +subroutine read_field_0d_int(filename, fieldname, data, timelevel) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + integer, intent(inout) :: data !< The scalar integer into which the data should be read + integer, optional, intent(in) :: timelevel !< The time level in the file to read + + ! Local variables + type(FmsNetcdfFile_t) :: fileObj ! A handle to a non-domain-decomposed file + logical :: has_time_dim ! True if the variable has an unlimited time axis. + character(len=96) :: var_to_read ! Name of variable to read from the netcdf file + logical :: success ! If true, the file was opened successfully + + ! This routine might not be needed for MOM6. + if (FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileObj, trim(filename), "read") + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file, and determine whether it + ! has a time dimension. + call find_varname_in_file(fileObj, fieldname, "read_field_0d_int: ", filename, & + var_to_read, has_time_dim, timelevel) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + else + call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.) + endif + +end subroutine read_field_0d_int + +!> This routine uses the fms_io subroutine read_data to read a 1-D integer +!! data field named "fieldname" from file "filename". +subroutine read_field_1d_int(filename, fieldname, data, timelevel) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + integer, dimension(:), intent(inout) :: data !< The 1-dimensional array into which the data should be read + integer, optional, intent(in) :: timelevel !< The time level in the file to read + + ! Local variables + type(FmsNetcdfFile_t) :: fileObj ! A handle to a non-domain-decomposed file for obtaining information + ! about the existing time axis entries in append mode. + logical :: has_time_dim !
True if the variable has an unlimited time axis. + character(len=96) :: var_to_read ! Name of variable to read from the netcdf file + logical :: success ! If true, the file was opened successfully + + ! This routine might not be needed for MOM6. + if (FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileObj, trim(filename), "read") + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive variable name in the file, and determine whether it + ! has a time dimension. + call find_varname_in_file(fileObj, fieldname, "read_field_1d_int: ", filename, & + var_to_read, has_time_dim, timelevel) + + ! Read the data. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, var_to_read, data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, var_to_read, data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + else + call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.) + endif + +end subroutine read_field_1d_int + + +!> This routine uses the fms_io subroutine read_data to read a pair of distributed +!! 2-D data fields with names given by "[uv]_fieldname" from file "filename". Valid values for +!! "stagger" include CGRID_NE, BGRID_NE, and AGRID. +subroutine read_vector_2d(filename, u_fieldname, v_fieldname, u_data, v_data, MOM_Domain, & + timelevel, stagger, scalar_pair, scale) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: u_fieldname !< The variable name of the u data in the file + character(len=*), intent(in) :: v_fieldname !< The variable name of the v data in the file + real, dimension(:,:), intent(inout) :: u_data !< The 2 dimensional array into which the + !! u-component of the data should be read + real, dimension(:,:), intent(inout) :: v_data !< The 2 dimensional array into which the + !! v-component of the data should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: stagger !< A flag indicating where this vector is discretized + logical, optional, intent(in) :: scalar_pair !< If true, a pair of scalars are to be read + real, optional, intent(in) :: scale !< A scaling factor that the fields are multiplied + !! by before they are returned. + ! Local variables + type(FmsNetcdfDomainFile_t) :: fileobj ! A handle to a domain-decomposed file object + logical :: has_time_dim ! True if the variables have an unlimited time axis. + character(len=96) :: u_var, v_var ! Name of u and v variables to read from the netcdf file + logical :: success ! True if the file was successfully opened + integer :: u_pos, v_pos ! Flags indicating the positions of the u- and v- components. + + u_pos = EAST_FACE ; v_pos = NORTH_FACE + if (present(stagger)) then + if (stagger == CGRID_NE) then ; u_pos = EAST_FACE ; v_pos = NORTH_FACE + elseif (stagger == BGRID_NE) then ; u_pos = CORNER ; v_pos = CORNER + elseif (stagger == AGRID) then ; u_pos = CENTER ; v_pos = CENTER ; endif + endif + + if (FMS2_reads) then + ! Open the FMS2 file-set. + success = fms2_open_file(fileobj, filename, "read", MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive u- and v-variable names in the file and prepare to read them. 
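+ ! The u- and v-components may be staggered at different grid positions (u_pos and v_pos, set + ! from the stagger flag above), so each component is registered at its own position before it is read.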
+ call prepare_to_read_var(fileobj, u_fieldname, "read_vector_2d: ", filename, & + u_var, has_time_dim, timelevel, position=u_pos) + call prepare_to_read_var(fileobj, v_fieldname, "read_vector_2d: ", filename, & + v_var, has_time_dim, timelevel, position=v_pos) + + ! Read the u-data and v-data. There would already been an error message for one + ! of the variables if they are inconsistent in having an unlimited dimension. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, u_var, u_data, unlim_dim_level=timelevel) + call fms2_read_data(fileobj, v_var, v_data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, u_var, u_data) + call fms2_read_data(fileobj, v_var, v_data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + else ! Read the variable using the FMS-1 interface. + call read_data(filename, u_fieldname, u_data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=u_pos) + call read_data(filename, v_fieldname, v_data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=v_pos) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, u_data, scale) + call rescale_comp_data(MOM_Domain, v_data, scale) + endif ; endif + +end subroutine read_vector_2d + +!> This routine uses the fms_io subroutine read_data to read a pair of distributed +!! 3-D data fields with names given by "[uv]_fieldname" from file "filename". Valid values for +!! "stagger" include CGRID_NE, BGRID_NE, and AGRID. +subroutine read_vector_3d(filename, u_fieldname, v_fieldname, u_data, v_data, MOM_Domain, & + timelevel, stagger, scalar_pair, scale) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: u_fieldname !< The variable name of the u data in the file + character(len=*), intent(in) :: v_fieldname !< The variable name of the v data in the file + real, dimension(:,:,:), intent(inout) :: u_data !< The 3 dimensional array into which the + !! u-component of the data should be read + real, dimension(:,:,:), intent(inout) :: v_data !< The 3 dimensional array into which the + !! v-component of the data should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: stagger !< A flag indicating where this vector is discretized + logical, optional, intent(in) :: scalar_pair !< If true, a pair of scalars are to be read. + real, optional, intent(in) :: scale !< A scaling factor that the fields are multiplied + !! by before they are returned. + + ! Local variables + type(FmsNetcdfDomainFile_t) :: fileobj ! A handle to a domain-decomposed file object + logical :: has_time_dim ! True if the variables have an unlimited time axis. + character(len=96) :: u_var, v_var ! Name of u and v variables to read from the netcdf file + logical :: success ! True if the file was successfully opened + integer :: u_pos, v_pos ! Flags indicating the positions of the u- and v- components. + + u_pos = EAST_FACE ; v_pos = NORTH_FACE + if (present(stagger)) then + if (stagger == CGRID_NE) then ; u_pos = EAST_FACE ; v_pos = NORTH_FACE + elseif (stagger == BGRID_NE) then ; u_pos = CORNER ; v_pos = CORNER + elseif (stagger == AGRID) then ; u_pos = CENTER ; v_pos = CENTER ; endif + endif + + if (FMS2_reads) then + ! Open the FMS2 file-set. 
+ success = fms2_open_file(fileobj, filename, "read", MOM_domain%mpp_domain) + if (.not.success) call MOM_error(FATAL, "Failed to open "//trim(filename)) + + ! Find the matching case-insensitive u- and v-variable names in the file and prepare to read them. + call prepare_to_read_var(fileobj, u_fieldname, "read_vector_3d: ", filename, & + u_var, has_time_dim, timelevel, position=u_pos) + call prepare_to_read_var(fileobj, v_fieldname, "read_vector_3d: ", filename, & + v_var, has_time_dim, timelevel, position=v_pos) + + ! Read the u-data and v-data, dangerously assuming either both or neither have time dimensions. + ! There would already been an error message for one of the variables if they are inconsistent. + if (present(timelevel) .and. has_time_dim) then + call fms2_read_data(fileobj, u_var, u_data, unlim_dim_level=timelevel) + call fms2_read_data(fileobj, v_var, v_data, unlim_dim_level=timelevel) + else + call fms2_read_data(fileobj, u_var, u_data) + call fms2_read_data(fileobj, v_var, v_data) + endif + + ! Close the file-set. + if (check_if_open(fileobj)) call fms2_close_file(fileobj) + else ! Read the variable using the FMS-1 interface. + call read_data(filename, u_fieldname, u_data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=u_pos) + call read_data(filename, v_fieldname, v_data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=v_pos) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, u_data, scale) + call rescale_comp_data(MOM_Domain, v_data, scale) + endif ; endif + +end subroutine read_vector_3d + + +!> Find the case-sensitive name of the variable in a netCDF file with a case-insensitive name match. +!! Optionally also determine whether this variable has an unlimited time dimension. +subroutine find_varname_in_file(fileobj, fieldname, err_header, filename, var_to_read, has_time_dim, timelevel) + type(FmsNetcdfFile_t), intent(inout) :: fileobj !< An FMS2 handle to an open NetCDF file + character(len=*), intent(in) :: fieldname !< The variable name to seek in the file + character(len=*), intent(in) :: err_header !< A descriptive prefix for error messages + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(out) :: var_to_read !< The variable name to read from the file + logical, optional, intent(out) :: has_time_dim !< Indicates whether fieldname has a time dimension + integer, optional, intent(in) :: timelevel !< A time level to read + + ! Local variables + logical :: variable_found ! Is a case-insensitive version of the variable found in the netCDF file? + character(len=256), allocatable, dimension(:) :: var_names ! The names of all the variables in the netCDF file + character(len=256), allocatable :: dim_names(:) ! The names of a variable's dimensions + integer :: nvars ! The number of variables in the file + integer :: dim_unlim_size ! The current size of the unlimited (time) dimension in the file. + integer :: num_var_dims ! The number of dimensions a variable has in the file. + integer :: time_dim ! The position of the unlimited (time) dimension for a variable, or -1 + ! if it has no unlimited dimension. + integer :: i + + ! Open the file if necessary + if (.not.check_if_open(fileobj)) & + call MOM_error(FATAL, trim(err_header)//trim(filename)//" was not open in call to find_varname_in_file.") + + ! Search for the variable in the file, looking for the case-sensitive name first. 
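+ ! If no exact match exists, the case-insensitive fallback below returns the file's own spelling + ! of the variable name in var_to_read.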
+ if (variable_exists(fileobj, trim(fieldname))) then + var_to_read = trim(fieldname) + else ! Look for case-insensitive variable name matches. + nvars = get_num_variables(fileobj) + if (nvars < 1) call MOM_error(FATAL, "nvars is less than 1 for file "//trim(filename)) + allocate(var_names(nvars)) + call get_variable_names(fileobj, var_names) + + ! search for the variable in the file + variable_found = .false. + do i=1,nvars + if (lowercase(trim(var_names(i))) == lowercase(trim(fieldname))) then + variable_found = .true. + var_to_read = trim(var_names(i)) + exit + endif + enddo + if (.not.(variable_found)) & + call MOM_error(FATAL, trim(err_header)//trim(fieldname)//" not found in "//trim(filename)) + deallocate(var_names) + endif + + ! FMS2 can not handle a timelevel argument if the variable does not have one in the file, + ! so some error checking and logic are required. + if (present(has_time_dim) .or. present(timelevel)) then + time_dim = -1 + + num_var_dims = get_variable_num_dimensions(fileobj, trim(var_to_read)) + allocate(dim_names(num_var_dims)) ; dim_names(:) = "" + call get_variable_dimension_names(fileobj, trim(var_to_read), dim_names) + + do i=1,num_var_dims + if (is_dimension_unlimited(fileobj, dim_names(i))) then + time_dim = i + if (present(timelevel)) then + call get_dimension_size(fileobj, dim_names(i), dim_unlim_size) + if ((timelevel > dim_unlim_size) .and. is_root_PE()) call MOM_error(FATAL, & + trim(err_header)//"Attempting to read a time level of "//trim(var_to_read)//& + " that exceeds the size of the time dimension in "//trim(filename)) + endif + exit + endif + enddo + deallocate(dim_names) + + if (present(timelevel) .and. (time_dim < 0) .and. is_root_PE()) & + call MOM_error(WARNING, trim(err_header)//"time level specified, but the variable "//& + trim(var_to_read)//" does not have an unlimited dimension in "//trim(filename)) + if ((.not.present(timelevel)) .and. (time_dim > 0) .and. is_root_PE()) & + call MOM_error(WARNING, trim(err_header)//"The variable "//trim(var_to_read)//& + " has an unlimited dimension in "//trim(filename)//" but no time level is specified.") + if (present(has_time_dim)) has_time_dim = (time_dim > 0) + endif + +end subroutine find_varname_in_file + + +!> Find the case-insensitive name match with a variable in an open domain-decomposed file-set, +!! prepare FMS2 to read this variable, and return some information needed to call fms2_read_data +!! correctly for this variable and file. +subroutine prepare_to_read_var(fileobj, fieldname, err_header, filename, var_to_read, & + has_time_dim, timelevel, position) + type(FmsNetcdfDomainFile_t), intent(inout) :: fileobj !< An FMS2 handle to an open domain-decomposed file + character(len=*), intent(in) :: fieldname !< The variable name to seek in the file + character(len=*), intent(in) :: err_header !< A descriptive prefix for error messages + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(out) :: var_to_read !< The variable name to read from the file + logical, optional, intent(out) :: has_time_dim !< Indicates whether fieldname has a time dimension + integer, optional, intent(in) :: timelevel !< A time level to read + integer, optional, intent(in) :: position !< A flag indicating where this variable is discretized + + ! Local variables + logical :: variable_found ! Is a case-insensitive version of the variable found in the netCDF file? + character(len=256), allocatable, dimension(:) :: var_names ! 
The names of all the variables in the netCDF file + character(len=256), allocatable :: dim_names(:) ! The names of a variable's dimensions + integer :: nvars ! The number of variables in the file. + integer :: dim_unlim_size ! The current size of the unlimited (time) dimension in the file. + integer :: num_var_dims ! The number of dimensions a variable has in the file. + integer :: time_dim ! The position of the unlimited (time) dimension for a variable, or -1 + ! if it has no unlimited dimension. + integer :: i + + ! Open the file if necessary + if (.not.check_if_open(fileobj)) & + call MOM_error(FATAL, trim(err_header)//trim(filename)//" was not open in call to prepare_to_read_var.") + + ! Search for the variable in the file, looking for the case-sensitive name first. + if (variable_exists(fileobj, trim(fieldname))) then + var_to_read = trim(fieldname) + else ! Look for case-insensitive variable name matches. + nvars = get_num_variables(fileobj) + if (nvars < 1) call MOM_error(FATAL, "nvars is less than 1 for file "//trim(filename)) + allocate(var_names(nvars)) + call get_variable_names(fileobj, var_names) + + variable_found = .false. + do i=1,nvars + if (lowercase(trim(var_names(i))) == lowercase(trim(fieldname))) then + variable_found = .true. + var_to_read = trim(var_names(i)) + exit + endif + enddo + if (.not.(variable_found)) & + call MOM_error(FATAL, trim(err_header)//trim(fieldname)//" not found in "//trim(filename)) + deallocate(var_names) + endif + + ! FMS2 can not handle a timelevel argument if the variable does not have one in the file, + ! so some error checking and logic are required. + if (present(has_time_dim) .or. present(timelevel)) then + time_dim = -1 + + num_var_dims = get_variable_num_dimensions(fileobj, trim(var_to_read)) + allocate(dim_names(num_var_dims)) ; dim_names(:) = "" + call get_variable_dimension_names(fileobj, trim(var_to_read), dim_names) + + do i=1,num_var_dims + if (is_dimension_unlimited(fileobj, dim_names(i))) then + time_dim = i + if (present(timelevel)) then + call get_dimension_size(fileobj, dim_names(i), dim_unlim_size) + if ((timelevel > dim_unlim_size) .and. is_root_PE()) call MOM_error(FATAL, & + trim(err_header)//"Attempting to read a time level of "//trim(var_to_read)//& + " that exceeds the size of the time dimension in "//trim(filename)) + endif + exit + endif + enddo + deallocate(dim_names) + + if (present(timelevel) .and. (time_dim < 0) .and. is_root_PE()) & + call MOM_error(WARNING, trim(err_header)//"time level specified, but the variable "//& + trim(var_to_read)//" does not have an unlimited dimension in "//trim(filename)) + if ((.not.present(timelevel)) .and. (time_dim > 0) .and. is_root_PE()) & + call MOM_error(WARNING, trim(err_header)//"The variable "//trim(var_to_read)//& + " has an unlimited dimension in "//trim(filename)//" but no time level is specified.") + if (present(has_time_dim)) has_time_dim = (time_dim > 0) + endif + + ! Registering the variable axes essentially just specifies the discrete position of this variable. 
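+ ! Registering the decomposed x- and y-dimensions with the file object also lets fms2_read_data + ! return just this PE's portion of the domain-decomposed data.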
+ call MOM_register_variable_axes(fileobj, var_to_read, filename, position) + +end subroutine prepare_to_read_var + +!> register axes associated with a variable from a domain-decomposed netCDF file +subroutine MOM_register_variable_axes(fileObj, variableName, filename, position) + type(FmsNetcdfDomainFile_t), intent(inout) :: fileObj !< Handle to an open FMS2 netCDF file object + character(len=*), intent(in) :: variableName !< name of the variable + character(len=*), intent(in) :: filename !< The name of the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is discretized + + ! Local variables + character(len=256), allocatable, dimension(:) :: dim_names ! variable dimension names + integer, allocatable, dimension(:) :: dimSizes ! variable dimension sizes + logical, allocatable, dimension(:) :: is_x ! Is this a (likely domain-decomposed) x-axis + logical, allocatable, dimension(:) :: is_y ! Is this a (likely domain-decomposed) y-axis + logical, allocatable, dimension(:) :: is_t ! Is this a time axis or another unlimited axis + integer :: ndims ! number of dimensions + integer :: xPos, yPos ! Discrete positions for x and y axes. Default is CENTER + integer :: i + + xPos = CENTER ; yPos = CENTER + if (present(position)) then + if ((position == CORNER) .or. (position == EAST_FACE)) xPos = EAST_FACE + if ((position == CORNER) .or. (position == NORTH_FACE)) yPos = NORTH_FACE + endif + + ! get variable dimension names and lengths + ndims = get_variable_num_dimensions(fileObj, trim(variableName)) + allocate(dimSizes(ndims)) + allocate(dim_names(ndims)) + allocate(is_x(ndims)) ; is_x(:) = .false. + allocate(is_y(ndims)) ; is_y(:) = .false. + allocate(is_t(ndims)) ; is_t(:) = .false. + call get_variable_size(fileObj, trim(variableName), dimSizes) + call get_variable_dimension_names(fileObj, trim(variableName), dim_names) + call categorize_axes(fileObj, filename, ndims, dim_names, is_x, is_y, is_t) + + ! register the axes + do i=1,ndims + if ( .not.is_dimension_registered(fileobj, trim(dim_names(i))) ) then + if (is_x(i)) then + call register_axis(fileObj, trim(dim_names(i)), "x", domain_position=xPos) + elseif (is_y(i)) then + call register_axis(fileObj, trim(dim_names(i)), "y", domain_position=yPos) + else + call register_axis(fileObj, trim(dim_names(i)), dimSizes(i)) + endif + endif + enddo + + deallocate(dimSizes, dim_names, is_x, is_y, is_t) +end subroutine MOM_register_variable_axes + +!> Determine whether a variable's axes are associated with x-, y- or time-dimensions. Other +!! unlimited dimensions are also labeled as time axes for these purposes. +subroutine categorize_axes(fileObj, filename, ndims, dim_names, is_x, is_y, is_t) + type(FmsNetcdfDomainFile_t), intent(in) :: fileObj !< Handle to an open FMS2 netCDF file object + character(len=*), intent(in) :: filename !< The name of the file to read + integer, intent(in) :: ndims !< The number of dimensions associated with a variable + character(len=*), dimension(ndims), intent(in) :: dim_names !< Names of the dimensions associated with a variable + logical, dimension(ndims), intent(out) :: is_x !< Indicates if each dimension is a (likely decomposed) x-axis + logical, dimension(ndims), intent(out) :: is_y !< Indicates if each dimension is a (likely decomposed) y-axis + logical, dimension(ndims), intent(out) :: is_t !< Indicates if each dimension is an unlimited (usually time) axis + + ! Local variables + character(len=128) :: cartesian ! A flag indicating a Cartesian direction - usually a single character.
+ character(len=512) :: dim_list ! A concatenated list of dimension names. + character(len=128) :: units ! units corresponding to a specific variable dimension + logical :: x_found, y_found ! Indicate whether an x- or y- dimension have been found. + integer :: i + + x_found = .false. ; y_found = .false. + is_x(:) = .false. ; is_y(:) = .false. + do i=1,ndims + is_t(i) = is_dimension_unlimited(fileObj, trim(dim_names(i))) + ! First look for indicative variable attributes + if (.not.is_t(i)) then + if (variable_exists(fileobj, trim(dim_names(i)))) then + if (variable_att_exists(fileobj, trim(dim_names(i)), "cartesian_axis")) then + call get_variable_attribute(fileobj, trim(dim_names(i)), "cartesian_axis", cartesian) + cartesian = adjustl(cartesian) + if ((index(cartesian, "X") == 1) .or. (index(cartesian, "x") == 1)) is_x(i) = .true. + if ((index(cartesian, "Y") == 1) .or. (index(cartesian, "y") == 1)) is_y(i) = .true. + if ((index(cartesian, "T") == 1) .or. (index(cartesian, "t") == 1)) is_t(i) = .true. + endif + endif + endif + if (is_x(i)) x_found = .true. + if (is_y(i)) y_found = .true. + enddo + + if (.not.(x_found .and. y_found)) then + ! Next look for hints from axis names for uncharacterized axes + do i=1,ndims ; if (.not.(is_x(i) .or. is_y(i) .or. is_t(i))) then + call categorize_axis_from_name(dim_names(i), is_x(i), is_y(i)) + if (is_x(i)) x_found = .true. + if (is_y(i)) y_found = .true. + endif ; enddo + endif + + if (.not.(x_found .and. y_found)) then + ! Look for hints from CF-compliant axis units for uncharacterized axes + do i=1,ndims ; if (.not.(is_x(i) .or. is_y(i) .or. is_t(i))) then + call get_variable_units(fileobj, trim(dim_names(i)), units) + call categorize_axis_from_units(units, is_x(i), is_y(i)) + if (is_x(i)) x_found = .true. + if (is_y(i)) y_found = .true. + endif ; enddo + endif + + if (.not.(x_found .and. y_found) .and. ((ndims>2) .or. ((ndims==2) .and. .not.is_t(ndims)))) then + ! This is a case where one would expect to find x-and y-dimensions, but none have been found. + if (is_root_pe()) then + dim_list = trim(dim_names(1))//", "//trim(dim_names(2)) + do i=3,ndims ; dim_list = trim(dim_list)//", "//trim(dim_names(i)) ; enddo + call MOM_error(WARNING, "categorize_axes: Failed to identify x- and y- axes in the axis list ("//& + trim(dim_list)//") of a variable being read from "//trim(filename)) + endif + endif + +end subroutine categorize_axes + +!> Determine whether an axis is associated with the x- or y-directions based on a comparison of +!! its units with CF-compliant variants of latitude or longitude units. +subroutine categorize_axis_from_units(unit_string, is_x, is_y) + character(len=*), intent(in) :: unit_string !< string of units + logical, intent(out) :: is_x !< Indicates if the axis units are associated with an x-direction axis + logical, intent(out) :: is_y !< Indicates if the axis units are associated with an y-direction axis + + is_x = .false. ; is_y = .false. + select case (lowercase(trim(unit_string))) + case ("degrees_north"); is_y = .true. + case ("degree_north") ; is_y = .true. + case ("degrees_n") ; is_y = .true. + case ("degree_n") ; is_y = .true. + case ("degreen") ; is_y = .true. + case ("degreesn") ; is_y = .true. + case ("degrees_east") ; is_x = .true. + case ("degree_east") ; is_x = .true. + case ("degreese") ; is_x = .true. + case ("degreee") ; is_x = .true. + case ("degree_e") ; is_x = .true. + case ("degrees_e") ; is_x = .true. + case default ; is_x = .false. ; is_y = .false. 
+ end select + +end subroutine categorize_axis_from_units + +!> Tries to determine whether the axis name is commonly associated with an x- or y- axis. This +!! approach is fragile and unreliable, but it is a backup to reading a CARTESIAN file attribute. +subroutine categorize_axis_from_name(dimname, is_x, is_y) + character(len=*), intent(in) :: dimname !< A dimension name + logical, intent(out) :: is_x !< Indicates if the axis name is associated with an x-direction axis + logical, intent(out) :: is_y !< Indicates if the axis name is associated with a y-direction axis + + is_x = .false. ; is_y = .false. + select case(trim(lowercase(dimname))) + case ("grid_x_t") ; is_x = .true. + case ("nx") ; is_x = .true. + case ("nxp") ; is_x = .true. + case ("longitude") ; is_x = .true. + case ("long") ; is_x = .true. + case ("lon") ; is_x = .true. + case ("lonh") ; is_x = .true. + case ("lonq") ; is_x = .true. + case ("xh") ; is_x = .true. + case ("xq") ; is_x = .true. + case ("i") ; is_x = .true. + + case ("grid_y_t") ; is_y = .true. + case ("ny") ; is_y = .true. + case ("nyp") ; is_y = .true. + case ("latitude") ; is_y = .true. + case ("lat") ; is_y = .true. + case ("lath") ; is_y = .true. + case ("latq") ; is_y = .true. + case ("yh") ; is_y = .true. + case ("yq") ; is_y = .true. + case ("j") ; is_y = .true. + + case default ; is_x = .false. ; is_y = .false. + end select + +end subroutine categorize_axis_from_name + + +!> Write a 4d field to an output file. +subroutine write_field_4d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value) + type(file_type), intent(inout) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:,:,:), intent(inout) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + + + ! Local variables + integer :: time_index + + if (IO_handle%FMS2_file .and. present(tstamp)) then + time_index = write_time_if_later(IO_handle, tstamp) + call write_data(IO_handle%fileobj, trim(field_md%name), field, unlim_dim_level=time_index) + elseif (IO_handle%FMS2_file) then + call write_data(IO_handle%fileobj, trim(field_md%name), field) + else + call mpp_write(IO_handle%unit, field_md%FT, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) + endif +end subroutine write_field_4d + +!> Write a 3d field to an output file. +subroutine write_field_3d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value) + type(file_type), intent(inout) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:,:), intent(inout) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + + ! Local variables + integer :: time_index + + if (IO_handle%FMS2_file .and.
present(tstamp)) then + time_index = write_time_if_later(IO_handle, tstamp) + call write_data(IO_handle%fileobj, trim(field_md%name), field, unlim_dim_level=time_index) + elseif (IO_handle%FMS2_file) then + call write_data(IO_handle%fileobj, trim(field_md%name), field) + else + call mpp_write(IO_handle%unit, field_md%FT, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) + endif +end subroutine write_field_3d + +!> Write a 2d field to an output file. +subroutine write_field_2d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value) + type(file_type), intent(inout) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:), intent(inout) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + + ! Local variables + integer :: time_index + + if (IO_handle%FMS2_file .and. present(tstamp)) then + time_index = write_time_if_later(IO_handle, tstamp) + call write_data(IO_handle%fileobj, trim(field_md%name), field, unlim_dim_level=time_index) + elseif (IO_handle%FMS2_file) then + call write_data(IO_handle%fileobj, trim(field_md%name), field) + else + call mpp_write(IO_handle%unit, field_md%FT, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) + endif +end subroutine write_field_2d + +!> Write a 1d field to an output file. +subroutine write_field_1d(IO_handle, field_md, field, tstamp) + type(file_type), intent(inout) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + real, dimension(:), intent(in) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + + ! Local variables + integer :: time_index + + if (IO_handle%FMS2_file .and. present(tstamp)) then + time_index = write_time_if_later(IO_handle, tstamp) + call write_data(IO_handle%fileobj, trim(field_md%name), field, unlim_dim_level=time_index) + elseif (IO_handle%FMS2_file) then + call write_data(IO_handle%fileobj, trim(field_md%name), field) + else + call mpp_write(IO_handle%unit, field_md%FT, field, tstamp=tstamp) + endif +end subroutine write_field_1d + +!> Write a 0d field to an output file. +subroutine write_field_0d(IO_handle, field_md, field, tstamp) + type(file_type), intent(inout) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + real, intent(in) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model time of this field + + ! Local variables + integer :: time_index + + if (IO_handle%FMS2_file .and. present(tstamp)) then + time_index = write_time_if_later(IO_handle, tstamp) + call write_data(IO_handle%fileobj, trim(field_md%name), field, unlim_dim_level=time_index) + elseif (IO_handle%FMS2_file) then + call write_data(IO_handle%fileobj, trim(field_md%name), field) + else + call mpp_write(IO_handle%unit, field_md%FT, field, tstamp=tstamp) + endif +end subroutine write_field_0d + +!> Returns the integer time index for a write in this file, also writing the time variable to +!! the file if this time is later than what is already in the file. 
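+!! For example, successive calls with increasing values of field_time return 1, 2, 3, and so on, +!! while a call that repeats the most recent time reuses the current index without adding a new +!! entry to the time axis.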
+integer function write_time_if_later(IO_handle, field_time) + type(file_type), intent(inout) :: IO_handle !< Handle for a file that is open for writing + real, intent(in) :: field_time !< Model time of this field + + ! Local variables + character(len=256) :: dim_unlim_name ! name of the unlimited dimension in the file + + if ((field_time > IO_handle%file_time) .or. (IO_handle%num_times == 0)) then + IO_handle%file_time = field_time + IO_handle%num_times = IO_handle%num_times + 1 + if (IO_handle%FMS2_file) then + call get_unlimited_dimension_name(IO_handle%fileobj, dim_unlim_name) + call write_data(IO_handle%fileobj, trim(dim_unlim_name), (/field_time/), & + corner=(/IO_handle%num_times/), edge_lengths=(/1/)) + endif + endif + + write_time_if_later = IO_handle%num_times +end function write_time_if_later + +!> Write the data for an axis +subroutine MOM_write_axis(IO_handle, axis) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(axistype), intent(in) :: axis !< An axis type variable with information to write + + integer :: is, ie + + if (IO_handle%FMS2_file) then + if (axis%domain_decomposed) then + ! FMS2 does not domain-decompose 1d arrays, so we explicitly slice it + call get_global_io_domain_indices(IO_handle%fileobj, trim(axis%name), is, ie) + call write_data(IO_handle%fileobj, trim(axis%name), axis%ax_data(is:ie)) + else + call write_data(IO_handle%fileobj, trim(axis%name), axis%ax_data) + endif + else + call mpp_write(IO_handle%unit, axis%AT) + endif + +end subroutine MOM_write_axis + +!> Store information about an axis in a previously defined axistype and write this +!! information to the file indicated by unit. +subroutine write_metadata_axis(IO_handle, axis, name, units, longname, cartesian, sense, domain, data, & + edge_axis, calendar) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(axistype), intent(inout) :: axis !< The axistype where this information is stored. + character(len=*), intent(in) :: name !< The name in the file of this axis + character(len=*), intent(in) :: units !< The units of this axis + character(len=*), intent(in) :: longname !< The long description of this axis + character(len=*), optional, intent(in) :: cartesian !< A variable indicating which direction + !! this axis corresponds with. Valid values + !! include 'X', 'Y', 'Z', 'T', and 'N' for none. + integer, optional, intent(in) :: sense !< This is 1 for axes whose values increase upward, or + !! -1 if they increase downward. + type(domain1D), optional, intent(in) :: domain !< The domain decomposion for this axis + real, dimension(:), optional, intent(in) :: data !< The coordinate values of the points on this axis + logical, optional, intent(in) :: edge_axis !< If true, this axis marks an edge of the tracer cells + character(len=*), optional, intent(in) :: calendar !< The name of the calendar used with a time axis + + character(len=:), allocatable :: cart ! A left-adjusted and trimmed copy of cartesian + logical :: is_x, is_y, is_t ! If true, this is a domain-decomposed axis in one of the directions. + integer :: position ! A flag indicating the axis staggering position. 
+ integer :: i, isc, iec, global_size + + if (IO_handle%FMS2_file) then + if (is_dimension_registered(IO_handle%fileobj, trim(name))) then + call MOM_error(FATAL, "write_metadata_axis was called more than once for axis "//trim(name)//& + " in file "//trim(IO_handle%filename)) + return + endif + endif + + axis%name = trim(name) + if (present(data) .and. allocated(axis%ax_data)) call MOM_error(FATAL, & + "Data is already allocated in a call to write_metadata_axis for axis "//& + trim(name)//" in file "//trim(IO_handle%filename)) + + if (IO_handle%FMS2_file) then + is_x = .false. ; is_y = .false. ; is_t = .false. + position = CENTER + if (present(cartesian)) then + cart = trim(adjustl(cartesian)) + if ((index(cart, "X") == 1) .or. (index(cart, "x") == 1)) is_x = .true. + if ((index(cart, "Y") == 1) .or. (index(cart, "y") == 1)) is_y = .true. + if ((index(cart, "T") == 1) .or. (index(cart, "t") == 1)) is_t = .true. + endif + + ! For now, we assume that all horizontal axes are domain-decomposed. + if (is_x .or. is_y) & + axis%domain_decomposed = .true. + + if (is_x) then + if (present(edge_axis)) then ; if (edge_axis) position = EAST_FACE ; endif + call register_axis(IO_handle%fileobj, trim(name), 'x', domain_position=position) + elseif (is_y) then + if (present(edge_axis)) then ; if (edge_axis) position = NORTH_FACE ; endif + call register_axis(IO_handle%fileobj, trim(name), 'y', domain_position=position) + elseif (is_t .and. .not.present(data)) then + ! This is the unlimited (time) dimension. + call register_axis(IO_handle%fileobj, trim(name), unlimited) + else + if (.not.present(data)) call MOM_error(FATAL,"MOM_io:register_diagnostic_axis: "//& + "An axis_length argument is required to register the axis "//trim(name)) + call register_axis(IO_handle%fileobj, trim(name), size(data)) + endif + + if (present(data)) then + ! With FMS2, the data for the axis labels has to match the computational domain on this PE. + if (present(domain)) then + ! The commented-out code on the next ~11 lines runs but there is missing data in the output file + ! call mpp_get_compute_domain(domain, isc, iec) + ! call mpp_get_global_domain(domain, size=global_size) + ! if (size(data) == global_size) then + ! allocate(axis%ax_data(iec+1-isc)) ; axis%ax_data(:) = data(isc:iec) + ! ! A simpler set of labels: do i=1,iec-isc ; axis%ax_data(i) = real(isc + i) - 1.0 ; enddo + ! elseif (size(data) == global_size+1) then + ! ! This is an edge axis. Note the effective SW indexing convention here. + ! allocate(axis%ax_data(iec+2-isc)) ; axis%ax_data(:) = data(isc:iec+1) + ! ! A simpler set of labels: do i=1,iec+1-isc ; axis%ax_data(i) = real(isc + i) - 1.5 ; enddo + ! else + ! call MOM_error(FATAL, "Unexpected size of data for "//trim(name)//" in write_metadata_axis.") + ! endif + + ! This works for a simple 1x1 IO layout, but gives errors for nontrivial IO layouts + allocate(axis%ax_data(size(data))) ; axis%ax_data(:) = data(:) + + else ! Store the entire array of axis labels. + allocate(axis%ax_data(size(data))) ; axis%ax_data(:) = data(:) + endif + endif + + + ! Now create the variable that describes this axis. 
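+ ! The coordinate variable shares its name with the dimension registered above; its long_name, + ! units, cartesian_axis and sense attributes are attached just below.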
+    call register_field(IO_handle%fileobj, trim(name), "double", dimensions=(/name/))
+    if (len_trim(longname) > 0) &
+      call register_variable_attribute(IO_handle%fileobj, trim(name), 'long_name', &
+                                       trim(longname), len_trim(longname))
+    if (len_trim(units) > 0) &
+      call register_variable_attribute(IO_handle%fileobj, trim(name), 'units', &
+                                       trim(units), len_trim(units))
+    if (present(cartesian)) &
+      call register_variable_attribute(IO_handle%fileobj, trim(name), 'cartesian_axis', &
+                                       trim(cartesian), len_trim(cartesian))
+    if (present(sense)) &
+      call register_variable_attribute(IO_handle%fileobj, trim(name), 'sense', sense)
+  else
+    if (present(data)) then
+      allocate(axis%ax_data(size(data))) ; axis%ax_data(:) = data(:)
+    endif
+
+    call mpp_write_meta(IO_handle%unit, axis%AT, name, units, longname, cartesian=cartesian, sense=sense, &
+                        domain=domain, data=data, calendar=calendar)
+  endif
+end subroutine write_metadata_axis
+
+!> Store information about an output variable in a previously defined fieldtype and write this
+!! information to the file indicated by IO_handle.
+subroutine write_metadata_field(IO_handle, field, axes, name, units, longname, &
+                                pack, standard_name, checksum)
+  type(file_type),  intent(in)    :: IO_handle !< Handle for a file that is open for writing
+  type(fieldtype),  intent(inout) :: field     !< The fieldtype where this information is stored
+  type(axistype), dimension(:), intent(in) :: axes !< Handles for the axes used for this variable
+  character(len=*), intent(in)    :: name      !< The name in the file of this variable
+  character(len=*), intent(in)    :: units     !< The units of this variable
+  character(len=*), intent(in)    :: longname  !< The long description of this variable
+  integer,          optional, intent(in) :: pack !< A precision reduction factor with which the
+                                              !! variable is written. The default, 1, has no
+                                              !! reduction, but 2 is not uncommon.
+  character(len=*), optional, intent(in) :: standard_name !< The standard (e.g., CMOR) name for this variable
+  integer(kind=int64), dimension(:), &
+                    optional, intent(in) :: checksum !< Checksum values that can be used to verify reads.
+
+  ! Local variables
+  character(len=256), dimension(size(axes)) :: dim_names ! The names of the dimensions
+  type(mpp_axistype), dimension(size(axes)) :: mpp_axes ! The array of mpp_axistypes for this variable
+  character(len=16) :: prec_string ! A string specifying the precision with which to save this variable
+  character(len=64) :: checksum_string ! checksum character array created from checksum argument
+  integer :: i, ndims
+
+  ndims = size(axes)
+  if (IO_handle%FMS2_file) then
+    do i=1,ndims ; dim_names(i) = trim(axes(i)%name) ; enddo
+    prec_string = "double" ; if (present(pack)) then ; if (pack > 1) prec_string = "float" ; endif
+    call register_field(IO_handle%fileobj, trim(name), trim(prec_string), dimensions=dim_names)
+    if (len_trim(longname) > 0) &
+      call register_variable_attribute(IO_handle%fileobj, trim(name), 'long_name', &
+                                       trim(longname), len_trim(longname))
+    if (len_trim(units) > 0) &
+      call register_variable_attribute(IO_handle%fileobj, trim(name), 'units', &
+                                       trim(units), len_trim(units))
+    if (present(standard_name)) &
+      call register_variable_attribute(IO_handle%fileobj, trim(name), 'standard_name', &
+                                       trim(standard_name), len_trim(standard_name))
+    if (present(checksum)) then
+      write (checksum_string,'(Z16)') checksum(1) ! 
Z16 is the hexadecimal format code + call register_variable_attribute(IO_handle%fileobj, trim(name), "checksum", & + trim(checksum_string), len_trim(checksum_string)) + endif + else + do i=1,ndims ; mpp_axes(i) = axes(i)%AT ; enddo + call mpp_write_meta(IO_handle%unit, field%FT, mpp_axes, name, units, longname, & + pack=pack, standard_name=standard_name, checksum=checksum) + ! unused opt. args: min=min, max=max, fill=fill, scale=scale, add=add, & + endif + + ! Store information in the field-type, regardless of which interfaces are used. + field%name = trim(name) + field%longname = trim(longname) + field%units = trim(units) + field%chksum_read = -1 + field%valid_chksum = .false. + +end subroutine write_metadata_field + +!> Write a global text attribute to a file. +subroutine write_metadata_global(IO_handle, name, attribute) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + character(len=*), intent(in) :: name !< The name in the file of this global attribute + character(len=*), intent(in) :: attribute !< The value of this attribute + + if (IO_handle%FMS2_file) then + call register_global_attribute(IO_handle%fileobj, name, attribute, len_trim(attribute)) + else + call mpp_write_meta(IO_handle%unit, name, cval=attribute) + endif + +end subroutine write_metadata_global + +end module MOM_io_infra diff --git a/config_src/infra/FMS2/MOM_time_manager.F90 b/config_src/infra/FMS2/MOM_time_manager.F90 new file mode 100644 index 0000000000..5f3279b713 --- /dev/null +++ b/config_src/infra/FMS2/MOM_time_manager.F90 @@ -0,0 +1,54 @@ +!> Wraps the FMS time manager functions +module MOM_time_manager + +! This file is part of MOM6. See LICENSE.md for the license. + +use time_manager_mod, only : time_type, get_time, set_time +use time_manager_mod, only : time_type_to_real, real_to_time_type +use time_manager_mod, only : operator(+), operator(-), operator(*), operator(/) +use time_manager_mod, only : operator(>), operator(<), operator(>=), operator(<=) +use time_manager_mod, only : operator(==), operator(/=), operator(//) +use time_manager_mod, only : set_ticks_per_second , get_ticks_per_second +use time_manager_mod, only : get_date, set_date, increment_date +use time_manager_mod, only : days_in_month, month_name +use time_manager_mod, only : set_calendar_type, get_calendar_type +use time_manager_mod, only : JULIAN, NOLEAP, THIRTY_DAY_MONTHS, GREGORIAN +use time_manager_mod, only : NO_CALENDAR + +implicit none ; private + +public :: time_type, get_time, set_time +public :: time_type_to_real, real_to_time_type, real_to_time +public :: set_ticks_per_second, get_ticks_per_second +public :: operator(+), operator(-), operator(*), operator(/) +public :: operator(>), operator(<), operator(>=), operator(<=) +public :: operator(==), operator(/=), operator(//) +public :: get_date, set_date, increment_date, month_name, days_in_month +public :: JULIAN, NOLEAP, THIRTY_DAY_MONTHS, GREGORIAN, NO_CALENDAR +public :: set_calendar_type, get_calendar_type + +contains + +!> Returns a time_type version of a real time in seconds, using an alternate implementation to the +!! FMS function real_to_time_type that is accurate over a larger range of input values. With 32 bit +!! signed integers, this version should work over the entire valid range (2^31 days or ~5.8835 +!! million years) of time_types, whereas the standard version in the FMS time_manager stops working +!! for conversions of times greater than 2^31 seconds, or ~68.1 years. +type(time_type) function real_to_time(x, err_msg) +! 
type(time_type) :: real_to_time !< The output time as a time_type + real, intent(in) :: x !< The input time in real seconds. + character(len=*), optional, intent(out) :: err_msg !< An optional returned error message. + + ! Local variables + integer :: seconds, days, ticks + real :: real_subsecond_remainder + + days = floor(x/86400.) + seconds = floor(x - 86400.*days) + real_subsecond_remainder = x - (days*86400. + seconds) + ticks = nint(real_subsecond_remainder * get_ticks_per_second()) + + real_to_time = set_time(seconds=seconds, days=days, ticks=ticks, err_msg=err_msg) +end function real_to_time + +end module MOM_time_manager diff --git a/config_src/dynamic/MOM_memory.h b/config_src/memory/dynamic_nonsymmetric/MOM_memory.h similarity index 100% rename from config_src/dynamic/MOM_memory.h rename to config_src/memory/dynamic_nonsymmetric/MOM_memory.h diff --git a/config_src/dynamic_symmetric/MOM_memory.h b/config_src/memory/dynamic_symmetric/MOM_memory.h similarity index 100% rename from config_src/dynamic_symmetric/MOM_memory.h rename to config_src/memory/dynamic_symmetric/MOM_memory.h diff --git a/docs/.gitignore b/docs/.gitignore index de2f06d096..e8b6a0513b 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -2,8 +2,17 @@ doxygen doxygen.log APIs +MOM6.tags +details/tutorial + + # Ignore sphinx-build output _build api src xml + + +# Citation output +bib*.aux +citelist.doc* diff --git a/docs/Doxyfile_nortd b/docs/Doxyfile_nortd index 76b66b9dd3..4a81c20bd7 100644 --- a/docs/Doxyfile_nortd +++ b/docs/Doxyfile_nortd @@ -1,4 +1,4 @@ -# Doxyfile 1.8.15 +# Doxyfile 1.8.19 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -58,7 +58,7 @@ PROJECT_LOGO = # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = +OUTPUT_DIRECTORY = . # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -187,6 +187,16 @@ SHORT_NAMES = NO JAVADOC_AUTOBRIEF = NO +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus @@ -236,15 +246,45 @@ TAB_SIZE = 2 # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines (in the resulting output). You can put ^^ in the value part of an # alias to insert a newline as if a physical newline was in the original file. - -ALIASES = +# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) + +# Reference: https://git.ligo.org/lscsoft/lalsuite-archive/commit/e6e2dae8a73a73979a64854bbf697b077803697a +#ALIASES = "eqref{1}= Eq. \\eqref{\1}" \ +# "figref{1}= Fig. [\ref \1]" \ +# "tableref{1}= Table [\ref \1]" \ +# "figure{4}= \anchor \1 \image html \1.png \"Fig. 
[\1]: \4\"" + +# This allows doxygen passthrough of \eqref to html for mathjax +# Single reference within a math block +ALIASES += eqref{1}="\latexonly\ref{\1}\endlatexonly\htmlonly \eqref{\1}\endhtmlonly\xmlonly \\eqref{\1}\endxmlonly" + +# Large math block with multiple references +# TODO: We should be able to overload functions but recursion is happening? For now, the +# second command creates a \eqref2 that is passed to sphinx for processing. This breaks +# the html generation for doxygen +# Doxygen 1.8.13 requires extra help via xmlonly +# See python sphinxcontrib-autodoc_doxygen module autodoc_doxygen/xmlutils.py +# \eqref{eq:ale-thickness-equation,ale-equations,thickness} +ALIASES += eqref{3}="\latexonly\ref{\1}\endlatexonly\htmlonly \eqref2{\2,\3}\endhtmlonly\xmlonly \\eqref4{\1}\\eqref2{\2,\3}\endxmlonly" + +# Reference: https://stackoverflow.com/questions/25290453/how-do-i-add-a-footnote-in-doxygen +# TODO: Use this simple js library to create actual footnotes in html +# Reference: https://github.com/jheftmann/footnoted +ALIASES += footnote{1}="\latexonly\footnote\{\1\}\endlatexonly\htmlonly[*]\endhtmlonly\xmlonly[*]\endxmlonly" + +# \image latex does the wrong things to support equations in captions, this recreates +# what it does and just passes through the string uninterpreted. +# The image also needs to be added to LATEX_EXTRA_FILES. +# Default 3rd argument: \includegraphics[width=\textwidth\,height=\textheight/2\,keepaspectratio=true] +ALIASES += imagelatex{3}="\latexonly\begin{DoxyImage}\3{\1}\doxyfigcaption{\2}\end{DoxyImage}\endlatexonly\xmlonly\2\endxmlonly" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. -TCL_SUBST = - # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all @@ -273,17 +313,26 @@ OPTIMIZE_FOR_FORTRAN = YES OPTIMIZE_OUTPUT_VHDL = NO +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. 
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, +# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # @@ -294,7 +343,7 @@ EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. +# documentation. See https://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. @@ -306,7 +355,7 @@ MARKDOWN_SUPPORT = YES # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 0. +# Minimum value: 0, maximum value: 99, default value: 5. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. TOC_INCLUDE_HEADINGS = 0 @@ -422,6 +471,19 @@ TYPEDEF_HIDES_STRUCT = NO LOOKUP_CACHE_SIZE = 0 +# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which efficively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- @@ -442,6 +504,12 @@ EXTRACT_ALL = NO EXTRACT_PRIVATE = YES +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. @@ -496,8 +564,8 @@ HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. +# declarations. If set to NO, these declarations will be included in the +# documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO @@ -520,7 +588,7 @@ INTERNAL_DOCS = YES # names in lower-case letters. 
If set to YES, upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. +# (including Cygwin) and Mac users are advised to set this option to NO. # The default value is: system dependent. CASE_SENSE_NAMES = YES @@ -712,7 +780,7 @@ LAYOUT_FILE = layout.xml # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. -CITE_BIB_FILES = +CITE_BIB_FILES = ocean.bib references.bib zotero.bib #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages @@ -778,7 +846,7 @@ WARN_FORMAT = "$file:$line: $text" # messages should be written. If left blank the output is written to standard # error (stderr). -WARN_LOGFILE = doxygen.log +WARN_LOGFILE = _build/doxygen_warn_nortd_log.txt #--------------------------------------------------------------------------- # Configuration options related to the input files @@ -792,11 +860,10 @@ WARN_LOGFILE = doxygen.log INPUT = ../src \ front_page.md \ - ../config_src/solo_driver \ - ../config_src/dynamic_symmetric - ../config_src/external - ../config_src/coupled_driver - + ../config_src/drivers/solo_driver \ + ../config_src/memory/dynamic_symmetric \ + ../config_src/external \ + ../config_src/drivers/FMS_cap # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -818,28 +885,58 @@ INPUT_ENCODING = UTF-8 # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, -# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. +# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), +# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen +# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.vhdl, *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.c \ *.cc \ *.cxx \ *.cpp \ *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ *.h \ *.hh \ *.hxx \ *.hpp \ *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ *.inc \ *.m \ *.markdown \ *.md \ *.mm \ *.dox \ + *.doc \ + *.txt \ + *.py \ + *.pyw \ *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f18 \ *.f \ *.for \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf \ + *.ice \ *.F90 # The RECURSIVE tag can be used to specify whether or not subdirectories should @@ -1096,7 +1193,7 @@ GENERATE_HTML = YES # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_OUTPUT = APIs +HTML_OUTPUT = _build/APIs # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). @@ -1211,9 +1308,9 @@ HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML # documentation will contain a main index with vertical navigation menus that -# are dynamically created via Javascript. If disabled, the navigation index will +# are dynamically created via JavaScript. 
If disabled, the navigation index will # consists of multiple levels of tabs that are statically embedded in every HTML -# page. Disable this option to support browsers that do not have Javascript, +# page. Disable this option to support browsers that do not have JavaScript, # like the Qt help browser. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1243,13 +1340,13 @@ HTML_INDEX_NUM_ENTRIES = 900 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# environment (see: https://developer.apple.com/xcode/), introduced with OSX +# 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See https://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1288,7 +1385,7 @@ DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on # Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output @@ -1319,7 +1416,7 @@ CHM_FILE = HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). +# (YES) or that it should be included in the main .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. @@ -1364,7 +1461,7 @@ QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace -# (see: http://doc.qt.io/qt-4.8/qthelpproject.html#namespace). +# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1372,7 +1469,8 @@ QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://doc.qt.io/qt-4.8/qthelpproject.html#virtual-folders). +# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- +# folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1380,21 +1478,23 @@ QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). 
+# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- +# filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). +# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- +# filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://doc.qt.io/qt-4.8/qthelpproject.html#filter-attributes). +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = @@ -1478,6 +1578,17 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = svg + # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML @@ -1498,8 +1609,14 @@ FORMULA_FONTSIZE = 10 FORMULA_TRANSPARENT = YES +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. + +FORMULA_MACROFILE = + # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# https://www.mathjax.org) which uses client side Javascript for the rendering +# https://www.mathjax.org) which uses client side JavaScript for the rendering # instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path @@ -1527,10 +1644,10 @@ MATHJAX_FORMAT = HTML-CSS # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from https://www.mathjax.org before deployment. -# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/. +# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. # This tag requires that the tag USE_MATHJAX is set to YES. -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest +MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example @@ -1569,7 +1686,7 @@ MATHJAX_CODEFILE = SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a web server instead of a web client using Javascript. 
There +# implemented using a web server instead of a web client using JavaScript. There # are two flavors of web server based searching depending on the EXTERNAL_SEARCH # setting. When disabled, doxygen will generate a PHP script for searching and # an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing @@ -1664,11 +1781,24 @@ LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate # index for LaTeX. +# Note: This tag is used in the Makefile / make.bat. +# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file +# (.tex). # The default file is: makeindex. # This tag requires that the tag GENERATE_LATEX is set to YES. MAKEINDEX_CMD_NAME = makeindex +# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to +# generate index for LaTeX. In case there is no backslash (\) as first character +# it will be automatically added in the LaTeX code. +# Note: This tag is used in the generated output file (.tex). +# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat. +# The default value is: makeindex. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_MAKEINDEX_CMD = makeindex + # If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. @@ -1696,7 +1826,7 @@ PAPER_TYPE = a4 # If left blank no extra packages will be included. # This tag requires that the tag GENERATE_LATEX is set to YES. -EXTRA_PACKAGES = +EXTRA_PACKAGES =amsmath amstext # The LATEX_HEADER tag can be used to specify a personal LaTeX header for the # generated LaTeX document. The header should contain everything until the first @@ -1742,7 +1872,15 @@ LATEX_EXTRA_STYLESHEET = # markers available. # This tag requires that the tag GENERATE_LATEX is set to YES. -LATEX_EXTRA_FILES = +# Graphics that have math or equations in the caption need to be listed below. +LATEX_EXTRA_FILES = \ + images/cell_3d.png \ + images/Grid_metrics.png \ + images/h_PPM.png \ + images/Newton_PPM.png \ + images/PG_loop.png \ + images/shao3.png \ + images/shao4.png # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is # prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will @@ -1753,9 +1891,11 @@ LATEX_EXTRA_FILES = PDF_HYPERLINKS = YES -# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate -# the PDF file directly from the LaTeX files. Set this option to YES, to get a -# higher quality PDF documentation. +# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as +# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX +# files. Set this option to YES, to get a higher quality PDF documentation. +# +# See also section LATEX_CMD_NAME for selecting the engine. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1803,6 +1943,14 @@ LATEX_BIB_STYLE = plain LATEX_TIMESTAMP = NO +# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute) +# path from which the emoji images will be read. If a relative path is entered, +# it will be relative to the LATEX_OUTPUT directory. If left blank the +# LATEX_OUTPUT directory will be used. +# This tag requires that the tag GENERATE_LATEX is set to YES. 
+ +LATEX_EMOJI_DIRECTORY = + #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- @@ -1940,6 +2088,13 @@ XML_OUTPUT = xml XML_PROGRAMLISTING = YES +# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include +# namespace members in file scope as well, matching the HTML output. +# The default value is: NO. +# This tag requires that the tag GENERATE_XML is set to YES. + +XML_NS_MEMB_FILE_SCOPE = NO + #--------------------------------------------------------------------------- # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- @@ -2057,7 +2212,7 @@ SEARCH_INCLUDES = YES # This tag requires that the tag SEARCH_INCLUDES is set to YES. INCLUDE_PATH = ../src/framework \ - ../config_src/dynamic_symmetric + ../config_src/memory/dynamic_symmetric # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the @@ -2119,7 +2274,7 @@ TAGFILES = # tag file that is based on the input files it reads. See section "Linking to # external documentation" for more information about the usage of tag files. -GENERATE_TAGFILE = +GENERATE_TAGFILE = MOM6.tags # If the ALLEXTERNALS tag is set to YES, all external class will be listed in # the class index. If set to NO, only the inherited external classes will be @@ -2142,12 +2297,6 @@ EXTERNAL_GROUPS = YES EXTERNAL_PAGES = YES -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of 'which perl'). -# The default file (with absolute path) is: /usr/bin/perl. - -PERL_PATH = /usr/bin/perl - #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- @@ -2161,15 +2310,6 @@ PERL_PATH = /usr/bin/perl CLASS_DIAGRAMS = YES -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see: -# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. diff --git a/docs/Doxyfile_nortd_latex b/docs/Doxyfile_nortd_latex new file mode 100644 index 0000000000..207e645195 --- /dev/null +++ b/docs/Doxyfile_nortd_latex @@ -0,0 +1,2608 @@ +# Doxyfile 1.8.19 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). 
+ +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "MOM6" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = . + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. 
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all generated output in the proper direction. +# Possible values are: None, LTR, RTL and Context. +# The default value is: None. + +OUTPUT_TEXT_DIRECTION = None + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. 
+# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 2 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". 
You can put \n's in the value part of an alias to insert +# newlines (in the resulting output). You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. +# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) + +# Reference: https://git.ligo.org/lscsoft/lalsuite-archive/commit/e6e2dae8a73a73979a64854bbf697b077803697a +#ALIASES = "eqref{1}= Eq. \\eqref{\1}" \ +# "figref{1}= Fig. [\ref \1]" \ +# "tableref{1}= Table [\ref \1]" \ +# "figure{4}= \anchor \1 \image html \1.png \"Fig. [\1]: \4\"" + +# This allows doxygen passthrough of \eqref to html for mathjax +# Single reference within a math block +ALIASES += eqref{1}="\latexonly\ref{\1}\endlatexonly\htmlonly \eqref{\1}\endhtmlonly\xmlonly \\eqref{\1}\endxmlonly" + +# Large math block with multiple references +# TODO: We should be able to overload functions but recursion is happening? For now, the +# second command creates a \eqref2 that is passed to sphinx for processing. This breaks +# the html generation for doxygen +# Doxygen 1.8.13 requires extra help via xmlonly +# See python sphinxcontrib-autodoc_doxygen module autodoc_doxygen/xmlutils.py +# \eqref{eq:ale-thickness-equation,ale-equations,thickness} +ALIASES += eqref{3}="\latexonly\ref{\1}\endlatexonly\htmlonly \eqref2{\2,\3}\endhtmlonly\xmlonly \\eqref4{\1}\\eqref2{\2,\3}\endxmlonly" + +# Reference: https://stackoverflow.com/questions/25290453/how-do-i-add-a-footnote-in-doxygen +# TODO: Use this simple js library to create actual footnotes in html +# Reference: https://github.com/jheftmann/footnoted +ALIASES += footnote{1}="\latexonly\footnote\{\1\}\endlatexonly\htmlonly[*]\endhtmlonly\xmlonly[*]\endxmlonly" + +# \image latex does the wrong things to support equations in captions, this recreates +# what it does and just passes through the string uninterpreted. +# The image also needs to be added to LATEX_EXTRA_FILES. +# Default 3rd argument: \includegraphics[width=\textwidth\,height=\textheight/2\,keepaspectratio=true] +ALIASES += imagelatex{3}="\latexonly\begin{DoxyImage}\3{\1}\doxyfigcaption{\2}\end{DoxyImage}\endlatexonly\xmlonly\2\endxmlonly" + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. 
+ +OPTIMIZE_FOR_FORTRAN = YES + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, +# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. 
+ +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = YES + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. 
+# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which efficively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = YES + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = YES + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. 
+# The default value is: NO. + +EXTRACT_LOCAL_METHODS = YES + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = YES + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# (including Cygwin) and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. 
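+# As an illustration of the INTERNAL_DOCS = YES choice above (a hypothetical
+# Fortran doc comment, not taken from the MOM6 sources), text following the
+# \internal command is kept in the generated pages:
+#
+#   !> Solves the tridiagonal system for the implicit vertical diffusion step.
+#   !> \internal Uses the Thomas algorithm and assumes diagonal dominance.
+#
+# With INTERNAL_DOCS = NO the second comment line would be omitted from the
+# output.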
+ +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. 
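+# For example (a hypothetical Fortran doc comment, not from the MOM6 sources),
+# entries are added to these lists directly from the source:
+#
+#   !> Vertically remaps tracers onto the new grid.
+#   !> \todo Merge the duplicated boundary loop with the h-remapping version.
+#   !> \bug Assumes at least two layers; single-layer configs are not handled.
+#
+# Each \todo and \bug paragraph also appears on the corresponding related page.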
+ +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = layout.xml + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. 
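+# For example (hypothetical Fortran doc comment and BibTeX key, not taken from
+# the MOM6 sources), a reference from one of the .bib files listed below is
+# cited in a doc comment as:
+#
+#   !> The pressure-gradient scheme follows \cite smith2005_pgf.
+#
+# Doxygen then renders a bibliography page and links the citation to it.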
See also \cite for info how to create references. + +CITE_BIB_FILES = ocean.bib references.bib zotero.bib + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. If +# EXTRACT_ALL is set to YES then this flag will automatically be disabled. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = _build/doxygen_warn_nortd_latex_log.txt + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. 
+ +INPUT = ../src \ + front_page.md \ + ../config_src/drivers/solo_driver \ + ../config_src/memory/dynamic_symmetric \ + ../config_src/external \ + ../config_src/drivers/FMS_cap + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: https://www.gnu.org/software/libiconv/) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), +# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen +# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.vhdl, *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.doc \ + *.txt \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f18 \ + *.f \ + *.for \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf \ + *.ice \ + *.F90 + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = ../src/equation_of_state/TEOS10 + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = makedep.py \ + Makefile \ + INSTALL + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. 
Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = ../src + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = images \ + ../src + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. 
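+# As a concrete illustration of the filter options above (hypothetical values,
+# not used by this configuration), a per-extension filter could be declared as
+#
+#   FILTER_PATTERNS = *.md=./scrub_markdown.py
+#
+# so that every Markdown file is piped through ./scrub_markdown.py (via popen)
+# and doxygen parses the filter's standard output instead of the file itself;
+# files that match no pattern fall back to INPUT_FILTER, which is empty here.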
+ +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = front_page.md + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = YES + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = NO + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. 
+# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 1 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = _build/APIs + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. 
If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. 
The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 900 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: https://developer.apple.com/xcode/), introduced with OSX +# 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. 
This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the main .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. 
Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = svg + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. 
When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. + +FORMULA_MACROFILE = + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# https://www.mathjax.org) which uses client side JavaScript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = YES + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from https://www.mathjax.org before deployment. +# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. 
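+# For example (a hypothetical Fortran doc comment, not from the MOM6 sources),
+# formulas written with doxygen's \f$ ... \f$ (inline) or \f[ ... \f] (display)
+# markup are rendered in the HTML output by MathJax when USE_MATHJAX = YES:
+#
+#   !> Continuity of layer thickness,
+#   !> \f[ \partial_t h + \nabla \cdot (h \mathbf{u}) = 0 , \f]
+#   !> is enforced within each layer.
+#
+# With USE_MATHJAX = NO the same markup would instead be converted to images
+# (PNG or SVG, depending on HTML_FORMULA_FORMAT).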
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/