diff --git a/ChangeLog b/ChangeLog index 17470429..b38549b0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -17,7 +17,7 @@ CISM version 2.0, which originated from Glimmer-CISM and Glimmer, has a number o * new and updated documentation More information including full documentation of the code can be found at: -http://oceans11.lanl.gov/cism/ +https://github.com/CISM/cism-documentation CISM version 2.0 will be hosted at the CISM Github organization: https://github.com/cism/cism diff --git a/NEWS b/NEWS index 8ad9ef88..f7b6d168 100644 --- a/NEWS +++ b/NEWS @@ -21,7 +21,7 @@ relative to these previous codes: * new and updated documentation More information including full documentation of the code can be found at: -http://oceans11.lanl.gov/cism/index.html +https://github.com/CISM/cism-documentation The original Glimmer-CISM and Glimmer codes can be found at the Glimmer-CISM Github organization: https://github.com/glimmer-cism diff --git a/README b/README index 6bfb03b9..30a9b204 100644 --- a/README +++ b/README @@ -4,7 +4,7 @@ CISM README file: CISM is a land ice model designed to be used as part of an earth-system model or as a stand-alone model. Full documentation can be found at: -http://oceans11.lanl.gov/cism/documentation.html +https://github.com/CISM/cism-documentation Licensing: ========= @@ -52,7 +52,7 @@ Building / Installing CISM: =========================== For detailed instructions on how to install and build CISM, please see Chapter 2 of the users -guide (available at: http://oceans11.lanl.gov/cism/documentation.html) +guide (available at: https://github.com/CISM/cism-documentation) last updated: 10/21/2014 diff --git a/builds/blizzard-gnu/blizzard-gnu-build-and-test.csh b/builds/blizzard-gnu/blizzard-gnu-build-and-test.csh deleted file mode 100644 index d24d993a..00000000 --- a/builds/blizzard-gnu/blizzard-gnu-build-and-test.csh +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/csh - - - -# Master build script for mac laptops. Last updated 2/28/2013 by SFP. -# This is a hacked version of Kate's original script for use on Hopper. -# For now, only supports parallel build with Trilinos using gnu and cmake. -# Only a subset of the small, standard tests are run, on both 1 and 4 procs. - -# (1) execute from the builds/blizzard-gnu subdirectory of CISM - -#add logic at the top to decide which versions to build - -# PARALLEL BUILD WITH CMAKE - -# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order" - -# 5/7/2014 DMR -- added performance tests: - -## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted -## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that, -## you can go to lines #193-194, 197-198, 201-202, and uncomment them. -setenv PERF_TEST 0 - -@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests) || ($5 == run-perf-tests)) - -if ($run_perf_tests) then - setenv PERF_TEST 1 -endif - -@ skip_build_set = (($1 == skip-build) || ($2 == skip-build) || ($3 == skip-build) || ($4 == skip-build) || ($5 == skip-build)) - -@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy) || ($5 == no-copy)) - -@ skip_tests_set = (($1 == skip-tests) || ($2 == skip-tests) || ($3 == skip-tests) || ($4 == skip-tests) || ($5 == skip-tests)) - -#**!move this and source it to your .bashrc (wherever your higher-order directory is located) -#setenv TEST_DIR /lustre/atlas/scratch/$USER/cli062/higher-order - -if (! 
-d $TEST_DIR) mkdir -p $TEST_DIR - -setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv -#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite - -setenv build_problem 0 - -set COMPILER_NAME = gnu -set PLATFORM_NAME = blizzard - -# set PLATFORM_NAME = $1 -# set COMPILER_NAME = $2 - -set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-cmake' -set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out' -set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out' -#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job' -#set CISM_RUN_SCRIPT = 'hopjob' -set CISM_RUN_SCRIPT = 'ijob_linux' -set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash' -#set CISM_VV_SCRIPT = 'rhea_VV.bash' - -echo -echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh' -echo -#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test' -echo -echo "Call with no-copy to prevent copying of the reg_test and livv defaults." -echo "Call with run-perf-tests to run the performance tests." -echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)." - - -echo -echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).' -echo - - -#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR' -#echo 'Examples (place in .cshrc or .bashrc):' -#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"' -#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"' -echo -echo 'Setting TEST_DIR to the location: ' -echo 'TEST_DIR =' $TEST_DIR -echo 'TEST_DIR must also be set in your .bashrc file.' - -# PARALLEL BUILD WITH CMAKE - - -if ($skip_build_set == 0) then - -echo -echo "Configuring and building in directory: " $PWD -echo - -echo 'Configuring '$COMPILER_NAME' cmake build...' -source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT -echo 'Making parallel '$COMPILER_NAME'...' -make -j 8 >& $CMAKE_BUILD_OUT - -#if ( -e example-drivers/simple_glide/src/simple_glide ) then -# echo 'Copying '$COMPILER_NAME' parallel simple_glide_'$COMPILER_NAME' to test directory' -# cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_$COMPILER_NAME -#else -# echo "cmake '$COMPILER_NAME' build failed, no executable" -# @ build_problem = 1 -#endif - -if ( -e cism_driver/cism_driver ) then - echo 'Copying '$COMPILER_NAME' parallel cism_driver_'$COMPILER_NAME' to test directory' - cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_$COMPILER_NAME -else - echo "cmake '$COMPILER_NAME' build failed, no executable" - @ build_problem = 1 -endif - -endif # skip_build_set - -if ($build_problem == 1) then - echo "No job submitted -- cmake build failed." -else # execute tests: - - # Make copy of test suite in $TEST_DIR: -if (! ($no_copy_set)) then - echo "Copying default reg_test and LIVV to $TEST_DIR" - pushd . > /dev/null - cd $TEST_DIR - if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz - wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz - tar xfz reg_test_default.tgz - popd > /dev/null - - if ($PERF_TEST) then - echo "Copying default perf_test to $TEST_DIR" - pushd . > /dev/null - cd $TEST_DIR - if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz - wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz - tar xfz perf_test_default.tgz - popd > /dev/null - endif - - cp -rf ../../tests/higher-order/livv $TEST_DIR -endif - -if ($skip_tests_set) then - echo "Skipping tests." 
- exit -endif - -csh $TEST_DIR/livv/run_livv_default_tests.csh $TEST_DIR $CISM_RUN_SCRIPT $PERF_TEST $CISM_VV_SCRIPT -echo "Back in build-and-test script, exiting." -exit - - diff --git a/builds/blizzard-gnu/blizzard-gnu-cmake b/builds/blizzard-gnu/blizzard-gnu-cmake deleted file mode 100755 index 9ee113c7..00000000 --- a/builds/blizzard-gnu/blizzard-gnu-cmake +++ /dev/null @@ -1,67 +0,0 @@ -# cmake configuration script that works on the Linux box in Matt's office (blueskies) with GCC -# Others will need to modify the Netcdf path. -# This config script is setup to perform a parallel build with Trilinos. -# -# BUILD OPTIONS: -# The call to cmake below includes several input ON/OFF switch parameters, to -# provide a simple way to select different build options. These are: -# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries. -# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver. -# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver. -# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos. -# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds. -# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds. -# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation. -# CISM_COUPLED -- OFF by default, set to ON to build with CESM. - -# After this executes, do: -# make -j 8 -# - -#echo -#echo Run this script by typing: source linux-gnu-cism-cmake -#echo -#echo Set CISM_TRILINOS_DIR to your Trilinos installation directory. -#echo - -# remove old build data: -rm -f ./CMakeCache.txt -rm -rf ./CMakeFiles - -echo -echo "Doing CMake Configuration step" - -cmake \ - -D CISM_BUILD_CISM_DRIVER:BOOL=ON \ - -D CISM_ENABLE_BISICLES=OFF \ - -D CISM_ENABLE_FELIX=OFF \ -\ - -D CISM_USE_TRILINOS:BOOL=ON \ - -D CISM_MPI_MODE:BOOL=ON \ - -D CISM_SERIAL_MODE:BOOL=OFF \ -\ - -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \ - -D CISM_COUPLED:BOOL=OFF \ -\ - -D CISM_TRILINOS_DIR=/opt/trilinos-11.4.1_GNU4.8.3 \ - -D CISM_HDF5_LIB_DIR=/opt/hdf5_gcc4.8.3 \ - -D CISM_NETCDF_DIR=/opt/netcdf4.3.2_gcc4.8.3 \ -\ - -D CMAKE_Fortran_FLAGS="-g -O2 -ffree-line-length-none -fPIC -fno-range-check" \ -\ - -D CMAKE_CXX_COMPILER=mpicxx \ - -D CMAKE_C_COMPILER=mpicc \ - -D CMAKE_Fortran_COMPILER=mpif90 \ -\ - -D CISM_EXTRA_LIBS:STRING="-lblas -lcurl" \ -\ - -D CISM_MPI_INC_DIR=/opt/mpi3.1.2_gnu4.8.3/include \ - -D CISM_MPI_LIB_DIR=/opt/mpi3.1.2_gnu4.8.3/lib \ -\ - -D CMAKE_VERBOSE_MAKEFILE=OFF \ - -D CISM_STATIC_LINKING:BOOL=ON \ - .. - -# Note: last argument above "../.." is path to top seacism directory - -# -D CISM_NETCDF_LIBS="netcdff" \ diff --git a/builds/blizzard-gnu/blizzard-gnu-cmake-debug b/builds/blizzard-gnu/blizzard-gnu-cmake-debug deleted file mode 100755 index 5afc3133..00000000 --- a/builds/blizzard-gnu/blizzard-gnu-cmake-debug +++ /dev/null @@ -1,81 +0,0 @@ -# Requires (command line or in .bashrc): -# module load cmake -# -# module unload cmake python netcdf hdf5 -# module swap PrgEnv-pgi PrgEnv-gnu -# module load netcdf-hdf5parallel/4.2.0 cmake/2.8.6 python -# -# cmake configuration script that works on jaguar with GCC -# This script needs to be run from a subdirectory (e.g. build-gnu) -# of the main seacism repository (reflected in the several -# instances of # ".." below). -# -# After this executes, do: -# make -j 8 -# cp example-drivers/simple_glide/src/sgcmake . 
- -# remove old build data: -rm -f ./CMakeCache.txt -rm -rf ./CMakeFiles - -echo -echo "Doing CMake Configuration step" - -cmake \ - -D CISM_BUILD_CISM_DRIVER:BOOL=ON \ - -D CISM_ENABLE_BISICLES=OFF \ - -D CISM_ENABLE_FELIX=OFF \ -\ - -D CISM_USE_TRILINOS:BOOL=ON \ - -D CISM_MPI_MODE:BOOL=ON \ - -D CISM_SERIAL_MODE:BOOL=OFF \ -\ - -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \ - -D CISM_COUPLED:BOOL=OFF \ -\ - -D CISM_TRILINOS_DIR=/opt/trilinos-11.4.1_GNU4.8.3 \ - -D CISM_HDF5_LIB_DIR=/opt/hdf5_gcc4.8.3 \ - -D CISM_NETCDF_DIR=/opt/netcdf4.3.2_gcc4.8.3 \ -\ - -D CMAKE_Fortran_FLAGS="-g -fbounds-check -fcheck-array-temporaries -ffree-line-length-none" \ -\ - -D CMAKE_CXX_COMPILER=mpicxx \ - -D CMAKE_C_COMPILER=mpicc \ - -D CMAKE_Fortran_COMPILER=mpif90 \ -\ - -D CISM_EXTRA_LIBS:STRING="-lblas -lcurl" \ -\ - -D CISM_MPI_INC_DIR=/opt/mpi3.1.2_gnu4.8.3/include \ - -D CISM_MPI_LIB_DIR=/opt/mpi3.1.2_gnu4.8.3/lib \ -\ - -D CMAKE_VERBOSE_MAKEFILE=OFF \ - -D CISM_STATIC_LINKING:BOOL=ON \ - .. - -# Note: last argument above ".." is path to top seacism directory - -# Prg Env that worked in titan 1/17/2013 -#Currently Loaded Modulefiles: -# 1) modules/3.2.6.6 22) audit/1.0.0-1.0401.34509.4.34.gem -# 2) xtpe-network-gemini 23) rca/1.0.0-2.0401.34092.9.59.gem -# 3) xtpe-interlagos 24) krca/1.0.0-2.0401.33562.3.95.gem -# 4) eswrap/1.0.15 25) dvs/0.9.0-1.0401.1327.13.34.gem -# 5) lustredu/1.2 26) csa/3.0.0-1_2.0401.33458.3.110.gem -# 6) DefApps 27) job/1.5.5-0.1_2.0401.34507.6.2.gem -# 7) altd/1.0 28) xpmem/0.1-2.0401.32557.3.12.gem -# 8) torque/4.1.4 29) gni-headers/2.1-1.0401.5618.16.1.gem -# 9) moab/7.1.3 30) dmapp/3.2.1-1.0401.5585.5.2.gem -# 10) cray-mpich2/5.5.5 31) pmi/4.0.0-1.0000.9282.69.4.gem -# 11) subversion/1.6.17 32) ugni/4.0-1.0401.5617.15.1.gem -# 12) atp/1.5.2 33) udreg/2.3.2-1.0401.5828.5.1.gem -# 13) xe-sysroot/4.1.20 34) xt-libsci/11.1.01 -# 14) switch/1.0-1.0401.34518.4.34.gem 35) gcc/4.7.2 -# 15) shared-root/1.0-1.0401.34936.4.9.gem 36) xt-asyncpe/5.16 -# 16) pdsh/2.2-1.0401.34516.3.1.gem 37) PrgEnv-gnu/4.1.20 -# 17) nodehealth/3.0-1.0401.35104.16.2.gem 38) cmake/2.8.6 -# 18) lbcd/2.1-1.0401.34512.5.1.gem 39) python/2.7.2 -# 19) hosts/1.0-1.0401.34511.5.34.gem 40) hdf5-parallel/1.8.8 -# 20) configuration/1.0-1.0401.34510.3.3.gem 41) netcdf-hdf5parallel/4.2.0 -# 21) ccm/2.2.0-1.0401.34937.13.25 - - diff --git a/builds/hopper-gnu-felix/hopper-gnu-bisicles-cmake b/builds/cori-gnu-felix/cori-gnu-bisicles-cmake similarity index 91% rename from builds/hopper-gnu-felix/hopper-gnu-bisicles-cmake rename to builds/cori-gnu-felix/cori-gnu-bisicles-cmake index 1dfa0eb5..d3f7cc6c 100755 --- a/builds/hopper-gnu-felix/hopper-gnu-bisicles-cmake +++ b/builds/cori-gnu-felix/cori-gnu-bisicles-cmake @@ -1,14 +1,14 @@ -# run this script by typing: source hopper-gnu-bisicles-cmake +# run this script by typing: source cori-gnu-bisicles-cmake # After thus script completes, type: make -j 8 # If rebuilding, type 'make clean' before running 'make -j 8' # This cmake configuration script builds cism_driver -# on hopper using the gnu compiler suite. It no longer relies on a build +# on cori using the gnu compiler suite. It no longer relies on a build # of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR # (currently set to /global/u2/d/dmartin/BISICLES/code/interface) -# This script should be run from the builds/hopper-pgi subdirectory +# This script should be run from the builds/cori-pgi subdirectory # of the main seacism repository (reflected in the two instances # of "../.." below). 
@@ -24,10 +24,10 @@ # CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation. # CISM_COUPLED -- OFF by default, set to ON to build with CESM. echo -echo Run this script by typing: source hopper-gnu-bisicles-cmake +echo Run this script by typing: source cori-gnu-bisicles-cmake echo -#echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide +#echo Warning: Python problem. After the last cori system updates 1/28/13, in order to run simple_glide #echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module. #echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines: #echo module unload python diff --git a/builds/hopper-gnu-felix/hopper-gnu-felix-cmake b/builds/cori-gnu-felix/cori-gnu-felix-cmake similarity index 90% rename from builds/hopper-gnu-felix/hopper-gnu-felix-cmake rename to builds/cori-gnu-felix/cori-gnu-felix-cmake index a361ab86..8c9bf8c8 100755 --- a/builds/hopper-gnu-felix/hopper-gnu-felix-cmake +++ b/builds/cori-gnu-felix/cori-gnu-felix-cmake @@ -1,14 +1,14 @@ -# run this script by typing: source hopper-gnu-felix-cmake +# run this script by typing: source cori-gnu-felix-cmake # After thus script completes, type: make -j 8 # If rebuilding, type 'make clean' before running 'make -j 8' # This cmake configuration script builds cism_driver -# on hopper using the PGI compiler suite. It relies on a build of Trilinos +# on cori using the PGI compiler suite. It relies on a build of Trilinos # located in /global/project/projectdirs/piscees, and a build of BISICLES # located in the ranken home directory: /global/u1/r/ranken/BISICLES -# This script should be run from the builds/hopper-pgi subdirectory +# This script should be run from the builds/cori-pgi subdirectory # of the main seacism repository (reflected in the two instances # of "../.." below). @@ -24,10 +24,10 @@ # CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation. # CISM_COUPLED -- OFF by default, set to ON to build with CESM. echo -echo Run this script by typing: source hopper-gnu-felix-cmake +echo Run this script by typing: source cori-gnu-felix-cmake echo -#echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide +#echo Warning: Python problem. After the last cori system updates 1/28/13, in order to run simple_glide #echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module. #echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines: #echo module unload python diff --git a/builds/hopper-gnu/README b/builds/cori-gnu/README similarity index 100% rename from builds/hopper-gnu/README rename to builds/cori-gnu/README diff --git a/builds/hopper-gnu/hopper-gnu-bisicles-cmake b/builds/cori-gnu/cori-gnu-bisicles-cmake similarity index 91% rename from builds/hopper-gnu/hopper-gnu-bisicles-cmake rename to builds/cori-gnu/cori-gnu-bisicles-cmake index 58539551..1cf8478c 100644 --- a/builds/hopper-gnu/hopper-gnu-bisicles-cmake +++ b/builds/cori-gnu/cori-gnu-bisicles-cmake @@ -1,14 +1,14 @@ -# run this script by typing: source hopper-bisicles-gnu-cmake +# run this script by typing: source cori-bisicles-gnu-cmake # After thus script completes, type: make -j 8 # If rebuilding, type 'make clean' before running 'make -j 8' # This cmake configuration script builds cism_driver -# on hopper using the gnu compiler suite. 
It no longer relies on a build +# on cori using the gnu compiler suite. It no longer relies on a build # of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR # (currently set to ./../../../BISICLES/CISM-interface/interface ) -# This script should be run from the builds/hopper-gnu subdirectory +# This script should be run from the builds/cori-gnu subdirectory # of the main seacism repository (reflected in the two instances # of "../.." below). @@ -24,7 +24,7 @@ # CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation. # CISM_COUPLED -- OFF by default, set to ON to build with CESM. echo -echo Run this script by typing: source hopper-bisicles-gnu-cmake +echo Run this script by typing: source cori-bisicles-gnu-cmake echo module unload cmake @@ -83,14 +83,14 @@ cmake \ -D ALBANY_FELIX_DYCORE:BOOL=OFF \ \ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \ - -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/hopper-gnu-ci-nophal/install \ + -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/cori-gnu-ci-nophal/install \ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \ \ -D CISM_NETCDF_DIR=$NETCDF_DIR \ -D CISM_HDF5_LIB_DIR=$HDF5_DIR \ -D CISM_MPI_BASE_DIR=$MPICH_DIR \ \ - -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-hopper-gnu \ + -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-cori-gnu \ \ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ diff --git a/builds/hopper-gnu/hopper-gnu-cmake b/builds/cori-gnu/cori-gnu-cmake similarity index 91% rename from builds/hopper-gnu/hopper-gnu-cmake rename to builds/cori-gnu/cori-gnu-cmake index 809ba7e3..0ab04eac 100644 --- a/builds/hopper-gnu/hopper-gnu-cmake +++ b/builds/cori-gnu/cori-gnu-cmake @@ -1,13 +1,13 @@ -# run this script by typing: source hopper-gnu-cmake +# run this script by typing: source cori-gnu-cmake # After thus script completes, type: make -j 8 # If rebuilding, type 'make clean' before running 'make -j 8' # This cmake configuration script builds cism_driver -# on hopper using the gnu compiler suite. It no longer relies on a build +# on cori using the gnu compiler suite. It no longer relies on a build # of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR # (currently set to /global/u2/d/dmartin/BISICLES/code/interface) -# This script should be run from the builds/hopper-gnu subdirectory +# This script should be run from the builds/cori-gnu subdirectory # of the main seacism repository (reflected in the two instances # of "../.." below). @@ -24,7 +24,7 @@ # CISM_COUPLED -- OFF by default, set to ON to build with CESM. 
echo -echo Run this script by typing: source hopper-gnu-cmake +echo Run this script by typing: source cori-gnu-cmake echo module unload cmake @@ -81,14 +81,14 @@ cmake \ -D ALBANY_FELIX_DYCORE:BOOL=OFF \ \ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \ - -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/hopper-gnu-ci-nophal/install \ + -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/cori-gnu-ci-nophal/install \ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \ \ -D CISM_NETCDF_DIR=$NETCDF_DIR \ -D CISM_HDF5_LIB_DIR=$HDF5_DIR \ -D CISM_MPI_BASE_DIR=$MPICH_DIR \ \ - -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-hopper-gnu \ + -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-cori-gnu \ \ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ diff --git a/builds/hopper-gnu/hopper-gnu-cmake.sh b/builds/cori-gnu/cori-gnu-cmake.sh similarity index 95% rename from builds/hopper-gnu/hopper-gnu-cmake.sh rename to builds/cori-gnu/cori-gnu-cmake.sh index aa5dba89..badfa46d 100755 --- a/builds/hopper-gnu/hopper-gnu-cmake.sh +++ b/builds/cori-gnu/cori-gnu-cmake.sh @@ -77,16 +77,16 @@ cmake \ -D ALBANY_FELIX_DYCORE:BOOL=OFF \ \ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \ - -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/hopper-gnu-ci-nophal/install \ + -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/cori-gnu-ci-nophal/install \ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \ \ -D CISM_NETCDF_DIR="$NETCDF_DIR" \ -D CISM_HDF5_LIB_DIR="$HDF5_DIR" \ -D CISM_MPI_BASE_DIR="$MPICH_DIR" \ \ - -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-hopper-gnu \ + -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-cori-gnu \ \ - -D CMAKE_INSTALL_PREFIX:PATH="$cism_top/builds/hopper-gnu/install" \ + -D CMAKE_INSTALL_PREFIX:PATH="$cism_top/builds/cori-gnu/install" \ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \ \ diff --git a/builds/hopper-gnu/hopper-gnu-serial-cmake b/builds/cori-gnu/cori-gnu-serial-cmake similarity index 89% rename from builds/hopper-gnu/hopper-gnu-serial-cmake rename to builds/cori-gnu/cori-gnu-serial-cmake index 1fd4e1ed..981cbb79 100644 --- a/builds/hopper-gnu/hopper-gnu-serial-cmake +++ b/builds/cori-gnu/cori-gnu-serial-cmake @@ -1,14 +1,14 @@ -# run this script by typing: source hopper-gnu-cmake +# run this script by typing: source cori-gnu-cmake # After thus script completes, type: make -j 8 # If rebuilding, type 'make clean' before running 'make -j 8' # This cmake configuration script builds cism_driver -# on hopper using the gnu compiler suite. It no longer relies on a build +# on cori using the gnu compiler suite. It no longer relies on a build # of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR # (currently set to /global/u2/d/dmartin/BISICLES/code/interface) -# This script should be run from the builds/hopper-pgi subdirectory +# This script should be run from the builds/cori-pgi subdirectory # of the main seacism repository (reflected in the two instances # of "../.." below). @@ -24,9 +24,9 @@ # CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation. 
# CISM_COUPLED -- OFF by default, set to ON to build with CESM. echo -echo Run this script by typing: source hopper-bisicles-gnu-cmake +echo Run this script by typing: source cori-bisicles-gnu-cmake echo -echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide +echo Warning: Python problem. After the last cori system updates 1/28/13, in order to run simple_glide echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module. echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines: echo module unload python @@ -87,13 +87,13 @@ cmake \ -D ALBANY_FELIX_DYCORE:BOOL=OFF \ \ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \ - -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos/hopper-gnu-cism-albany-ci-nophal/install \ + -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos/cori-gnu-cism-albany-ci-nophal/install \ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \ -D CISM_NETCDF_DIR=/opt/cray/netcdf/4.3.0/GNU/47 \ -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5/1.8.11/GNU/47/lib \ -D CISM_MPI_BASE_DIR=/opt/cray/mpt/5.6.4/gni/mpich2-gnu/47 \ \ - -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-hopper-gnu_4.7.2 \ + -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-cori-gnu_4.7.2 \ \ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ diff --git a/builds/hopper-pgi/README b/builds/cori-pgi/README similarity index 86% rename from builds/hopper-pgi/README rename to builds/cori-pgi/README index 57ef8848..08ab1f09 100644 --- a/builds/hopper-pgi/README +++ b/builds/cori-pgi/README @@ -31,7 +31,7 @@ build problems, first check the differences between the two scripts. ------------------------------------------------------------------------------ -The cmake configure file hopper-pgi-cmake can be used to build parallel versions of +The cmake configure file cori-pgi-cmake can be used to build parallel versions of simple_glide and simple_bisicles, 2 programs that are part of CISM (the Community Ice Sheet Model). The PGI compiler suite is used for this build. @@ -39,21 +39,21 @@ Build Instructions: Standard Build (uses Trilinos, builds simple_glide, doesn't build simple_bisicles): -In the builds/hopper-pgi directory, configure for the build using: +In the builds/cori-pgi directory, configure for the build using: make clean -source hopper-pgi-cmake +source cori-pgi-cmake The configuration process should complete with a final message: --- Build files have been written to: /seacism/builds/hopper-pgi +-- Build files have been written to: /seacism/builds/cori-pgi The next step is to use the make program to do the build: make -j 8 ------------------------------------------------------------------------------ -In the file hopper-pgi-cmake, the first 4 lines of the cmake call can be modified +In the file cori-pgi-cmake, the first 4 lines of the cmake call can be modified to configure different builds. These lines are: -D NO_TRILINOS:BOOL=OFF \ -D CISM_MPI_MODE:BOOL=ON \ @@ -75,7 +75,7 @@ For a serial build of simple_glide, use: Dependencies: The packages this build depends on (Trilinos, BISICLES, and Chombo) have already -been built. The paths to these packages can be found in hopper-pgi-cmake. +been built. The paths to these packages can be found in cori-pgi-cmake. 
Testing: @@ -83,7 +83,7 @@ simple_glide quick test: In seacism/tests/higher-order/dome, do: 1) type dome.py, this will give a 'simple glide not found' error 2) qsub -I -V -q interactive -l mppwidth=4 -3) aprun -n 4 ...hopper-config/example_drivers/simple_glide/src/simple_glide dome.9_5_2012.config +3) aprun -n 4 ...cori-config/example_drivers/simple_glide/src/simple_glide dome.9_5_2012.config simple_bisicles quick test: diff --git a/builds/hopper-pgi/hopper-pgi-bisicles-cmake b/builds/cori-pgi/cori-pgi-bisicles-cmake similarity index 87% rename from builds/hopper-pgi/hopper-pgi-bisicles-cmake rename to builds/cori-pgi/cori-pgi-bisicles-cmake index 8d4cc6b3..e3d81c40 100644 --- a/builds/hopper-pgi/hopper-pgi-bisicles-cmake +++ b/builds/cori-pgi/cori-pgi-bisicles-cmake @@ -1,14 +1,14 @@ -# run this script by typing: source hopper-pgi-cmake +# run this script by typing: source cori-pgi-cmake # After thus script completes, type: make -j 8 # If rebuilding, type 'make clean' before running 'make -j 8' # This cmake configuration script builds cism_driver -# on hopper using the PGI compiler suite. It no longer relies on a build +# on cori using the PGI compiler suite. It no longer relies on a build # of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR # (currently set to /global/u2/d/dmartin/BISICLES/code/interface) -# This script should be run from the builds/hopper-pgi subdirectory +# This script should be run from the builds/cori-pgi subdirectory # of the main seacism repository (reflected in the two instances # of "../.." below). @@ -24,15 +24,15 @@ # CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation. # CISM_COUPLED -- OFF by default, set to ON to build with CESM. echo -echo Run this script by typing: source hopper-bisicles-pgi-cmake +echo Run this script by typing: source cori-bisicles-pgi-cmake echo -echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide +echo Warning: Python problem. After the last cori system updates 1/28/13, in order to run simple_glide echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module. echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines: echo module unload python echo module load python/2.7.1 echo -#echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide +#echo Warning: Python problem. After the last cori system updates 1/28/13, in order to run simple_glide #echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module. 
#echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines: #echo module unload python @@ -87,9 +87,9 @@ cmake \ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \ -D CISM_COUPLED:BOOL=OFF \ \ - -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \ - -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-gptl/install \ - -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \ + -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi/install \ + -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi-gptl/install \ + -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi-albany/install \ \ -D CISM_NETCDF_DIR=$NETCDF_DIR \ -D CISM_FMAIN=/opt/pgi/13.6.0/linux86-64/13.6/lib/f90main.o \ diff --git a/builds/hopper-pgi/hopper-pgi-bisicles-petsc-cmake b/builds/cori-pgi/cori-pgi-bisicles-petsc-cmake similarity index 89% rename from builds/hopper-pgi/hopper-pgi-bisicles-petsc-cmake rename to builds/cori-pgi/cori-pgi-bisicles-petsc-cmake index 4708e4ca..d67b6107 100644 --- a/builds/hopper-pgi/hopper-pgi-bisicles-petsc-cmake +++ b/builds/cori-pgi/cori-pgi-bisicles-petsc-cmake @@ -1,14 +1,14 @@ -# run this script by typing: source hopper-pgi-cmake +# run this script by typing: source cori-pgi-cmake # After thus script completes, type: make -j 8 # If rebuilding, type 'make clean' before running 'make -j 8' # This cmake configuration script builds cism_driver -# on hopper using the PGI compiler suite. It no longer relies on a build +# on cori using the PGI compiler suite. It no longer relies on a build # of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR # (currently set to /global/u2/d/dmartin/BISICLES/code/interface) -# This script should be run from the builds/hopper-pgi subdirectory +# This script should be run from the builds/cori-pgi subdirectory # of the main seacism repository (reflected in the two instances # of "../.." below). @@ -24,9 +24,9 @@ # CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation. # CISM_COUPLED -- OFF by default, set to ON to build with CESM. echo -echo Run this script by typing: source hopper-petsc-bisicles-pgi-cmake +echo Run this script by typing: source cori-petsc-bisicles-pgi-cmake echo -#echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide +#echo Warning: Python problem. After the last cori system updates 1/28/13, in order to run simple_glide #echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module. 
#echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines: #echo module unload python @@ -80,9 +80,9 @@ cmake \ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \ -D CISM_COUPLED:BOOL=OFF \ \ - -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \ - -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-gptl/install \ - -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \ + -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi/install \ + -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi-gptl/install \ + -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi-albany/install \ \ -D CISM_NETCDF_DIR=$NETCDF_DIR \ -D CISM_FMAIN=/opt/pgi/13.6.0/linux86-64/13.6/lib/f90main.o \ diff --git a/builds/hopper-pgi/hopper-pgi-cmake b/builds/cori-pgi/cori-pgi-cmake similarity index 90% rename from builds/hopper-pgi/hopper-pgi-cmake rename to builds/cori-pgi/cori-pgi-cmake index abf4659f..48deeab9 100755 --- a/builds/hopper-pgi/hopper-pgi-cmake +++ b/builds/cori-pgi/cori-pgi-cmake @@ -1,14 +1,14 @@ -# run this script by typing: source hopper-pgi-cmake +# run this script by typing: source cori-pgi-cmake # After thus script completes, type: make -j 8 # If rebuilding, type 'make clean' before running 'make -j 8' # This cmake configuration script builds cism_driver -# on hopper using the PGI compiler suite. It no longer relies on a build +# on cori using the PGI compiler suite. It no longer relies on a build # of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR # (currently set to /global/u2/d/dmartin/BISICLES/code/interface) -# This script should be run from the builds/hopper-pgi subdirectory +# This script should be run from the builds/cori-pgi subdirectory # of the main seacism repository (reflected in the two instances # of "../.." below). @@ -25,7 +25,7 @@ # CISM_COUPLED -- OFF by default, set to ON to build with CESM. 
echo -echo Run this script by typing: source hopper-pgi-cmake +echo Run this script by typing: source cori-pgi-cmake echo echo NOTE: problem after March 2015 upgrade: to build with Trilinos needs this line: echo -D CISM_FMAIN=/opt/pgi/14.2.0/linux86-64/14.2/lib/f90main.o \ @@ -76,9 +76,9 @@ cmake \ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \ -D CISM_COUPLED:BOOL=OFF \ \ - -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \ - -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/hopper-pgi-ci-nophal/install \ - -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \ + -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi/install \ + -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/cori-pgi-ci-nophal/install \ + -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi-albany/install \ \ -D CISM_NETCDF_DIR=$NETCDF_DIR \ -D CISM_MPI_BASE_DIR=$MPICH_DIR \ @@ -94,7 +94,7 @@ cmake \ -D CMAKE_Fortran_COMPILER=ftn \ \ -D CISM_SCI_LIB_DIR=$CRAY_LIBSCI_PREFIX_DIR/lib \ - -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-hopper-pgi \ + -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-cori-pgi \ \ -D CMAKE_CXX_FLAGS:STRING="-O2 -mp --diag_suppress 554,111,611 -DH5_USE_16_API" \ -D CISM_Fortran_FLAGS:STRING="-O2 -mp" \ diff --git a/builds/hopper-pgi/hopper-pgi-cmake.sh b/builds/cori-pgi/cori-pgi-cmake.sh similarity index 92% rename from builds/hopper-pgi/hopper-pgi-cmake.sh rename to builds/cori-pgi/cori-pgi-cmake.sh index 273624eb..81cf268e 100755 --- a/builds/hopper-pgi/hopper-pgi-cmake.sh +++ b/builds/cori-pgi/cori-pgi-cmake.sh @@ -50,7 +50,7 @@ rm -rf ./CMakeCache.txt rm -rf ./CMakeFiles # This cmake configuration script builds cism_driver -# on hopper using the PGI compiler suite. It no longer relies on a build +# on cori using the PGI compiler suite. 
It no longer relies on a build # of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR # (currently set to /global/u2/d/dmartin/BISICLES/code/interface) @@ -79,16 +79,16 @@ cmake \ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \ -D CISM_COUPLED:BOOL=OFF \ \ - -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \ - -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/hopper-pgi-ci-nophal/install \ - -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \ + -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi/install \ + -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos-11.12.1/cori-pgi-ci-nophal/install \ + -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/cori-pgi-albany/install \ \ -D CISM_NETCDF_DIR="$NETCDF_DIR" \ -D CISM_MPI_BASE_DIR="$MPICH_DIR" \ \ -D CISM_FMAIN=/opt/pgi/14.2.0/linux86-64/14.2/lib/f90main.o \ \ - -D CMAKE_INSTALL_PREFIX:PATH="$cism_top/builds/hopper-pgi/install" \ + -D CMAKE_INSTALL_PREFIX:PATH="$cism_top/builds/cori-pgi/install" \ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \ \ @@ -97,7 +97,7 @@ cmake \ -D CMAKE_Fortran_COMPILER=ftn \ \ -D CISM_SCI_LIB_DIR="$CRAY_LIBSCI_PREFIX_DIR/lib" \ - -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-hopper-pgi \ + -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-cori-pgi \ \ -D CMAKE_CXX_FLAGS:STRING="-O2 -mp --diag_suppress 554,111,611 -DH5_USE_16_API" \ -D CISM_Fortran_FLAGS:STRING="-O2 -mp" \ diff --git a/builds/yellowstone-intel/yellowstone-intel-cmake b/builds/yellowstone-intel/yellowstone-intel-cmake deleted file mode 100755 index fc61a0bf..00000000 --- a/builds/yellowstone-intel/yellowstone-intel-cmake +++ /dev/null @@ -1,72 +0,0 @@ -# Run this script by typing: source yellowstone-intel-cmake -# After this script completes, type: make -j 8 -# If rebuilding, type 'make clean' before running 'make -j 8' - -# This cmake configuration script is set up to perform a parallel build with Trilinos - -module purge -module load ncarenv/1.0 -module load ncarbinlibs/1.0 -module load intel/13.1.2 -module load mkl/11.0.1 -module load trilinos/11.0.3 -module load netcdf-mpi/4.3.0 -module load ncarcompilers/1.0 -module load pnetcdf/1.3.0 -module load cmake/2.8.10.2 -module load python -module load all-python-libs - -# remove old build data: -rm ./CMakeCache.txt -rm -r ./CMakeFiles - -echo -echo "Doing CMake Configuration step" - -# Note: the compilation flags were taken from the defaults for a CESM build on -# yellowstone-intel (using Machines_140218). Some of these options (e.g., -# -convert big_endian and -assume byterecl) are probably unnecessary for a -# standalone build, but I am keeping things consistent with the CESM build for -# simplicity. - -# A few non-intuitive things: -# -# - CISM_FORCE_FORTRAN_LINKER: without this, cmake tries to use a C++ linker, which doesn't work -# -# - CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES: if this is on (the default), some -# libraries are included on the link line which can't be found (e.g., -# hdf5). This may be related to the fact that trilinos on yellowstone is old, -# and/or the fact that cmake wants to use a C++ linker but we're telling it to -# use a fortran linker. 
- -cmake \ - -D CISM_BUILD_CISM_DRIVER:BOOL=ON \ - -D CISM_ENABLE_BISICLES=OFF \ - -D CISM_ENABLE_FELIX=OFF \ -\ - -D CISM_USE_TRILINOS:BOOL=OFF \ - -D CISM_MPI_MODE:BOOL=ON \ - -D CISM_SERIAL_MODE:BOOL=OFF \ -\ - -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \ - -D CISM_COUPLED:BOOL=OFF \ - -D CISM_USE_CISM_FRONT_END:BOOL=OFF \ -\ - -D CISM_TRILINOS_DIR=$TRILINOS_PATH \ - -D CISM_NETCDF_DIR=$NETCDF \ - -D CISM_FORCE_FORTRAN_LINKER:BOOL=ON \ - -D CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES:BOOL=OFF \ - -D CMAKE_VERBOSE_MAKEFILE:BOOL=OFF \ -\ - -D CMAKE_CXX_COMPILER=mpiicpc \ - -D CMAKE_C_COMPILER=mpicc \ - -D CMAKE_Fortran_COMPILER=mpif90 \ -\ - -D CMAKE_Fortran_FLAGS:STRING="-fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -xHost -O2" \ - -D CMAKE_C_FLAGS:STRING="-O2 -fp-model precise -xHost" \ - -D CMAKE_CXX_FLAGS:STRING="-O2 -fp-model precise -xHost" \ - ../.. - -# Note: last argument above "../.." is path to top seacism directory -# Note: last argument above "../.." is path to top seacism directory diff --git a/builds/yellowstone-intel/yellowstone-intel-serial-cmake b/builds/yellowstone-intel/yellowstone-intel-serial-cmake deleted file mode 100755 index 755e7583..00000000 --- a/builds/yellowstone-intel/yellowstone-intel-serial-cmake +++ /dev/null @@ -1,69 +0,0 @@ -# Run this script by typing: source yellowstone-intel-cmake -# After this script completes, type: make -j 8 -# If rebuilding, type 'make clean' before running 'make -j 8' - -# This cmake configuration script is set up to perform a serial build - -module purge -module load ncarenv/1.0 -module load ncarbinlibs/1.0 -module load intel/13.1.2 -module load mkl/11.0.1 -module load netcdf/4.3.0 -module load ncarcompilers/1.0 -module load cmake/2.8.10.2 -module load python -module load all-python-libs - -# remove old build data: -rm ./CMakeCache.txt -rm -r ./CMakeFiles - -echo -echo "Doing CMake Configuration step" - -# Note: the compilation flags were taken from the defaults for a CESM build on -# yellowstone-intel (using Machines_140218). Some of these options (e.g., -# -convert big_endian and -assume byterecl) are probably unnecessary for a -# standalone build, but I am keeping things consistent with the CESM build for -# simplicity. - -# A few non-intuitive things: -# -# - CISM_FORCE_FORTRAN_LINKER: without this, cmake tries to use a C++ linker, which doesn't work -# -# - CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES: (this is a note that applies to the -# parallel build with trilinos, and may or may not apply to this serial -# build): if this is on (the default), some libraries are included on the link -# line which can't be found (e.g., hdf5). This may be related to the fact that -# trilinos on yellowstone is old, and/or the fact that cmake wants to use a -# C++ linker but we're telling it to use a fortran linker. 
- -cmake \ - -D CISM_USE_TRILINOS:BOOL=OFF \ - -D CISM_COUPLED:BOOL=OFF \ - -D CISM_MPI_MODE:BOOL=OFF \ - -D CISM_SERIAL_MODE:BOOL=ON \ - -D CISM_BUILD_SIMPLE_GLIDE:BOOL=ON \ - -D CISM_BUILD_SIMPLE_BISICLES:BOOL=OFF \ - -D CISM_BUILD_GLINT_EXAMPLE:BOOL=OFF \ - -D CISM_BUILD_CISM_DRIVER:BOOL=ON \ - -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \ - -D CISM_USE_DEFAULT_IO:BOOL=OFF \ - -D CISM_USE_CISM_FRONT_END:BOOL=OFF \ -\ - -D CISM_NETCDF_DIR=$NETCDF \ - -D CISM_FORCE_FORTRAN_LINKER:BOOL=ON \ - -D CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES:BOOL=OFF \ - -D CMAKE_VERBOSE_MAKEFILE:BOOL=OFF \ -\ - -D CMAKE_CXX_COMPILER=icpc \ - -D CMAKE_C_COMPILER=icc \ - -D CMAKE_Fortran_COMPILER=ifort \ -\ - -D CMAKE_Fortran_FLAGS:STRING="-fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -xHost -O2" \ - -D CMAKE_C_FLAGS:STRING="-O2 -fp-model precise -xHost" \ - -D CMAKE_CXX_FLAGS:STRING="-O2 -fp-model precise -xHost" \ - ../.. - -# Note: last argument above "../.." is path to top seacism directory diff --git a/cism_driver/cism_driver.F90 b/cism_driver/cism_driver.F90 index a08df31b..5cfaa854 100644 --- a/cism_driver/cism_driver.F90 +++ b/cism_driver/cism_driver.F90 @@ -25,7 +25,6 @@ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ program cism_driver - use parallel ! use glimmer_commandline ! use glide use gcm_cism_interface diff --git a/cism_driver/eismint_forcing.F90 b/cism_driver/eismint_forcing.F90 index 8209994b..8dbd55a5 100644 --- a/cism_driver/eismint_forcing.F90 +++ b/cism_driver/eismint_forcing.F90 @@ -260,6 +260,16 @@ subroutine eismint_readconfig(eismint_climate, config) return end if + call GetSection(config,section,'MISMIP+') + if (associated(section)) then + return + end if + + call GetSection(config,section,'MISMIP') + if (associated(section)) then + return + end if + !TODO - Any other allowed tests to add here? ! Abort if one of the above cases has not been specified. diff --git a/cism_driver/gcm_cism_interface.F90 b/cism_driver/gcm_cism_interface.F90 index 1308f9a5..4a243890 100644 --- a/cism_driver/gcm_cism_interface.F90 +++ b/cism_driver/gcm_cism_interface.F90 @@ -27,7 +27,7 @@ ! from glide_types.F90: ! integer, parameter :: DYCORE_GLIDE = 0 ! old shallow-ice dycore from Glimmer ! integer, parameter :: DYCORE_GLAM = 1 ! Payne-Price finite-difference solver -! integer, parameter :: DYCORE_GLISSADE = 2 ! prototype finite-element solver +! integer, parameter :: DYCORE_GLISSADE = 2 ! finite-element solver ! integer, parameter :: DYCORE_ALBANYFELIX = 3 ! External Albany-Felix finite-element solver ! integer, parameter :: DYCORE_BISICLES = 4 ! BISICLES external dycore diff --git a/libglide/glide.F90 b/libglide/glide.F90 index e567d9b2..5b4605e5 100644 --- a/libglide/glide.F90 +++ b/libglide/glide.F90 @@ -289,6 +289,9 @@ subroutine glide_initialise(model) ! print*, 'Created Glide variables' ! print*, 'max, min bheatflx (W/m2)=', maxval(model%temper%bheatflx), minval(model%temper%bheatflx) + ! Compute the cell areas of the grid + model%geometry%cell_area = model%numerics%dew*model%numerics%dns + ! If a 2D bheatflx field is present in the input file, it will have been written ! to model%temper%bheatflx. For the case model%options%gthf = 0, we want to use ! a uniform heat flux instead. 
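
Aside on the new cell_area field (illustrative only, not part of the patch itself): the glide.F90 hunk above computes a uniform cell area as dew*dns, and glide_vars.def later registers it for output with the factor len0*len0. A minimal, self-contained Fortran sketch of that arithmetic follows; the len0, dew, and dns values are placeholders, not CISM's actual configuration.

    program cism_cell_area_sketch
      implicit none
      integer, parameter :: dp = kind(1.0d0)
      real(dp), parameter :: len0 = 1.0d5   ! horizontal length scale (m); placeholder value
      real(dp) :: dew, dns                  ! nondimensional grid spacings (model%numerics in CISM)
      real(dp) :: cell_area                 ! nondimensional cell area (model%geometry in CISM)

      dew = 4.0d3 / len0                    ! e.g. a 4 km grid expressed in scaled units
      dns = 4.0d3 / len0
      cell_area = dew * dns                 ! same expression the patch adds at initialisation
      print *, 'cell area (m^2) = ', cell_area * len0 * len0   ! output scaling, as in glide_vars.def
    end program cism_cell_area_sketch
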
diff --git a/libglide/glide_setup.F90 b/libglide/glide_setup.F90 index 3a20d238..164848a7 100644 --- a/libglide/glide_setup.F90 +++ b/libglide/glide_setup.F90 @@ -862,15 +862,15 @@ subroutine print_options(model) 'Diagonal preconditioner (glissade PCG) ', & 'SIA preconditioner (glissade PCG) ' /) - character(len=*), dimension(0:1), parameter :: ho_whichgradient = (/ & - 'centered gradient (glissade dycore) ', & - 'upstream gradient (glissade dycore) ' /) + character(len=*), dimension(0:2), parameter :: ho_whichgradient = (/ & + 'centered gradient (glissade) ', & + 'first-order upstream gradient (glissade) ', & + 'second-order upstream gradient (glissade)' /) - character(len=*), dimension(0:3), parameter :: ho_whichgradient_margin = (/ & - 'compute edge gradient when either cell has ice ', & - 'compute edge gradient when grounded ice lies above ice-free cell', & - 'compute edge gradient only when both cells have ice ', & - 'compute edge gradient when ice lies above ice-free land ' /) + character(len=*), dimension(0:2), parameter :: ho_whichgradient_margin = (/ & + 'compute edge gradient when either cell has ice ', & + 'compute edge gradient when ice lies above ice-free land', & + 'compute edge gradient when both cells have ice ' /) character(len=*), dimension(0:1), parameter :: ho_whichvertical_remap = (/ & 'first-order accurate ', & @@ -975,10 +975,6 @@ subroutine print_options(model) call write_log('Error, SLAP solver not supported for more than one processor', GM_FATAL) end if - if (tasks > 1 .and. model%options%which_ho_babc==HO_BABC_ISHOMC) then - call write_log('Error, ISHOM C basal BCs not supported for more than one processor', GM_FATAL) - endif - if (tasks > 1 .and. model%options%whichbwat==BWATER_FLUX) then call write_log('Error, flux-based basal water option not supported for more than one processor', GM_FATAL) endif @@ -1012,15 +1008,6 @@ subroutine print_options(model) call write_log('Defaulting to SIA dissipation') endif - if (model%options%which_ho_gradient_margin == HO_GRADIENT_MARGIN_ICE_OVER_LAND ) then - model%options%which_ho_gradient_margin = HO_GRADIENT_MARGIN_GROUNDED_ICE - write(message,*) 'Warning: Local SIA solver does not support which_ho_gradient_margin =', & - HO_GRADIENT_MARGIN_ICE_OVER_LAND - call write_log(message) - write(message,*) 'Defaulting to option', HO_GRADIENT_MARGIN_GROUNDED_ICE - call write_log(message) - endif - endif ! Glissade local SIA solver endif @@ -1540,6 +1527,7 @@ subroutine handle_parameters(section, model) !TODO - Change default_flwa to flwa_constant? Would have to change config files. ! Change flow_factor to flow_enhancement_factor? 
Would have to change many SIA config files call GetValue(section,'flow_factor', model%paramets%flow_enhancement_factor) + call GetValue(section,'flow_factor_ssa', model%paramets%flow_enhancement_factor_ssa) call GetValue(section,'default_flwa', model%paramets%default_flwa) call GetValue(section,'efvs_constant', model%paramets%efvs_constant) call GetValue(section,'hydro_time', model%paramets%hydtim) @@ -1785,7 +1773,10 @@ subroutine print_parameters(model) write(message,*) 'geothermal flux (W/m^2) : ', model%paramets%geot call write_log(message) - write(message,*) 'flow enhancement factor : ', model%paramets%flow_enhancement_factor + write(message,*) 'flow enhancement factor (SIA) : ', model%paramets%flow_enhancement_factor + call write_log(message) + + write(message,*) 'flow enhancement factor (SSA) : ', model%paramets%flow_enhancement_factor_ssa call write_log(message) write(message,*) 'basal hydro time constant (yr): ', model%paramets%hydtim diff --git a/libglide/glide_types.F90 b/libglide/glide_types.F90 index eab0f8da..c9c5ef32 100644 --- a/libglide/glide_types.F90 +++ b/libglide/glide_types.F90 @@ -260,12 +260,12 @@ module glide_types integer, parameter :: HO_PRECOND_SIA = 2 integer, parameter :: HO_GRADIENT_CENTERED = 0 - integer, parameter :: HO_GRADIENT_UPSTREAM = 1 + integer, parameter :: HO_GRADIENT_UPSTREAM1 = 1 + integer, parameter :: HO_GRADIENT_UPSTREAM2 = 2 - integer, parameter :: HO_GRADIENT_MARGIN_ALL = 0 - integer, parameter :: HO_GRADIENT_MARGIN_GROUNDED_ICE = 1 - integer, parameter :: HO_GRADIENT_MARGIN_ICE_ONLY = 2 - integer, parameter :: HO_GRADIENT_MARGIN_ICE_OVER_LAND = 3 + integer, parameter :: HO_GRADIENT_MARGIN_LAND = 0 + integer, parameter :: HO_GRADIENT_MARGIN_HYBRID = 1 + integer, parameter :: HO_GRADIENT_MARGIN_MARINE = 2 integer, parameter :: HO_VERTICAL_REMAP_FIRST_ORDER = 0 integer, parameter :: HO_VERTICAL_REMAP_SECOND_ORDER = 1 @@ -696,17 +696,18 @@ module glide_types integer :: which_ho_gradient = 0 !> Flag that indicates which gradient operator to use in the glissade dycore. !> Not valid for other dycores - !> NOTE: Option 1 may be better for ice evolution because it damps checkerboard noise. + !> NOTE: Upstream may be better for ice evolution because it damps checkerboard noise. !> \begin{description} !> \item[0] Centered gradient - !> \item[1] Upstream gradient + !> \item[1] First-order accurate upstream gradient + !> \item[2] Second-order accurate upstream gradient !WHL - Changed default from 1 to 3. ! Option 3 is appropriate for ice sheets with both land and marine boundaries, ! when a lateral spreading force is computed for marine ice cliffs. ! This lateral force ensures spreading where the gradient is zero. - integer :: which_ho_gradient_margin = 3 + integer :: which_ho_gradient_margin = 1 !> Flag that indicates how to compute the gradient at the ice margin in the glissade dycore. !> Note: Gradients are always computed at edges with ice on both sides. 
!> The methods differ in whether gradients are computed when an ice-covered cell @@ -714,9 +715,8 @@ module glide_types !> Not valid for other dycores !> \begin{description} !> \item[0] Compute edge gradient when either cell is ice-covered - !> \item[1] Compute edge gradient for grounded ice above ice-free land or ocean + !> \item[1] Compute edge gradient for ice-covered cell above ice-free land (not ocean) !> \item[2] Compute edge gradient only when both cells have ice - !> \item[3] Compute edge gradient for ice-covered cell above ice-free land (not ocean) !TODO: Change the default to 2nd order vertical remapping ! WHL: Keeping 1st order vertical remapping for now so that standard tests are BFB @@ -834,6 +834,9 @@ module glide_types real(dp),dimension(:,:),pointer :: dthck_dt => null() !> ice thickness tendency, divided by \texttt{thk0/tim0} real(dp),dimension(:,:),pointer :: dthck_dt_tavg => null() !> ice thickness tendency, divided by \texttt{thk0/tim0} (time average) + real(dp),dimension(:,:),pointer :: cell_area => null() + !> The cell area of the grid, divided by \texttt{len0*len0}. + integer :: ntracers !> number of tracers to be transported @@ -1641,7 +1644,10 @@ module glide_types real(dp) :: geot = -5.0d-2 ! W m^{-2}, positive down real(dp) :: flow_enhancement_factor = 1.0d0 ! flow enhancement parameter for the Arrhenius relationship; ! typically used in SIA model to speed up the ice - ! (NOTE change relative to prev. versions of code - used to be 3) + ! (Note the change relative to prev. versions of code - used to be 3.0) + real(dp) :: flow_enhancement_factor_ssa = 1.0d0 ! flow enhancement parameter for floating ice + ! Default is 1.0, but for marine simulations a smaller value + ! may be needed to match observed shelf speeds real(dp) :: slip_ratio = 1.0d0 ! Slip ratio, used only in higher order code when the slip ratio beta computation is requested real(dp) :: hydtim = 1000.0d0 ! years, converted to s^{-1} and scaled ! 
0 if no drainage @@ -2063,6 +2069,7 @@ subroutine glide_allocarr(model) call coordsystem_allocate(model%general%ice_grid, model%geometry%topg) call coordsystem_allocate(model%general%ice_grid, model%geometry%thkmask) call coordsystem_allocate(model%general%velo_grid, model%geometry%stagmask) + call coordsystem_allocate(model%general%ice_grid, model%geometry%cell_area) call coordsystem_allocate(model%general%velo_grid, model%geomderv%stagthck) call coordsystem_allocate(model%general%velo_grid, model%geomderv%dthckdew) @@ -2534,6 +2541,9 @@ subroutine glide_deallocarr(model) if (associated(model%geomderv%dusrfdns)) & deallocate(model%geomderv%dusrfdns) + if (associated(model%geometry%cell_area)) & + deallocate(model%geometry%cell_area) + if (associated(model%geometry%sfc_mbal_flux)) & deallocate(model%geometry%sfc_mbal_flux) if (associated(model%geometry%sfc_mbal_flux_tavg)) & diff --git a/libglide/glide_vars.def b/libglide/glide_vars.def index bfeeb5df..e295161a 100644 --- a/libglide/glide_vars.def +++ b/libglide/glide_vars.def @@ -79,6 +79,14 @@ units: meter long_name: vertical coordinate of lithosphere layer dimlen: model%lithot%nlayer +[cell_area] +dimensions: time, y1, x1 +units: meter2 +long_name: cell area of cism grid +data: data%geometry%cell_area +factor: len0*len0 +load: 1 + [relx] dimensions: time, y1, x1 units: meter diff --git a/libglimmer/glimmer_ncio.F90 b/libglimmer/glimmer_ncio.F90 index 82cfa454..a24ae3da 100644 --- a/libglimmer/glimmer_ncio.F90 +++ b/libglimmer/glimmer_ncio.F90 @@ -394,6 +394,12 @@ subroutine glimmer_nc_checkwrite(outfile,model,forcewrite,time,external_time) ! nfreq = nint(outfile%freq / model%numerics%tinc) + if (nfreq == 0) then ! freq < tinc/2 + nfreq = 1 + write(message,*) 'WARNING: output file frequency is smaller than timestep; writing output every timestep' + call write_log(trim(message)) + endif + ! Write output if any of the following is true: ! (1) forcewrite = T ! (2) tstep_count = 0 & write_init = T diff --git a/libglint/glint_example_clim.F90 b/libglint/glint_example_clim.F90 index 8e3f0cf3..c9f03044 100644 --- a/libglint/glint_example_clim.F90 +++ b/libglint/glint_example_clim.F90 @@ -699,9 +699,11 @@ subroutine example_climate(params,precip,temp,time) integer :: lower,upper real(dp) :: fyear + integer :: nt ! used to access the number of years of available forcing ! Calculate fraction of year - fyear = real(mod(time,real(params%hours_in_year,dp))) / real(params%hours_in_year,dp) + nt = size(params%st_time) + fyear = mod(time / real(params%hours_in_year,dp), params%st_time(nt)) ! Do temperature interpolation call bracket_point(fyear, params%st_time, lower, upper, pos) diff --git a/libglissade/glissade.F90 b/libglissade/glissade.F90 index 249b4756..13471ba2 100644 --- a/libglissade/glissade.F90 +++ b/libglissade/glissade.F90 @@ -259,6 +259,9 @@ subroutine glissade_initialise(model, evolve_ice) ! create glide variables call glide_io_createall(model, model) + ! Compute the cell areas of the grid + model%geometry%cell_area = model%numerics%dew*model%numerics%dns + ! If a 2D bheatflx field is present in the input file, it will have been written ! to model%temper%bheatflx. For the case model%options%gthf = 0, we want to use ! a uniform heat flux instead. @@ -1843,7 +1846,9 @@ subroutine glissade_diagnostic_variable_solve(model) model%temper%temp(1:model%general%upn-1,:,:), & model%temper%flwa, & ! Pa^{-n} s^{-1} model%paramets%default_flwa / scyr, & ! 
scale to Pa^{-n} s^{-1} - model%paramets%flow_enhancement_factor, & + model%paramets%flow_enhancement_factor, & + model%paramets%flow_enhancement_factor_ssa, & + floating_mask, & model%temper%waterfrac(:,:,:)) !TODO - flwa halo update not needed? diff --git a/libglissade/glissade_basal_traction.F90 b/libglissade/glissade_basal_traction.F90 index c2888201..394b79b0 100644 --- a/libglissade/glissade_basal_traction.F90 +++ b/libglissade/glissade_basal_traction.F90 @@ -85,7 +85,8 @@ subroutine calcbeta (whichbabc, & use glimmer_paramets, only: len0 use glimmer_physcon, only: gn, pi use parallel, only: nhalo, this_rank - use parallel, only: parallel_globalindex + use parallel, only: parallel_globalindex, global_ewn, global_nsn + use parallel, only: distributed_scatter_var, parallel_halo, main_task implicit none @@ -105,6 +106,10 @@ subroutine calcbeta (whichbabc, & ! Note: This is beta_internal in glissade real(dp), intent(in), dimension(:,:), optional :: f_ground ! grounded ice fraction, 0 <= f_ground <= 1 + ! Note: Adding fields for parallel ISHOM-C test case + real(dp), dimension(:,:), allocatable :: beta_global ! beta on the global grid + real(dp), dimension(:,:), allocatable :: beta_extend ! beta extended to the ice grid (dimensions ewn, nsn) + ! Note: optional arguments topg and eus are used for pseudo-plastic sliding law real(dp), intent(in), dimension(:,:), optional :: topg ! bed topography (m) real(dp), intent(in), optional :: eus ! eustatic sea level (m) relative to z = 0 @@ -116,7 +121,6 @@ subroutine calcbeta (whichbabc, & real(dp) :: Ldomain ! size of full domain real(dp) :: omega ! frequency of beta field real(dp) :: dx, dy - integer :: ilo, ihi, jlo, jhi ! limits of beta field for ISHOM C case integer :: ew, ns real(dp), dimension(size(beta,1), size(beta,2)) :: speed ! ice speed, sqrt(uvel^2 + vvel^2), m/yr @@ -257,26 +261,55 @@ subroutine calcbeta (whichbabc, & ! However, this is not possible given that the global velocity grid is smaller ! than the ice grid and hence not able to fit the full beta field. ! The following code sets beta on the full grid as prescribed by Pattyn et al. (2008). - !NOTE: This works only in serial! - Ldomain = (ewn-2*nhalo) * dew ! size of full domain (must be square) - omega = 2.d0*pi / Ldomain + ! Allocate a global array on the main task only. + ! On other tasks, allocate a size 0 array, since distributed_scatter_var wants to deallocate on all tasks. + if (main_task) then + allocate(beta_global(global_ewn, global_nsn)) + else + allocate(beta_global(0,0)) + endif - ilo = nhalo - ihi = ewn-nhalo - jlo = nhalo - jhi = nsn-nhalo - ! Prescribe beta as in Pattyn et al., The Cryosphere, 2008 + ! Note: These beta values live at vertices, not cell centers. + ! They need a global array of size (ewn,nsn) to hold values on the global boundary. + if (main_task) then + + Ldomain = global_ewn * dew ! size of full domain (must be square) + omega = 2.d0*pi / Ldomain + + beta_global(:,:) = 0.d0 + do ns = 1, global_nsn + do ew = 1, global_ewn + dx = dew * ew + dy = dns * ns + beta_global(ew,ns) = 1000.d0 + 1000.d0 * sin(omega*dx) * sin(omega*dy) + enddo + enddo + + endif + + ! Scatter the global beta values back to local arrays + ! Note: beta_extend has dimensions (ewn,nsn), so it can receive scattered data from beta_global. + allocate(beta_extend(ewn, nsn)) + beta_extend(:,:) = 0.d0 + call distributed_scatter_var(beta_extend, beta_global) + + ! distributed_scatter_var does not update the halo, so do an update here + call parallel_halo(beta_extend) + + ! 
Copy beta_extend to beta on the local processor. + ! This is done since beta lives on the velocity grid and has dimensions (ewn-1,nsn-1). beta(:,:) = 0.d0 - do ns = jlo, jhi - do ew = ilo, ihi - dx = dew * (ew-ilo) - dy = dns * (ns-jlo) - beta(ew,ns) = 1000.d0 + 1000.d0 * sin(omega*dx) * sin(omega*dy) + do ns = 1, nsn-1 + do ew = 1, ewn-1 + beta(ew,ns) = beta_extend(ew, ns) enddo enddo + ! beta_extend is no longer needed (beta_global is deallocated in distributed_scatter_var) + deallocate(beta_extend) + case(HO_BABC_BETA_EXTERNAL) ! use beta value from external file ! set beta to the prescribed external value @@ -737,9 +770,13 @@ subroutine calc_effective_pressure (which_effecpress, & do j = 1, nsn do i = 1, ewn - f_pattyn = rhoo*(eus-topg(i,j)) / (rhoi*thck(i,j)) ! > 1 for floating, < 1 for grounded - f_pattyn_capped = max( min(f_pattyn,1.0d0), 0.0d0) ! capped to lie in the range [0,1] - basal_physics%effecpress(i,j) = overburden(i,j) * (1.0d0 - f_pattyn_capped)**ocean_p + if (thck(i,j) > 0.0d0) then + f_pattyn = rhoo*(eus-topg(i,j)) / (rhoi*thck(i,j)) ! > 1 for floating, < 1 for grounded + f_pattyn_capped = max( min(f_pattyn,1.0d0), 0.0d0) ! capped to lie in the range [0,1] + basal_physics%effecpress(i,j) = overburden(i,j) * (1.0d0 - f_pattyn_capped)**ocean_p + else + basal_physics%effecpress(i,j) = 0.0d0 + endif enddo enddo diff --git a/libglissade/glissade_bmlt_float.F90 b/libglissade/glissade_bmlt_float.F90 index 85dee066..cca4d427 100644 --- a/libglissade/glissade_bmlt_float.F90 +++ b/libglissade/glissade_bmlt_float.F90 @@ -515,8 +515,6 @@ subroutine glissade_plume_melt_rate(& entrainment, detrainment, & divDu_plume, bmlt_float) - use glissade_grid_operators, only: glissade_centered_gradient - ! Compute the melt rate at the ice-ocean interface based on a steady-state plume model ! References: @@ -2242,8 +2240,6 @@ subroutine compute_plume_velocity(& plume_speed_east, & plume_speed_north) - use glissade_grid_operators, only: glissade_centered_gradient - integer, intent(in) :: & nx, ny ! number of grid cells in each dimension diff --git a/libglissade/glissade_grid_operators.F90 b/libglissade/glissade_grid_operators.F90 index fdf7d89d..9d57fbe7 100644 --- a/libglissade/glissade_grid_operators.F90 +++ b/libglissade/glissade_grid_operators.F90 @@ -40,15 +40,14 @@ module glissade_grid_operators use glimmer_global, only: dp use glimmer_log - use glide_types ! HO_GRADIENT_MARGIN_* + use glide_types use parallel implicit none private public :: glissade_stagger, glissade_unstagger, & - glissade_centered_gradient, glissade_upstream_gradient, & - glissade_gradient_at_edges, & + glissade_gradient, glissade_gradient_at_edges, & glissade_surface_elevation_gradient, & glissade_vertical_average @@ -223,60 +222,24 @@ end subroutine glissade_unstagger !**************************************************************************** - subroutine glissade_centered_gradient(nx, ny, & - dx, dy, & - field, & - df_dx, df_dy, & - ice_mask, & - gradient_margin_in, & - usrf, & - floating_mask, & - land_mask, & - max_slope) + subroutine glissade_gradient(nx, ny, & + dx, dy, & + field, & + df_dx, df_dy, & + ice_mask, & + gradient_margin_in) !---------------------------------------------------------------- ! Given a scalar variable f on the unstaggered grid (dimension nx, ny), ! compute its gradient (df_dx, df_dy) on the staggered grid (dimension nx-1, ny-1). ! The gradient is evaluated at the four neighboring vertices and is second-order accurate. ! - ! 
The gradient at a given vertex is constructed from gradients at adjacent edges. - ! If one or more edge gradients is masked out, then df_fx and df_dy are constructed from the others. - ! If all edge gradients adjacent to a vertex are masked out, then the gradient is set to zero. - ! - ! Edge gradients are computed in the standard way, taking the difference between - ! the values in two adjacent cells and dividing by the distance. - ! At the ice margin, where one cell adjacent to a given edge is ice-free, - ! edge gradients may be masked in the following ways: - ! - ! HO_GRADIENT_MARGIN_ALL = 0: Values in both adjacent cells are used to compute the gradient, - ! including values in ice-free cells. In other words, there is no masking of edges. - ! This convention is used by Glide. It works well at land-terminating margins, but performs poorly - ! for ice shelves with a sudden drop in ice thickness and surface elevation at the margin. - ! - ! HO_GRADIENT_MARGIN_GROUNDED_ICE = 1: The gradient is computed at edges where either - ! (1) Both adjacent cells are ice-covered. - ! (2) One cell is ice-covered and grounded, and lies above the other cell. - ! - ! The edge is masked out where a floating cell is adjacent to an ice-free ocean cell, - ! or where an ice-covered cell lies below an ice-free land cell (i.e., a nunatak). - ! At land-terminating margins the gradient is nonzero (except for the nunatak case), - ! and at marine-terminating margins the gradient is zero (unless the ice-covered cell is grounded). - ! - ! This option used to be the default. However, it can overestimate driving forces at grounded marine - ! margins where spreading is driven by lateral forces even when the surface gradient is zero. - ! - ! HO_GRADIENT_MARGIN_ICE_ONLY = 2: Only values in ice-covered cells (i.e., cells with thck > thklim) - ! are used to compute gradients. If one or both adjacent cells is ice-free, the edge is masked out. - ! This option works well at shelf margins but less well for land margins (e.g., the Halfar test case). - ! - ! HO_GRADIENT_MARGIN_ICE_OVER_LAND = 3: The gradient is computed at edges where either - ! (1) Both adjacent cells are ice-covered. - ! (2) One cell is ice-covered and lies above ice-free land. - ! - ! Method 3 does not compute a gradient at edges where ice lies above ocean cells. - ! At these edges, lateral forces drive spreading in the absence of a surface gradient. - ! This method works well at both land- and marine-terminating margins. - ! It is the default for higher-order simulations. + ! The gradient at a given vertex is taken as the average of the edge gradients + ! on either side of the vertex. + ! If gradient_margin_in = 0, then gradients are computed at all edges, even if + ! one or both cells is ice-free (cf. stagger_margin_in above). + ! If gradient_margin_in = 1, then gradients are computed only for edges with + ! ice-covered cells (ice_mask = 1) in each adjacent cells. Other edges are ignored. !---------------------------------------------------------------- !---------------------------------------------------------------- @@ -287,8 +250,8 @@ subroutine glissade_centered_gradient(nx, ny, & nx, ny ! horizontal grid dimensions real(dp), intent(in) :: & - dx, dy ! grid cell length and width - ! assumed to have the same value for each grid cell + dx, dy ! grid cell length and width + ! assumed to have the same value for each grid cell real(dp), dimension(nx,ny), intent(in) :: & field ! 
input scalar field, defined at cell centers @@ -301,19 +264,7 @@ subroutine glissade_centered_gradient(nx, ny, & integer, intent(in), optional :: & gradient_margin_in ! 0: Compute edge gradient when either cell is ice-covered - ! 1: Compute edge gradient for grounded ice above ice-free land or ocean - ! 2: Compute edge gradient only when both cells have ice - ! 3: Compute edge gradient for ice-covered cell above ice-free land (not ocean) - - real(dp), dimension(nx,ny), intent(in), optional :: & - usrf ! ice surface elevation) - - integer, dimension(nx,ny), intent(in), optional :: & - floating_mask, & ! = 1 for cells where ice is present and floating, else = 0 - land_mask ! = 1 for land cells, else = 0 - - real(dp), intent(in), optional :: & - max_slope ! maximum slope allowed for surface gradient computations (unitless) + ! 1: Compute edge gradient only when both cells have ice !-------------------------------------------------------- ! Local variables @@ -323,55 +274,43 @@ subroutine glissade_centered_gradient(nx, ny, & integer :: i, j - logical, dimension(nx-1,ny) :: & - edge_mask_x ! edge mask for computing df/dx - - logical, dimension(nx,ny-1) :: & + logical, dimension(nx,ny) :: & + edge_mask_x, & ! edge mask for computing df/dx edge_mask_y ! edge mask for computing df/dy real(dp) :: df_dx_north, df_dx_south ! df_dx at neighboring edges real(dp) :: df_dy_east, df_dy_west ! df_dx at neighboring edges - !WHL - debug - real(dp) :: dfdx, dfdy - integer :: edge_count + ! Initialize + + df_dx(:,:) = 0.0d0 + df_dy(:,:) = 0.0d0 + + edge_mask_x(:,:) = .false. + edge_mask_y(:,:) = .false. + + if (present(gradient_margin_in)) then + gradient_margin = gradient_margin_in + else + gradient_margin = 0 ! default is to average over all cells, including those where ice is absent + endif !-------------------------------------------------------- ! Gradient at vertex(i,j) is based on f(i:i+1,j:j+1) - ! + ! ! (i,j+1) | (i+1,j+1) ! -------(i,j)---------- ! (i,j) | (i+1,j) !-------------------------------------------------------- - if (present(gradient_margin_in)) then - gradient_margin = gradient_margin_in - else - gradient_margin = HO_GRADIENT_MARGIN_GROUNDED_ICE - endif - ! Create masks identifying edges that will be used in gradient computations - if (gradient_margin == HO_GRADIENT_MARGIN_ALL) then + if (gradient_margin == 0) then edge_mask_x(:,:) = .true. ! true for all edges edge_mask_y(:,:) = .true. - elseif (gradient_margin == HO_GRADIENT_MARGIN_GROUNDED_ICE) then - - if (present(floating_mask) .and. present(land_mask) .and. present(usrf)) then - - call glissade_edgemask_gradient_margin_grounded_ice(nx, ny, & - ice_mask, & - floating_mask, & - land_mask, & - usrf, & - edge_mask_x, edge_mask_y) - else - call write_log('Must pass in floating_mask and usrf to use this gradient_margin option', GM_FATAL) - endif ! present(floating_mask), etc. - - elseif (gradient_margin == HO_GRADIENT_MARGIN_ICE_ONLY) then + elseif (gradient_margin == 1) then ! mask for east and west cell edges do j = 1, ny @@ -383,8 +322,8 @@ subroutine glissade_centered_gradient(nx, ny, & endif enddo enddo - - ! mask for north and south edges + + ! mask for north and south cell edges do j = 1, ny-1 do i = 1, nx if (ice_mask(i,j)==1 .and. ice_mask(i,j+1)==1) then @@ -397,30 +336,8 @@ subroutine glissade_centered_gradient(nx, ny, & endif ! 
gradient_margin - !WHL - debug - Count number of edges with gradients exceeding max_slope - if (present(max_slope)) then - edge_count = 0 - do j = nhalo+1, ny-nhalo - do i = nhalo+1, nx-nhalo - dfdx = (field(i+1,j) - field(i,j)) / dx - if (abs(dfdx) > max_slope .and. edge_mask_x(i,j)) then - edge_count = edge_count + 1 - endif - dfdy = (field(i,j+1) - field(i,j)) / dy - if (abs(dfdy) > max_slope .and. edge_mask_y(i,j)) then - edge_count = edge_count + 1 - endif - enddo - enddo - edge_count = parallel_reduce_sum(edge_count) - if (main_task) then -! print*, 'Number of edges:', (nx-2*nhalo)*(ny-2*nhalo)*2 -! print*, 'Limit slope: edge_count =', edge_count - endif - endif - - ! compute gradient at vertices by averaging gradient at adjacent edges - ! ignore edges with edge_mask = 0 + ! Compute gradient at vertices by averaging gradient at adjacent edges. + ! Ignore edges with edge_mask = F do j = 1, ny-1 do i = 1, nx-1 @@ -439,16 +356,6 @@ subroutine glissade_centered_gradient(nx, ny, & df_dx(i,j) = 0.d0 endif - ! Optionally, limit df_dx - - if (present(max_slope)) then - if (df_dx(i,j) > 0.0d0) then - df_dx(i,j) = min(df_dx(i,j), max_slope) - else - df_dx(i,j) = max(df_dx(i,j), -max_slope) - endif - endif - ! df/dy df_dy_east = (field(i+1,j+1) - field(i+1,j))/ dy df_dy_west = (field(i,j+1) - field(i,j)) / dy @@ -463,19 +370,9 @@ subroutine glissade_centered_gradient(nx, ny, & df_dy(i,j) = 0.d0 endif - ! Optionally, limit df_dy - - if (present(max_slope)) then - if (df_dy(i,j) > 0.0d0) then - df_dy(i,j) = min(df_dy(i,j), max_slope) - else - df_dy(i,j) = max(df_dy(i,j), -max_slope) - endif - endif - enddo ! i enddo ! j - + if (verbose_gradient .and. main_task) then print*, ' ' print*, 'Centered gradient:' @@ -488,7 +385,7 @@ subroutine glissade_centered_gradient(nx, ny, & enddo print*, ' ' enddo - + print*, ' ' print*, 'df_dy:' do j = ny-1, 1, -1 @@ -500,400 +397,7 @@ subroutine glissade_centered_gradient(nx, ny, & enddo endif - end subroutine glissade_centered_gradient - -!**************************************************************************** - - subroutine glissade_upstream_gradient(nx, ny, & - dx, dy, & - field, & - df_dx, df_dy, & - ice_mask, & - usrf, & - gradient_margin_in, & - accuracy_flag_in, & - floating_mask, & - land_mask, & - max_slope) - - !---------------------------------------------------------------- - ! Given a scalar variable f on the unstaggered grid (dimension nx, ny), - ! compute its gradient (df_dx, df_dy) on the staggered grid (dimension nx-1, ny-1). - ! The gradient can be evaluated at one upstream edge (for first-order accuracy) - ! or at two upstream edges (for second-order accuracy). - ! The reason to take a one-sided gradient is to damp checkerboard noise - ! that often arises with a centered gradient. - ! - ! Note: Upstream is defined by the direction of higher surface elevation. - ! For df_dx, the edge gradients are upstream in the y direction, - ! and for df_dy, the edge gradients are upstream in the x direction. - ! - ! See comments in subroutine glissade_centered_gradient about the - ! various values of gradient_margin. - !---------------------------------------------------------------- - - !---------------------------------------------------------------- - ! Input-output arguments - !---------------------------------------------------------------- - - integer, intent(in) :: & - nx, ny ! horizontal grid dimensions - - real(dp), intent(in) :: & - dx, dy ! grid cell length and width - ! 
assumed to have the same value for each grid cell - - real(dp), dimension(nx,ny), intent(in) :: & - field ! scalar field, defined at cell centers - - real(dp), dimension(nx-1,ny-1), intent(out) :: & - df_dx, df_dy ! gradient components, defined at cell vertices - - integer, dimension(nx,ny), intent(in) :: & - ice_mask ! = 1 where ice is present, else = 0 - - real(dp), dimension(nx,ny), intent(in) :: & - usrf ! ice surface elevation (required to determine upstream direction) - - integer, intent(in), optional :: & - gradient_margin_in ! 0: use all values when computing gradient (including zeroes where ice is absent) - ! 1: use values in ice-covered and/or land cells (but not ocean cells); see details above - ! If one or more values is masked out, construct df_fx and df_dy from the others - ! 2: use values in ice-covered cells only - ! If one or more values is masked out, construct df_fx and df_dy from the others - - integer, intent(in), optional :: & - accuracy_flag_in ! = 1 for 1st order, 2 for 2nd order - - integer, dimension(nx,ny), intent(in), optional :: & - floating_mask, & ! = 1 where ice is present and floating, else = 0 - land_mask ! = 1 for land cells, else = 0 - ! floating and land masks required for gradient_margin = HO_GRADIENT_MARGIN_GROUNDED_ICE - - real(dp), intent(in), optional :: & - max_slope ! maximum slope allowed for surface gradient computations (unitless) - - !-------------------------------------------------------- - ! Local variables - !-------------------------------------------------------- - - integer :: gradient_margin, accuracy_flag - integer :: i, j - integer :: summask - real(dp) :: sum1, sum2 - - logical, dimension(nx-1,ny) :: & - edge_mask_x ! edge mask for computing df/dx - - logical, dimension(nx,ny-1) :: & - edge_mask_y ! edge mask for computing df/dy - - real(dp) :: df_dx_north, df_dx_north2 - real(dp) :: df_dx_south, df_dx_south2 - real(dp) :: df_dy_east, df_dy_east2 - real(dp) :: df_dy_west, df_dy_west2 - - !-------------------------------------------------------- - ! First-order upstream gradient at vertex(i,j) is based on two points out of f(i:i+1,j:j+1) - ! - ! (i,j+1) | (i+1,j+1) - ! -------(i,j)---------- - ! (i,j) | (i+1,j) - ! - ! Second-order gradient is based on four points in the upstream direction - !-------------------------------------------------------- - - if (present(accuracy_flag_in)) then - accuracy_flag = accuracy_flag_in - else - accuracy_flag = 2 ! default to second-order - endif - - if (present(gradient_margin_in)) then - gradient_margin = gradient_margin_in - else - gradient_margin = HO_GRADIENT_MARGIN_GROUNDED_ICE - endif - - ! Set integer edge mask based on gradient_margin. - - if (gradient_margin == HO_GRADIENT_MARGIN_ALL) then - - edge_mask_x(:,:) = .true. ! true for all edges - edge_mask_y(:,:) = .true. - - elseif (gradient_margin == HO_GRADIENT_MARGIN_GROUNDED_ICE) then - - if (present(floating_mask) .and. present(land_mask)) then - - call glissade_edgemask_gradient_margin_grounded_ice(nx, ny, & - ice_mask, & - floating_mask, & - land_mask, & - usrf, & - edge_mask_x, edge_mask_y) - else - call write_log('Must pass in floating_mask to use this gradient_margin option', GM_FATAL) - endif ! present(floating_mask) - - elseif (gradient_margin == HO_GRADIENT_MARGIN_ICE_ONLY) then - - ! mask for east and west cell edges - do j = 1, ny - do i = 1, nx-1 - if (ice_mask(i,j)==1 .and. ice_mask(i+1,j)==1) then - edge_mask_x(i,j) = .true. - else - edge_mask_x(i,j) = .false. - endif - enddo - enddo - - ! 
mask for north and south cell edges - do j = 1, ny-1 - do i = 1, nx - if (ice_mask(i,j)==1 .and. ice_mask(i,j+1)==1) then - edge_mask_y(i,j) = .true. - else - edge_mask_y(i,j) = .false. - endif - enddo - enddo - - endif ! gradient_margin - - if (accuracy_flag == 1) then ! first-order accurate - - do j = 1, ny-1 - do i = 1, nx-1 - - if (edge_mask_x(i,j) .or. edge_mask_x(i,j+1)) then - - ! Compute df/dx by taking upstream gradient - df_dx_north = (field(i+1,j+1) - field(i,j+1)) / dx - df_dx_south = (field(i+1,j) - field(i,j)) / dx - - sum1 = usrf(i+1,j+1) + usrf(i,j+1) - sum2 = usrf(i+1,j) + usrf(i,j) - - if (sum1 > sum2) then ! north is upstream; use north edge gradient if possible - - if (edge_mask_x(i,j+1)) then - df_dx(i,j) = df_dx_north - else - df_dx(i,j) = df_dx_south - endif - - else ! south is upstream; use south edge gradient if possible - - if (edge_mask_x(i,j)) then - df_dx(i,j) = df_dx_south - else - df_dx(i,j) = df_dx_north - endif - - endif ! sum1 > sum2 - - else ! both adjacent edge masks = F; punt - - df_dx(i,j) = 0.d0 - - endif ! adjacent edge_mask = T - - ! Optionally, limit df_dx - - if (present(max_slope)) then - if (df_dx(i,j) > 0.0d0) then - df_dx(i,j) = min(df_dx(i,j), max_slope) - else - df_dx(i,j) = max(df_dx(i,j), -max_slope) - endif - endif - - if (edge_mask_y(i,j) .or. edge_mask_y(i+1,j)) then - - ! Compute df/dy by taking upstream gradient - df_dy_east = (field(i+1,j+1) - field(i+1,j)) / dy - df_dy_west = (field(i,j+1) - field(i,j)) / dy - - sum1 = usrf(i+1,j+1) + usrf(i+1,j) - sum2 = usrf(i,j+1) + usrf(i,j) - - if (sum1 > sum2) then ! east is upstream; use east edge gradient if possible - - if (edge_mask_y(i+1,j)) then - df_dy(i,j) = df_dy_east - else - df_dy(i,j) = df_dy_west - endif - - else ! west is upstream; use west edge gradient if possible - - if (edge_mask_y(i,j)) then - df_dy(i,j) = df_dy_west - else - df_dy(i,j) = df_dy_east - endif - - endif ! sum1 > sum2 - - else ! both adjacent edge masks = F; punt - - df_dy(i,j) = 0.d0 - - endif ! adjacent edge mask = T - - ! Optionally, limit df_dy - - if (present(max_slope)) then - if (df_dy(i,j) > 0.0d0) then - df_dy(i,j) = min(df_dy(i,j), max_slope) - else - df_dy(i,j) = max(df_dy(i,j), -max_slope) - endif - endif - - enddo - enddo - - else ! second-order accurate - - do j = 2, ny-2 ! loop does not include all of halo - do i = 2, nx-2 - - if (edge_mask_x(i,j) .or. edge_mask_x(i,j+1)) then - - ! Compute df_dx by taking upstream gradient - - df_dx_north2 = (field(i+1,j+2) - field(i,j+2)) / dx - df_dx_north = (field(i+1,j+1) - field(i,j+1)) / dx - df_dx_south = (field(i+1,j) - field(i,j)) / dx - df_dx_south2 = (field(i+1,j-1) - field(i,j-1)) / dx - - sum1 = usrf(i+1,j+1) + usrf(i,j+1) + usrf(i+1,j+2) + usrf(i,j+2) - sum2 = usrf(i+1,j) + usrf(i,j) + usrf(i+1,j-1) + usrf(i,j-1) - - if (sum1 > sum2) then ! north is upstream; use north edge gradients if possible - - if (edge_mask_x(i,j+1) .and. edge_mask_x(i,j+2)) then - df_dx(i,j) = 1.5d0 * df_dx_north - 0.5d0 * df_dx_north2 - elseif (edge_mask_x(i,j+1)) then ! revert to first order - df_dx(i,j) = df_dx_north - else ! first-order downstream - df_dx(i,j) = df_dx_south - endif - - else ! south is upstream; use south edge gradients if possible - - if (edge_mask_x(i,j) .and. edge_mask_x(i,j-1)) then - df_dx(i,j) = 1.5d0 * df_dx_south - 0.5d0 * df_dx_south2 - elseif (edge_mask_x(i,j)) then ! revert to first order - df_dx(i,j) = df_dx_south - else ! first-order downstream - df_dx(i,j) = df_dx_north - endif - - endif ! sum1 > sum2 - - else ! 
both adjacent edge masks = F; punt - - df_dx(i,j) = 0.d0 - - endif ! adjacent edge mask = T - - ! Optionally, limit df_dx - - if (present(max_slope)) then - if (df_dx(i,j) > 0.0d0) then - df_dx(i,j) = min(df_dx(i,j), max_slope) - else - df_dx(i,j) = max(df_dx(i,j), -max_slope) - endif - endif - - if (edge_mask_y(i,j) .or. edge_mask_y(i+1,j)) then - - ! Compute df_dy by taking upstream gradient - - df_dy_east2 = (field(i+2,j+1) - field(i+2,j)) / dy - df_dy_east = (field(i+1,j+1) - field(i+1,j)) / dy - df_dy_west = (field(i,j+1) - field(i,j)) / dy - df_dy_west2 = (field(i-1,j+1) - field(i-1,j)) / dy - - ! determine upstream direction - - sum1 = usrf(i+1,j+1) + usrf(i+1,j) + usrf(i+2,j+1) + usrf(i+2,j) - sum2 = usrf(i,j+1) + usrf(i,j) + usrf(i-1,j+1) + usrf(i-1,j) - - if (sum1 > sum2) then ! east is upstream; use east edge gradients if possible - - if (edge_mask_y(i+1,j) .and. edge_mask_y(i+2,j)) then - df_dy(i,j) = 1.5d0 * df_dy_east - 0.5d0 * df_dy_east2 - elseif (edge_mask_y(i+1,j)) then ! revert to first order - df_dy(i,j) = df_dy_east - else ! first-order downstream - df_dy(i,j) = df_dy_west - endif - - else ! west is upstream; use west edge gradients if possible - - if (edge_mask_y(i,j) .and. edge_mask_y(i-1,j)) then - df_dy(i,j) = 1.5d0 * df_dy_west - 0.5d0 * df_dy_west2 - elseif (edge_mask_y(i,j)) then ! revert to first order - df_dy(i,j) = df_dy_west - else ! first_order downstream - df_dy(i,j) = df_dy_east - endif - - endif ! sum1 > sum2 - - else ! both adjacent edge masks = F; punt - - df_dy(i,j) = 0.d0 - - endif ! adjacent edge mask = T - - ! Optionally, limit df_dy - - if (present(max_slope)) then - if (df_dy(i,j) > 0.0d0) then - df_dy(i,j) = min(df_dy(i,j), max_slope) - else - df_dy(i,j) = max(df_dy(i,j), -max_slope) - endif - endif - - enddo ! i - enddo ! j - - ! fill in halo values - call staggered_parallel_halo(df_dx) - call staggered_parallel_halo(df_dy) - - endif ! first or second order accurate - - if (verbose_gradient .and. main_task) then - print*, ' ' - print*, 'upstream df_dx:' - do j = ny-2, 2, -1 - do i = 1, nx-1 - write(6,'(f7.4)',advance='no') df_dx(i,j) - enddo - print*, ' ' - enddo - - print*, ' ' - print*, 'upstream df_dy:' - do j = ny-2, 2, -1 - do i = 1, nx-1 - write(6,'(f7.4)',advance='no') df_dy(i,j) - enddo - print*, ' ' - enddo - - endif - - end subroutine glissade_upstream_gradient + end subroutine glissade_gradient !**************************************************************************** @@ -904,7 +408,6 @@ subroutine glissade_gradient_at_edges(nx, ny, & ice_mask, & gradient_margin_in, & usrf, & - floating_mask, & land_mask, & max_slope) @@ -913,6 +416,8 @@ subroutine glissade_gradient_at_edges(nx, ny, & ! compute its gradient (df_dx, df_dy) at cell edges (i.e., the C grid): ! df_dx at the midpoint of the east edge and df_dy at the midpoint of ! the north edge. + ! + ! This subroutine is called by the glissade SIA solver. !---------------------------------------------------------------- !---------------------------------------------------------------- @@ -949,9 +454,8 @@ subroutine glissade_gradient_at_edges(nx, ny, & usrf ! ice surface elevation integer, dimension(nx,ny), intent(in), optional :: & - floating_mask, & ! = 1 where ice is present and floating, else = 0 land_mask ! = 1 for land cells, else = 0 - ! floating and land masks required for gradient_margin = HO_GRADIENT_MARGIN_GROUNDED_ICE + ! floating and land masks required for gradient_margin = HO_GRADIENT_MARGIN_HYBRID real(dp), intent(in), optional :: & max_slope ! 
maximum slope allowed for surface gradient computations (unitless) @@ -963,10 +467,10 @@ subroutine glissade_gradient_at_edges(nx, ny, & integer :: gradient_margin integer :: i, j - logical, dimension(nx-1,ny) :: & + logical, dimension(nx,ny) :: & edge_mask_x ! edge mask for computing df/dx - logical, dimension(nx,ny-1) :: & + logical, dimension(nx,ny) :: & edge_mask_y ! edge mask for computing df/dy real(dp) :: & @@ -993,31 +497,33 @@ subroutine glissade_gradient_at_edges(nx, ny, & if (present(gradient_margin_in)) then gradient_margin = gradient_margin_in else - gradient_margin = HO_GRADIENT_MARGIN_GROUNDED_ICE + gradient_margin = HO_GRADIENT_MARGIN_HYBRID endif - ! Set integer edge mask based on gradient_margin. + ! Set logical edge mask based on gradient_margin. - if (gradient_margin == HO_GRADIENT_MARGIN_ALL) then + edge_mask_x(:,:) = .false. + edge_mask_y(:,:) = .false. + + if (gradient_margin == HO_GRADIENT_MARGIN_LAND) then edge_mask_x(:,:) = .true. ! true for all edges edge_mask_y(:,:) = .true. - elseif (gradient_margin == HO_GRADIENT_MARGIN_GROUNDED_ICE) then + elseif (gradient_margin == HO_GRADIENT_MARGIN_HYBRID) then - if (present(floating_mask) .and. present(land_mask) .and. present(usrf)) then + if (present(land_mask) .and. present(usrf)) then - call glissade_edgemask_gradient_margin_grounded_ice(nx, ny, & - ice_mask, & - floating_mask, & - land_mask, & - usrf, & - edge_mask_x, edge_mask_y) + call glissade_edgemask_gradient_margin_hybrid(nx, ny, & + ice_mask, & + land_mask, & + usrf, & + edge_mask_x, edge_mask_y) else - call write_log('Must pass in floating mask and usrf to use this gradient_margin option', GM_FATAL) - endif ! present(floating_mask), etc. + call write_log('Must pass in land_mask and usrf to use this gradient_margin option', GM_FATAL) + endif - elseif (gradient_margin == HO_GRADIENT_MARGIN_ICE_ONLY) then + elseif (gradient_margin == HO_GRADIENT_MARGIN_MARINE) then ! mask for east and west cell edges do j = 1, ny @@ -1118,26 +624,27 @@ end subroutine glissade_gradient_at_edges !**************************************************************************** - subroutine glissade_edgemask_gradient_margin_grounded_ice(nx, ny, & - ice_mask, & - floating_mask, & - land_mask, & - usrf, & - edge_mask_x, edge_mask_y) + subroutine glissade_edgemask_gradient_margin_hybrid(nx, ny, & + ice_mask, & + land_mask, & + usrf, & + edge_mask_x, edge_mask_y) !---------------------------------------------------------------- - ! Compute edge masks required for option gradient_margin = HO_GRADIENT_MARGIN_GROUNDED_ICE. + ! Compute edge masks required for option gradient_margin = HO_GRADIENT_MARGIN_HYBRID ! ! The mask is set to true at all edges where either ! (1) Both adjacent cells are ice-covered. - ! (2) One cell is ice-covered and grounded, and lies above the other cell. + ! (2) One cell is ice-covered, and lies above ice-free land. ! - ! The mask is set to false where a floating cell is adjacent to an ice-free ocean cell, - ! or where an ice-covered land cell lies below an ice-free land cell (i.e., a nunatak). + ! This method sets the gradient to zero at edges where + ! (1) An ice-covered cell (grounded or floating) lies above ice-free ocean. + ! Note: Inactive calving-front cells are treated as ice-free ocean. + ! (2) An ice-covered land cell lies below an ice-free land cell (i.e., a nunatak). ! - ! The intent is to give a reasonable gradient at both land-terminating and marine-terminating margins. - ! 
At land-terminating margins the gradient is nonzero (except for the nunatak case), - ! and at marine-terminating margins the gradient is zero (unless the ice-covered cell is grounded). + ! This method aims to give a reasonable gradient at both land-terminating and marine-terminating margins. + ! At land-terminating margins the gradient is nonzero (except for nunataks), and at marine-terminating + ! margins the gradient is zero. !---------------------------------------------------------------- !---------------------------------------------------------------- @@ -1147,20 +654,16 @@ subroutine glissade_edgemask_gradient_margin_grounded_ice(nx, ny, integer, intent(in) :: & nx, ny ! horizontal grid dimensions - ! Note: land_mask is not currently part of the logic, but might be at some point integer, dimension(nx,ny), intent(in) :: & ice_mask, & ! = 1 where ice is present, else = 0 - floating_mask, & ! = 1 where ice is present and floating, else = 0 land_mask ! = 1 for land cells, else = 0 real(dp), dimension(nx,ny), intent(in) :: & usrf ! ice surface elevation - logical, dimension(nx-1,ny), intent(out) :: & - edge_mask_x ! edge mask for computing df/dx - - logical, dimension(nx,ny-1), intent(out) :: & - edge_mask_y ! edge mask for computing df/dy + logical, dimension(nx,ny), intent(out) :: & + edge_mask_x, & ! edge mask for computing df/dx + edge_mask_y ! edge mask for computing df/dy !---------------------------------------------------------------- ! Local variables @@ -1168,45 +671,51 @@ subroutine glissade_edgemask_gradient_margin_grounded_ice(nx, ny, integer :: i, j + ! initialize + edge_mask_x(:,:) = .false. + edge_mask_y(:,:) = .false. + ! compute mask for east and west cell edges + do j = 1, ny do i = 1, nx-1 - ! check for ice in both adjacent cells, or grounded ice in one cell lying above the other cell - if ( (ice_mask(i,j)==1 .and. ice_mask(i+1,j)==1) .or. & - (ice_mask(i,j)==1 .and. floating_mask(i,j)==0 .and. usrf(i,j) > usrf(i+1,j)) .or. & - (ice_mask(i+1,j)==1 .and. floating_mask(i+1,j)==0 .and. usrf(i+1,j) > usrf(i,j)) ) then + if (ice_mask(i,j) == 1 .and. ice_mask(i+1,j) == 1) then ! both cells have ice edge_mask_x(i,j) = .true. - else + elseif ( (ice_mask(i,j)==1 .and. land_mask(i+1,j)==1 .and. usrf(i,j) > usrf(i+1,j)) .or. & + (ice_mask(i+1,j)==1 .and. land_mask(i,j)==1 .and. usrf(i+1,j) > usrf(i,j)) ) then - edge_mask_x(i,j) = .false. + ! ice-covered cell lies above ice-free land + edge_mask_x(i,j) = .true. endif + enddo enddo ! compute mask for north and south cell edges + do j = 1, ny-1 do i = 1, nx - ! check for ice in both adjacent cells, or grounded ice in one cell lying above the other cell - if ( (ice_mask(i,j)==1 .and. ice_mask(i,j+1)==1) .or. & - (ice_mask(i,j)==1 .and. floating_mask(i,j)==0 .and. usrf(i,j) > usrf(i,j+1)) .or. & - (ice_mask(i,j+1)==1 .and. floating_mask(i,j+1)==0 .and. usrf(i,j+1) > usrf(i,j)) ) then + if (ice_mask(i,j) == 1 .and. ice_mask(i,j+1) == 1) then ! both cells have ice edge_mask_y(i,j) = .true. - else + elseif ( (ice_mask(i,j)==1 .and. land_mask(i,j+1)==1 .and. usrf(i,j) > usrf(i,j+1)) .or. & + (ice_mask(i,j+1)==1 .and. land_mask(i,j)==1 .and. usrf(i,j+1) > usrf(i,j)) ) then - edge_mask_y(i,j) = .false. + ! ice-covered cell lies above ice-free land + edge_mask_y(i,j) = .true. 
endif + enddo enddo - end subroutine glissade_edgemask_gradient_margin_grounded_ice + end subroutine glissade_edgemask_gradient_margin_hybrid !**************************************************************************** @@ -1219,29 +728,55 @@ subroutine glissade_surface_elevation_gradient(nx, ny, & thklim, & thck_gradient_ramp, & ds_dx, ds_dy, & + ho_gradient, & + ho_gradient_margin, & max_slope) - + !---------------------------------------------------------------- - ! Compute surface elevation gradients for option gradient_margin = HO_GRADIENT_MARGIN_ICE_OVER_LAND. + ! Compute surface elevation gradients for different ho_gradient and ho_gradient_margin options. ! - ! The gradient is nonzero at edges where either - ! (1) Both adjacent cells have active ice. - ! (2) An ice-covered cell lies above ice-free land. + ! The gradient at a given vertex is constructed from gradients at adjacent edges. + ! Edge gradients are computed in the standard way, taking the difference between + ! the values in two adjacent cells and dividing by the distance. + ! At the ice margin, where one or both cells adjacent to a given edge may be ice-free, + ! edge gradients may be masked in the following ways: + ! + ! HO_GRADIENT_MARGIN_LAND = 0: Values in both adjacent cells are used to compute the gradient, + ! including values in ice-free cells. In other words, there is no masking of edges. + ! This convention is used by Glide. It works well at land-terminating margins, but performs poorly + ! for ice shelves with a sharp drop in ice thickness and surface elevation at the margin. ! - ! The gradient is set to zero where + ! HO_GRADIENT_MARGIN_HYBRID = 1: The gradient is computed at edges where either + ! (1) Both adjacent cells are ice-covered. + ! (2) One cell is ice-covered and lies above ice-free land. + ! + ! This method sets the gradient to zero at edges where ! (1) An ice-covered cell (grounded or floating) lies above ice-free ocean. ! Note: Inactive calving-front cells are treated as ice-free ocean. ! (2) An ice-covered land cell lies below an ice-free land cell (i.e., a nunatak). ! - ! The intent is to give a reasonable gradient at both land-terminating and marine-terminating margins. + ! The aim is to give a reasonable gradient at both land-terminating and marine-terminating margins. ! At land-terminating margins the gradient is nonzero (except for nunataks), and at marine-terminating ! margins the gradient is zero. ! - ! At some edges, gradients are reduced to inhibit oscillations: - ! - If the ice in the upper-lying cell is thin (thklim < thck < thklim + thck_gradient_ramp), the gradient is reduced. - ! This prevents large changes in the gradient as the ice thickness oscillates about thklim. - ! - If grounded marine-based ice lies above ice-free ocean, the surface elevation difference is replaced - ! by thickness above flotation (thck - h_flotation), which approaches zero when the ice is nearly afloat. + ! HO_GRADIENT_MARGIN_MARINE = 2: Only values in ice-covered cells (i.e., cells with thck > thklim) + ! are used to compute gradients. If one or both adjacent cells is ice-free, the edge is masked out. + ! This option works well at shelf margins but less well for land margins (e.g., the Halfar test case). + ! + ! There are three ways to compute vertex gradients from edge gradients, as determined + ! by the ho_gradient option: + ! + ! HO_GRADIENT = 0: Standard centered gradient, obtained by averaging the two nearest edge gradients + ! + ! 
HO_GRADIENT = 1: First-order upstream gradient, obtained by setting the vertex value to the + ! nearest edge gradient on the higher-elevation side of the vertex + ! + ! HO_GRADIENT = 2: Second-order upstream gradient, obtained by setting the vertex value to a linear + ! combination of the two nearest edge gradients on the higher-elevation side + ! + ! The centered gradient is usually most accurate, but it can lead to checkerboard noise + ! in the surface elevation field, because a checkerboard pattern is invisible to the gradient. + ! The upstream gradients are less accurate but are better at damping checkerboard noise. !---------------------------------------------------------------- !---------------------------------------------------------------- @@ -1261,7 +796,7 @@ subroutine glissade_surface_elevation_gradient(nx, ny, & real(dp), dimension(nx,ny), intent(in) :: & thck, & ! ice thickness usrf, & ! ice surface elevation - topg ! bed elevation + topg ! bed elevation real(dp), intent(in) :: & eus, & ! eustatic sea level @@ -1269,11 +804,16 @@ subroutine glissade_surface_elevation_gradient(nx, ny, & thck_gradient_ramp real(dp), dimension(nx-1,ny-1), intent(out) :: & - ds_dx, & ! ds/dx at vertices - ds_dy ! ds/dy at vertices + ds_dx, & ! ds/dx at vertices + ds_dy ! ds/dy at vertices + + integer, intent(in) :: & + ho_gradient, & ! gradient type (centered, 1st-order upstream, or 2nd-order upstream) + ho_gradient_margin ! option for computing gradients at ice sheet margin + ! see comments above real(dp), intent(in), optional :: & - max_slope ! maximum slope allowed for surface gradient computations (unitless) + max_slope ! maximum slope allowed for surface gradient computations (unitless) !---------------------------------------------------------------- ! Local variables @@ -1292,106 +832,158 @@ subroutine glissade_surface_elevation_gradient(nx, ny, & edge_factor, & ! gradient-weighting factor in range [0,1] sign_factor ! sign factor, +1 or -1 + real(dp) :: sum1, sum2 ! temporary sums + + !WHL - debug + real(dp) :: alpha + + real(dp) :: ds_dx_up, ds_dy_up + real(dp) :: ds_dx_ctr, ds_dy_ctr + ! initialize + ds_dx_edge(:,:) = 0.0d0 ds_dy_edge(:,:) = 0.0d0 ds_dx(:,:) = 0.0d0 ds_dy(:,:) = 0.0d0 - ! compute ds_dx on east edges - do j = 1, ny - do i = 1, nx-1 + if (ho_gradient_margin == HO_GRADIENT_MARGIN_LAND) then - ! determine which cell is upper and which is lower - if (usrf(i,j) > usrf(i+1,j)) then - iu = i - il = i+1 - sign_factor = -1.0d0 - else - iu = i+1 - il = i - sign_factor = 1.0d0 - endif + ! Compute ds_dx and ds_dy on all edges, whether or not adjacent cells are ice-covered + do j = 1, ny + do i = 1, nx-1 + ds_dx_edge(i,j) = (usrf(i+1,j) - usrf(i,j)) / dx + enddo + enddo - if (land_mask(iu,j) == 1) then - ! Compute a factor that reduces the gradient if ice in the upper cell is thin and land-based. - ! This inhibits oscillations in the gradient when the thickness in the upper cell is close to thklim. - edge_thck_upper = thck(iu,j) - edge_factor = min(1.0d0, (edge_thck_upper - thklim)/thck_gradient_ramp) - edge_factor = max(edge_factor, 0.0d0) - else - edge_factor = 1.0d0 - endif + do j = 1, ny-1 + do i = 1, nx + ds_dy_edge(i,j) = (usrf(i,j+1) - usrf(i,j)) / dy + enddo + enddo - if (active_ice_mask(iu,j) == 1 .and. active_ice_mask(il,j) == 1) then ! both cells have active ice + elseif (ho_gradient_margin == HO_GRADIENT_MARGIN_MARINE) then - ! compute the gradient in the usual way - ds_dx_edge(i,j) = edge_factor * sign_factor * (usrf(iu,j) - usrf(il,j)) / dx + ! 
Compute ds_dx and ds_dy only on edges with active ice in each adjacent cell + do j = 1, ny + do i = 1, nx-1 + if (active_ice_mask(i,j) == 1 .and. active_ice_mask(i+1,j) == 1) then + ds_dx_edge(i,j) = (usrf(i+1,j) - usrf(i,j)) / dx + endif + enddo + enddo - elseif (active_ice_mask(iu,j) == 1 .and. land_mask(il,j) == 1) then + do j = 1, ny-1 + do i = 1, nx + if (active_ice_mask(i,j) == 1 .and. active_ice_mask(i,j+1) == 1) then + ds_dy_edge(i,j) = (usrf(i,j+1) - usrf(i,j)) / dy + endif + enddo + enddo - ! upper cell has active ice and ice-free lower cell is land; compute the gradient in the usual way - ds_dx_edge(i,j) = edge_factor * sign_factor * (usrf(iu,j) - usrf(il,j)) / dx + elseif (ho_gradient_margin == HO_GRADIENT_MARGIN_HYBRID) then - endif ! both cells have ice + ! compute ds_dx on east edges + do j = 1, ny + do i = 1, nx-1 - enddo ! i - enddo ! j + ! determine which cell is upper and which is lower + if (usrf(i,j) > usrf(i+1,j)) then + iu = i + il = i+1 + sign_factor = -1.0d0 + else + iu = i+1 + il = i + sign_factor = 1.0d0 + endif - ! compute ds_dy on north edges - do j = 1, ny-1 - do i = 1, nx + if (land_mask(iu,j) == 1) then + ! Compute a factor that reduces the gradient if ice in the upper cell is thin and land-based. + ! This inhibits oscillations in the gradient when the thickness in the upper cell is close to thklim. + edge_thck_upper = thck(iu,j) + edge_factor = min(1.0d0, (edge_thck_upper - thklim)/thck_gradient_ramp) + edge_factor = max(edge_factor, 0.0d0) + else + edge_factor = 1.0d0 + endif - ! determine which cell is upper and which is lower - if (usrf(i,j) > usrf(i,j+1)) then - ju = j - jl = j+1 - sign_factor = -1.0d0 - else - ju = j+1 - jl = j - sign_factor = 1.0d0 - endif + if (active_ice_mask(iu,j) == 1 .and. active_ice_mask(il,j) == 1) then ! both cells have active ice - if (land_mask(i,ju) == 1) then - ! Compute a factor that reduces the gradient if ice in the upper cell is thin and land-based. - ! This inhibits oscillations in the gradient when the thickness in the upper cell is close to thklim. - edge_thck_upper = thck(i,ju) - edge_factor = min(1.0d0, (edge_thck_upper - thklim)/thck_gradient_ramp) - edge_factor = max(edge_factor, 0.0d0) - else - edge_factor = 1.0d0 - endif + ! compute the gradient + ds_dx_edge(i,j) = edge_factor * sign_factor * (usrf(iu,j) - usrf(il,j)) / dx - if (active_ice_mask(i,ju)==1 .and. active_ice_mask(i,jl)==1) then ! both cells have ice + elseif (active_ice_mask(iu,j) == 1 .and. land_mask(il,j) == 1) then - ! compute the gradient in the usual way - ds_dy_edge(i,j) = edge_factor * sign_factor * (usrf(i,ju) - usrf(i,jl)) / dy + ! upper cell has active ice, and ice-free lower cell is land; compute the gradient + ds_dx_edge(i,j) = edge_factor * sign_factor * (usrf(iu,j) - usrf(il,j)) / dx - elseif (active_ice_mask(i,ju) == 1 .and. land_mask(i,jl) == 1) then + endif ! both cells have ice - ! upper cell has active ice and ice-free lower cell is land; compute the gradient in the usual way - ds_dy_edge(i,j) = edge_factor * sign_factor * (usrf(i,ju) - usrf(i,jl)) / dy + enddo ! i + enddo ! j - endif ! both cells have ice + ! compute ds_dy on north edges + do j = 1, ny-1 + do i = 1, nx - enddo ! i - enddo ! j + ! determine which cell is upper and which is lower + if (usrf(i,j) > usrf(i,j+1)) then + ju = j + jl = j+1 + sign_factor = -1.0d0 + else + ju = j+1 + jl = j + sign_factor = 1.0d0 + endif - ! Average the edge gradients to vertices + if (land_mask(i,ju) == 1) then + ! 
Compute a factor that reduces the gradient if ice in the upper cell is thin and land-based. + ! This inhibits oscillations in the gradient when the thickness in the upper cell is close to thklim. + edge_thck_upper = thck(i,ju) + edge_factor = min(1.0d0, (edge_thck_upper - thklim)/thck_gradient_ramp) + edge_factor = max(edge_factor, 0.0d0) + else + edge_factor = 1.0d0 + endif - do j = 1, ny-1 - do i = 1, nx-1 - ds_dx(i,j) = 0.5d0 * (ds_dx_edge(i,j) + ds_dx_edge(i,j+1)) - ds_dy(i,j) = 0.5d0 * (ds_dy_edge(i,j) + ds_dy_edge(i+1,j)) + if (active_ice_mask(i,ju)==1 .and. active_ice_mask(i,jl)==1) then ! both cells have ice + + ! compute the gradient + ds_dy_edge(i,j) = edge_factor * sign_factor * (usrf(i,ju) - usrf(i,jl)) / dy + + elseif (active_ice_mask(i,ju) == 1 .and. land_mask(i,jl) == 1) then + + ! upper cell has active ice, and ice-free lower cell is land; compute the gradient + ds_dy_edge(i,j) = edge_factor * sign_factor * (usrf(i,ju) - usrf(i,jl)) / dy + + endif ! both cells have ice + + enddo ! i + enddo ! j + + endif ! ho_gradient_margin + + + ! Average the edge gradients to the vertex, depending on the value of ho_gradient. + + if (ho_gradient == HO_GRADIENT_CENTERED) then + + ! Average the edge gradients to vertices using a centered approximation. + ! This method is 2nd order accurate but can be subject to B grid noise. + do j = 1, ny-1 + do i = 1, nx-1 + ds_dx(i,j) = 0.5d0 * (ds_dx_edge(i,j) + ds_dx_edge(i,j+1)) + ds_dy(i,j) = 0.5d0 * (ds_dy_edge(i,j) + ds_dy_edge(i+1,j)) + enddo enddo - enddo - !WHL - This is an alternate method of averaging edge gradients to vertices, following glissade_centered_gradient. - ! It is commented out. - ! For the dome problem, the differences between gradient_margin methods 1 and 3 - ! come from replacing these lines with the ds_dx and ds_dy calculations above. + !WHL - This is an alternate method of averaging edge gradients to vertices, following glissade_centered_gradient. + ! It is commented out. + ! For the dome problem, the differences between gradient_margin methods 1 and 3 + ! come from replacing these lines with the ds_dx and ds_dy calculations above. ! do j = 1, ny-1 ! do i = 1, nx-1 @@ -1421,6 +1013,76 @@ subroutine glissade_surface_elevation_gradient(nx, ny, & ! enddo ! enddo + + elseif (ho_gradient == HO_GRADIENT_UPSTREAM1) then + + ! Take a one-sided, first-order-accurate upstream gradient. + ! For ds_dx, use the east/west edge that is upstream in the y direction + ! For ds_dy, use the north/south edge that is upstream in the x direction + + do j = 1, ny-1 + do i = 1, nx-1 + + ! Identify the upstream edge in the y direction + sum1 = usrf(i,j+1) + usrf(i+1,j+1) + sum2 = usrf(i,j) + usrf(i+1,j) + + if (sum1 > sum2) then ! north is upstream; use east edge of cell (i,j+1) + ds_dx(i,j) = ds_dx_edge(i,j+1) + else ! south is upstream; use east edge of cell (i,j) + ds_dx(i,j) = ds_dx_edge(i,j) + endif + + ! Identify the upstream edge in the x direction + sum1 = usrf(i+1,j) + usrf(i+1,j+1) + sum2 = usrf(i,j) + usrf(i,j+1) + + if (sum1 > sum2) then ! east is upstream; use north edge of cell (i+1,j) + ds_dy(i,j) = ds_dy_edge(i+1,j) + else ! west is upstream; use north edge of cell (i,j) + ds_dy(i,j) = ds_dy_edge(i,j) + endif + + enddo + enddo + + elseif (ho_gradient == HO_GRADIENT_UPSTREAM2) then + + ! Take a one-sided, second-order-accurate upstream gradient. + + do j = 2, ny-2 + do i = 2, nx-2 + + ! 
Identify the upstream edge in the y direction + sum1 = usrf(i,j+1) + usrf(i+1,j+1) + usrf(i,j+2) + usrf(i+1,j+2) + sum2 = usrf(i,j) + usrf(i+1,j) + usrf(i,j-1) + usrf(i+1,j-1) + + ! Compute df_dx by taking 2nd-order upstream gradient + + if (sum1 > sum2) then ! north is upstream; use east edge of cells (i,j+1:j+2) + ds_dx(i,j) = 1.5d0 * ds_dx_edge(i,j+1) - 0.5d0 * ds_dx_edge(i,j+2) + else ! south is upstream; use east edge of cells (i,j-1:j) + ds_dx(i,j) = 1.5d0 * ds_dx_edge(i,j) - 0.5d0 * ds_dx_edge(i,j-1) + endif + + ! Identify the upstream edge in the x direction + sum1 = usrf(i+1,j) + usrf(i+1,j+1) + usrf(i+2,j) + usrf(i+2,j+1) + sum2 = usrf(i,j) + usrf(i,j+1) + usrf(i-1,j) + usrf(i-1,j+1) + + ! Compute df_dx by taking 2nd-order upstream gradient + + if (sum1 > sum2) then ! east is upstream; use north edge of cells (i+1:i+2,j) + ds_dy(i,j) = 1.5d0 * ds_dy_edge(i+1,j) - 0.5d0 * ds_dy_edge(i+2,j) + else ! west is upstream; use north edge of cells (i-1:i,j) + ds_dy(i,j) = 1.5d0 * ds_dy_edge(i,j) - 0.5d0 * ds_dy_edge(i-1,j) + endif + + enddo + enddo + + endif ! ho_gradient + + ! Optionally, limit ds/dx and ds/dy if (present(max_slope)) then @@ -1447,12 +1109,16 @@ subroutine glissade_surface_elevation_gradient(nx, ny, & endif ! present(max_slope) + ! halo update + call staggered_parallel_halo(ds_dx) + call staggered_parallel_halo(ds_dy) + if (verbose_gradient .and. main_task) then print*, ' ' print*, 'Hybrid gradient:' print*, ' ' print*, 'ds_dx:' - do j = ny-1, 1, -1 + do j = ny-2, 2, -1 !! do i = 1, nx-1 do i = 1, nx/2 write(6,'(f9.6)',advance='no') ds_dx(i,j) @@ -1462,7 +1128,7 @@ subroutine glissade_surface_elevation_gradient(nx, ny, & print*, ' ' print*, 'ds_dy:' - do j = ny-1, 1, -1 + do j = ny-2, 2, -1 !! do i = 1, nx-1 do i = 1, nx/2 write(6,'(f9.6)',advance='no') ds_dy(i,j) diff --git a/libglissade/glissade_therm.F90 b/libglissade/glissade_therm.F90 index 64541e28..ce93bbe7 100644 --- a/libglissade/glissade_therm.F90 +++ b/libglissade/glissade_therm.F90 @@ -193,7 +193,14 @@ subroutine glissade_init_therm (temp_init, is_restart, & ! Method (3) may be optimal for reducing spinup time in the interior of large ice sheets. ! Option (4) requires that temperature is present in the input file. - if (temp_init == TEMP_INIT_EXTERNAL) then + if (is_restart == RESTART_TRUE) then + + ! Temperature has already been initialized from a restart file. + ! (Temperature is always a restart variable.) + + call write_log('Initializing ice temperature from the restart file') + + elseif (temp_init == TEMP_INIT_EXTERNAL) then ! Temperature from external file @@ -229,8 +236,7 @@ subroutine glissade_init_therm (temp_init, is_restart, & tempunstag(1,:,:) = temp(0,:,:) tempunstag(upn,:,:) = temp(upn,:,:) - call write_log('Initializing ice temperature from a restart/input file') - + call write_log('Initializing ice temperature from an input file') elseif ( maxval(tempunstag(:,:,:)) > (-1.0d0 * trpt)) then @@ -1997,8 +2003,11 @@ subroutine glissade_flow_factor(whichflwa, whichtemp, & stagsigma, & thck, ice_mask, & temp, flwa, & - default_flwa_arg, & - flow_enhancement_factor, waterfrac) + default_flwa, & + flow_enhancement_factor, & + flow_enhancement_factor_ssa, & + floating_mask, & + waterfrac) ! Calculate Glen's $A$ over the 3D domain, using one of three possible methods. ! 
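Editorial note (not part of the patch): the hunks that follow add a second flow enhancement factor that applies only to floating ice, selected cell by cell from floating_mask. The standalone sketch below illustrates that selection and its effect on the isothermal flow factor. It is a minimal toy example: the array sizes, the value 0.5 for the SSA factor, and the default_flwa value are assumptions for illustration only, not values taken from the code.

   program flow_enhancement_demo
      implicit none
      integer, parameter :: dp = kind(1.0d0)
      integer, parameter :: ewn = 4, nsn = 2
      real(dp), parameter :: default_flwa = 1.0d-16              ! Pa^{-n} yr^{-1}, illustrative value only
      real(dp), parameter :: flow_enhancement_factor     = 1.0d0 ! grounded ice (default in the patch)
      real(dp), parameter :: flow_enhancement_factor_ssa = 0.5d0 ! floating ice (assumed value)
      integer  :: floating_mask(ewn,nsn)
      real(dp) :: enhancement_factor(ewn,nsn), flwa(ewn,nsn)
      integer  :: ew, ns

      ! toy mask: pretend the two easternmost columns are floating
      floating_mask = 0
      floating_mask(3:4,:) = 1

      do ns = 1, nsn
         do ew = 1, ewn
            ! pick the enhancement factor based on whether the cell is floating
            if (floating_mask(ew,ns) == 1) then
               enhancement_factor(ew,ns) = flow_enhancement_factor_ssa
            else
               enhancement_factor(ew,ns) = flow_enhancement_factor
            endif
            ! apply it to the isothermal flow factor, as in the initialization step
            flwa(ew,ns) = enhancement_factor(ew,ns) * default_flwa
         enddo
      enddo

      print *, 'grounded flwa:', flwa(1,1), '  floating flwa:', flwa(4,1)
   end program flow_enhancement_demo

A smaller factor for floating ice stiffens the shelves relative to grounded ice; the patch keeps 1.0 as the default for both so that existing configurations are unchanged.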
@@ -2037,10 +2046,11 @@ subroutine glissade_flow_factor(whichflwa, whichtemp, & integer, dimension(:,:), intent(in) :: ice_mask !> = 1 where ice is present (thck > thklim), else = 0 real(dp),dimension(:,:,:), intent(in) :: temp !> 3D temperature field (deg C) real(dp),dimension(:,:,:), intent(inout) :: flwa !> output $A$, in units of Pa^{-n} s^{-1}, allow input for data option - real(dp), intent(in) :: default_flwa_arg !> Glen's A to use in isothermal case - !> Units: Pa^{-n} s^{-1} - real(dp), intent(in), optional :: flow_enhancement_factor !> flow enhancement factor in Arrhenius relationship - real(dp),dimension(:,:,:), intent(in), optional :: waterfrac !> internal water content fraction, 0 to 1 + real(dp), intent(in) :: default_flwa !> Glen's A to use in isothermal case, Pa^{-n} s^{-1} + real(dp), intent(in), optional :: flow_enhancement_factor !> flow enhancement factor in Arrhenius relationship + real(dp), intent(in), optional :: flow_enhancement_factor_ssa !> flow enhancement factor for floating ice + integer, dimension(:,:), intent(in), optional :: floating_mask !> = 1 where ice is present and floating, else = 0 + real(dp),dimension(:,:,:), intent(in), optional :: waterfrac !> internal water content fraction, 0 to 1 !> \begin{description} !> \item[0] Set to prescribed constant value. @@ -2052,10 +2062,12 @@ subroutine glissade_flow_factor(whichflwa, whichtemp, & ! Internal variables !------------------------------------------------------------------------------------ - real(dp) :: default_flwa ! Glen's A for isothermal case, in units of Pa{-n} s^{-1} integer :: ew, ns, up, ewn, nsn, nlayers real(dp), dimension(size(stagsigma)) :: pmptemp ! pressure melting point temperature - real(dp) :: enhancement_factor ! flow enhancement factor in Arrhenius relationship + + real(dp), dimension(:,:), allocatable :: & + enhancement_factor ! flow enhancement factor in Arrhenius relationship + real(dp) :: tempcor ! temperature relative to pressure melting point real(dp),dimension(4), parameter :: & @@ -2073,10 +2085,24 @@ subroutine glissade_flow_factor(whichflwa, whichtemp, & ewn = size(flwa,2) nsn = size(flwa,3) + allocate(enhancement_factor(ewn,nsn)) + if (present(flow_enhancement_factor)) then - enhancement_factor = flow_enhancement_factor + if (present(flow_enhancement_factor_ssa) .and. present(floating_mask)) then + do ns = 1, nsn + do ew = 1, ewn + if (floating_mask(ew,ns) == 1) then + enhancement_factor(ew,ns) = flow_enhancement_factor_ssa + else + enhancement_factor(ew,ns) = flow_enhancement_factor + endif + enddo + enddo + else ! no separate factor for floating ice + enhancement_factor(:,:) = flow_enhancement_factor + endif else - enhancement_factor = 1.d0 + enhancement_factor(:,:) = 1.d0 endif ! Check that the temperature array has the desired vertical dimension @@ -2089,11 +2115,13 @@ subroutine glissade_flow_factor(whichflwa, whichtemp, & ! Note: Here, default_flwa is assumed to have units of Pa^{-n} s^{-1}, ! whereas model%paramets%default_flwa has units of Pa^{-n} yr^{-1}. - default_flwa = enhancement_factor * default_flwa_arg - ! initialize if (whichflwa /= FLWA_INPUT) then - flwa(:,:,:) = default_flwa + do ns = 1, nsn + do ew = 1, ewn + flwa(:,ew,ns) = enhancement_factor(ew,ns) * default_flwa + enddo + enddo endif select case(whichflwa) @@ -2117,9 +2145,9 @@ subroutine glissade_flow_factor(whichflwa, whichtemp, & ! 
Calculate Glen's A (including flow enhancement factor) if (tempcor >= -10.d0) then - flwa(up,ew,ns) = enhancement_factor * arrfact(1) * exp(arrfact(3)/(tempcor + trpt)) + flwa(up,ew,ns) = enhancement_factor(ew,ns) * arrfact(1) * exp(arrfact(3)/(tempcor + trpt)) else - flwa(up,ew,ns) = enhancement_factor * arrfact(2) * exp(arrfact(4)/(tempcor + trpt)) + flwa(up,ew,ns) = enhancement_factor(ew,ns) * arrfact(2) * exp(arrfact(4)/(tempcor + trpt)) endif ! BDM added correction for a liquid water fraction @@ -2150,7 +2178,7 @@ subroutine glissade_flow_factor(whichflwa, whichtemp, & ! Calculate Glen's A with a fixed temperature (including flow enhancement factor) !! if (const_temp >= -10.d0) then - flwa(:,ew,ns) = enhancement_factor * arrfact(1) * exp(arrfact(3)/(const_temp + trpt)) + flwa(:,ew,ns) = enhancement_factor(ew,ns) * arrfact(1) * exp(arrfact(3)/(const_temp + trpt)) !! else !! flwa(:,ew,ns) = enhancement_factor * arrfact(2) * exp(arrfact(4)/(const_temp + trpt)) !! endif diff --git a/libglissade/glissade_velo_higher.F90 b/libglissade/glissade_velo_higher.F90 index 814b383a..828057a9 100644 --- a/libglissade/glissade_velo_higher.F90 +++ b/libglissade/glissade_velo_higher.F90 @@ -63,7 +63,7 @@ module glissade_velo_higher use glimmer_log use glimmer_sparse_type use glimmer_sparse - use glissade_grid_operators + use glissade_grid_operators use glissade_masks, only: glissade_get_masks use glide_types @@ -1505,24 +1505,19 @@ subroutine glissade_velo_higher_solve(model, & ! (requires that usrf is up to date in halo cells) ! ! Possible settings for whichgradient_margin: - ! HO_GRADIENT_MARGIN_ALL = 0 - ! HO_GRADIENT_MARGIN_GROUNDED_ICE = 1 - ! HO_GRADIENT_MARGIN_ICE_ONLY = 2 - ! HO_GRADIENT_MARGIN_ICE_OVER_LAND = 3 + ! HO_GRADIENT_MARGIN_LAND = 0 + ! HO_GRADIENT_MARGIN_HYBRID = 1 + ! HO_GRADIENT_MARGIN_MARINE = 2 ! ! gradient_margin = 0 computes gradients at all edges, even if one cell ! if ice-free. This is what Glide does, but is not appropriate if we have ice-covered ! floating cells lying above ice-free ocean cells, because the gradient is too big. - ! gradient_margin = 1 computes gradients at edges where a grounded ice-covered - ! cell lies above an ice-free cell (either land or ocean). - ! This used to be the default, but it overestimates driving stress for grounded marine - ! cliffs with a large lateral spreading force (in addition to the surface gradient). + ! gradient_margin_in = 1 computes gradients at edges with ice-covered cells + ! above ice-free land, but not above ice-free ocean. This setting is appropriate + ! for both land- and ocean-terminating boundaries. It is the default. ! gradient_margin_in = 2 computes gradients only at edges with ice-covered cells ! on each side. This is appropriate for problems with ice shelves, but is ! is less accurate than options 0 or 1 for land-based problems (e.g., Halfar SIA). - ! gradient_margin_in = 3 computes gradients at edges with ice-covered cells - ! above ice-free land, but not above ice-free ocean. This setting is appropriate - ! for both land- and ocean-terminating boundaries. It is the default. ! ! Passing in max_slope ensures that the surface elevation gradient on the edge ! between two cells does not exceed a prescribed value. @@ -1531,64 +1526,28 @@ subroutine glissade_velo_higher_solve(model, & ! slopes of up to ~0.3 between adjacent grid cells, leading to very large velocities ! even with a no-slip basal boundary condition. ! - ! Both the centered and upstream gradients are 2nd order accurate in space. - ! 
The upstream gradient may be preferable for evolution problems using - ! whichapprox = HO_APPROX_BP or HO_APPROX_SIA, because in these cases - ! the centered gradient fails to cancel checkerboard noise. - ! The L1L2 solver computes 3D velocities in a way that damps checkerboard noise, - ! so a centered difference should work well (and for the Halfar problem is more - ! accurate than upstream). + ! There are three options for whichgradient: + ! (0) centered + ! (1) first-order upstream + ! (2) second-order upstream. + ! Centered gradients are the default, but an upstream gradient may be preferred + ! to damp checkerboard noise. !------------------------------------------------------------------------------ !pw call t_startf('glissade_gradient') - if (whichgradient_margin == HO_GRADIENT_MARGIN_ICE_OVER_LAND) then - - ! newer option; compute edge gradients when active ice lies over land but not ice-free ocean - - call glissade_surface_elevation_gradient(nx, ny, & - dx, dy, & - active_ice_mask, & - land_mask, & - usrf, thck, & - topg, eus, & - thklim, & - thck_gradient_ramp, & - dusrf_dx, dusrf_dy, & - max_slope) - - else ! older option - - if (whichgradient == HO_GRADIENT_CENTERED) then ! 2nd order centered - - call glissade_centered_gradient(nx, ny, & - dx, dy, & - usrf, & - dusrf_dx, dusrf_dy, & - active_ice_mask, & - gradient_margin_in = whichgradient_margin, & - usrf = usrf, & - floating_mask = floating_mask,& - land_mask = land_mask, & - max_slope = max_slope) - - else ! 2nd order upstream - - call glissade_upstream_gradient(nx, ny, & - dx, dy, & - usrf, & - dusrf_dx, dusrf_dy, & - active_ice_mask, & - usrf, & - gradient_margin_in = whichgradient_margin, & - accuracy_flag_in = 2, & - floating_mask = floating_mask,& - land_mask = land_mask, & - max_slope = max_slope) - - endif ! whichgradient - - endif ! whichgradient_margin + call glissade_surface_elevation_gradient(nx, ny, & + dx, dy, & + active_ice_mask, & + land_mask, & + usrf, thck, & + topg, eus, & + thklim, & + thck_gradient_ramp, & + dusrf_dx, dusrf_dy, & + whichgradient, & + whichgradient_margin, & + max_slope = max_slope) !pw call t_stopf('glissade_gradient') @@ -5806,7 +5765,7 @@ subroutine compute_3d_velocity_L1L2(nx, ny, & nz, sigma, & dx, dy, & nhalo, & - ice_mask, floating_mask, & + ice_mask, land_mask, & active_cell, active_vertex, & umask_dirichlet, vmask_dirichlet, & xVertex, yVertex, & @@ -5841,7 +5800,7 @@ subroutine compute_3d_velocity_L1L2(nx, ny, & integer, dimension(nx,ny), intent(in) :: & ice_mask, & ! = 1 for cells where ice is present (thk > thklim), else = 0 - floating_mask ! = 1 for cells where ice is present and floating + land_mask ! = 1 for cells with topg >= eus, else = 0 logical, dimension(nx,ny), intent(in) :: & active_cell ! true if cell contains ice and borders a locally owned vertex @@ -6073,7 +6032,7 @@ subroutine compute_3d_velocity_L1L2(nx, ny, & ! We need dwork1_dx, dwork2_dx, dwork2_dy and dwork3_dx. ! The calls to glissade_centered_gradient compute a couple of extraneous derivatives, ! but these calls are simpler than inlining the gradient code. - ! Setting gradient_margin_in = HO_GRADIENT_MARGIN_ICE_ONLY uses only ice-covered cells to + ! Setting gradient_margin_in = HO_GRADIENT_MARGIN_MARINE uses only ice-covered cells to ! compute the gradient. This is the appropriate flag for these ! calls, because efvs and strain rates have no meaning in ice-free cells. 
@@ -6081,26 +6040,28 @@ subroutine compute_3d_velocity_L1L2(nx, ny, & work2(:,:) = efvs_integral_z_to_s(k,:,:) * (du_dy(:,:) + dv_dx(:,:)) work3(:,:) = efvs_integral_z_to_s(k,:,:) * (2.d0*dv_dy(:,:) + du_dx(:,:)) - call glissade_centered_gradient(nx, ny, & - dx, dy, & - work1, & - dwork1_dx, dwork1_dy, & - ice_mask, & - gradient_margin_in = HO_GRADIENT_MARGIN_ICE_ONLY) - - call glissade_centered_gradient(nx, ny, & - dx, dy, & - work2, & - dwork2_dx, dwork2_dy, & - ice_mask, & - gradient_margin_in = HO_GRADIENT_MARGIN_ICE_ONLY) - - call glissade_centered_gradient(nx, ny, & - dx, dy, & - work3, & - dwork3_dx, dwork3_dy, & - ice_mask, & - gradient_margin_in = HO_GRADIENT_MARGIN_ICE_ONLY) + ! With gradient_margin_in = 1, only ice-covered cells are included in the gradient. + ! This is the appropriate setting, since efvs and strain rates have no meaning in ice-free cells. + call glissade_gradient(nx, ny, & + dx, dy, & + work1, & + dwork1_dx, dwork1_dy, & + ice_mask, & + gradient_margin_in = 1) + + call glissade_gradient(nx, ny, & + dx, dy, & + work2, & + dwork2_dx, dwork2_dy, & + ice_mask, & + gradient_margin_in = 1) + + call glissade_gradient(nx, ny, & + dx, dy, & + work3, & + dwork3_dx, dwork3_dy, & + ice_mask, & + gradient_margin_in = 1) ! Loop over locally owned active vertices, evaluating tau_xz and tau_yz for this layer do j = staggered_jlo, staggered_jhi @@ -6165,7 +6126,7 @@ subroutine compute_3d_velocity_L1L2(nx, ny, & ! are ice-covered. ! At a land margin, either 0 or 1 is appropriate, but 2 is inaccurate. ! At a shelf margin, either 1 or 2 is appropriate, but 0 is inaccurate. - ! So HO_GRADIENT_MARGIN_ICE_LAND = 1 is the safest value. + ! So HO_GRADIENT_MARGIN_NYBRID = 1 is the safest value. if (edge_velocity) then @@ -6176,9 +6137,10 @@ subroutine compute_3d_velocity_L1L2(nx, ny, & dx, dy, & usrf, & dusrf_dx_edge, dusrf_dy_edge, & + ice_mask, & gradient_margin_in = whichgradient_margin, & - ice_mask = ice_mask, & - floating_mask = floating_mask, & + usrf = usrf, & + land_mask = land_mask, & max_slope = max_slope) endif diff --git a/libglissade/glissade_velo_sia.F90 b/libglissade/glissade_velo_sia.F90 index 82d330b3..720d6ad7 100644 --- a/libglissade/glissade_velo_sia.F90 +++ b/libglissade/glissade_velo_sia.F90 @@ -60,7 +60,7 @@ module glissade_velo_sia ! use glimmer_log, only: write_log use glide_types - use glissade_grid_operators, only: glissade_stagger, glissade_centered_gradient, & + use glissade_grid_operators, only: glissade_stagger, glissade_gradient, & glissade_gradient_at_edges use parallel @@ -173,7 +173,6 @@ subroutine glissade_velo_sia_solve(model, & integer, dimension(nx,ny) :: & ice_mask, & ! = 1 where ice is present, else = 0 - floating_mask, & ! = 1 for cells where ice is present and floating land_mask ! = 1 for land cells, else = 0 integer :: i, j, k @@ -240,15 +239,13 @@ subroutine glissade_velo_sia_solve(model, & !------------------------------------------------------------------------------ ! Compute masks: ! (1) ice_mask = 1 in cells where ice is present (thck > thklim), = 0 elsewhere - ! (2) floating_mask = 1 in cells where ice is present and floating - ! (3) land_mask = 1 in land cells + ! 
(2) land_mask = 1 in land cells !------------------------------------------------------------------------------ call glissade_get_masks(nx, ny, & thck, topg, & eus, thklim, & ice_mask, & - floating_mask = floating_mask, & land_mask = land_mask) !------------------------------------------------------------------------------ @@ -304,41 +301,35 @@ subroutine glissade_velo_sia_solve(model, & ! Compute surface elevation gradient ! - ! Here a centered gradient is OK because the interior velocities - ! are computed along cell edges, so checkerboard noise is damped - ! (unlike Glissade finite-element calculations). + ! Note: This is a standard second-order centered gradient. + ! For the higher-order velocity solvers, a centered gradient can lead + ! to checkerboard noise in the surface elevation field, because a checkerboard + ! pattern is invisible to the gradient operator. + ! For the SIA solver, however, checkerboard noise is damped, because + ! interior ice velocities are computed at cell edges rather than vertices. ! ! Possible settings for whichgradient_margin: - ! HO_GRADIENT_MARGIN_ALL = 0 - ! HO_GRADIENT_MARGIN_GROUNDED_ICE = 1 - ! HO_GRADIENT_MARGIN_ICE_ONLY = 2 - ! HO_GRADIENT_MARGIN_ICE_OVER_LAND = 3 + ! HO_GRADIENT_MARGIN_LAND = 0 + ! HO_GRADIENT_MARGIN_HYBRID = 1 + ! HO_GRADIENT_MARGIN_MARINE = 2 ! ! gradient_margin_in = 0 computes gradients at all edges, even if one cell ! if ice-free. This is what Glide does, but is not appropriate if we have ice-covered ! floating cells lying above ice-free ocean cells, because the gradient is too big. - ! It generally works well for land-based shallow-ice problems. - ! gradient_margin_in = 1 computes gradients at edges where a grounded ice-covered - ! cell lies above an ice-free cell (either land or ocean). + ! It generally is the best choice for land-based shallow-ice problems. + ! gradient_margin_in = 1 computes gradients at edges where an ice-covered cell + ! lies above ice-free land. It should give the same results as method 0 for + ! problems without marine ice (unless there are nunataks). ! gradient_margin_in = 2 computes gradients only at edges with ice-covered cells ! on each side. This is appropriate for problems with ice shelves, but is ! is less accurate than options 0 or 1 for land-based problems (e.g., Halfar SIA). - ! gradient_margin_in = 3 is not supported for the SIA solver, because it requires - ! that the lateral spreading force is computed at marine margins. - ! If option 3 is selected, the solver defaults to option 1 and writes a message - ! to the log file. - - !TODO - Pass in max_slope? - - call glissade_centered_gradient(nx, ny, & - dx, dy, & - usrf, & - dusrf_dx, dusrf_dy, & - ice_mask, & - gradient_margin_in = whichgradient_margin, & - usrf = usrf, & - floating_mask = floating_mask, & - land_mask = land_mask) + + call glissade_gradient(nx, ny, & + dx, dy, & + usrf, & + dusrf_dx, dusrf_dy, & + ice_mask, & + gradient_margin_in = whichgradient_margin) if (verbose .and. main_task) then print*, ' ' @@ -460,7 +451,6 @@ subroutine glissade_velo_sia_solve(model, & dusrf_dx, dusrf_dy, & stagflwa, & ice_mask, & - floating_mask, & land_mask, & whichgradient_margin, & ubas, vbas, & @@ -782,7 +772,6 @@ subroutine glissade_velo_sia_interior(nx, ny, nz, & dusrf_dx, dusrf_dy, & stagflwa, & ice_mask, & - floating_mask, & land_mask, & whichgradient_margin, & ubas, vbas, & @@ -819,7 +808,6 @@ subroutine glissade_velo_sia_interior(nx, ny, nz, & integer, dimension(nx,ny), intent(in) :: & ice_mask, & ! 
= 1 where ice is present, else = 0 - floating_mask, & ! = 1 for cells where ice is present and floating, else = 0 land_mask ! = 1 for land cells, else = 0 integer, intent(in) :: & @@ -918,15 +906,10 @@ subroutine glissade_velo_sia_interior(nx, ny, nz, & ! Compute ice velocity components at cell edges (u at E edge, v at N edge; relative to bed). ! Then interpolate the edge velocities to cell vertices. - ! Note: The higher-order default is whichgradient_margin = HO_GRADIENT_MARGIN_ICE_OVER_LAND = 3, + ! Note: The higher-order default is whichgradient_margin = HO_GRADIENT_MARGIN_HYBRID = 1, ! which is appropriate for HO problems where we compute lateral spreading at ice cliffs. - ! The SIA solver does not do this, so if option 3 is chosen, the solver will - ! default to whichgradient_margin = HO_GRADIENT_MARGIN_GROUNDED_ICE = 1, which - ! works well for shallow-ice problems. Using HO_GRADIENT_MARGIN_ALL = 0 gives - ! the same results as option 1 for land-based problems. Using HO_GRADIENT_MARGIN_ICE_ONLY = 2 - ! is designed for marine-based margins and is likely to give less accurate results here. - ! See comments above the call to glissade_centered_gradient. - !TODO - Pass in max_slope? + ! The SIA solver does not do this, so this option is a bad idea for SIA problems + ! with marine ice. (In fact, the SIA is generally ill-suited for problems with marine ice.) call glissade_gradient_at_edges(nx, ny, & dx, dy, & @@ -935,7 +918,6 @@ subroutine glissade_velo_sia_interior(nx, ny, nz, & ice_mask, & gradient_margin_in = whichgradient_margin, & usrf = usrf, & - floating_mask = floating_mask, & land_mask = land_mask) do k = nz-1, 1, -1 diff --git a/tests/MISMIP/mismip.code/README.mismip b/tests/MISMIP/mismip.code/README.mismip new file mode 100644 index 00000000..e837fa4b --- /dev/null +++ b/tests/MISMIP/mismip.code/README.mismip @@ -0,0 +1,215 @@ +Instructions for setting up and running the MISMIP experiments with CISM. + +Note: These instructions assume that you have access to the NCAR HPC Cheyenne, +with an LIWG HPC account. If you do not have an account and would like one, +please contact Gunter Leguy (gunterl@ucar.edu). + +See this paper for details on MISMIP: +Pattyn,F., et al., Results of the Marine Ice Sheet Model Intercomparison Project, MISMIP, +The Cryosphere, 6, 573-588, doi:10.5194/tc-6-573-2012, 2012. + +The MISMIP experiments consist of a set of advance and retreat experiments of the +grounding line. This is achieved by modifying the flow rate factor A. +A simulation with a new A value begins using the steady-state solution profile from +the old A value and so on. + +The script used to run the experiment uses the naming convention "mismip_Aval.ext" +where "Aval" is the value of A used in a given experiment and "ext" is the filename +extension such as "config" (for a config file), "out.nc" for the output file or +"restart.nc" for the restart file. + +The experiment can be set to run on either of two kinds of bedrock topography: +1/ a linear bed sloping downward toward the ocean. +2/ a polynomial bed which begins and ends by sloping downward towards the ocean but + include an in-between reversed sloping region meant to mimic an unstable region. + + +The files needed to run the experiment are located in directory ../tests/MISMIP/mismip.code. +If you plan to run more than one test suite, you will likely want to keep this directory clean. +Instead, create a new directory and make a fresh copy of the code for each test. 
+For example, from ../tests/MISMIP:
+
+> cp -rf mismip.code mismip.test1
+> cp -rf mismip.code mismip.test2
+etc.
+
+Go to the directory where you are setting up and running the experiments:
+
+> cd mismip.test1
+
+This directory should contain several Python scripts, along with a template config file
+(mismip.config.template) with default settings for MISMIP and a template run file
+(runCISM.cheyenne.template) with setups to run MISMIP on Cheyenne.
+
+Put a copy or link of cism_driver in your directory. For example:
+
+> ln -s ../../../builds/mac-gnu/cism_driver/cism_driver cism_driver
+
+To create the grid topography and set the appropriate config options for each test, run the Python setup script:
+
+> python mismipSetup.py [arguments]
+
+This script has the following optional arguments:
+  -c FILE, --config=FILE
+                        config file template [default: mismip.config.template]
+  -e EXECUTABLE, --exec=EXECUTABLE
+                        Path to the CISM executable [default: cism_driver]
+  -x EXPT, --expt=EXPT  MISMIP experiment(s) to set up [default: all]
+  -t TSTEP, --tstep=TSTEP
+                        time step (yr) [default: 1]
+  -r RES, --res=RES     horizontal grid resolution (m) [default: 2000]
+  -v VLEVEL, --vlevel=VLEVEL
+                        no. of vertical levels [default: 3]
+  -a APPROXIMATION, --approx=APPROXIMATION
+                        Stokes approximation (SSA, DIVA, BP) [default: DIVA]
+  -b BASALFRICTION, --basal=BASALFRICTION
+                        Basal friction law (powerlaw, Schoof, Tsai) [default: powerlaw]
+  -y YEARSPINUP, --year=YEARSPINUP
+                        Length of spinup run (yr) [default: 30000]
+  --bed=BEDTOPO         bed topography, linear or poly [default: linear]
+  --yrun=YEARSRUN       run length between 2 experiments [default: 20000]
+  --hinit=INITTHICK     experiment initial profile [default: slab]
+
+Notes on optional arguments:
+- If setting up all the experiments at once, the -x argument is not needed.
+- The following resolutions (m) are supported: 8000, 4000, 2000, 1000, 500, 250.
+- Some experiments might require a shorter time step than others to remain stable.
+  If so, you might need to manually reduce dt in the config file for the given experiment.
+- Three Stokes approximations are supported for MISMIP: SSA, DIVA and BP.
+- Three basal BCs are supported:
+  (1) a Weertman-type power law: 'powerlaw'
+  (2) a modified power law based on Tsai (2015): 'Tsai'
+  (3) a modified power law based on Schoof (2005) and Leguy et al. (2014): 'Schoof'
+- Two types of bed topography are supported: linear and polynomial.
+- Two initial thickness profiles are possible:
+  (1) a uniform 100 m thick ice slab (slab)
+  (2) a profile using the semi-analytical solution from Schoof (2007) with an initial grounding-line guess
+      at 900 km. Note: this option requires a smaller initial time step to avoid CFL violations.
+
+The config template file includes sensible default values for a number of parameters associated with
+the Stokes approximation and the basal friction law.
+To change any of these parameters, you need to edit the template file.
+
+If you set up all the experiments, you should have the following subdirectories after running
+the setup script:
+bedtype
+
+where bedtype = "linear" or "poly".
+Within the bedtype folder you should have two folders: "advance" and "retreat".
+Within each of these folders you should have one folder for each A-value. + +After setup, you can run all the experiments by using the Python run script: + +> python mismipRun.py + +Here are the optional arguments for the run script: + -e EXECUTABLE, --exec=EXECUTABLE + Path to the CISM executable [default: ./cism_driver] + -x EXPT, --expt=EXPT + MISMIP experiment(s) to run [default: all] + -n NUMPROCS, --parallel=NUMPROCS + Number of processors: if specified then run in parallel + -s STATUS, --stat=STATUS + Experiment status set to run [default: advance] + --bed=BEDTOPO + bed topography, linear or poly [default: linear] + +Notes on optional arguments: +- The option "-s" is only meant to be used when a single A-value is specified with the "-x" option. + +By default, this script will run all experiments at once. You can also run part of the experiment +at a time by specifying its set (advance, retreat) or a single A value: + +> python mismipRun.py -x advance + +or + +> python mismipRun.py -x 14.638e-18 + +Experiments run at coarse resolutions (2 km or coarser) can be run on a single processor +on your personal computer. You can also run all the experiments on several processors using: + +> python mismipRun.py -n 4 + +It can take a long time to run some experiment when running with a resolution finer than 2 km. +If for some reason you need to interrupt your run, you can restart it at a later time. +If the restart of a given experiment is present in the directory, the script mismipRun.py +will automatically restart from that restart file. +For example, continuing the advance set of experiments on 4 processors: + +> python mismipRun.py -x advance -n 4 + +Note that before restarting your run you might want to save a copy of the log file of the +A-value experiment 'mismip_Aval.config.log' under a different name as it will be overwritten +on restart. The output file, mismip_Aval.out.nc, will be appended on restart and does not +need to be copied. Continue as needed until you have completed the experiment. + +Alternatively, you may want to submit a batch job. If so, do the following: +1/ Change to the subdirectory of the A-value you want to run. +2/ Copy the script runCISM.cheyenne.template into the A-value directory. +3/ Modify the script: + a/ Adjust the number of nodes and processors. + b/ Adjust the time you'd like to run on HPC. + c/ Modify the name of the config file to reflect the one in your current directory. +4/ launch the job from your subdirectory by typing at the command line: + > qsub runCISM.cheyenne.template + + +Before proceeding, you may want to confirm that the GL has reached a steady state. +One way to do this is to list f_ground (the grounded fraction, 0 < f_ground < 1) along the x +direction at the center of the domain. The value of f_ground in the last (partially) +grounded cell should be stable within some small tolerance, e.g. ~10^{-3}. + +The file mismip_Aval.restart.nc in the AnewVal directory will become the input file +for the AnewVal experiment. This file needs to be edited so that CISM's internal_time coordinate +for the final time slice is 0. This action is done automatically when running interactively +with the script mismipRun.py. +If you are not running interactively and submitting jobs, you will need to edit it yourself. +Suppose the final time is 20000 years. 
You can set internal_time = 0 with a one-line NCO command:
+
+> ncap2 -s 'internal_time=internal_time-20000' mismip_Aval.restart.nc out.nc
+
+You may have to precede this command with 'module load nco' if NCO is not loaded automatically.
+
+To verify that the time correction worked, you can do 'ncdump -v internal_time out.nc'.
+The final internal_time value should be 0, with negative values for earlier times.
+
+Then you can overwrite the original restart file:
+
+> cp out.nc mismip_Aval.restart.nc
+
+Once these experiments have run, the next step is to extract the grounding line (GL) position.
+This is done by running another Python script, which extracts the relevant information from the
+CISM output files:
+
+> python mismipWriteGL.py
+
+Note: you need to add the option "--bed poly" at the command line in order to extract the grounding
+line position for a run performed with the polynomial bed.
+
+By default, the script generates a netCDF file containing time-dependent arrays for all the experiments,
+among them xGL and yGL (the grounding-line location; note that yGL is constant for MISMIP). To see a full
+list of available arrays, simply type "ncdump -h Aval_cism.nc" at the command line.
+You can make a GL file for the advance or retreat set of experiments by adding an optional argument:
+
+> python mismipWriteGL.py -x advance
+
+The resulting GL files in the various subdirectories are called [Aval]_cism.nc,
+where [Aval] is the numerical A value.
+
+Finally, you can create the summary grounding-line plots in a file called mismipPlotGLLinearBed.pdf
+(or mismipPlotGLPolyBed.pdf for the polynomial bed):
+
+> python mismipPlotGL.py
+
+The plot shows the grounding line position at the end of each stage of the experiment.
+In order to plot the results using the polynomial bed topography, run:
+
+> python mismipPlotGL.py --bed poly
diff --git a/tests/MISMIP/mismip.code/mismip.config.template b/tests/MISMIP/mismip.code/mismip.config.template
new file mode 100755
index 00000000..c37dfbf6
--- /dev/null
+++ b/tests/MISMIP/mismip.code/mismip.config.template
@@ -0,0 +1,78 @@
+[MISMIP]
+
+[grid]
+upn = 3
+ewn = 1000
+nsn = 5
+dew = 2000
+dns = 2000
+
+[time]
+tstart = 0.
+tend = 20000.
+dt = 1.
+adaptive_cfl_threshold = 0.5
+
+[options]
+dycore = 2              # 0 = glide, 1 = glam, 2 = glissade
+evolution = 3           # 3 = remapping, 4 = first order upwind
+flow_law = 0            # 0 = constant, 2 = Paterson-Budd
+temperature = 0         # 1 = prognostic, 3 = enthalpy
+bmlt_float = 0          # 0 = no melting of floating ice
+marine_margin = 4       # 4 = topographic threshold, 5 = calving mask
+restart_extend_velo = 1 # 1 = write uvel/vvel on extended grid
+# restart = 0
+
+[ho_options]
+which_ho_babc = 9               # 4 = no-slip at bed, 9 = power law, 10 = Coulomb friction law, 11 = Coulomb w/const basal flwa
+which_ho_effecpress = 3         # 3 = ocean connection
+which_ho_flotation_function = 0 # 0 = fpattyn, 1 = 1/fpattyn, 2 = ocean cavity thickness
+which_ho_efvs = 2               # 0 = constant, 2 = nonlinear eff. visc.
w/ n=3 +which_ho_sparse = 3 # 1 = SLAP GMRES, 3 = Fortran PCG, 4 = Trilinos for linear solver +which_ho_nonlinear = 0 # 0 = Picard, 1 = JFNK +which_ho_precond = 1 # 1 = diagonal precond, 2 = shallow ice preconditioner +which_ho_approx = 1 # 1 = SSA, 2 = Blatter-Pattyn, 3 = L1L2, 4 = DIVA +which_ho_gradient_margin = 2 # 2 = marine BC +which_ho_gradient = 0 # 0 = centered, 1 = upstream +which_ho_assemble_beta = 1 # 0 = standard FE, 1 = local +which_ho_assemble_taud = 1 # 0 = standard FE, 1 = local +which_ho_ground = 1 # 0 = no GLP, 1 = GLP +which_ho_resid = 4 # 3 = absolute, 4 = relative +glissade_maxiter = 50 + +[parameters] +rhoi = 900.0 # MISMIP ice density +rhoo = 1000.0 # MISMIP ocean density +grav = 9.80 # MISMIP gravitational acceleration +flow_factor = 1. +ice_limit = 1. ; min thickness (m) for dynamics +default_flwa = 1.0e-17 +marine_limit = -1250 ; depth past which ice is lost +powerlaw_c = 2.4131e4 +powerlaw_m = 3 +coulomb_c = 0.1778 +coulomb_bump_max_slope = 0.5 ; maximum bed obstacle slope +coulomb_bump_wavelength = 2.0 +flwa_basal = 1.0e-16 +p_ocean_penetration = 0 ; p values for coulomb friction law + + +[CF default] +comment = created with mismipInit.py +title = MISMIP experiment using CISM2 + +[CF input] +name = mismipInit.restart.nc +time = 1 + +[CF output] +#variables = restart effec_press beta resid_u resid_v f_ground f_flotation btractx btracty taudx taudy tau_xx +variables = thk usurf uvel vvel temp beta_internal topg flwa effec_press beta resid_u resid_v f_ground f_flotation floating_mask grounded_mask usfc vsfc ubas vbas uvel_mean vvel_mean stagthk ivol imass_above_flotation iareag btractx btracty taudx taudy tau_xx +frequency = 500 +name = mismip.out.nc + +[CF restart] +variables = restart +xtype = double +frequency = 1000 +name = mismip.restart.nc diff --git a/tests/MISMIP/mismip.code/mismipPlotGL.py b/tests/MISMIP/mismip.code/mismipPlotGL.py new file mode 100755 index 00000000..90bb1c36 --- /dev/null +++ b/tests/MISMIP/mismip.code/mismipPlotGL.py @@ -0,0 +1,373 @@ +#!/usr/bin/env python + +# This script plots the grounding line position for the MISMIP experiments. +# This script requires the user to have run the python script "mismipWriteGL.py". + + +from netCDF4 import Dataset +from optparse import OptionParser +import numpy as np +import matplotlib.pyplot as plt +import sys, os + + + +############################### +# Constants used in this code # +############################### + +model = '_cism' # file naming extension +sPerY = 365.0*24.*3600. # number of second in a year + +#### Linear bed specific #### + +# A-values used in the linear bed experiment. +AsLinear = ['14.638e-17','6.7941e-17','3.1536e-17', + '14.638e-18','6.7941e-18','3.1536e-18', + '14.638e-19','6.7941e-19','3.1536e-19', + '6.7941e-19','14.638e-19', + '3.1536e-18','6.7941e-18','14.638e-18', + '3.1536e-17','6.7941e-17','14.638e-17'] + +# A-values used in the linear bed and advance experiment. +AsAdvanceLinear = ['14.638e-17','6.7941e-17','3.1536e-17', + '14.638e-18','6.7941e-18','3.1536e-18', + '14.638e-19','6.7941e-19','3.1536e-19'] + +# A-values used in the linear bed and retreat experiment. +AsRetreatLinear = list(reversed(AsAdvanceLinear[0:-1])) + + +# Status of the linear bed experiment. +AstatusLinear = ['advance','advance','advance', + 'advance','advance','advance', + 'advance','advance','advance', + 'retreat','retreat', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + +# Status of the linear bed and advance experiment. 
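+# Note: each Astatus* list is index-aligned with the corresponding As* list, so Astatus[i]
+# records whether the run with A = As[i] belongs to the advance or the retreat phase.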
+AstatusAdvanceLinear = ['advance','advance','advance', + 'advance','advance','advance', + 'advance','advance','advance'] + +# Status of the linear bed and retreat experiment. +AstatusRetreatLinear = ['retreat','retreat', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + + + +#### Poly bed specific #### + + +# A-values used in the poly bed experiment. +AsPoly = ['9.4608e-18','7.8840e-18','6.3072e-18', + '4.7304e-18','3.1536e-18','1.5768e-18', + '7.8840e-19', + '1.5768e-18','3.1536e-18','4.7304e-18', + '6.3072e-18','7.8840e-18','9.4608e-18'] + +# A-values used in the poly bed and advance experiment. +AsAdvancePoly = ['9.4608e-18','7.8840e-18','6.3072e-18', + '4.7304e-18','3.1536e-18','1.5768e-18', + '7.8840e-19'] + +# A-values used in the poly bed and retreat experiment. +AsRetreatPoly = list(reversed(AsAdvancePoly[0:-1])) + + +# Status of the poly bed experiment. +AstatusPoly = ['advance','advance','advance', + 'advance','advance','advance', + 'advance', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + +# Status of the poly bed and advance experiment. +AstatusAdvancePoly = ['advance','advance','advance', + 'advance','advance','advance', + 'advance'] + +# Status of the poly bed and retreat experiment. +AstatusRetreatPoly = ['retreat','retreat','retreat', + 'retreat','retreat','retreat'] + + + +#################################### +# Function used later in the code # +#################################### + +# The following function returns the linear bed topography as in Pattyn et al. (2012). +def computeBedLinear(x): + # Input expected in km. + + schoofx = 750. # scaling factor in km + slope = -778.5 # m + b0 = 720. # m + + eps_b = 1e-10 + abs_x = np.sqrt(x**2 + eps_b**2) # km (smoothing for ice divide BC requirements) + xprime = abs_x/schoofx # unitless + b = b0 + slope*xprime # m + bx = slope/schoofx*x/abs_x # m/km + + return (b,bx) + +# The following function returns the polynomial bed topography as in Pattyn et al. (2012). +def computeBedPoly(x): + # Input expected in km. + + schoofx = 750. # scaling factor in km + x = x/schoofx # unitless + + b0 = 729. # m + b2 = -2184.8 # m + b4 = 1031.72 # m + b6 = -151.72 # m + + b = b0 + b2*x**2 + b4*x**4 + b6*x**6 # m + bx = (2*b2*x + 4*b4*x**3 + 6*b6*x**5)/schoofx # m/km + + return (b,bx) + + +# The following function returns the bed topography as in Pattyn et al. (2012) based +# on the choice between 'linear' and 'polynomial' +def computeBed(x, bedType): + # Input expected in km. + + if bedType == 'linear': + (b,bx) = computeBedLinear(x) + elif bedType == 'poly': + (b,bx) = computeBedPoly(x) + else: + sys.exit('Please specify bed topography from these options: linear or poly.') + + return (b,bx) + + +# The following function returns the semi-analytic solution from Schoof2007 model A. +# Note: the units used in Schoof2007 are not the same as the one in CISM. +def xgSemianalytic(bedType): + + sPerY = 365.0*24.*3600. # number of seconds per year + n = 3. + rhoi = 900. # kg/m^3 + rhow = 1000. # kg/m^3 + g = 9.8 # m/s^2 + delta = 1 - rhoi/rhow # unitless + a = 0.3/sPerY # converting accumulation to m/s + C = 7.624e6 # Pa(s/m)^(1/3) + + if bedType == 'linear': + xx = np.linspace(1.e6,1.8e6,400) + else: + xx = np.linspace(0.7e6,1.5e6,400) + + + AmodelA = np.zeros(len(xx)) + for k in range(0,len(xx)): + x = xx[k] + + (b,bx) = computeBed(x*1.e-3, bedType) + bx = bx*1.e-3 # needs to be in m/m for unit compliance with schoof2007 + + # Schoof model A as A = f(x). 
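+        # h is the flotation thickness at x (rhoi*g*h = -rhow*g*b), q = a*x is the steady-state
+        # integrated flux for uniform accumulation a, and u = q/h is the depth-averaged speed.
+        # The boundary-layer relation of Schoof (2007) is then inverted to give A as a function of x.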
+ h = -b/(1-delta) + q = x*a + u = q/h + + num = -a + u*(-bx-C/(rhoi*g*h)*np.abs(q)**(1./n-1)*q/h**(1./n)) + den = (rhoi*g*delta/4)**n*h**(n+1) + AmodelA[k] = -num/den + + return (AmodelA,xx) + + + + +######## +# Code # +######## + + +# Parse options. +optparser = OptionParser() + +optparser.add_option('-x', '--expt', dest='experiment', type='string', default = 'all', help='MISMIP experiment set to run', metavar="EXPT") +optparser.add_option('-s', '--stat', dest='StatChoice',type='string',default='advance', help='MISMIP experiment set to run', metavar="EXPT") +optparser.add_option('--bed', dest='bedtopo', type='string', default ='linear',help='bed topography, linear or poly', metavar='BEDTOPO') + +for option in optparser.option_list: + if option.default != ("NO", "DEFAULT"): + option.help += (" " if option.help else "") + "[default: %default]" +options, args = optparser.parse_args() + + +if options.bedtopo == 'linear': + As = AsLinear + AsAdvance = AsAdvanceLinear + AsRetreat = AsRetreatLinear + Astatus = AstatusLinear + AstatusAdvance = AstatusAdvanceLinear + AstatusRetreat = AstatusRetreatLinear +elif options.bedtopo == 'poly': + As = AsPoly + AsAdvance = AsAdvancePoly + AsRetreat = AsRetreatPoly + Astatus = AstatusPoly + AstatusAdvance = AstatusAdvancePoly + AstatusRetreat = AstatusRetreatPoly +else: + sys.exit('Please specify bed type from this list: linear, poly') + + +if options.experiment == 'all': + experiments = As + Astat = Astatus + print 'Plotting all the MISMIP experiments' +elif options.experiment == 'advance': + experiments = AsAdvance + Astat = AstatusAdvance + print 'Plotting advance experiments' +elif options.experiment == 'retreat': + experiments = AsRetreat + Astat = AstatusRetreat + print 'Plotting retreat experiments' +else: + sys.exit('Please specify experiment(s) from this list: all, advance, retreat') + + + +# Loop through A values. +count = -1 +Aval = np.zeros(len(experiments)) # initialize A-values storing array +xgval = np.zeros(len(experiments)) # initialize GL storing array +bedType = options.bedtopo # type of bed topography + +for expt in experiments: + count = count + 1 + + Aval[count] = float(expt) + stat = Astat[count] + + # Change to bed type directory. + os.chdir(bedType) + + # Change to Advance or Retreat directory. + os.chdir(stat) + + # Change to the subdirectory for this experiment. + os.chdir(expt) + + # Read the file and extract grounding line position. + try: + file = expt + model + '.nc' + ncid = Dataset(file, 'r') + xgval[count] = ncid.variables["xGL"][-1][-1] + ncid.close() + except: + print 'Results for experiment',stat,'and',expt,'is not available' + + + # Switch back to original directory. + os.chdir('../../../') + + +# Obtain the semi-analytic solution from Schoof 2007. +# Note: we compute invAnal (1/A) because it will be used to display the results. +# It has a better esthetic this way. +(Aanal, xanal) = xgSemianalytic(bedType) +invAanal = 1./Aanal # computing 1/A of semi-analytic solution + +# Compute 1/A of values used for simulation and converting to the +# same units as in PAttyn et al. 2012. +invA = 1./(Aval/sPerY) +invAadv = invA[0:len(AsAdvance)] # 1/A used in advance experiments +invAret = list(reversed(invAadv)) # 1/A used in retreat experiments +xgadv = xgval[0:len(AsAdvance)] # GL of advanced experiments +xgret = xgval[len(AsAdvance)-1:] # GL of retreat experiments + +# Adjust plot properties depending on the bed type. 
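+# The y-axis limits are in km and match the x-range used above for the semi-analytic solution:
+# 1000-1800 km for the linear bed and 700-1500 km for the polynomial bed.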
+if bedType == 'linear': + ymin = 1000.0 + ymax = 1800.0 + nytick = 9 +else: + ymin = 700.0 + ymax = 1500.0 + nytick = 9 + + +# Plot the figure displaying the grounding line location. +# Note: the figure will display one subplot for the advance experiments +# and another one for the retreat experiments. + +# Set figure size. +plt.figure(figsize=(7, 7)) + +# Advance experiment display. +plt.subplot(121) + +# Plot semi-analytical solution from Schoof 2007. +plt.semilogx(invAanal, xanal*1.e-3, color='black', label='analytic',linewidth=1) + +# Plot simulation results. +plt.semilogx(invAadv,xgadv*1.e-3, '+', ms=10, mfc='red',mec='red',label='simulation advance') + +# Turn on logarythmic grid display. +plt.grid(True,which="both",ls="-") + +# Set limit display for x and y axis. +plt.xlim((invAadv[0], invAadv[-1])) +plt.ylim((ymin,ymax)) + +# Add labels for x and y axis and their position and font size. +plt.xlabel("1/A (Pa$^3$s)", position=(1,0), size=12) +plt.ylabel("xg(km)", size=12) + +# Set the legend location to upper left. +plt.legend(loc=2) + + +# Retreat experiment display. +plt.subplot(122) + +# Plot semi-analytical solution from Schoof 2007 with reversed x-axis. +plt.semilogx(invAanal, xanal*1.e-3, color='black', label='analytic',linewidth=1) +plt.gca().invert_xaxis() + +# Plot simulation results with reversed x-axis. +plt.semilogx(invAret,xgret*1.e-3, '+', ms=10, mfc='blue',mec='blue',label='simulation retreat') +plt.gca().invert_xaxis() + +# Turn on logarithmic grid display. +plt.grid(True,which="both",ls="-") + +# Set limit display for x and y axis. +plt.xlim((invAret[0], invAret[-1])) +plt.ylim((ymin,ymax)) + +# Hack to display the gridding of y-axis using display from subplot 1. +plt.yticks(np.linspace(ymin,ymax,nytick)," ") + +# Set the legend location to upper right. +plt.legend(loc=1) + +# Suppress the vertical white spacing between the 2 subplots. +# This is how they look attached to one another. +plt.subplots_adjust(wspace=0) + +# Add a title to the figure based on its location on subplot 2. +plt.title('Grounding line position', position=(0,1)) + +# Save the figure. +if bedType == 'linear': + plt.savefig("mismipPlotGLLinearBed.pdf") +elif bedType == 'poly': + plt.savefig("mismipPlotGLPolyBed.pdf") +else: + print('Saving the figure with a randome name') + plt.savefig("mismipPlotGLRandomBed.pdf") diff --git a/tests/MISMIP/mismip.code/mismipRun.py b/tests/MISMIP/mismip.code/mismipRun.py new file mode 100644 index 00000000..6fdf96fb --- /dev/null +++ b/tests/MISMIP/mismip.code/mismipRun.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python + +# This script runs the MISMIP (1d) experiment. +# See this paper for details: +# Pattyn et al., Results of the Marine Ice Sheet Model Intercomparison Project, MISMIP, +# The Cryosphere, 6, 573-588, 2012, doi:10.5194/tc-6-573-2012. + + +import sys, os +import shutil +import fileinput +import numpy as np + +from optparse import OptionParser +from netCDF4 import Dataset +from ConfigParser import ConfigParser + + + +############################### +# Constants used in this code # +############################### + + +#### Linear bed specific #### + + +# A-values used in the linear bed experiment. +AsLinear = ['14.638e-17','6.7941e-17','3.1536e-17', + '14.638e-18','6.7941e-18','3.1536e-18', + '14.638e-19','6.7941e-19','3.1536e-19', + '6.7941e-19','14.638e-19', + '3.1536e-18','6.7941e-18','14.638e-18', + '3.1536e-17','6.7941e-17','14.638e-17'] + +# A-values used in the linear bed and advance experiment. 
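+# (These are the first nine entries of AsLinear above, i.e. the advance phase only.)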
+AsAdvanceLinear = ['14.638e-17','6.7941e-17','3.1536e-17', + '14.638e-18','6.7941e-18','3.1536e-18', + '14.638e-19','6.7941e-19','3.1536e-19'] + +# A-values used in the linear bed and retreat experiment. +AsRetreatLinear = list(reversed(AsAdvanceLinear[0:-1])) + + +# Status of the linear bed experiment. +AstatusLinear = ['advance','advance','advance', + 'advance','advance','advance', + 'advance','advance','advance', + 'retreat','retreat', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + +# Status of the linear bed and advance experiment. +AstatusAdvanceLinear = ['advance','advance','advance', + 'advance','advance','advance', + 'advance','advance','advance'] + +# Status of the linear bed and retreat experiment. +AstatusRetreatLinear = ['retreat','retreat', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + + + +#### Poly bed specific #### + + +# A-values used in the poly bed experiment. +AsPoly = ['9.4608e-18','7.8840e-18','6.3072e-18', + '4.7304e-18','3.1536e-18','1.5768e-18', + '7.8840e-19', + '1.5768e-18','3.1536e-18','4.7304e-18', + '6.3072e-18','7.8840e-18','9.4608e-18'] + +# A-values used in the poly bed and advance experiment. +AsAdvancePoly = ['9.4608e-18','7.8840e-18','6.3072e-18', + '4.7304e-18','3.1536e-18','1.5768e-18', + '7.8840e-19'] + +# A-values used in the poly bed and retreat experiment. +AsRetreatPoly = list(reversed(AsAdvancePoly[0:-1])) + +# Status of the poly bed experiment. +AstatusPoly = ['advance','advance','advance', + 'advance','advance','advance', + 'advance', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + +# Status of the poly bed and advance experiment. +AstatusAdvancePoly = ['advance','advance','advance', + 'advance','advance','advance', + 'advance'] + +# Status of the poly bed and retreat experiment. +AstatusRetreatPoly = ['retreat','retreat','retreat', + 'retreat','retreat','retreat'] + + + + + +############################### +# Functions used in this code # +############################### + + +def launchCism(executable, configfile, parallel): + # Run CISM (only if no job script is being created and/or run). + print 'parallel =', parallel + + if parallel == None: + # Perform a serial run. + os.system(executable + ' ' + configfile) + else: + # Perform a parallel run. + if parallel <= 0: + sys.exit( 'Error: Number of processors specified for parallel run is <=0.' ) + else: + # These calls to os.system will return the exit status: 0 for success (the command exists), some other integer for failure. + if os.system('which openmpirun > /dev/null') == 0: + mpiexec = 'openmpirun -np ' + str(parallel) + elif os.system('which mpirun > /dev/null') == 0: + mpiexec = 'mpirun -np ' + str(parallel) + elif os.system('which aprun > /dev/null') == 0: + mpiexec = 'aprun -n ' + str(parallel) + elif os.system('which mpirun.lsf > /dev/null') == 0: + # mpirun.lsf does NOT need the number of processors (options.parallel). + mpiexec = 'mpirun.lsf' + else: + sys.exit('Unable to execute parallel run. Please edit the script to use your MPI run command, or run manually with something like: mpirun -np 4 ./cism_driver mismip3dInit.config') + + runstring = mpiexec + ' ' + executable + ' ' + configfile + print 'Executing parallel run with: ' + runstring + '\n\n' + + # Here is where the parallel run is actually executed! + os.system(runstring) + + + + +######## +# code # +######## + +# Parse options. 
+optparser = OptionParser() + +optparser.add_option('-e', '--exec', dest='executable',type = 'string',default='./cism_driver', help='Path to the CISM executable') +optparser.add_option('-n', '--parallel',dest='parallel', type='int',help='Number of processors: if specified then run in parallel', metavar="NUMPROCS") +optparser.add_option('-x', '--expt', dest='experiment',type='string',default='all', help='MISMIP experiment set to run', metavar="EXPT") +optparser.add_option('-s', '--stat', dest='StatChoice',type='string',default='advance', help='Experiment status set to run', metavar='STATUS') +optparser.add_option('--bed', dest='bedtopo', type='string', default ='linear',help='bed topography, linear or poly', metavar='BEDTOPO') + + +for option in optparser.option_list: + if option.default != ("NO", "DEFAULT"): + option.help += (" " if option.help else "") + "[default: %default]" +options, args = optparser.parse_args() + + +if options.bedtopo == 'linear': + As = AsLinear + AsAdvance = AsAdvanceLinear + AsRetreat = AsRetreatLinear + Astatus = AstatusLinear + AstatusAdvance = AstatusAdvanceLinear + AstatusRetreat = AstatusRetreatLinear +elif options.bedtopo == 'poly': + As = AsPoly + AsAdvance = AsAdvancePoly + AsRetreat = AsRetreatPoly + Astatus = AstatusPoly + AstatusAdvance = AstatusAdvancePoly + AstatusRetreat = AstatusRetreatPoly +else: + sys.exit('Please specify bed type from this list: linear, poly.') + + +if options.experiment == 'all': + experiments = As + Astat = Astatus + print 'Running all the MISMIP experiments' +elif options.experiment == 'advance': + experiments = AsAdvance + Astat = AstatusAdvance + print 'Running advance experiments' +elif options.experiment == 'retreat': + experiments = AsRetreat + Astat = AstatusRetreat + print 'Running retreat experiments' +elif options.experiment in As: + # In this case there might be 2 possibilities, advance or retreat. + experiments = [options.experiment] + if options.StatChoice == 'retreat': + Astat = ['retreat'] + else: + Astat = ['advance'] + + print 'Running experiment ', options.experiment +else: + sys.exit('Please specify experiment(s) from this list: all, advance, retreat or a single value from Pattyn et al.2012.') + + + +# loop through A values. +bedType = options.bedtopo +countStat = -1 # counter to access status matrix. Taking into account zero-arrays indexing + +for expt in experiments: + countStat = countStat+1 + stat = Astat[countStat] + + # Change to bed type directory. + os.chdir(bedType) + + # Change to advance or retreat directory. + os.chdir(stat) + + # Change to A value directory. + os.chdir(expt) + print 'changed directory to ', stat+'/'+expt + + # Name of the restart pointer file. + restartPointer = 'mismip_' + expt + '.pointer' + + # Read information from config file. + configfile = 'mismip_' + expt + '.config' + config = ConfigParser() + config.read(configfile) + + inputFile = config.get('CF input', 'name') + outputFile = config.get('CF output', 'name') + outputFreq = config.get('CF output', 'frequency') + endTime = config.get('time', 'tend') + endTime = float(endTime) + + # Time buffer to remedy the restart pb when switching to different time step within a run. + buffer = float(outputFreq) - 1. + + # Read output file content information. 
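+    # If an output file already exists, record the size of its time dimension and its last
+    # time entry; these are used below to decide whether this A-value run is already finished,
+    # needs to be restarted, or has not been started yet.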
+    lastTimeEntry = 0
+    lastEntryInternal = 0
+    sizeTimeOutput = 0
+    if os.path.exists(outputFile):
+        outputData = Dataset(outputFile,'r')
+        if outputData['time'].size != 0:
+            sizeTimeOutput = outputData['time'].size
+            lastTimeEntry = outputData['time'][-1]
+
+        outputData.close()
+
+
+    # Take action based on the stage of the experimental run.
+    if (lastTimeEntry >= (endTime - buffer)) and (sizeTimeOutput > 1):
+        # The run for this A value is done; move on to the next one.
+        pass
+    elif (lastTimeEntry < endTime) and (sizeTimeOutput > 1):
+        # The run for this A value is not done and needs to continue.
+        print 'Continuing experiment from restart.'
+
+        # Make sure restart is set to 1 in the config file.
+        config.set('options', 'restart', 1)
+
+        # Write to the config file.
+        with open(configfile, 'w') as newconfigfile:
+            config.write(newconfigfile)
+
+        # Launch CISM to restart the run.
+        launchCism(options.executable, configfile, options.parallel)
+
+        # Re-open the output file and check whether the experiment was manually interrupted.
+        outputData = Dataset(outputFile,'r')
+        lastTimeEntry = outputData['time'][-1]
+        outputData.close()
+        if (lastTimeEntry >= (endTime - buffer)):
+            print 'Finished experiment', expt
+        else:
+            print 'Experiment interrupted.'
+            sys.exit('Terminating the run.')
+
+    else:
+        # Start the experiment from time = 0.
+        if (expt == As[0]) and (stat=='advance'):
+            print 'First A-value, beginning from initial setup.'
+        else:
+            print 'Restarting from previous A-value restart file.'
+
+            inputData = Dataset(inputFile,'r+')
+            lastEntryInternal = inputData['internal_time'][-1]
+            inputslice = 1
+            if lastEntryInternal != 0:
+                inputData['internal_time'][:] = inputData['internal_time'][:] - lastEntryInternal
+                print 'the new internal_time array is ', inputData['internal_time'][:]
+
+            inputData.close()
+
+            # Set config file.
+            config.set('CF input', 'time', inputslice)
+
+
+        # Launch CISM.
+        launchCism(options.executable, configfile, options.parallel)
+
+        # Re-open the output file and check whether the experiment was manually interrupted.
+        outputData = Dataset(outputFile,'r')
+        lastTimeEntry = outputData['time'][-1]
+        outputData.close()
+        if (lastTimeEntry >= (endTime - buffer)):
+            print 'Finished experiment', expt
+        else:
+            print 'Experiment interrupted.'
+            sys.exit('Terminating the run.')
+
+
+    print('Switching back to original directory.')
+    # Change to the parent directory and continue.
+    os.chdir('../../..')
diff --git a/tests/MISMIP/mismip.code/mismipSetup.py b/tests/MISMIP/mismip.code/mismipSetup.py
new file mode 100755
index 00000000..44bdc2d0
--- /dev/null
+++ b/tests/MISMIP/mismip.code/mismipSetup.py
@@ -0,0 +1,711 @@
+#!/usr/bin/env python
+
+# This script sets up initial conditions for the MISMIP (1d) experiment.
+# See this paper for details:
+# Pattyn et al., Results of the Marine Ice Sheet Model Intercomparison Project, MISMIP,
+# The Cryosphere, 6, 573-588, 2012, doi:10.5194/tc-6-573-2012.
+#
+# Note: This experiment is meant to analyse flowline models.
+#       There is no perturbation whatsoever in the y direction.
+#       Therefore the domain is limited to 6 grid cells in the y direction.
+
+import sys, os
+import shutil
+import fileinput
+import numpy as np
+from netCDF4 import Dataset
+from ConfigParser import ConfigParser
+from optparse import OptionParser
+
+
+###############################
+# Constants used in this code #
+###############################
+
+accum = 0.3    # uniform accumulation (m/yr)
+
+
+#### Linear bed specific ####
+
+# A-values used in the linear bed experiment.
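+# Note: the list runs through the advance sequence and then back through the retreat sequence,
+# so most A values appear twice (once per phase).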
+AsLinear = ['14.638e-17','6.7941e-17','3.1536e-17', + '14.638e-18','6.7941e-18','3.1536e-18', + '14.638e-19','6.7941e-19','3.1536e-19', + '6.7941e-19','14.638e-19', + '3.1536e-18','6.7941e-18','14.638e-18', + '3.1536e-17','6.7941e-17','14.638e-17'] + +# A-values used in the linear bed and advance experiment. +AsAdvanceLinear = ['14.638e-17','6.7941e-17','3.1536e-17', + '14.638e-18','6.7941e-18','3.1536e-18', + '14.638e-19','6.7941e-19','3.1536e-19'] + +# A-values used in the linear bed and retreat experiment. +AsRetreatLinear = list(reversed(AsAdvanceLinear[0:-1])) + + +# Status of the linear bed experiment. +AstatusLinear = ['advance','advance','advance', + 'advance','advance','advance', + 'advance','advance','advance', + 'retreat','retreat', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + +# Status of the linear bed and advance experiment. +AstatusAdvanceLinear = ['advance','advance','advance', + 'advance','advance','advance', + 'advance','advance','advance'] + +# Status of the linear bed and retreat experiment. +AstatusRetreatLinear = ['retreat','retreat', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + + + +#### Poly bed specific #### + + +# A-values used in the poly bed experiment. +AsPoly = ['9.4608e-18','7.8840e-18','6.3072e-18', + '4.7304e-18','3.1536e-18','1.5768e-18', + '7.8840e-19', + '1.5768e-18','3.1536e-18','4.7304e-18', + '6.3072e-18','7.8840e-18','9.4608e-18'] + +# A-values used in the poly bed and advance experiment. +AsAdvancePoly = ['9.4608e-18','7.8840e-18','6.3072e-18', + '4.7304e-18','3.1536e-18','1.5768e-18', + '7.8840e-19'] + +# A-values used in the poly bed and retreat experiment. +AsRetreatPoly = list(reversed(AsAdvancePoly[0:-1])) + + +# Status of the poly bed experiment. +AstatusPoly = ['advance','advance','advance', + 'advance','advance','advance', + 'advance', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + +# Status of the poly bed and advance experiment. +AstatusAdvancePoly = ['advance','advance','advance', + 'advance','advance','advance', + 'advance'] + +# Status of the poly bed and retreat experiment. +AstatusRetreatPoly = ['retreat','retreat','retreat', + 'retreat','retreat','retreat'] + + +# Final times prescribed for the poly bed experiment. +AsTimePoly = ['30000','15000','15000', + '15000','15000','30000', + '30000', + '15000','15000','30000', + '30000','30000','15000'] + +# Final times prescribed for the poly bed experiment. +AsTimeAdvancePoly = ['30000','15000','15000', + '15000','15000','30000', + '30000'] + +# Final times prescribed for the poly bed experiment. +AsTimeRetreatPoly = ['15000','15000','30000', + '30000','30000','15000'] + + + +#################################### +# Function used later in the code # +#################################### + + +# The following function returns the linear bed topography as in Pattyn et al. (2012). +def computeBedLinear(x): + # Input expected in km. + + schoofx = 750. # scaling factor in km + slope = -778.5 # m + b0 = 720. # m + + eps_b = 1e-10 + abs_x = np.sqrt(x**2 + eps_b**2) # km (smoothing for ice divide BC requirements) + xprime = abs_x/schoofx # unitless + b = b0 + slope*xprime # m + + return b + +# The following function returns the polynomial bed topography as in Pattyn et al. (2012). +def computeBedPoly(x): + # Input x expected in km. + + schoofx = 750. # scaling factor in km + x = x/schoofx # unitless + + b0 = 729. 
# m + b2 = -2184.8 # m + b4 = 1031.72 # m + b6 = -151.72 # m + + b = b0 + b2*x**2 + b4*x**4 + b6*x**6 # m + + return b + +# The following function returns the bed topography as in Pattyn et al. (2012) based +# on a choice between 'linear' and 'polynomial' +def computeBed(x, bedType): + # Input x expected in km. + + if bedType == 'linear': + b = computeBedLinear(x) + elif bedType == 'poly': + b = computeBedPoly(x) + else: + sys.exit('Please specify bed topography from these options: linear or poly.') + + return b + + +# The following function computes a uniform initial ice thickness for the experiment. +def computeHGuessSlab(): + + H = 100. # m + return H + + +# The following function computes an initial ice thickness based on semi-analyitc +# solution of Schoof2007. +def computeHGuessSemiAnalytic(xH, xu, bedType): + # Input xH and xu expected in m. + + sPerY = 365.0*24.*3600. # number of seconds per year + n = 3. + rhoi = 900. # kg/m^3 + rhow = 1000. # kg/m^3 + g = 9.8 # m/s^2 + delta = 1 - rhoi/rhow # unitless + a = 0.3/sPerY # converting accumulation to m/s + C = 7.624e6 # Pa(s/m)^(1/3) + A = 4.6416e-24 # Pa^-3/s + xg = 900000.0 # m + + deltaX = np.abs(xH[1]-xH[0]) # m + + # Determining the index of GL relative to domain. + glIndex = np.argmin(np.abs(xH[:]-xg)) + xg = xH[glIndex] + + # Computing bed depth and ice thickness at the grounding line. + # Note: the sign convention here is different than in Schoof2007. + bxg = -computeBed(xg*1.e-3, bedType) + Hxg = bxg/(1.0-delta) + + if(Hxg <= 0.): + raise ValueError("H(xg) <= 0. Cannot produce HGuess") + + b = -computeBed(xH[:]*1e-3, bedType) + uxg = a*xg/Hxg + xH = xH[:] + + # Computing the analytic solution in ice shelf. + c1 = rhoi*delta*g*a*A**(1./n)/4 + c1 = c1**n + operand = np.maximum(c1*(xH[:]**(n+1) - xg**(n+1)),0.0) + uxg**(n+1) + uShelf = (operand)**(1/(n+1)) + HShelf = a*xH[:]/uShelf[:] + + H = HShelf.copy() + u = uShelf.copy() + + # Assume balance between taub and taud in the sheet (SIA) and steady state (u=a*x/H), + # and iteratively solve the ODE for H + Hip1 = H[glIndex] + + deltaB = b[5]-b[4] + for xIndex in range(glIndex-1,-1,-1): + deltaB = (b[xIndex+1]-b[xIndex])/deltaX + Hi = Hip1+deltaX*(deltaB + C/(rhoi*g)*np.abs(a*xH[xIndex])**(1./n)/(Hip1**(1./n+1))) + Hip1 = Hi + H[xIndex] = Hi + + H[0] = H[1] # Enforcing ice divide boundary condition. + + return H + + +# This function returns the initial thickness profile for the experiment. The choices are between +# an initial uniform slab and semi-analytical solution from Schoof2007 given an initial ad-hoc GL position. +def computeHGuess(xH, xu, bedType, HInitType): + # Input xH and xu expected in m. + + if HinitType == 'slab': + H = computeHGuessSlab() + elif HinitType == 'analytic': + H = computeHGuessSemiAnalytic(xH, xu, bedType) + else: + sys.exit('Please specify initial profile from these options: slab or analytic.') + + return H + + + +############# +# Main code # +############# + + +# Parse options. 
+optparser = OptionParser() + +optparser.add_option('-c', '--config',dest='configfile', type='string',default='mismip.config.template', help='config file name for setting up the MISMIP experiment', metavar='FILE') +optparser.add_option('-e', '--exec', dest='executable',default='cism_driver',help='Set path to the CISM executable', metavar='EXECUTABLE') +optparser.add_option('-x', '--expt', dest='experiment', type='string',default = 'all', help='MISMIP experiment(s) to set up', metavar='EXPT') +optparser.add_option('-t', '--tstep', dest='timestep', type='float', default = 1, help='time step (yr)', metavar='TSTEP') +optparser.add_option('-r', '--res', dest='resolution', type='int', default = 2000, help='grid resolution (m)', metavar='RES') +optparser.add_option('-v', '--vlevel',dest='vertlevels', type='int', default = 3, help='no. of vertical levels', metavar='VLEVEL') +optparser.add_option('-a', '--approx',dest='approximation',type='string',default = 'DIVA',help='Stokes approximation (SSA, DIVA, BP)', metavar='APPROXIMATION') +optparser.add_option('-b', '--basal', dest='basalFriction', type='string', default='powerlaw', help='Basal friction law (powerlaw, schoof)', metavar='BASALFRICTION') +optparser.add_option('-y', '--year', dest='yearsSpinup', type='int', default = 30000, help='Length of Spinup run (yr)', metavar='YEARSPINUP') +optparser.add_option('--bed', dest='bedtopo', type='string',default ='linear',help='bed topography, linear or poly', metavar='BEDTOPO') +optparser.add_option('--yrun', dest='yearsRun', type='int', default ='20000', help='run length between 2 experiments', metavar='YEARSRUN') +optparser.add_option('--hinit',dest='initThick',type='string',default ='slab', help='experiment initial thickness profile', metavar='INITTHICK') + +optparser.add_option + +for option in optparser.option_list: + if option.default != ('NO', 'DEFAULT'): + option.help += (' ' if option.help else '') + '[default: %default]' +options, args = optparser.parse_args() + + +if options.bedtopo == 'linear': + As = AsLinear + AsAdvance = AsAdvanceLinear + AsRetreat = AsRetreatLinear + Astatus = AstatusLinear + AstatusAdvance = AstatusAdvanceLinear + AstatusRetreat = AstatusRetreatLinear + + xDomain = 1840000.0 # domain x-dimension (m) + marine_limit = -1140 # configuration to chop off ice passed this depth +elif options.bedtopo == 'poly': + As = AsPoly + AsAdvance = AsAdvancePoly + AsRetreat = AsRetreatPoly + Astatus = AstatusPoly + AstatusAdvance = AstatusAdvancePoly + AstatusRetreat = AstatusRetreatPoly + + xDomain = 1520000.0 # domain x-dimension (m) + marine_limit = -1100 # configuration to chop off ice passed this depth +else: + sys.exit('Please specify bed type from this list: linear, poly.') + + +if options.experiment == 'all': + experiments = As + AsTime = AsTimePoly + Astat = Astatus + print 'Setting up all the experiments' +elif options.experiment == 'advance': + experiments = AsAdvance + AsTime = AsTimeAdvancePoly + Astat = AstatusAdvance + print 'Setting up advance experiments' +elif options.experiment == 'retreat': + experiments = AsRetreat + AsTime = AsTimeRetreatPoly + Astat = AstatusRetreat + print 'Setting up retreat experiments' +else: + sys.exit('Please specify experiment(s) from this list: all, advance, retreat.') + + +# If there is not already a link to cism_driver in the main directory, then make one. +# Each subdirectory will link to cism_driver in the main directory. +if options.executable != 'cism_driver': + # Remove the existing link, if present. 
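+    # (Note: os.unlink raises OSError if 'cism_driver' does not already exist in this directory.)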
+ os.unlink('cism_driver') + # Make the new link. + os.symlink(options.executable, 'cism_driver') + + +# Set grid resolution. +if options.resolution == 8000: + dx = 8000.0 + dy = 8000.0 +elif options.resolution == 4000: + dx = 4000.0 + dy = 4000.0 +elif options.resolution == 2000: + dx = 2000.0 + dy = 2000.0 +elif options.resolution == 1000: + dx = 1000.0 + dy = 1000.0 +elif options.resolution == 500: + dx = 500.0 + dy = 500.0 +elif options.resolution == 250: + dx = 250.0 + dy = 250.0 +else: + sys.exit('Please choose from among the following resolutions (m): 8000, 4000, 2000, 1000, 500, 250') + +if options.vertlevels >= 2: + nz = options.vertlevels +else: + sys.exit('Error: must have at least 2 vertical levels') + +print 'MISMIP grid resolution (m) =', options.resolution +print 'Number of vertical levels =', nz + +# Note: This is a streamline experiment with no y-direction variation in bed or forces. +# For this reason we can limit the domain in y-direction by a fixed amount of grid cells. +yDomain = dy*5 + +# Set number of grid cells in each direction. +# Include a few extra cells in the x direction to handle boundary conditions. +nx = int(xDomain/dx) + 4 +ny = int(yDomain/dy) + +# Copy the config template to a new master config file. +masterConfigFile = 'mismip.config' + +try: + shutil.copy(options.configfile, masterConfigFile) +except OSError: + sys.exit('Could not copy', options.configfile) + +print 'Creating master config file', masterConfigFile + +# Read the master config file. +config = ConfigParser() +config.read(masterConfigFile) + +# Set the grid variables in the master config file. +config.set('grid', 'ewn', nx) +config.set('grid', 'nsn', ny) +config.set('grid', 'upn', nz) +config.set('grid', 'dew', dx) +config.set('grid', 'dns', dy) + +# Set the time step in the msster config file. +# Set the diagnostic interval to the same value (not necessary, but helpful for debugging). + +config.set('time', 'dt', options.timestep) +config.set('time', 'dt_diag', options.timestep) + +# Set Stokes approximation in config file. +if options.approximation == 'SSA': + which_ho_approx = 1 + print 'Using SSA velocity solver' +elif options.approximation == 'DIVA': + which_ho_approx = 4 + print 'Using DIVA velocity solver' +elif options.approximation == 'BP': + which_ho_approx = 2 + print 'Using Blatter-Pattyn velocity solver' +else: + which_ho_approx = 4 + print 'Defaulting to DIVA velocity solver' + +config.set('ho_options', 'which_ho_approx', which_ho_approx) + +# Config settings related to basal friction law. +# Note: Each of these friction laws is associate with certain basal parameters. +# The desired parameters should be set in the config template. +if options.basalFriction == 'Schoof': + which_ho_babc = 11 + print 'Using Schoof basal friction law' +elif options.basalFriction == 'Tsai': + which_ho_babc = 12 + print 'Using Tsai basal friction law' +elif options.basalFriction == 'powerlaw': + which_ho_babc = 9 + print 'Using basal friction power law' +else: + which_ho_babc = 9 # powerlaw is default + print 'Defaulting to powerlaw basal friction law' + +config.set('ho_options', 'which_ho_babc', which_ho_babc) + +# Config setting related to spin up time. +yearsSpinup = float(options.yearsSpinup) +config.set('time', 'tend', yearsSpinup) + +# Config setting related to presence of ice. +config.set('parameters','marine_limit',marine_limit) + +# Write to the master config file. 
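# Quick numerical check of the grid dimensions that end up in the [grid] section above: at the
# default 2000 m resolution with the linear-bed domain (xDomain = 1840 km, yDomain = 5*dy), the
# script's formulas give nx = int(xDomain/dx) + 4 and ny = int(yDomain/dy). The values below are
# assumed for illustration only; the script derives them from its command-line options.
dx = dy = 2000.0
xDomain, yDomain = 1840000.0, 5*dy
nx = int(xDomain/dx) + 4          # 924, including 4 extra columns for boundary conditions
ny = int(yDomain/dy)              # 5
print('ewn = %d, nsn = %d' % (nx, ny))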
+with open(masterConfigFile, 'w') as configfile: + config.write(configfile) + + +print 'years of Spinup experiment =', yearsSpinup +restartfreqSpinup = min(1000.0, options.yearsSpinup) # can be changed by the user if needed +print 'Spinup restart frequency =', restartfreqSpinup + + +# Create the netCDF input file according to the information in the config file. +try: + parser = ConfigParser() + parser.read(options.configfile) + initfile = parser.get('CF input', 'name') +except OSError: + sys.exit('Error parsing ' + options.configfile) + +print 'Creating input file', initfile +ncfile = Dataset(initfile, 'w') + + +# Create dimensions. +# Note: (x0,y0) = staggered (velocity) grid. +# (x1,y1) = unstaggered (scalar) grid. +ncfile.createDimension('time',1) +ncfile.createDimension('x1',nx) +ncfile.createDimension('y1',ny) +ncfile.createDimension('x0',nx-1) +ncfile.createDimension('y0',ny-1) +ncfile.createDimension('level',nz) +ncfile.createDimension('staglevel',nz-1) +ncfile.createDimension('stagwbndlevel',nz+1) # similar to staglevel but including boundaries + +# Create time and grid variables. +# Note: (x1,y1) are loadable and need to be in the input file. +# (x0,y0) are not loadable, but are derived in CISM from (x1,y1). May not be needed. +ncfile.createVariable('time','f4',('time',))[:] = [0] +x1 = ncfile.createVariable('x1','f4',('x1',)) +y1 = ncfile.createVariable('y1','f4',('y1',)) +x0 = ncfile.createVariable('x0','f4',('x0',)) +y0 = ncfile.createVariable('y0','f4',('y0',)) + +# Create 2D input fields. +thk = ncfile.createVariable('thk', 'f4', ('time','y1','x1')) +topg = ncfile.createVariable('topg', 'f4', ('time','y1','x1')) +acab = ncfile.createVariable('acab', 'f4', ('time','y1','x1')) +uvel = ncfile.createVariable('uvel', 'f4', ('time','level','y0','x0')) +vvel = ncfile.createVariable('vvel', 'f4', ('time','level','y0','x0')) +kinbcmask = ncfile.createVariable('kinbcmask', 'i4', ('time','y0','x0')) # kinematic BC mask + +# Compute x and y on each grid. +# Note: (1) The x origin is placed at the center of the second cell from the left. +# This assumes that kinbcmask = 1 at the first vertex from the left. +# Thus the left edge of the grid has x = -3*dx/2. +# (2) The y origin is placed at the bottom edge of the CISM grid. +# The line of central symmetry runs along cell edges at y = 40 km. + +x = dx*np.arange(nx,dtype='float32') # x = 0, dx, 2*dx, etc. +y = dy*np.arange(ny,dtype='float32') # y = 0, dy, 2*dy, etc. + +x1[:] = x[:] - dx # x1 = -dx, 0, dx, ..., (nx-2)*dx - dx/2 +y1[:] = y[:] + dy/2. # y1 = dy/2, 3*dy/2, ..., (ny-1)*dy - dy/2 + +x0[:] = x[:-1] - dx/2. # x0 = -dx/2, dx/2, 3*dx/2, ..., (nx-2)*dx +y0[:] = y[:-1] + dy # y0 = dy, 2*dy, ..., (ny-1)*dy + + +# Set bed topography. +bedType = options.bedtopo +print 'Computing ' + bedType + ' bed' +for i in range(nx): + topg[:,:,i] = computeBed(x1[i]/1.e3, bedType) # x1 is in [m] and we need input in [km] + + +# Creating 'linear' or 'poly' bedtype directory. +try: + os.mkdir(bedType) + print 'Created subdirectory', bedType +except OSError: + print 'Subdirectory', bedType, 'already exists' + + +# Set initial ice thickness. +HinitType = options.initThick +initThickness = computeHGuess(x1, x0, bedType, HinitType) +for j in range(ny): + thk[0,j,:] = initThickness + +# Set the surface mass balance. +acab[:,:,:] = accum + +# Set initial velocity to zero (probably not necessary). +uvel[:,:,:,:] = 0. +vvel[:,:,:,:] = 0. + +# Set kinematic velocity mask. +# Where kinbcmask = 1, the velocity is fixed at its initial value. 
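# Small illustration of the grid layout built above: x1/y1 are cell centres on the scalar grid,
# x0/y0 are the staggered (velocity) vertices offset by half a cell. Tiny nx, ny, dx, dy are used
# here purely for readability; the offsets are the same ones applied in the script.
import numpy as np
nx, ny, dx, dy = 5, 3, 2000.0, 2000.0
x = dx*np.arange(nx, dtype='float32')
y = dy*np.arange(ny, dtype='float32')
x1 = x - dx              # -2000, 0, 2000, 4000, 6000
y1 = y + dy/2.           #  1000, 3000, 5000
x0 = x[:-1] - dx/2.      # -1000, 1000, 3000, 5000   (nx-1 vertices)
y0 = y[:-1] + dy         #  2000, 4000                (ny-1 vertices)
print('x1: ' + str(x1))
print('x0: ' + str(x0))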
+# Note: Although there is no ice on the RHS of the domain, we need kinbcmask =1 there +# to preserve symmetry with the LHS (since east-west BCs are formally periodic). +kinbcmask[:,:,:] = 0 # initialize to 0 everywhere +kinbcmask[:,:,0] = 1 # mask out left-most column +kinbcmask[:,:,-1] = 1 # mask out right-most column + + +ncfile.close() + + +# Loop through A values. +AprevString = '' # string used in linking input file of the experiments. +countTime = -1 # counter to access end time matrix for poly bed. Accounting for zero-array indexing +countStat = -1 # counter to access status matrix. Accounting zero-array indexing + +for expt in experiments: + + countTime = countTime + 1 + countStat = countStat+1 + stat = Astat[countStat] + + # Change to bed type directory. + os.chdir(bedType) + + # Create advance and/or retreat directory if needed. + try: + os.mkdir(stat) + print 'Created subdirectory', expt + except OSError: + print 'Subdirectory', expt, 'already exists' + + # Change to advance or retreat directory. + os.chdir(stat) + + # For each experiment, make a suitable config file and set up a subdirectory. + print 'Creating config file for experiment A value', expt + + # Make a copy of the mismip config file. + # Below, this copy will be tailored for the chosen MISMIP experiment, + # without changing the settings used for the Spinup experiment. + + newConfigFile = 'mismip_' + expt + '.config' + print 'Config file for this experiment:', newConfigFile + shutil.copy('../../' + masterConfigFile, newConfigFile) + + # Read the new config file. + config = ConfigParser() + config.read(newConfigFile) + + # Experiment-specific settings. + if bedType == 'linear': + if (expt == As[0]) and (stat == 'advance'): + tstart = 0.0 + tend = yearsSpinup + inputdir = '../../../' + inputfile = initfile + inputslice = 1 + outputfreq = min(1000.0, restartfreqSpinup) + restartfreq = restartfreqSpinup + elif (expt == AsRetreat[0]) and (stat == 'retreat'): + tstart = 0.0 + tend = float(options.yearsRun) + inputdir = '../../advance/' + AprevString + '/' + inputfile = 'mismip_' + AprevString + '.restart.nc' + inputslice = 1 + outputfreq = min(1000.0, restartfreqSpinup) + restartfreq = restartfreqSpinup + else: + tstart = 0.0 + tend = float(options.yearsRun) + inputdir = '../' + AprevString + '/' + inputfile = 'mismip_' + AprevString + '.restart.nc' + inputslice = 1 + outputfreq = 1000.0 + restartfreq = 1000.0 + elif bedType == 'poly': + if (expt == As[0]) and (stat == 'advance'): + tstart = 0.0 + tend = AsTime[countTime] + inputdir = '../../../' + inputfile = initfile + inputslice = 1 + outputfreq = min(1000.0, restartfreqSpinup) + restartfreq = restartfreqSpinup + elif (expt == AsRetreat[0]) and (stat == 'retreat'): + tstart = 0.0 + tend = AsTime[countTime] + inputdir = '../../advance' + AprevString + '/' + inputfile = 'mismip_' + AprevString + '.restart.nc' + inputslice = 1 + outputfreq = min(1000.0, restartfreqSpinup) + restartfreq = restartfreqSpinup + else: + tstart = 0.0 + tend = AsTime[countTime] + inputdir = '../' + AprevString + '/' + inputfile = 'mismip_' + AprevString + '.restart.nc' + inputslice = 1 + outputfreq = 1000.0 + restartfreq = 1000.0 + else: + print('This should not be an option by now.') + sys.exit('Exiting the run.') + + # Set the start and end times. + config.set('time', 'tstart', tstart) + config.set('time', 'tend', tend) + + # Set the default flwa value. + config.set('parameters', 'default_flwa', float(expt)) + + # Change the default comment. 
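# Illustration of the run chaining handled via AprevString in the loop below: the first advance
# step starts from the initial-condition file, every later step restarts from the previous step's
# restart file, and the first retreat step picks up from the last advance step. The short A lists
# here are stand-ins for the full AsAdvance/AsRetreat lists.
AsAdvance = ['1e-17', '5e-18', '1e-18']
AsRetreat = list(reversed(AsAdvance[0:-1]))
prev = None
for expt in AsAdvance + AsRetreat:
    source = 'initial input file' if prev is None else 'mismip_' + prev + '.restart.nc'
    print(expt + ' starts from ' + source)
    prev = expt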
+ comment = 'MISMIP experiment ' + expt + config.set('CF default', 'comment', comment) + + # Set input file and time slice. + config.set('CF input', 'name', inputfile) + config.set('CF input', 'time', inputslice) + + # Set the output filename in the section [CF output]. + outfilename = 'mismip_' + expt + '.out.nc' + print 'Output file:', outfilename + config.set('CF output', 'name', outfilename) + config.set('CF output', 'frequency', outputfreq) + + # Set restart info. This should be in the section called '[CF output]'. + # Note: Each experiment (except Stnd) writes out only one time slice to a restart file. + restartfilename = 'mismip_' + expt + '.restart.nc' + print 'Restart file:', restartfilename + config.set('CF restart', 'name', restartfilename) + config.set('CF restart', 'variables', 'restart') + config.set('CF restart', 'frequency', restartfreq) + config.set('CF restart', 'start', tstart + restartfreq) + config.set('CF restart', 'xtype', 'double') + + # Write to the new config file. + with open(newConfigFile, 'w') as configfile: + config.write(configfile) + + # Create a subdirectory named for the experiment, and stage the run there. + try: + os.mkdir(expt) + print 'Created subdirectory', expt + except OSError: + print 'Subdirectory', expt, 'already exists' + + os.chdir(expt) + + # Move the config file from the parent directory to the subdirectory. + shutil.move('../' + newConfigFile, newConfigFile) + print 'Created config file', newConfigFile + + # Link to the cism_driver executable in the parent directory. + try: + os.symlink('../../../cism_driver', 'cism_driver') + except OSError: + pass # link to cism_driver already exists + + # Link to the input file in the appropriate directory. + try: + os.symlink(inputdir + inputfile, inputfile) + except OSError: + pass # link to the input file already exists + + + # Updating the previous values of Aprev for next experiment setup. + AprevString = expt + + + # Go back to the parent directory and continue. + os.chdir('../../..') diff --git a/tests/MISMIP/mismip.code/mismipWriteGL.py b/tests/MISMIP/mismip.code/mismipWriteGL.py new file mode 100755 index 00000000..3b3fbdba --- /dev/null +++ b/tests/MISMIP/mismip.code/mismipWriteGL.py @@ -0,0 +1,304 @@ +#!/usr/bin/env python + +# This script reads a CISM output file from a MISMIP experiment, and creates a netCDF output file. +# The input file should include the following: +# Coordinates: x0, y0, x1, y1, time +# Global scalars: ivol, ice_mass_above_flotation, iareag +# 2D fields: f_ground, stagthck, usfc, vsfc, ubas, vbas, uvel_mean, vvel_mean +# The f_ground field is not part of the new output file, but is needed to identify +# vertices lying on the grounding line (GL). + + +import os, sys +import numpy as np +from netCDF4 import Dataset +import matplotlib.pyplot as plt +from optparse import OptionParser + + +############################### +# Constants used in this code # +############################### + +#Set ice density prescribed for MISMIP3d. +rhoi = 900. # kg/m^3 +model = '_cism' # file naming extension + + +#### Linear bed specific #### + +# A-values used in the linear bed experiment. +AsLinear = ['14.638e-17','6.7941e-17','3.1536e-17', + '14.638e-18','6.7941e-18','3.1536e-18', + '14.638e-19','6.7941e-19','3.1536e-19', + '6.7941e-19','14.638e-19', + '3.1536e-18','6.7941e-18','14.638e-18', + '3.1536e-17','6.7941e-17','14.638e-17'] + +# A-values used in the linear bed and advance experiment. 
+AsAdvanceLinear = ['14.638e-17','6.7941e-17','3.1536e-17', + '14.638e-18','6.7941e-18','3.1536e-18', + '14.638e-19','6.7941e-19','3.1536e-19'] + +# A-values used in the linear bed and retreat experiment. +AsRetreatLinear = list(reversed(AsAdvanceLinear[0:-1])) + + +# Status of the linear bed experiment. +AstatusLinear = ['advance','advance','advance', + 'advance','advance','advance', + 'advance','advance','advance', + 'retreat','retreat', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + +# Status of the linear bed and advance experiment. +AstatusAdvanceLinear = ['advance','advance','advance', + 'advance','advance','advance', + 'advance','advance','advance'] + +# Status of the linear bed and retreat experiment. +AstatusRetreatLinear = ['retreat','retreat', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + + + +#### Poly bed specific #### + + + +# A-values used in the poly bed experiment. +AsPoly = ['9.4608e-18','7.8840e-18','6.3072e-18', + '4.7304e-18','3.1536e-18','1.5768e-18', + '7.8840e-19', + '1.5768e-18','3.1536e-18','4.7304e-18', + '6.3072e-18','7.8840e-18','9.4608e-18'] + +# A-values used in the poly bed and advance experiment. +AsAdvancePoly = ['9.4608e-18','7.8840e-18','6.3072e-18', + '4.7304e-18','3.1536e-18','1.5768e-18', + '7.8840e-19'] + +# A-values used in the poly bed and retreat experiment. +AsRetreatPoly = list(reversed(AsAdvancePoly[0:-1])) + + +# Status of the poly bed experiment. +AstatusPoly = ['advance','advance','advance', + 'advance','advance','advance', + 'advance', + 'retreat','retreat','retreat', + 'retreat','retreat','retreat'] + +# Status of the poly bed and advance experiment. +AstatusAdvancePoly = ['advance','advance','advance', + 'advance','advance','advance', + 'advance'] + +# Status of the poly bed and retreat experiment. +AstatusRetreatPoly = ['retreat','retreat','retreat', + 'retreat','retreat','retreat'] + + + +######## +# Code # +######## + + +# Parse options. 
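# The A-value lists and their status lists above are maintained by hand in parallel, so a cheap
# sanity check is to assert they stay the same length before stepping through them together.
# Hedged sketch with short stand-in lists:
As = ['14.638e-17', '6.7941e-17', '6.7941e-17', '14.638e-17']
Astatus = ['advance', 'advance', 'retreat', 'retreat']
assert len(As) == len(Astatus), 'A-value and status lists are out of sync'
for A, stat in zip(As, Astatus):
    print(A + ' -> ' + stat)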
+parser = OptionParser() + +parser.add_option('-x', '--expt',dest='experiment',type='string', default='all', help='MISMIP experiment(s) to run', metavar="EXPT") +parser.add_option('-f', '--file',dest='filename', type='string', help='CISM output file from MISMIP3d run', metavar="FILE") +parser.add_option('-s', '--stat',dest='StatChoice',type='string',default='advance',help='MISMIP experiment set to run', metavar="EXPT") +parser.add_option('--bed', dest='bedtopo', type='string', default ='linear',help='bed topography, linear or poly', metavar='BEDTOPO') + + +for option in parser.option_list: + if option.default != ("NO", "DEFAULT"): + option.help += (" " if option.help else "") + "[default: %default]" + +options, args = parser.parse_args() + +if options.bedtopo == 'linear': + As = AsLinear + AsAdvance = AsAdvanceLinear + AsRetreat = AsRetreatLinear + Astatus = AstatusLinear + AstatusAdvance = AstatusAdvanceLinear + AstatusRetreat = AstatusRetreatLinear +elif options.bedtopo == 'poly': + As = AsPoly + AsAdvance = AsAdvancePoly + AsRetreat = AsRetreatPoly + Astatus = AstatusPoly + AstatusAdvance = AstatusAdvancePoly + AstatusRetreat = AstatusRetreatPoly +else: + sys.exit('Please specify bed type from this list: linear, poly') + + +if options.experiment == 'all': + experiments = As + Astat = Astatus + print 'Writing all the MISMIP experiments' +elif options.experiment == 'advance': + experiments = AsAdvance + Astat = AstatusAdvance + print 'Writing advance experiments' +elif options.experiment == 'retreat': + experiments = AsRetreat + Astat = AstatusRetreat + print 'Writing retreat experiments' +else: + sys.exit('Please specify experiment(s) from this list: all, advance, retreat') + + + +# Loop through A values. +bedType = options.bedtopo +countStat = -1 # counter to access status matrix. Accounting for zero-arrays indexing. + +for expt in experiments: + + countStat = countStat + 1 + stat = Astat[countStat] + + # Change to bed type directory. + os.chdir(bedType) + + # Change to Advance or Retreat directory. + os.chdir(stat) + + # Change to the subdirectory for this experiment. + os.chdir(expt) + + if options.filename: + file = options.filename + else: + file = 'mismip_' + expt + '.out.nc' + + print 'Creating a MISMIP grounding-line file for experiment', expt + print 'Attempting to read CISM file', file + + # Open the CISM output file, get needed dimensions. + # Note: (x0,y0) are dimensions of the staggered (velocity) grid + # (x1,y1) are dimensions of the unstaggered (scalar) grid + + try: + cismfile = Dataset(file,'r') + except: + sys.exit('Error: Unable to open CISM file') + + try: + nTime = len(cismfile.dimensions['time']) + nx = len(cismfile.dimensions['x0']) + ny = len(cismfile.dimensions['y0']) + except: + sys.exit('Error: The CISM file is missing needed dimensions') + + # Initialize some variables and arrays. + # Read in some fields needed to compute diagnostics similar to the one of MISMIP+. + + print 'Reading in CISM variables...' + + try: + # These array names are somewhat arbitrary. Sometime I have used CISM names; + # sometimes I have used a different name that is closer (but not identical) + # to the MISMIP3d field names. + + # Read horizontal and time coordinates. + # Note: (x0,yo) are vertex coordinates; (x1,y1) are cell center coordinates. + x0 = cismfile.variables['x0'][:] + y0 = cismfile.variables['y0'][:] + x1 = cismfile.variables['x1'][:] + y1 = cismfile.variables['y1'][:] + t = cismfile.variables['time'][:] + + # Read scalars (function of time only). 
+ ivol = cismfile.variables['ivol'][:] + imass_above_flotation = cismfile.variables['imass_above_flotation'][:] + iareag = cismfile.variables['iareag'][:] + + # Read 2D fields (functions of x, y and time). + # Note: All these fields are located at vertices. + f_ground = cismfile.variables['f_ground'][:,:,:] + iceThickness = cismfile.variables['thk'][:,:,:] + uSurface = cismfile.variables['usfc'][:,:,:] + vSurface = cismfile.variables['vsfc'][:,:,:] + uBase = cismfile.variables['ubas'][:,:,:] + vBase = cismfile.variables['vbas'][:,:,:] + uMean = cismfile.variables['uvel_mean'][:,:,:] + vMean = cismfile.variables['vvel_mean'][:,:,:] + + except: + sys.exit('Error: The output file is missing needed fields.') + + # Create the GL output file. + + outfilename = expt + model + '.nc' + ncfile = Dataset(outfilename, 'w') + print 'Created output file', outfilename + + # Set dimensions. + glptdim = ncfile.createDimension('nPointGL', size = None) + timedim = ncfile.createDimension('nTime', size = nTime) + + # Create variables. + xGL = ncfile.createVariable('xGL', 'f4', ('nPointGL', 'nTime')) + yGL = ncfile.createVariable('yGL', 'f4', ('nPointGL', 'nTime')) + time = ncfile.createVariable('time', 'f4', ('nTime')) + + iceVolume = ncfile.createVariable('iceVolume', 'f4', ('nTime')) + iceVAF = ncfile.createVariable('iceVAF', 'f4', ('nTime')) + groundedArea = ncfile.createVariable('groundedArea', 'f4', ('nTime')) + + iceThicknessGL = ncfile.createVariable('iceThicknessGL', 'f4', ('nPointGL', 'nTime')) + uSurfaceGL = ncfile.createVariable('uSurfaceGL', 'f4', ('nPointGL', 'nTime')) + vSurfaceGL = ncfile.createVariable('vSurfaceGL', 'f4', ('nPointGL', 'nTime')) + uBaseGL = ncfile.createVariable('uBaseGL', 'f4', ('nPointGL', 'nTime')) + vBaseGL = ncfile.createVariable('vBaseGL', 'f4', ('nPointGL', 'nTime')) + uMeanGL = ncfile.createVariable('uMeanGL', 'f4', ('nPointGL', 'nTime')) + vMeanGL = ncfile.createVariable('vMeanGL', 'f4', ('nPointGL', 'nTime')) + + # Loop over time slices and fill variables. + print 'Adding grounding-line variables to output file...' + + for iTime in range(nTime): + + print ' Time slice:', iTime + + # Add the scalar data for this time slice. + time[iTime] = t[iTime] + iceVolume[iTime] = ivol[iTime] + iceVAF[iTime] = imass_above_flotation[iTime]/rhoi + groundedArea[iTime] = iareag[iTime] + + # Loop over the horizontal grid and identify GL points. + # For each GL point, add the desired data to the arrays. + eps = 1.0e-6 + nGL = 0 + + # A cell hosts a grounding line if at least one neighboring point has a zero f_ground value. + for i in range(1,nx-1): + if (f_ground[iTime,3,i] > 0) and (f_ground[iTime,3,i+1] == 0): + nGL = nGL + 1 + m = nGL - 1 # indexing starts at 0 + xGL[m,iTime] = x0[i] + + iceThicknessGL[m,iTime] = iceThickness[iTime,3,i] + uSurfaceGL[m,iTime] = uSurface[iTime,3,i] + vSurfaceGL[m,iTime] = vSurface[iTime,3,i] + uBaseGL[m,iTime] = uBase[iTime,3,i] + vBaseGL[m,iTime] = vBase[iTime,3,i] + uMeanGL[m,iTime] = uMean[iTime,3,i] + vMeanGL[m,iTime] = vMean[iTime,3,i] + + + + ncfile.close() + + # Change to the parent directory. 
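# Standalone illustration of the grounding-line test used above: a vertex counts as a GL point when
# it is (at least partly) grounded but its neighbour to the right is fully afloat. The f_ground and
# x0 arrays below are made-up 1-D stand-ins for one row of the CISM fields.
import numpy as np
f_ground = np.array([1.0, 1.0, 1.0, 0.4, 0.0, 0.0, 0.0])
x0 = 2000.0*np.arange(len(f_ground))
gl_points = [x0[i] for i in range(1, len(f_ground) - 1)
             if f_ground[i] > 0 and f_ground[i+1] == 0]
print('grounding line at x = ' + str(gl_points))    # -> [6000.0]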
+ os.chdir('../../..') diff --git a/tests/MISMIP/mismip.code/runCISM.cheyenne.template b/tests/MISMIP/mismip.code/runCISM.cheyenne.template new file mode 100755 index 00000000..1810a4f8 --- /dev/null +++ b/tests/MISMIP/mismip.code/runCISM.cheyenne.template @@ -0,0 +1,11 @@ +#!/bin/bash +# +#PBS -N MISMIP +#PBS -A P93300601 +#PBS -l walltime=00:01:00 +#PBS -q economy +#PBS -j oe +#PBS -m abe +#PBS -l select=1:ncpus=1:mpiprocs=1 + +mpiexec_mpt ./cism_driver mismip.config diff --git a/tests/MISMIP3d/.DS_Store b/tests/MISMIP3d/.DS_Store new file mode 100644 index 00000000..704602f7 Binary files /dev/null and b/tests/MISMIP3d/.DS_Store differ diff --git a/tests/MISMIP3d/mismip3d.code/.DS_Store b/tests/MISMIP3d/mismip3d.code/.DS_Store new file mode 100644 index 00000000..bb4b5fe8 Binary files /dev/null and b/tests/MISMIP3d/mismip3d.code/.DS_Store differ diff --git a/tests/MISMIP3d/mismip3d.code/README.mismip3d b/tests/MISMIP3d/mismip3d.code/README.mismip3d new file mode 100644 index 00000000..b7f38a36 --- /dev/null +++ b/tests/MISMIP3d/mismip3d.code/README.mismip3d @@ -0,0 +1,178 @@ +Instructions for setting up and running the MISMIP3d experiments with CISM. + +Note: part of the instruction assumes that you have access to the NCAR HPC + Cheyenne and have access to the LIWG HPC spending account. + +See this paper for details on MISMIP3d: +Pattyn,F., et al. (2013), Grounding-line migration in plan-view marine +ice-sheet models: results of the ice2sea MISMIP3d intercomparison, +J. Glaciol., 59, doi:10.3189/2013JoG12J129, 2013. + +The following experiments are included in MISMIP3d: + +Stnd Standard spinup of the model to steady state +P75S 100-year with basal friction perturbation +P75R Steady state run undoing basal friction perturbation + +Experiment P75S starts from the spun-up state at the end of experiment Stnd. +Reaching a stable state typically requires ~20,000 years. +Experiment P75R is similar to experiment Stnd. + +The files needed to run the experiment are located in directory ../tests/MISMMIP3d/mismip3d.code. +If you plan to run more than one test suite, you will likely want to keep this directory clean. Instead +create a new directory and make a fresh copy of the code for each test. For example, from ../tests/MISMMIP3d: + +> cp -rf mismip3d.code mismip3d.test1 +> cp -rf mismip3d.code mismip3d.test2 +etc. + +Go to the directory where you are setting up and running the experiments: + +> cd mismip3d.test1 + +This directory should contain several Python scripts, along with a template config file +(mismip3d.config.template) with default settings for MISMIP3d and a template run file +(runCISM.cheyenne.template) with setups to run MISMIP3d on the Cheyenne super computer. + +Put a copy or link of cism_driver in your directory. For example: + +> ln -s ../../../builds/mac-gnu/cism_driver/cism_driver cism_driver + +To create the grid topography and set the appropriate config options for each test, run the Python setup script: + +> python mismip3dSetup.py [arguments] + +This script has the following optional arguments: + -c FILE, --config=FILE + config file template [default: mismip3d.config.template] + -e EXECUTABLE, --exec=EXECUTABLE + Path to the CISM executable [default: cism_driver] + -x EXPT, --expt=EXPT MISMIP3d experiment(s) to set up [default: all] + -t TSTEP, --tstep=TSTEP + time step (yr) [default: 1] + -r RES, --res=RES horizontal grid resolution (m) [default: 2000] + -v VLEVEL, --vlevel=VLEVEL + no. 
of vertical levels [default: 3] + -a APPROXIMATION, --approx=APPROXIMATION + Stokes approximation (SSA, DIVA, BP) [default: DIVA] + -b BASALFRICTION, --basal=BASALFRICTION + Basal friction law (powerlaw, Schoof, Tsai) [default: powerlaw] + -y YEARSSPINUP, --year=YEARSPINUP + Length of spinup run (yr) [default: 20000] + +Notes on optional arguments: +- If setting up all the experiments at once, the -x argument is not needed. +- The following resolutions (m) are supported: 8000, 4000, 2000, 1000, 500, 250. +- The perturbation experiments (e.g., P75S) might require a shorter time step + than the Stnd experiment. For instance, experiment Stnd using DIVA at 2 km is typically stable with dt = 1.0 yr. + With identical config settings, P75S is unstable and requires a shorter time step, e.g. dt = 0.5 yr. + If so, you might need to manually reduce dt in the config file for a given experiment + (assuming you don't want to repeat experiment Stnd). +- Three Stokes approximations are supported for MISMIP3d: SSA, DIVA and BP. +- Three basal BCs are supported: + (1) a Weertman-type power law: 'powerlaw' + (2) a modified power law based on Tsai (2015): 'Tsai' + (3) a modified power law based on Schoof (2005) and Leguy et al. (2014): 'Schoof' + +The config template file includes sensible default values for a number of parameters +associated with the Stokes approximation, the basal friction law and the MISMIP3d basal friction +perturbation. To change any of these parameters, you need to edit the template file. + +If you set up all the experiments, you should have the following subdirectories after running +the setup script: +Stnd, P75S, P75R. + +After setup, you can run all the experiments by using the Python run script: + +> python mismip3dRun.py + +Here are the optional arguments for the run script: + -e EXECUTABLE, --exec=EXPT + Path to the CISM executable [default: ./cism_driver] + -x EXPT, --expt=experiment MISMIP3d experiment(s) to run [default: allIce] + -n NUMPROCS, --parallel=NUMPROCS + Number of processors: if specified then run in parallel + --job, if present at the command line, creates a batch job submission script for a + given experiment + --submit, if present at the command line, submit the job created with the --job option + +Experiments with coarse resolution (2km or coarser) can be run on your personal computer. + +By default this script will run all 3 experiments at once. You can also run one experiment +at a time by specifying its name (Stnd, P75S or P75R) using for example: + +> python mismip3dRun.py -x Stnd + +Experiments run at coarse resolutions (2km or coarser) can be run on a single processor +on your personal computer. You can also run all the experiments on several processor using: + +> python mismip3dRun.py -n 4 + +It can take a long time to run some experiment when running with a resolution of 2km and +higher. If for some reason you need to interrupt your run, you can restart it at a later +time. If the restart of a given experiment is present in the directory, the script +mismip3dRun.py will atumatically restart from that restart file. For example, continuiing +the Stnd experiment on 4 processors: + +> python mismip3dRun.py -x Stnd -n 4 + +Note that before restarting your run you might want to save a copy of the log file +'mismip3dStnd.config.log' under a different name as it will be overwritten on restart. +The output file, mismip3dStnd.out.nc, will be appended on restart and does not need to +be copied. Continue as needed until you have completed the full Stnd period. 
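To recap the basic workflow up to this point: a complete 2 km DIVA setup followed by a serial run
of the Stnd experiment might look like this (the option values shown are just one reasonable
choice, not required settings):

> python mismip3dSetup.py -r 2000 -a DIVA -b powerlaw -t 1
> python mismip3dRun.py -x Stnd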
+Alternatively, you may want to submit a batch job. If so, run the following: + +> python mismip3dRun.py -x Stnd --job + +This will setup a batch job script called runCISM.cheyenne.Stnd in the 'Stnd' directory using +the default setting from the mismip3dRun.py script. +NOTE: If you want to run each experiment by submitting of batch job script you will need to +setup each script one at a time, meaning that even with '-x all' option, the run script will +only setup a batch job script for the 'Stnd' experiment. + +Once a batch job script is created, you can edit the script and modify the time +and number of processors manually and submit the job yourself using the command: + +> qsub runCISM.cheyenne.Stnd + +You can also directly submit the batch job automatically by running: + +> python mismip3dRun.py -x Stnd --job --submit + +Before proceeding, you may want to confirm that the GL has reached a steady state. +One way to do this is to list f_ground (the grounded fraction, 0 < f_ground < 1) along the x +direction at y ~ 50 km. The value of f_ground in the last (partially) grounded cell should be +stable within some small tolerance, e.g. ~10^{-3}. + +The file mismipSdStnd.restart.nc in the Stnd directory will become the input file +for the P75S experiment. This file needs to be edited so that CISM's internal_time coordinate +for the final time slice is 0 and that the basal friction parameter gets adjusted for the perturbation +experiment by modifying the 'C_space_factor' parameter. This action is done automatically when running +with the script mismip3dRun.py and the experiment 'P75S' is present in the command line option (either +when using 'all', 'allP75' or 'P75S'). + +To verify that the time correction worked, you can do 'ncdump -v Stnd/internal_time mismip3dStnd.restart.nc'. +The final internal_time value should be 0, with negative values for earlier times. Similarly, you can do +the same for C_space_factor by making sure the values of the field are not all 1 or by plotting the field +using you preferred display software. + + +Once these experiments have run, the next step is to extract the grounding line i(GL) position. +This can be done by extracting information from the CISM output files by running another Python script: + +> python mismip3dWriteGL.py + +The default is to generate a netCDF file containing the 2 arrays xGL and yGL (the location of the +grounding line along the x and y coordinates) for all the experiments, but you can make a GL file +for a single experiment by adding an optional argument: + +> python mismip3dWriteGL.py -x P75S + +The resulting GL files in the various subdirectories are called [EXPT]_cism.nc, +where [EXPT] is Stnd, P75S, and P75R. + +Finally, you can create the summary grounding line plots in a file called mismip3dPlotGL.pdf: + +> python mismip3dPlotGL.py + +The plot shows the grounding line position at the end of each stage of the experiment. diff --git a/tests/MISMIP3d/mismip3d.code/mismip3d.config.template b/tests/MISMIP3d/mismip3d.code/mismip3d.config.template new file mode 100755 index 00000000..ce8e0607 --- /dev/null +++ b/tests/MISMIP3d/mismip3d.code/mismip3d.config.template @@ -0,0 +1,77 @@ +[MISMIP] + +[grid] +upn = 3 +ewn = 324 +nsn = 40 +dew = 2000 +dns = 2000 + +[time] +tstart = 0. +tend = 15000. 
+dt = 1 +#idiag = 5 +#jdiag = 5 + +[options] +dycore = 2 # 0 = glide, 1 = glam, 2=glissade +evolution = 3 # 3 = remapping, 4 = first order upwind +flow_law = 0 # 0 = constant, 2 = Paterson-Budd +temperature = 0 # 1 = prognostic, 3 = enthalpy +marine_margin = 4 # 4 = topographic threshold, 5 = calving mask +basal_mass_balance = 1 # 1 = include basal mass balance in continuity eqn +bmlt_float = 0 # 0 = no melting of floating ice +restart_extend_velo = 1 # 1 = write uvel/vvel on extended grid +# restart = 1 + +[ho_options] +which_ho_babc = 10 # 9 = Powerlaw, 11 = Coulomb w/const basal flwa, 12= Tsai +which_ho_effecpress = 3 # ocean connection +which_ho_flotation_function = 0 # 0 = f_pattyn, 1 = 1/f_pattyn +which_ho_efvs = 2 # 0 = constant, 2 = nonlinear eff. visc. w/ n=3 +which_ho_sparse = 3 # 1 = SLAP GMRES, 3 = Fortran PCG +which_ho_nonlinear = 0 # 0 = Picard, 1 = JFNK +which_ho_precond = 1 # 1 = diagonal precond, 2 = shallow ice preconditioner +which_ho_approx = 1 # 1 = SSA, 2 = Blatter-Pattyn, 3 = L1L2, 4 = DIVA +which_ho_gradient_margin = 2 +which_ho_gradient = 0 # 0 = centered, 1 = upstream +which_ho_assemble_beta = 1 # 0 = standard FE, 1 = local +which_ho_assemble_taud = 1 # 0 = standard FE, 1 = local +which_ho_ground = 1 # 0 = no GLP, 1 = GLP +which_ho_resid = 4 # 3 = absolute, 4 = relative +glissade_maxiter = 50 + +[parameters] +rhoi = 900.0 ; MISMIP ice density +rhoo = 1000.0 ; MISMIP ocean density +grav = 9.8 ; MISMIP gravitational acceleration +flow_factor = 1. +ice_limit = 1. ; min thickness (m) for dynamics +default_flwa = 3.1536e-18 +marine_limit = -890 ; depth past which ice is lost +coulomb_c = 0.2332 +coulomb_bump_max_slope = 0.5 ; maximum bed obstacle slope +coulomb_bump_wavelength = 2.0 ; bedrock wavelength +flwa_basal = 1.0e-16 +p_ocean_penetration = 0. ; p values for coulomb friction law + + +[CF default] +comment = created with mismip3dInit.py +title = MISMIP3d experiment using CISM2 + +[CF input] +name = mismip3dInit.nc +time = 1 + +[CF output] +variables = thk usurf uvel vvel velnorm temp beta_internal topg flwa effec_press beta resid_u resid_v f_ground f_flotation floating_mask grounded_mask usfc vsfc ubas vbas uvel_mean vvel_mean stagthk ivol imass_above_flotation iareag btractx btracty taudx taudy tau_xx +frequency = 1000 +name = mismip3d.out.nc + +[CF restart] +variables = restart +xtype = double +frequency = 2000 +name = mismip3d.restart.nc diff --git a/tests/MISMIP3d/mismip3d.code/mismip3dPlotGL.py b/tests/MISMIP3d/mismip3d.code/mismip3dPlotGL.py new file mode 100755 index 00000000..90c3128e --- /dev/null +++ b/tests/MISMIP3d/mismip3d.code/mismip3dPlotGL.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + +# This script plots the grounding line position for all 3 MISMIP3d experiments at the end of each experiment. +# This script requires the user to have run the python script "mismip3dWriteGL.py". + + +from netCDF4 import Dataset +import numpy as np +import matplotlib.pyplot as plt + +#################################### +# Function used later in the code # +#################################### + + +def glplot(ncfile, times, colora, label): + """ + add a plot of grounding line points to current axes. 
+ makes use of the numpy.ma.MaskedArray when reading xGL,yGL + """ + ncid = Dataset(ncfile, 'r') + time = ncid.variables["time"][:] + lxmax = 0.0 + lxmin = 800.0 + for i in range(0, len(times)): + seq = (time == times[i]) + xGL = ncid.variables["xGL"][:, seq]*1e-3 + lxmax = max(np.max(xGL), lxmax) + lxmin = min(np.min(xGL), lxmin) + yGL = ncid.variables["yGL"][:, seq]*1e-3 + plt.plot(xGL, yGL, 's', ms=3, mfc=colora[i], + mec=colora[i], label=label + ', t = ' + format(times[i])) + return lxmin, lxmax + + +######## +# Code # +######## + +model = '_cism' + + +plt.figure(figsize=(7, 7)) + +fileStd = 'Stnd/Stnd' + model + '.nc' +ncidStd = Dataset(fileStd,'r') +timeStd = ncidStd.variables["time"][:] +xmin, xmax = glplot(fileStd, [timeStd[-1]], ['black'], 'Stnd') +xminplot = xmin + +fileP75S = 'P75S/P75S' + model + '.nc' +ncidP75S = Dataset(fileP75S,'r') +timeP75S = ncidP75S.variables["time"][:] +xmin, xmax = glplot(fileP75S, [timeP75S[-1]], ['red'], 'P75S') +xminplot = xmin +xmaxplot = xmax + +fileP75R = 'P75R/P75R' + model + '.nc' +ncidP75R = Dataset(fileP75R,'r') +timeP75R = ncidP75R.variables["time"][:] +plt.xlim([xminplot-50.0, xmaxplot+50.0]) +xmin, xmax = glplot(fileP75R, [timeP75R[-1]], ['blue'], 'P75R') + +plt.legend(frameon=True, borderaxespad=0, loc='right') +plt.xlabel(r'$x$ (km)') +plt.ylabel(r'$y$ (km)') + +# Saving the figure. +plt.savefig("mismip3dPlotGL.pdf") diff --git a/tests/MISMIP3d/mismip3d.code/mismip3dRun.py b/tests/MISMIP3d/mismip3d.code/mismip3dRun.py new file mode 100755 index 00000000..d14bb14e --- /dev/null +++ b/tests/MISMIP3d/mismip3d.code/mismip3dRun.py @@ -0,0 +1,372 @@ +#!/usr/bin/env python + +# This script runs any or all of the various MISMIP3d experiments. +# See this paper for details about the MISMIP3d experiment: +# Pattyn et al., Grounding-line migration in plan-view marine ice-sheet models: results of the ice2sea MISMIP3d intercomparison, J. Glaciol., 59, doi:10.3189/2013JoG12J129, 2013. + +import sys, os +import shutil +import fileinput +import numpy as np +from optparse import OptionParser +from ConfigParser import ConfigParser +from netCDF4 import Dataset + + +################################### +# string constants and parameters # +################################### + +templateJobScriptFile = 'runCISM.cheyenne.template' # template job submission script + + +#################################### +# Function used later in the code # +#################################### + + +# This function returns the basal friction perturbation for experiment P75S. +# Note: x, y and xGL (the grounding line position) need to be in km. +def computeBasalPerturbation(x,y,xGL): + a = 0.75 # perturbation amplitude + xc = 150 # in km + yc = 10 # in km + yGL = 50 # in km (The function is center at the center line of the domain) + + # Calculating the spatial variation. + xpart = -(x-xGL)**2/(2*xc**2) + ypart = -(y-yGL)**2/(2*yc**2) + Cspatial = 1-a*np.exp(xpart + ypart) + return Cspatial + + +# This function is used to replace a string by another in a file. +def replace_string(file, oldString, newString): + # Replace the first instance of oldString with newString in the input file. + stringFound = False + for line in fileinput.FileInput(file, inplace=True): + if (not stringFound) and (oldString in line): + line = line.replace(oldString, newString) + stringFound = True + print line, + else: + print line, + + + +######## +# Code # +######## + + +# Parse options. 
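# Quick check of the P75S friction perturbation defined above: at the grounding line on the centre
# line (x = xGL, y = 50 km) the spatial factor bottoms out at 1 - a = 0.25, and it relaxes back to
# 1 far from the perturbation. The xGL value used here is an assumed position in km.
import numpy as np
def c_spatial(x, y, xGL, a=0.75, xc=150.0, yc=10.0, yGL=50.0):
    return 1.0 - a*np.exp(-(x - xGL)**2/(2*xc**2) - (y - yGL)**2/(2*yc**2))
xGL = 600.0                                            # assumed grounding-line position (km)
print('at the GL centre : %.2f' % c_spatial(xGL, 50.0, xGL))          # -> 0.25
print('600 km upstream  : %.2f' % c_spatial(xGL - 600.0, 50.0, xGL))  # -> ~1.00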
+optparser = OptionParser() + +optparser.add_option('-e', '--exec', dest='executable', type = 'string', default='./cism_driver', help='Path to the CISM executable') +optparser.add_option('-x', '--expt', dest='experiment', type='string', default='all', help='MISMIP3d experiment(s) to run', metavar='EXPT') +optparser.add_option('-n', '--parallel', dest='parallel', type='int', help='Number of processors: if specified then run in parallel', metavar='NUMPROCS') +optparser.add_option('--job', action="store_true", dest='jobscript', help='option to set up a script to run on HPC') +optparser.add_option('--submit', action="store_true", dest='jobsubmit', help='option to submit the run script directly after executing this script') + + +for option in optparser.option_list: + if option.default != ('NO', 'DEFAULT'): + option.help += (' ' if option.help else '') + '[default: %default]' +options, args = optparser.parse_args() + +if options.experiment == 'all': + experiments = ['Stnd', 'P75S', 'P75R'] + print 'Run all the MISMIP3d experiments' +elif options.experiment == 'allP75': + experiments = ['P75S', 'P75R'] + print 'Run all the MISMIP3d P75 (S) and (R) experiments' +elif options.experiment in ['Stnd', 'P75S', 'P75R']: + experiments = [options.experiment] + print 'Run experiment', options.experiment +else: + sys.exit('Please specify experiment(s) from this list: Stnd, P75S, P75R') + + +if (options.jobsubmit) and (not options.jobscript): + print 'You are trying to submit a batch job without the job option. No job will be submitted.' + +if (options.parallel is not None) and (options.jobscript): + print 'The number of processors will not be taken into account as you will be submitting a batch job script.' + +restart = 0 # constant meant to keep track of restart status. + +# Loop through experiments. +for expt in experiments: + + # Change to directory for this experiment. + os.chdir(expt) + print 'Changed directory to ', expt + + # Set and open config file for reading. + configfile = 'mismip3d' + expt + '.config' + config = ConfigParser() + config.read(configfile) + + inputFile = config.get('CF input', 'name') + outputFile = config.get('CF output', 'name') + outputFreq = config.get('CF output', 'frequency') + endTime = config.get('time', 'tend') + endTime = float(endTime) + + # Time buffer to remedy the restart pb when switching to different time step within a run. + buffer = float(outputFreq) - 1. + + # Read output file content information. + lastTimeEntry = 0 + lastEntryInternal = 0 + sizeTimeOutput = 0 + if os.path.exists(outputFile): + outputData = Dataset(outputFile,'r') + if outputData['time'].size != 0: + sizeTimeOutput = outputData['time'].size + lastTimeEntry = outputData['time'][-1] + + outputData.close() + + # Take action based on the stage of the experimental run. + if (lastTimeEntry >= (endTime - buffer)) and (sizeTimeOutput > 1): + # The run for this A value is done, moving to the next one. + pass + elif (lastTimeEntry < endTime) and (sizeTimeOutput > 1): + # The run for this A value is not done and needs to continue. + + # Make sure restart is set to 1 in config file. + config.set('options', 'restart', 1) + + # Write to config file. + with open(configfile, 'w') as newconfigfile: + config.write(newconfigfile) + + else: + print 'there is nothing to restart from, executing from the beginning' + + + # Make sure we are starting from the final time slice of the input file, + # (Except for Stnd, the input file is a restart file from a previous run.) 
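# Hedged sketch of the completion test used just above: a run is treated as finished when the last
# time written to the output file is within one output interval of tend, and as a restart candidate
# when some output exists but falls short of that. The numbers below are stand-ins.
def run_status(last_time, n_slices, tend, output_freq):
    buffer = float(output_freq) - 1.0
    if n_slices > 1 and last_time >= tend - buffer:
        return 'complete'
    if n_slices > 1 and last_time < tend:
        return 'restart'
    return 'fresh start'
print(run_status(20000.0, 21, 20000.0, 1000.0))   # -> complete
print(run_status(12000.0, 13, 20000.0, 1000.0))   # -> restart
print(run_status(0.0, 0, 20000.0, 1000.0))        # -> fresh start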
+ + + + if expt != 'Stnd': + + # Edit the 'time' entry in the [CF input] section. + inputfile = config.get('CF input', 'name') + try: + inputfileOut = config.get('CF input', 'nameout') + except: + pass # no nameout name in config file + + infile = Dataset(inputfile,'r') + ntime = len(infile.dimensions['time']) # reading the last time slice of the previous experiment + config.set('CF input', 'time', ntime) + print 'input file =', inputfile + print 'time slice =', ntime + + infile.close() + + + # For experiment P75S we need to induce the perturbation and modify C_space_factor. + if (expt == 'P75S') and (restart==0): + + # We need to calculate the grounding line location from the input file of the experiment at the center line location. + infileOut = Dataset(inputfileOut,'r') + ntimeout = len(infileOut.dimensions['time']) # reading last time slice of previous ext output file + f_ground = infileOut.variables['f_ground'][:,:,:] + infileOut.close() + + print 'opening inputfile ',inputfile + infile = Dataset(inputfile,'r+') + nx = len(infile.dimensions['x0']) + ny = len(infile.dimensions['y0']) + x0 = infile.variables['x0'][:] + y0 = infile.variables['y0'][:] + + ycenter = int(ny/2) + print 'ycenter=',ycenter + + for i in range(1,nx-1): + if (f_ground[ntimeout-1,ycenter,i] > 0) and (f_ground[ntimeout-1,ycenter,i+1] == 0): + xGL = x0[i] + print 'grounding line at ycenter =', xGL + + # We now write C_space factor to the file. + print 'writing peturbation to C_space_factor' + for i in range(nx): + for j in range(ny): + infile.variables['C_space_factor'][-1,j,i] = computeBasalPerturbation(x0[i]/1000,y0[j]/1000,xGL/1000) + + # We first check that the variable "internal_time" from the Stnd restart file has 0 for its last entry. + lastentry = infile['internal_time'][-1] + if lastentry != 0: + infile['internal_time'][:] = infile['internal_time'][:] - lastentry + print 'the new internal_time array is ', infile['internal_time'][:] + + infile.close() + + + # We need to reset C_space_factor to 1 and internal_time to 0. + if (expt == 'P75R') and (restart==0): + infile = Dataset(inputfile,'r+') + print 'opening inputfile ',inputfile + infile.variables['C_space_factor'][:,:,:] = 1 + + lastentry = infile['internal_time'][-1] + if lastentry != 0: + infile['internal_time'][:] = infile['internal_time'][:] - lastentry + print 'the new internal_time array is ', infile['internal_time'][:] + + infile.close() + + + # Write the modified config file. + with open(configfile, 'w') as newconfigfile: + config.write(newconfigfile) + + + + # Checking whether we want to submit a job script on HPC. + if options.jobscript: + + # New job submission script for current experiment. + newJobScriptFile = 'runCISM.cheyenne.' + expt + + # Make a copy of the job submission script. + try: + shutil.copy('../' + templateJobScriptFile, newJobScriptFile) + except: + sys.exit('Could not copy ' + templateJobScriptFile) + + print 'Created master job submission script ', newJobScriptFile, ' for experiment ', expt + + + # Modifying the walltime and computing power based on resolution (and Bill's experience). + walltimeLook = '#PBS -l walltime=00:01:00' + HPCpowerLook = '#PBS -l select=1:ncpus=1:mpiprocs=1' + + # Obtain the resolution from config file. 
+ res = config.get('grid', 'dew') + + if res=='8000.0': + if (expt=='Stnd') or (expt=='P75R'): + walltimeReplace = '#PBS -l walltime=00:05:00' + HPCpowerReplace = '#PBS -l select=1:ncpus=36:mpiprocs=36' + else: + walltimeReplace = '#PBS -l walltime=00:01:00' + HPCpowerReplace = '#PBS -l select=1:ncpus=36:mpiprocs=36' + + elif res=='4000.0': + if (expt=='Stnd') or (expt=='P75R'): + walltimeReplace = '#PBS -l walltime=00:30:00' + HPCpowerReplace = '#PBS -l select=1:ncpus=36:mpiprocs=36' + else: + walltimeReplace = '#PBS -l walltime=00:01:00' + HPCpowerReplace = '#PBS -l select=1:ncpus=36:mpiprocs=36' + + elif res=='2000.0': + if (expt=='Stnd') or (expt=='P75R'): + walltimeReplace = '#PBS -l walltime=02:00:00' + HPCpowerReplace = '#PBS -l select=2:ncpus=36:mpiprocs=36' + else: + walltimeReplace = '#PBS -l walltime=00:08:00' + HPCpowerReplace = '#PBS -l select=1:ncpus=36:mpiprocs=36' + + elif res=='1000.0': + if (expt=='Stnd') or (expt=='P75R'): + walltimeReplace = '#PBS -l walltime=06:00:00' + HPCpowerReplace = '#PBS -l select=3:ncpus=36:mpiprocs=36' + else: + walltimeReplace = '#PBS -l walltime=00:05:00' + HPCpowerReplace = '#PBS -l select=3:ncpus=36:mpiprocs=36' + + elif res=='500.0': + if (expt=='Stnd') or (expt=='P75R'): + walltimeReplace = '#PBS -l walltime=10:00:00' + HPCpowerReplace = '#PBS -l select=4:ncpus=36:mpiprocs=36' + else: + walltimeReplace = '#PBS -l walltime=00:05:00' + HPCpowerReplace = '#PBS -l select=4:ncpus=36:mpiprocs=36' + + elif res=='250.0': + if (expt=='Stnd') or (expt=='P75R'): + walltimeReplace = '#PBS -l walltime=10:00:00' + HPCpowerReplace = '#PBS -l select=6:ncpus=36:mpiprocs=36' + else: + walltimeReplace = '#PBS -l walltime=00:05:00' + HPCpowerReplace = '#PBS -l select=6:ncpus=36:mpiprocs=36' + + else: + sys.exit('You are running with a customized resolution or one that is not supported. Adjust the job script manually') + + + # Replacing computing resources to the job script file. + try: + linecheck = walltimeLook + replace_string(newJobScriptFile, walltimeLook, walltimeReplace) + linecheck = HPCpowerLook + replace_string(newJobScriptFile, HPCpowerLook, HPCpowerReplace) + except: + print 'this line does not exist: ', linecheck + + # Modifying the executable line based on config file name. + execlineLook = 'mpiexec_mpt ./cism_driver mismip3d.config' + execlineReplace = 'mpiexec_mpt ./cism_driver ' + configfile + try: + replace_string(newJobScriptFile, execlineLook, execlineReplace) + except: + print 'this line does not exist: ', execlineLook + + if options.jobsubmit: + # Submitting the job. + try: + submitscriptstring = 'qsub' + ' ' + newJobScriptFile + print 'submitting the batch job script ' + os.system(submitscriptstring) + except: + sys.exit('Could not submit job. Check if you are running this script on HPC Cheyenne.') + + sys.exit('Interrupting python script execution since job got submitted') + + else: + sys.exit('Interrupting python script execution since experiment will be run by submitting a batch job script') + + + + # Run CISM (only if no job script is being created and/or run). + + print 'parallel =', options.parallel + + if options.parallel == None: + # Perform a serial run. + os.system(options.executable + ' ' + configfile) + else: + # Perform a parallel run. + if options.parallel <= 0: + sys.exit( 'Error: Number of processors specified for parallel run is <=0.' ) + else: + # These calls to os.system will return the exit status: 0 for success (the command exists), some other integer for failure. 
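# Hedged alternative to the launcher detection that follows: instead of shelling out with 'which',
# the same search can be done in-process with distutils.spawn.find_executable (available in both
# Python 2 and 3). The candidate list mirrors the one used below; the processor count is assumed.
from distutils.spawn import find_executable
candidates = [('openmpirun', 'openmpirun -np {n}'),
              ('mpirun',     'mpirun -np {n}'),
              ('aprun',      'aprun -n {n}'),
              ('mpirun.lsf', 'mpirun.lsf')]          # mpirun.lsf needs no processor count
mpiexec = None
for name, template in candidates:
    if find_executable(name):
        mpiexec = template.replace('{n}', str(4))    # e.g. 4 processors
        break
print('launcher: ' + str(mpiexec))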
+ if os.system('which openmpirun > /dev/null') == 0: + mpiexec = 'openmpirun -np ' + str(options.parallel) + elif os.system('which mpirun > /dev/null') == 0: + mpiexec = 'mpirun -np ' + str(options.parallel) + elif os.system('which aprun > /dev/null') == 0: + mpiexec = 'aprun -n ' + str(options.parallel) + elif os.system('which mpirun.lsf > /dev/null') == 0: + # mpirun.lsf does NOT need the number of processors (options.parallel). + mpiexec = 'mpirun.lsf' + else: + sys.exit('Unable to execute parallel run. Please edit the script to use your MPI run command, or run manually with something like: mpirun -np 4 ./cism_driver mismip3dInit.config') + + runstring = mpiexec + ' ' + options.executable + ' ' + configfile + print 'Executing parallel run with: ' + runstring + '\n\n' + + # Here is where the parallel run is actually executed. + os.system(runstring) + + print 'Finished experiment', expt + + # Change to parent directory and continue. + os.chdir('..') diff --git a/tests/MISMIP3d/mismip3d.code/mismip3dSetup.py b/tests/MISMIP3d/mismip3d.code/mismip3dSetup.py new file mode 100755 index 00000000..e860a000 --- /dev/null +++ b/tests/MISMIP3d/mismip3d.code/mismip3dSetup.py @@ -0,0 +1,407 @@ +#!/usr/bin/env python + +# This script sets up initial conditions for the MISMIP3d experiment. +# See this paper for details about the MISMIP3d experiment: +# Pattyn et al., Grounding-line migration in plan-view marine ice-sheet models: results of the ice2sea MISMIP3d intercomparison, J. Glaciol., 59, doi:10.3189/2013JoG12J129, 2013. + +import sys, os +import shutil +import fileinput +import numpy as np +from netCDF4 import Dataset +from ConfigParser import ConfigParser +from optparse import OptionParser + + +############# +# Constants # +############# + + +xDomain = 800000.0 # domain x-dimension (m) +yDomain = 100000.0 # domain y-dimension (m) +initThickness = 500. # initial uniform ice thickness +accum = 0.5 # uniform accumulation (m/yr) + + +#################################### +# Function used later in the code # +#################################### + + +# The following function returns the linear bed topography as in Pattyn et al. (2013). +def computeBed(x): + # Input x has to be in km. + slope = -1. + b0 = -100. # m + eps_b = 1e-10 # small regularization number + abs_x = np.sqrt(x**2 + eps_b**2) # regularizing to avoid problems at the divide + + b = b0 + slope*abs_x + return b + + + +######## +# Code # +######## + + +# Parse options +optparser = OptionParser() + +optparser.add_option('-c', '--config', dest='configfile', type='string', default='mismip3d.config.template', help='config file name for setting up the MISMIP3d experiment', metavar='FILE') +optparser.add_option('-e', '--exec', dest='executable', default='cism_driver',help='Set path to the CISM executable', metavar='EXECUTABLE') +optparser.add_option('-x', '--expt', dest='experiment', type='string', default = 'all', help='MISMIP3d experiment to set up', metavar='EXPT') +optparser.add_option('-t', '--tstep', dest='timestep', type='float', default = 1, help='time step (yr)', metavar='TSTEP') +optparser.add_option('-r', '--res', dest='resolution', type='int', default = 2000, help='grid resolution (m)', metavar='RES') +optparser.add_option('-v', '--vlevel', dest='vertlevels', type='int', default = 3, help='no. 
of vertical levels', metavar='VLEVEL') +optparser.add_option('-a', '--approx', dest='approximation', type='string', default = 'DIVA', help='Stokes approximation (SSA, DIVA, BP)', metavar='APPROXIMATION') +optparser.add_option('-b', '--basal', dest='basalFriction', type='string', default='powerlaw', help='Basal friction law (powerlaw, schoof)', metavar='BASALFRICTION') +optparser.add_option('-y', '--year', dest='yearsStnd', type='int', default = 20000, help='Length of Stnd run (yr)', metavar='YEARSPINUP') + +optparser.add_option + +for option in optparser.option_list: + if option.default != ('NO', 'DEFAULT'): + option.help += (' ' if option.help else '') + '[default: %default]' +options, args = optparser.parse_args() + +if options.experiment == 'all': + experiments = ['Stnd','P75S','P75R'] + print 'Setting up all the MISMIP3d experiments' +elif options.experiment == 'allP75': + experiments = ['P75S','P75R'] + print 'Setting up P75S and P75R experiments' +elif options.experiment in ['Stnd','P75S','P75R']: + experiments = [options.experiment] + print 'Setting up experiment', options.experiment +else: + sys.exit('Please specify experiment(s) from this list: Stnd, P75S, P75R') + + +# If there is not already a link to cism_driver in the main directory, then make one. +# Each subdirectory will link to cism_driver in the main directory. +if options.executable != 'cism_driver': + # Remove the existing link, if present. + os.unlink('cism_driver') + # Make the new link. + os.symlink(options.executable, 'cism_driver') + + +# Set grid resolution. +if options.resolution == 8000: + dx = 8000.0 + dy = 8000.0 +elif options.resolution == 4000: + dx = 4000.0 + dy = 4000.0 +elif options.resolution == 2000: + dx = 2000.0 + dy = 2000.0 +elif options.resolution == 1000: + dx = 1000.0 + dy = 1000.0 +elif options.resolution == 500: + dx = 500.0 + dy = 500.0 +elif options.resolution == 250: + dx = 250.0 + dy = 250.0 +else: + sys.exit('Please choose from among the following resolutions (m): 8000, 4000, 2000, 1000, 500, 250') + +if options.vertlevels >= 2: + nz = options.vertlevels +else: + sys.exit('Error: must have at least 2 vertical levels') + +print 'MISMIP3d grid resolution (m) =', options.resolution +print 'Number of vertical levels =', nz + +# Set number of grid cells in each direction. +# Include a few extra cells in the x direction to handle boundary conditions. +nx = int(xDomain/dx) + 4 +ny = int(yDomain/dy) + +# Copy the config template to a new master config file. +masterConfigFile = 'mismip3d.config' + +try: + shutil.copy(options.configfile, masterConfigFile) +except OSError: + sys.exit('Could not copy', options.configfile) + +print 'Creating master config file', masterConfigFile + +# Read the master config file. +config = ConfigParser() +config.read(masterConfigFile) + +# Set the grid variables in the master config file. +config.set('grid', 'ewn', nx) +config.set('grid', 'nsn', ny) +config.set('grid', 'upn', nz) +config.set('grid', 'dew', dx) +config.set('grid', 'dns', dy) + +# Set the time step in the msster config file. +# Set the diagnostic interval to the same value (not necessary, but helpful for debugging). + +config.set('time', 'dt', options.timestep) +config.set('time', 'dt_diag', options.timestep) + +# Set Stokes approximation in config file. 
+if options.approximation == 'SSA': + which_ho_approx = 1 + print 'Using SSA velocity solver' +elif options.approximation == 'DIVA': + which_ho_approx = 4 + print 'Using DIVA velocity solver' +elif options.approximation == 'BP': + which_ho_approx = 2 + print 'Using Blatter-Pattyn velocity solver' +else: + which_ho_approx = 4 + print 'Defaulting to DIVA velocity solver' + +config.set('ho_options', 'which_ho_approx', which_ho_approx) + +# Config settings related to basal friction law. +# Note: Each of these friction laws is associate with certain basal parameters. +# The desired parameters should be set in the config template. +if options.basalFriction == 'powerlaw': + p_ocean_penetration = 0 + print 'Using basal friction power law (Schoof2007)' +elif options.basalFriction == 'schoof': + p_ocean_penetration = 1 + print 'Using Schoof 2005 basal friction law' +else: + p_ocean_penetration = 0 # is default + print 'Defaulting to Powerlaw basal friction law' + +config.set('parameters', 'p_ocean_penetration', p_ocean_penetration) + +# Config setting related to spin up time. +yearsStnd = float(options.yearsStnd) +config.set('time', 'tend', yearsStnd) + +# Write to the master config file. +with open(masterConfigFile, 'w') as configfile: + config.write(configfile) + + +print 'years of Stnd experiment =', yearsStnd +restartfreqStnd = min(1000.0, options.yearsStnd) # can be changed by the user if needed +print 'Stnd restart frequency =', restartfreqStnd + + +# Create the netCDF input file according to the information in the config file. +try: + parser = ConfigParser() + parser.read(options.configfile) + initfile = parser.get('CF input', 'name') +except OSError: + sys.exit('Error parsing ' + options.configfile) + +print 'Creating input file', initfile +ncfile = Dataset(initfile, 'w') + + +# Create dimensions. +# Note: (x0,y0) = staggered (velocity) grid. +# (x1,y1) = unstaggered (scalar) grid. +ncfile.createDimension('time',1) +ncfile.createDimension('x1',nx) +ncfile.createDimension('y1',ny) +ncfile.createDimension('x0',nx-1) +ncfile.createDimension('y0',ny-1) +ncfile.createDimension('level',nz) +ncfile.createDimension('staglevel',nz-1) +ncfile.createDimension('stagwbndlevel',nz+1) # similar to staglevel but including boundaries + +# Create time and grid variables. +# Note: (x1,y1) are loadable and need to be in the input file. +# (x0,y0) are not loadable, but are derived in CISM from (x1,y1). May not be needed. +ncfile.createVariable('time','f4',('time',))[:] = [0] +x1 = ncfile.createVariable('x1', 'f4', ('x1',)) +y1 = ncfile.createVariable('y1', 'f4', ('y1',)) +x0 = ncfile.createVariable('x0', 'f4', ('x0',)) +y0 = ncfile.createVariable('y0', 'f4', ('y0',)) + +# Create 2D input fields. +thk = ncfile.createVariable('thk', 'f4', ('time','y1','x1')) +topg = ncfile.createVariable('topg', 'f4', ('time','y1','x1')) +acab = ncfile.createVariable('acab', 'f4', ('time','y1','x1')) +uvel = ncfile.createVariable('uvel', 'f4', ('time','level','y0','x0')) +vvel = ncfile.createVariable('vvel', 'f4', ('time','level','y0','x0')) +C_space_factor = ncfile.createVariable('C_space_factor','f4',('time','y1','x1')) +kinbcmask = ncfile.createVariable('kinbcmask', 'i4', ('time','y0','x0')) # kinematic BC mask + +# Compute x and y on each grid. +# Note: (1) The x origin is placed at the center of the second cell from the left. +# This assumes that kinbcmask = 1 at the first vertex from the left. +# Thus the left edge of the grid has x = -3*dx/2. +# (2) The y origin is placed at the bottom edge of the CISM grid. 
+# The line of central symmetry runs along cell edges at y = 40 km. + +x = dx*np.arange(nx,dtype='float32') # x = 0, dx, 2*dx, etc. +y = dy*np.arange(ny,dtype='float32') # y = 0, dy, 2*dy, etc. + +x1[:] = x[:] - dx # x1 = -dx, 0, dx, ..., (nx-2)*dx - dx/2 +y1[:] = y[:] + dy/2. # y1 = dy/2, 3*dy/2, ..., (ny-1)*dy - dy/2 + +x0[:] = x[:-1] - dx/2. # x0 = -dx/2, dx/2, 3*dx/2, ..., (nx-2)*dx +y0[:] = y[:-1] + dy # y0 = dy, 2*dy, ..., (ny-1)*dy + + +# Set bed topography. +for i in range(nx): + topg[:,:,i] = computeBed(x1[i]/1.e3) # x1 is in [m] and we input in [km] + +# Set initial thickness. +thk[0,:,:] = initThickness + +# Set the surface mass balance. +acab[:,:,:] = accum + +# Set initial velocity to zero (probably not necessary). +uvel[:,:,:,:] = 0. +vvel[:,:,:,:] = 0. + +# Set kinematic velocity mask. +# Where kinbcmask = 1, the velocity is fixed at its initial value. +# Note: Although there is no ice on the RHS of the domain, we need kinbcmask =1 there +# to preserve symmetry with the LHS (since east-west BCs are formally periodic). +kinbcmask[:,:,:] = 0 # initialize to 0 everywhere +kinbcmask[:,:,0] = 1 # mask out left-most column +kinbcmask[:,:,-1] = 1 # mask out right-most column + +# Set the spatially variable basal friction coefficient. +C_space_factor[0,:,:] = 1. + +ncfile.close() + +print 'Experiments:', experiments + +# Loop through experiments. +for expt in experiments: + + # For each experiment, make a suitable config file and set up a subdirectory. + print 'Creating config file for experiment', expt + + # Make a copy of the mismip3dInit config file. + # Below, this copy will be tailored for the chosen MISMIP3d experiment, + # without changing the settings used for the Stnd experiment. + + newConfigFile = 'mismip3d' + expt + '.config' + print 'Config file for this experiment:', newConfigFile + shutil.copy(masterConfigFile, newConfigFile) + + # Read the new config file. + config = ConfigParser() + config.read(newConfigFile) + +# Experiment-specific settings. + + if expt == 'Stnd': + tstart = 0.0 + tend = yearsStnd + inputdir = '../' + inputfile = initfile + inputslice = 1 + outputfreq = min(1000.0, restartfreqStnd) + restartfreq = restartfreqStnd + elif expt == 'P75S': + tstart = 0.0 + tend = 100.0 + inputdir = '../Stnd/' + inputfile = 'mismip3dStnd.restart.nc' + inputfileOut = 'mismip3dStnd.out.nc' # we will need f_ground from this file to calculate the GL (not in the restart file) + inputslice = int(yearsStnd/restartfreqStnd) + outputfreq = 10.0 + restartfreq = 100.0 + elif expt == 'P75R': + tstart = 0.0 + tend = yearsStnd + inputdir = '../P75S/' + inputfile = 'mismip3dP75S.restart.nc' + inputslice = 1 + outputfreq = 1000.0 + restartfreq = 1000.0 + + # Set the start and end times + config.set('time', 'tstart', tstart) + config.set('time', 'tend', tend) + + # Change the default comment + comment = 'MISMIP3d experiment ' + expt + config.set('CF default', 'comment', comment) + + # Set input file and time slice. + # Note: This method may not be robust for Stnd and P75R runs that start and restart. + # For this reason, the script mismip3dRun.py makes sure the 'time' entry + # in [CF input] corresponds to the final time slice. + print 'Input file:', inputfile + if expt=='P75S': + config.set('CF input', 'nameOut', inputfileOut) + + config.set('CF input', 'name', inputfile) + config.set('CF input', 'time', inputslice) + + # Set the output filename in the section [CF output1]. 
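Looking back at the (x0, y0)/(x1, y1) offsets defined earlier in this script, the comments are easier to follow with concrete numbers. The sketch below is a tiny worked example only; nx, ny, dx and dy are illustrative values, not the ones chosen by the script.

```python
# Tiny worked example of the staggered/unstaggered coordinate offsets used in this script.
# nx, ny, dx, dy are illustrative only.
import numpy as np

nx, ny = 5, 4
dx = dy = 2000.0

x = dx*np.arange(nx, dtype='float32')   # [0, 2000, 4000, 6000, 8000]
y = dy*np.arange(ny, dtype='float32')   # [0, 2000, 4000, 6000]

x1 = x - dx           # scalar (cell-center) grid in x: [-2000, 0, 2000, 4000, 6000]
y1 = y + dy/2.        # scalar (cell-center) grid in y: [1000, 3000, 5000, 7000]
x0 = x[:-1] - dx/2.   # velocity (vertex) grid in x:    [-1000, 1000, 3000, 5000]
y0 = y[:-1] + dy      # velocity (vertex) grid in y:    [2000, 4000, 6000]

print(list(x1))       # left edge of the grid then sits at x1[0] - dx/2 = -3*dx/2, as noted above
```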
+ outfilename = 'mismip3d' + expt + '.out.nc' + print 'Output file:', outfilename + config.set('CF output', 'name', outfilename) + config.set('CF output', 'frequency', outputfreq) + + # Set restart info. This should be in the section called '[CF output]'. + # Note: Each experiment (except Stnd) writes out only one time slice to a restart file. + restartfilename = 'mismip3d' + expt + '.restart.nc' + print 'Restart file:', restartfilename + config.set('CF restart', 'name', restartfilename) + config.set('CF restart', 'variables', 'restart') + config.set('CF restart', 'frequency', restartfreq) + config.set('CF restart', 'start', tstart + restartfreq) + config.set('CF restart', 'xtype', 'double') + + # Write to the new config file. + with open(newConfigFile, 'w') as configfile: + config.write(configfile) + + # Create a subdirectory named for the experiment, and stage the run there. + try: + os.mkdir(expt) + print 'Created subdirectory', expt + except OSError: + print 'Subdirectory', expt, 'already exists' + + os.chdir(expt) + + # Move the config file from the parent directory to the subdirectory. + shutil.move('../' + newConfigFile, newConfigFile) + print 'Created config file', newConfigFile + + # Link to the cism_driver executable in the parent directory. + try: + os.symlink('../cism_driver', 'cism_driver') + except OSError: + pass # link to cism_driver already exists + + # Link to the input file in the appropriate directory. + try: + os.symlink(inputdir + inputfile, inputfile) + except OSError: + pass # link to the input file already exists + + if expt=='P75S': + try: + os.symlink(inputdir + inputfileOut, inputfileOut) + except OSError: + pass # link to the file already exist + + + # Go back to the parent directory and continue. + os.chdir('..') + + diff --git a/tests/MISMIP3d/mismip3d.code/mismip3dWriteGL.py b/tests/MISMIP3d/mismip3d.code/mismip3dWriteGL.py new file mode 100755 index 00000000..c76b7d57 --- /dev/null +++ b/tests/MISMIP3d/mismip3d.code/mismip3dWriteGL.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +This script reads a CISM output file from a MISMIP3d experiment, and make a netCDF output +file. + +The input file should include the following: + Coordinates: x0, y0, x1, y1, time + Global scalars: ivol, ice_mass_above_flotation, iareag + 2D fields: f_ground, stagthck, usfc, vsfc, ubas, vbas, uvel_mean, vvel_mean +The f_ground field is not part of the new output file, but is needed to identify +vertices lying on the grounding line (GL). +""" + +import os, sys +import numpy as np +from netCDF4 import Dataset +import matplotlib.pyplot as plt + + +######## +# Code # +######## + + +# Set ice density prescribed for MISMIP3d. +rhoi = 900. # kg/m^3 +model = '_cism' + +# Parse options +from optparse import OptionParser +parser = OptionParser() + +parser.add_option("-x", "--expt", dest="experiment", type='string', default='all', help="Name of MISMIP3d experiment(s)", metavar="EXPT") +parser.add_option("-f", "--file", dest="filename", type='string', help="CISM output file from MISMIP3d run", metavar="FILE") + +for option in parser.option_list: + if option.default != ("NO", "DEFAULT"): + option.help += (" " if option.help else "") + "[default: %default]" + +options, args = parser.parse_args() + +if options.experiment: + if options.experiment == 'all': + # Read output data for all experiments. + experiments = ['Stnd', 'P75S', 'P75R'] + else: + experiments = [options.experiment] +else: + sys.exit('Error: No experiment specified. 
Please specify experiment(s) with the -x option') + + + +# Looping through the experiments. +for expt in experiments: + + # Change to the subdirectory for this experiment. + os.chdir(expt) + + if options.filename: + file = options.filename + else: + file = 'mismip3d' + expt + '.out.nc' + + print 'Creating a MISMIP3d grounding-line file for experiment', expt + print 'Attempting to read CISM file', file + + # Open the CISM output file, get needed dimensions. + # Note: (x0,y0) are dimensions of the staggered (velocity) grid. + # (x1,y1) are dimensions of the unstaggered (scalar) grid. + + try: + cismfile = Dataset(file,'r') + except: + sys.exit('Error: Unable to open CISM file') + + try: + nTime = len(cismfile.dimensions['time']) + nx = len(cismfile.dimensions['x0']) + ny = len(cismfile.dimensions['y0']) + except: + sys.exit('Error: The CISM file is missing needed dimensions') + + # Initialize some variables and arrays. + # Read in some fields needed to compute diagnostics similar to the one of the MISMIP+ experiment. + + print 'Reading in CISM variables...' + + try: + # These array names are somewhat arbitrary. Sometime I have used CISM names; + # sometimes I have used a different name that is closer (but not identical) + # to the MISMIP3d field names. + + # Read horizontal and time coordinates. + # Note: (x0,yo) are vertex coordinates; (x1,y1) are cell center coordinates. + x0 = cismfile.variables['x0'][:] + y0 = cismfile.variables['y0'][:] + x1 = cismfile.variables['x1'][:] + y1 = cismfile.variables['y1'][:] + t = cismfile.variables['time'][:] + + # Read scalars (function of time only). + ivol = cismfile.variables['ivol'][:] + imass_above_flotation = cismfile.variables['imass_above_flotation'][:] + iareag = cismfile.variables['iareag'][:] + + # Read 2D fields (functions of x, y and time). + # Note: All these fields are located at vertices. + f_ground = cismfile.variables['f_ground'][:,:,:] + iceThickness = cismfile.variables['stagthk'][:,:,:] + uSurface = cismfile.variables['usfc'][:,:,:] + vSurface = cismfile.variables['vsfc'][:,:,:] + uBase = cismfile.variables['ubas'][:,:,:] + vBase = cismfile.variables['vbas'][:,:,:] + uMean = cismfile.variables['uvel_mean'][:,:,:] + vMean = cismfile.variables['vvel_mean'][:,:,:] + except: + sys.exit('Error: The output file is missing needed fields.') + + # Create the GL output file. + + outfilename = expt + model + '.nc' + ncfile = Dataset(outfilename, 'w') + print 'Created output file', outfilename + + # Set dimensions. + glptdim = ncfile.createDimension('nPointGL', size = None) + timedim = ncfile.createDimension('nTime', size = nTime) + + # Create variables. 
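One detail worth noting before the variable definitions below: nPointGL is created with size=None, i.e. an unlimited netCDF dimension, so the per-time-slice loop further down can simply assign xGL[m, iTime] for increasing m and the dimension grows as needed. The standalone sketch below illustrates that behavior; the file name and sizes are illustrative only.

```python
# Minimal sketch of writing along an unlimited netCDF dimension, as done for nPointGL below.
# File name and sizes are illustrative only.
from netCDF4 import Dataset

nc = Dataset('gl_sketch.nc', 'w')
nc.createDimension('nPointGL', size=None)     # unlimited; grows as indices are written
nc.createDimension('nTime', size=3)
xGL = nc.createVariable('xGL', 'f4', ('nPointGL', 'nTime'))

for iTime in range(3):
    nGL = 2 + iTime                           # pretend a different GL point count each slice
    for m in range(nGL):
        xGL[m, iTime] = 1000.0*m              # writing past the current size extends nPointGL

print(len(nc.dimensions['nPointGL']))         # 4 = largest index written + 1
nc.close()
```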
+ xGL = ncfile.createVariable('xGL', 'f4', ('nPointGL', 'nTime')) + yGL = ncfile.createVariable('yGL', 'f4', ('nPointGL', 'nTime')) + time = ncfile.createVariable('time', 'f4', ('nTime')) + + iceVolume = ncfile.createVariable('iceVolume', 'f4', ('nTime')) + iceVAF = ncfile.createVariable('iceVAF', 'f4', ('nTime')) + groundedArea = ncfile.createVariable('groundedArea', 'f4', ('nTime')) + + iceThicknessGL = ncfile.createVariable('iceThicknessGL', 'f4', ('nPointGL', 'nTime')) + uSurfaceGL = ncfile.createVariable('uSurfaceGL', 'f4', ('nPointGL', 'nTime')) + vSurfaceGL = ncfile.createVariable('vSurfaceGL', 'f4', ('nPointGL', 'nTime')) + uBaseGL = ncfile.createVariable('uBaseGL', 'f4', ('nPointGL', 'nTime')) + vBaseGL = ncfile.createVariable('vBaseGL', 'f4', ('nPointGL', 'nTime')) + uMeanGL = ncfile.createVariable('uMeanGL', 'f4', ('nPointGL', 'nTime')) + vMeanGL = ncfile.createVariable('vMeanGL', 'f4', ('nPointGL', 'nTime')) + + # Loop over time slices and fill variables. + print 'Adding grounding-line variables to output file...' + + for iTime in range(nTime): + + print ' Time slice:', iTime + + # Add the scalar data for this time slice. + time[iTime] = t[iTime] + iceVolume[iTime] = ivol[iTime] + iceVAF[iTime] = imass_above_flotation[iTime]/rhoi + groundedArea[iTime] = iareag[iTime] + + # Loop over the horizontal grid and identify GL points. + # For each GL point, add the desired data to the arrays. + eps = 1.0e-6 + nGL = 0 + + + # A cell hosts a grounding line if at least one neighboring point has a zero f_ground value. + for i in range(1,nx-1): + for j in range(1,ny-1): + if (f_ground[iTime,j,i] > 0) and ((f_ground[iTime,j-1,i-1] == 0) or (f_ground[iTime,j-1,i] == 0) or (f_ground[iTime,j-1,i+1] == 0) or (f_ground[iTime,j,i-1] == 0) or (f_ground[iTime,j,i+1] == 0) or (f_ground[iTime,j+1,i-1] == 0) or (f_ground[iTime,j+1,i] == 0) or (f_ground[iTime,j+1,i+1] == 0)): + nGL = nGL + 1 + m = nGL - 1 # indexing starts at 0 + xGL[m,iTime] = x0[i] + yGL[m,iTime] = y0[j] + + iceThicknessGL[m,iTime] = iceThickness[iTime,j,i] + uSurfaceGL[m,iTime] = uSurface[iTime,j,i] + vSurfaceGL[m,iTime] = vSurface[iTime,j,i] + uBaseGL[m,iTime] = uBase[iTime,j,i] + vBaseGL[m,iTime] = vBase[iTime,j,i] + uMeanGL[m,iTime] = uMean[iTime,j,i] + vMeanGL[m,iTime] = vMean[iTime,j,i] + + + + ncfile.close() + + # Change to the parent directory. + os.chdir('..') diff --git a/tests/MISMIP3d/mismip3d.code/runCISM.cheyenne.template b/tests/MISMIP3d/mismip3d.code/runCISM.cheyenne.template new file mode 100755 index 00000000..2b8a4847 --- /dev/null +++ b/tests/MISMIP3d/mismip3d.code/runCISM.cheyenne.template @@ -0,0 +1,11 @@ +#!/bin/bash +# +#PBS -N MISMIP3d +#PBS -A P93300601 +#PBS -l walltime=00:01:00 +#PBS -q economy +#PBS -j oe +#PBS -m abe +#PBS -l select=1:ncpus=1:mpiprocs=1 + +mpiexec_mpt ./cism_driver mismip3d.config diff --git a/tests/MISOMIP/mismip+/README.mismip+ b/tests/MISOMIP/mismip+/README.mismip+ index 83fbc044..7f890e97 100644 --- a/tests/MISOMIP/mismip+/README.mismip+ +++ b/tests/MISOMIP/mismip+/README.mismip+ @@ -76,7 +76,10 @@ Notes on optional arguments: than the Spinup experiment. For instance, a spin-up using DIVA at 2 km is typically stable with dt = 1.0 yr. With identical config settings, Ice1r is unstable and requires a shorter time step, e.g. dt = 0.5 yr. If so, you might need to manually reduce dt in the config file for a given experiment - (assuming you don't want to repeat the Spinup). + (assuming you don't want to repeat the Spinup). 
Alternatively you might choose to run all Ice1 + or Ice2 experiments with this shorter time step. In that case simply setup those experiments + separately from the Spinup and Ice0 experiment using (for res = 2km and Ice1 only): + > python mismip+Setup -r 2000 -t 0.5 -x Ice1 - Three Stokes approximations are supported for MISMIP+: SSA, DIVA and BP. - Three basal BCs are supported: (1) a Weertman-type power law: 'powerlaw' @@ -91,41 +94,66 @@ If you set up all the experiments, you should have the following subdirectories the setup script: Spinup, Ice0, Ice1r, Ice1ra, Ice1rr, Ice1rax, Ice1rrx, Ice2r, Ice2ra, Ice2rr, Ice2rax, Ice2rrx. -After setup, you should first run the Spinup experiment. You can do this using a Python run script. +After setup, you should first run the Spinup experiment. You can do this using the Python run +script: mismip+Run.py. To run the Spinup experiment on 16 processors: -> python mismip_Run.py -x Spinup -n 16 +> python mismip+Run.py -x Spinup -n 16 Here are the optional arguments for the run script: -e EXECUTABLE, --exec=EXECUTABLE Path to the CISM executable [default: ./cism_driver] - -x EXPT, --expt=EXPT MISMIP+ experiment(s) to run [default: allIce] + -x EXPT, --expt=EXPT MISMIP+ experiment(s) to run [default: all] -n NUMPROCS, --parallel=NUMPROCS Number of processors: if specified then run in parallel -Alternatively, you may want to submit a batch job. If so, then change to the subdirectory -('cd Spinup'), and launch your batch job from the subdirectory. +If the run is interrupted for any reason and you need to restart it in order to finish your +experiment, simply relaunch your experiment using the same command line used originally. Using +the same previous example of the Spinup run: -If the spin-up cannot be completed with one job submission, you can finish it by initializing -subsequent runs from the restart file, mismip+Spinup.restart.nc. To do this, simply edit -the config file (mismip+Spinup.config) by uncommenting the line 'restart = 1' under [options]. -(You may want to save a copy of the log file under a different name so that it will not -be overwritten on restart. The output file, mismip+Spinup.out.nc, will be appended on restart -and does not need to be copied.) +> python mismip+Run.py -x Spinup -n 16 -Then launch the restart run. For example, to submit an mpi run on 16 processors: +The script will automatically read the restart file name, modify the config file and restart +the run from the latest time saved in the restart file. -> mpirun -n 16 cism_driver mismip+Spinup.config.restart Continue as needed until you have completed the spin-up period. +Alternatively, you may want to submit a batch job. If so, do the following: +1/ Change to the subdirectory of the experiment you want to run. For example, if you'd like + to submit a job for the "Spinup" experiment, type: "cd Spinup". +2/ Copy the script runCISM.cheyenne.template into the directory. +3/ Modify the script: + a/ Adjust the number of nodes and processors. + b/ Adjust the time you'd like to run on HPC. + c/ Modify the name of the config file to reflect the one in your current directory. +4/ launch the job from your subdirectory by typing at the command line: + > qsub runCISM.cheyenne.template + + Before proceeding, you may want to confirm that the GL has reached a steady state. One way to do this is to list f_ground (the grounded fraction, 0 < f_ground < 1) along the x direction at y ~ 40 km. 
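A sketch of that f_ground check is shown here; the file path follows the Spinup setup in this patch, and it assumes f_ground is among the output variables and that at least two time slices have been written.

```python
# Sketch of the f_ground steadiness check described here: list the grounded fraction along x
# at y ~ 40 km for the last two time slices of the Spinup output.
import numpy as np
from netCDF4 import Dataset

nc = Dataset('Spinup/mismip+Spinup.out.nc', 'r')
y0 = nc.variables['y0'][:]
j = int(np.argmin(np.abs(y0 - 40.e3)))            # row of vertices closest to y = 40 km
for it in (-2, -1):                                # compare the last two time slices
    row = nc.variables['f_ground'][it, j, :]
    gl = np.where((row > 0.) & (row < 1.))[0]      # partially grounded (GL) cells
    print(row[gl])                                 # should agree to within ~1e-3 at steady state
nc.close()
```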
The value of f_ground in the last (partially) grounded cell should be stable within some small tolerance, e.g. ~10^{-3}. +If the spin-up cannot be completed with one job submission, you can finish it by initializing +subsequent runs from the restart file, mismip+Spinup.restart.nc. To do this, simply edit +the config file (mismip+Spinup.config) by uncommenting the line 'restart = 1' under [options]. +If this option is not present, simply add it under this configuration segment. +(You may want to save a copy of the log file under a different name so that it will not +be overwritten on restart. The output file, mismip+Spinup.out.nc, will be appended on restart +and does not need to be copied.) + +Continue as needed until you have completed the spin-up period. + The file mismip+Spinup.restart.nc in the Spinup directory will become the input file -for the Ice0, Ice1r and Ice2r experiments. However, you will need to edit it so that -CISM's internal_time coordinate for the final time slice is 0. +for the Ice0, Ice1r and Ice2r experiments. However, it needs to edited so that CISM's +internal_time coordinate for the final time slice is 0. If you are running interactively, +the script mismip+Run.py will do this automatically and you simply need to launch your +next experiment. +If you are not running interactively and submitting jobs, you will need to edit it yourself. +You can check on the maximum internal_time value by typing at the command line: + +> ncdump -v internal_time mismip+Spinup.restart.nc Suppose the final time is 20000 years. You can set internal_time = 0 with a one-line NCO command: @@ -133,7 +161,7 @@ Suppose the final time is 20000 years. You can set internal_time = 0 with a one- You may have to precede this command with 'module load nco' if NCO is not loaded automatically. -To verify that the time correction worked, you can do 'ncdump -v internal_time outputfilename.nc'. +To verify that the time correction worked, you can do 'ncdump -v internal_time out.nc'. The final internal_time value should be 0, with negative values for earlier times. Then you can overwrite the original restart file: diff --git a/tests/MISOMIP/mismip+/mismip+.config.template b/tests/MISOMIP/mismip+/mismip+.config.template index 4d8f6808..60678747 100755 --- a/tests/MISOMIP/mismip+/mismip+.config.template +++ b/tests/MISOMIP/mismip+/mismip+.config.template @@ -1,4 +1,4 @@ -[DOME-TEST] +[MISMIP+] [grid] upn = 3 @@ -12,8 +12,8 @@ tstart = 0. tend = 20000.0 dt = 0.5 dt_diag = 0.5 -idiag = 225 -jdiag = 20 +#idiag = 5 +#jdiag = 5 [options] dycore = 2 # 0 = glide, 1 = glam, 2 = glissade @@ -27,20 +27,20 @@ restart_extend_velo = 1 # 1 = write uvel/vvel on extended grid #restart = 1 # uncomment for a restart run [ho_options] -which_ho_babc = 11 # 11 = Coulomb w/const basal flwa, 12 = Tsai law, 9 = power law -which_ho_effecpress = 3 # 3 = ocean connection -which_ho_efvs = 2 # 0 = constant, 2 = nonlinear eff. visc. 
w/ n=3 -which_ho_sparse = 3 # 1 = SLAP GMRES, 3 = Fortran PCG, 4 = Trilinos for linear solver -which_ho_nonlinear = 0 # 0 = Picard, 1 = JFNK -which_ho_approx = 4 # 1 = SSA, 2 = Blatter-Pattyn, 3 = L1L2, 4 = DIVA -which_ho_precond = 1 # 1 = diagonal precond, 2 = shallow ice preconditioner -which_ho_gradient_margin = 2 # 2 = marine BC -which_ho_gradient = 0 #0 = centered, 1 = upstream -which_ho_assemble_beta = 1 #0 = standard FE, 1 = local -which_ho_assemble_taud = 1 #0 = standard FE, 1 = local -which_ho_assemble_bfric = 1 #0 = standard FE, 1 = local -which_ho_ground = 1 #0 = no GLP, 1 = GLP -which_ho_resid = 4 #3 = absolute, 4 = relative +which_ho_babc = 11 # 11 = Coulomb w/const basal flwa, 12 = Tsai law, 9 = power law +which_ho_effecpress = 3 # 3 = ocean connection +which_ho_efvs = 2 # 0 = constant, 2 = nonlinear eff. visc. w/ n=3 +which_ho_sparse = 3 # 1 = SLAP GMRES, 3 = Fortran PCG, 4 = Trilinos for linear solver +which_ho_nonlinear = 0 # 0 = Picard, 1 = JFNK +which_ho_approx = 4 # 1 = SSA, 2 = Blatter-Pattyn, 3 = L1L2, 4 = DIVA +which_ho_precond = 1 # 1 = diagonal precond, 2 = shallow ice preconditioner +which_ho_gradient_margin = 2 # 2 = marine BC +which_ho_gradient = 0 # 0 = centered, 1 = upstream +which_ho_assemble_beta = 1 # 0 = standard FE, 1 = local +which_ho_assemble_taud = 1 # 0 = standard FE, 1 = local +which_ho_assemble_bfric = 1 # 0 = standard FE, 1 = local +which_ho_ground = 1 # 0 = no GLP, 1 = GLP +which_ho_resid = 4 # 3 = absolute, 4 = relative which_ho_flotation_function = 2 # 0 = f_pattyn, 2 = linear glissade_maxiter = 50 diff --git a/tests/MISOMIP/mismip+/mismip+PlotGL.py b/tests/MISOMIP/mismip+/mismip+PlotGL.py index aed88f3c..6e8340bf 100755 --- a/tests/MISOMIP/mismip+/mismip+PlotGL.py +++ b/tests/MISOMIP/mismip+/mismip+PlotGL.py @@ -43,7 +43,9 @@ def garplot(ncfile, label, color, marker): plt.plot(tscale(time), gar, 'o-', mfc=color, color='black', label=label, marker=marker) ncid.close() - return np.max(gar) + lmax = max(gar) + lmin = min(gar) + return lmax, lmin #TODO - Modify so the label is optional def glplot(ncfile, times, colora, label): @@ -73,7 +75,7 @@ def glplot(ncfile, times, colora, label): xmaxplot = xmax # xmax based on Ice1r at t = 0 xmin, xmax = glplot('Ice1rr/Ice1rr' + model + '.nc', [200], ['yellow'], 'Ice1rr') xminplot = xmin # xmin based on Ice1rr at t = 200 -plt.xlim([xminplot-20.0, xmaxplot+50.0]) +plt.xlim([xminplot-50.0, xmaxplot+50.0]) xmin, xmax = glplot('Ice1ra/Ice1ra' + model + '.nc', [200], ['orange'], 'Ice1ra') xmin, xmax = glplot('Ice2r/Ice2r' + model + '.nc', [100], ['blue'], 'Ice2r') xmin, xmax = glplot('Ice2rr/Ice2rr' + model + '.nc', [200], ['pink'], 'Ice2rr') @@ -89,7 +91,6 @@ def glplot(ncfile, times, colora, label): plt.plot(tscale([100, 100]), [0, 100], color="grey") plt.plot(tscale([200, 200]), [0, 100], color="grey") plt.xlim(tscale([0, 1000])) -plt.ylim([25, 40]) xtlocs = tscale([0, 10, 50, 100, 200, 400, 800]) plt.xticks(xtlocs, intscale(xtlocs)) @@ -97,21 +98,25 @@ def glplot(ncfile, times, colora, label): plt.ylabel(r'Grounded area (1000 km$^3$)') #Ice0 -maxa = garplot('Ice0/Ice0' + model + '.nc', 'Ice0', 'grey', 'd') +maxa, mina = garplot('Ice0/Ice0' + model + '.nc', 'Ice0', 'grey', 'd') +xmaxplot = maxa #Ice1 -maxa = garplot('Ice1r/Ice1r' + model + '.nc', 'Ice1r', 'red', 'o') -maxa = garplot('Ice1rr/Ice1rr' + model + '.nc', 'Ice1rr', 'purple', 'o') -maxa = garplot('Ice1ra/Ice1ra' + model + '.nc', 'Ice1ra', 'orange', 'o') -maxa = garplot('Ice1rrx/Ice1rrx' + model + '.nc', 'nolabel', 'purple', 'o') -maxa = 
garplot('Ice1rax/Ice1rax' + model + '.nc', 'nolabel', 'orange', 'o') +maxa, mina = garplot('Ice1r/Ice1r' + model + '.nc', 'Ice1r', 'red', 'o') +maxa, mina = garplot('Ice1rr/Ice1rr' + model + '.nc', 'Ice1rr', 'purple', 'o') +maxa, mina = garplot('Ice1ra/Ice1ra' + model + '.nc', 'Ice1ra', 'orange', 'o') +maxa, mina = garplot('Ice1rrx/Ice1rrx' + model + '.nc', 'nolabel', 'purple', 'o') +xminplot = mina +maxa, mina = garplot('Ice1rax/Ice1rax' + model + '.nc', 'nolabel', 'orange', 'o') + +plt.ylim([xminplot-2., xmaxplot+2.]) #Ice2 -maxa = garplot('Ice2r/Ice2r' + model + '.nc', 'Ice2r', 'blue', 's') -maxa = garplot('Ice2rr/Ice2rr' + model + '.nc', 'Ice2rr', 'pink', 's') -maxa = garplot('Ice2ra/Ice2ra' + model + '.nc', 'Ice2ra', 'yellow', 's') -maxa = garplot('Ice2rrx/Ice2rrx' + model + '.nc', 'nolabel', 'pink', 's') -maxa = garplot('Ice2rax/Ice2rax' + model + '.nc', 'nolabel', 'yellow', 's') +maxa, mina = garplot('Ice2r/Ice2r' + model + '.nc', 'Ice2r', 'blue', 's') +maxa, mina = garplot('Ice2rr/Ice2rr' + model + '.nc', 'Ice2rr', 'pink', 's') +maxa, mina = garplot('Ice2ra/Ice2ra' + model + '.nc', 'Ice2ra', 'yellow', 's') +maxa, mina = garplot('Ice2rrx/Ice2rrx' + model + '.nc', 'nolabel', 'pink', 's') +maxa, mina = garplot('Ice2rax/Ice2rax' + model + '.nc', 'nolabel', 'yellow', 's') plt.legend(loc='lower left', ncol=2, frameon=True, borderaxespad=0) diff --git a/tests/MISOMIP/mismip+/mismip+Run.py b/tests/MISOMIP/mismip+/mismip+Run.py index dc1f6c09..3a512b5e 100755 --- a/tests/MISOMIP/mismip+/mismip+Run.py +++ b/tests/MISOMIP/mismip+/mismip+Run.py @@ -13,12 +13,13 @@ from ConfigParser import ConfigParser from netCDF4 import Dataset -# Parse options + +# Parse options. optparser = OptionParser() -optparser.add_option('-e', '--exec', dest='executable', type = 'string', default='./cism_driver', help='Path to the CISM executable') -optparser.add_option("-x", "--expt", dest='experiment', type='string', default = 'allIce', help="MISMIP+ experiment(s) to run", metavar="EXPT") -optparser.add_option('-n', '--parallel', dest='parallel', type='int', help='Number of processors: if specified then run in parallel', metavar="NUMPROCS") +optparser.add_option('-e', '--exec', dest='executable', type = 'string', default ='./cism_driver', help="Path to the CISM executable") +optparser.add_option('-x', '--expt', dest='experiment', type ='string', default = 'all', help="MISMIP+ experiment(s) to run", metavar="EXPT") +optparser.add_option('-n', '--parallel', dest='parallel', type ='int', help="Number of processors: if specified then run in parallel", metavar="NUMPROCS") for option in optparser.option_list: @@ -29,9 +30,9 @@ if options.experiment == 'Spinup': experiments = ['Spinup'] print 'Run the MISMIP+ Spinup experiment' -#if options.experiment == 'all': -# experiments = ['Spinup', 'Ice0', 'Ice1r', 'Ice1ra', 'Ice1rr', 'Ice2r', 'Ice2ra', 'Ice2rr', 'Ice1rax', 'Ice1rrx', 'Ice2rax', 'Ice2rrx'] -# print 'Run all the MISMIP+ experiments, including Spinup' +elif options.experiment == 'all': + experiments = ['Spinup', 'Ice0', 'Ice1r', 'Ice1ra', 'Ice1rr', 'Ice2r', 'Ice2ra', 'Ice2rr', 'Ice1rax', 'Ice1rrx', 'Ice2rax', 'Ice2rrx'] + print 'Run all the MISMIP+ experiments, including Spinup' elif options.experiment == 'allIce': experiments = ['Ice0', 'Ice1r', 'Ice1ra', 'Ice1rr', 'Ice2r', 'Ice2ra', 'Ice2rr', 'Ice1rax', 'Ice1rrx', 'Ice2rax', 'Ice2rrx'] print 'Run all the MISMIP+ experiments, excluding Spinup' @@ -47,51 +48,99 @@ else: sys.exit('Please specify experiment(s) from this list: Spinup, allIce, Ice1, Ice2, Spinup, Ice0, 
Ice1r, Ice1ra, Ice1rr, Ice1rax, Ice1rrx, Ice2r, Ice2ra, Ice2rr, Ice2rax, Ice2rrx') -# Loop through experiments +# Loop through experiments. for expt in experiments: print 'Running experiment', expt - # Change to directory for this experiment + # Change to directory for this experiment. os.chdir(expt) - # Set config file + # Set config file. configfile = 'mismip+' + expt + '.config' # Make sure we are starting from the final time slice of the input file, # (Except for Spinup, the input file is a restart file from a previous run.) - if expt != 'Spinup': + # Read the config file. + config = ConfigParser() + config.read(configfile) - # Read the config file - config = ConfigParser() - config.read(configfile) + if expt != 'Spinup': # Edit the 'time' entry in the [CF input] section. inputfile = config.get('CF input', 'name') - infile = Dataset(inputfile,'r') - ntime = len(infile.dimensions['time']) + infile = Dataset(inputfile,'r+') + ntime = len(infile.dimensions['time']) config.set('CF input', 'time', ntime) print 'input file =', inputfile print 'time slice =', ntime + + # Before starting each test suite experiment, we need to make sure + # the Spinup restart file internal_time last entry is shifted to 0. + if (expt=='Ice0') or (expt=='Ice1r') or (expt=='Ice2r'): + lastentry = infile['internal_time'][-1] + if lastentry != 0: + infile['internal_time'][:] = infile['internal_time'][:] - lastentry + print 'the new internal_time array is ', infile['internal_time'][:] + infile.close() - # Write the modified config file - with open(configfile, 'w') as newconfigfile: - config.write(newconfigfile) - # Run CISM + elif expt == 'Spinup': + inputFile = config.get('CF input', 'name') + outputFile = config.get('CF output', 'name') + outputFreq = config.get('CF output', 'frequency') + endTime = config.get('time', 'tend') + endTime = float(endTime) + + # Time buffer to remedy the restart pb when switching to different time step within a run. + buffer = float(outputFreq) - 1. + + # Read output file content information. + lastTimeEntry = 0 + lastEntryInternal = 0 + sizeTimeOutput = 0 + if os.path.exists(outputFile): + outputData = Dataset(outputFile,'r') + if outputData['time'].size != 0: + sizeTimeOutput = outputData['time'].size + lastTimeEntry = outputData['time'][-1] + + outputData.close() + + # Take action based on the stage of the experimental run. + if (lastTimeEntry >= (endTime - buffer)) and (sizeTimeOutput > 1): + # The run for this A value is done, moving to the next one. + pass + elif (lastTimeEntry < endTime) and (sizeTimeOutput > 1): + # The run for this A value is not done and needs to continue. + print 'Continuing experiment from restart.' + + # Make sure restart is set to 1 in config file. + config.set('options', 'restart', 1) + + # Write to config file. + with open(configfile, 'w') as newconfigfile: + config.write(newconfigfile) + + else: + print 'There is nothing to restart from, executing from the beginning.' + + + + # Run CISM. print 'parallel =', options.parallel if options.parallel == None: - # Perform a serial run + # Perform a serial run. os.system(options.executable + ' ' + configfile) else: - # Perform a parallel run + # Perform a parallel run. if options.parallel <= 0: sys.exit( 'Error: Number of processors specified for parallel run is <=0.' 
) else: - # These calls to os.system will return the exit status: 0 for success (the command exists), some other integer for failure + # These calls to os.system will return the exit status: 0 for success (the command exists), some other integer for failure. if os.system('which openmpirun > /dev/null') == 0: mpiexec = 'openmpirun -np ' + str(options.parallel) elif os.system('which mpirun > /dev/null') == 0: @@ -99,7 +148,7 @@ elif os.system('which aprun > /dev/null') == 0: mpiexec = 'aprun -n ' + str(options.parallel) elif os.system('which mpirun.lsf > /dev/null') == 0: - # mpirun.lsf does NOT need the number of processors (options.parallel) + # mpirun.lsf does NOT need the number of processors (options.parallel). mpiexec = 'mpirun.lsf' else: sys.exit('Unable to execute parallel run. Please edit the script to use your MPI run command, or run manually with something like: mpirun -np 4 ./cism_driver mismip+Init.config') @@ -112,5 +161,5 @@ print 'Finished experiment', expt - # Change to parent directory and continue + # Change to parent directory and continue. os.chdir('..') diff --git a/tests/MISOMIP/mismip+/mismip+Setup.py b/tests/MISOMIP/mismip+/mismip+Setup.py index bfea349d..cf7f3a72 100755 --- a/tests/MISOMIP/mismip+/mismip+Setup.py +++ b/tests/MISOMIP/mismip+/mismip+Setup.py @@ -15,20 +15,71 @@ from ConfigParser import ConfigParser from optparse import OptionParser -# Parse options + +############# +# Constants # +############# + + +xDomain = 640000.0 # domain x-dimension (m) +yDomain = 80000.0 # domain y-dimension (m) +xCalve = 640000. # calving front location (m) +initThickness = 100. # initial uniform ice thikcness (m) +accum = 0.3 # uniform accumulation rate (m/yr) + +restartfreqSpinup = 1000. # frequency at which restart file is written (yr) + + +#################################### +# Function used later in the code # +#################################### + + +#This function computes the MISMIP+ bed according to Asay-Davis et al. (2016). +def computeBed(x,y): + x = x/1.e3 # km + y = y/1.e3 # km + X = np.size(x) + Y = np.size(y) + b = np.zeros((X,Y)) + B0 = -150. # m + B2 = -728.8 # m + B4 = 343.91 # m + B6 = -50.57 # m + x_bar = 300. # km + x_tilde = x/x_bar + + dc = 500. # m + fc = 4. # km + wc = 24. # km + Ly = 80. # km + + Bmax = -720. # m + B_x = B0 + B2*x_tilde**2 + B4*x_tilde**4 + B6*x_tilde**6 + B_y = dc / (1 + np.exp(-2*(y-Ly/2-wc)/fc)) + dc / (1 + np.exp(2*(y-Ly/2+wc)/fc)) + Bsum = B_x + B_y + B = np.maximum(Bsum, Bmax) # B >= Bmax + return B + + + +######## +# Code # +######## + +# Parse options. optparser = OptionParser() -optparser.add_option('-c', '--config', dest='configfile', type='string', default='mismip+.config.template', help='config file template', metavar="FILE") -optparser.add_option('-e', '--exec', dest='executable', type='string', default='cism_driver', help='path to the CISM executable') -optparser.add_option("-x", "--expt", dest='experiment', type='string', default = 'all', help="MISMIP+ experiment(s) to set up", metavar="EXPT") -optparser.add_option("-t", "--tstep", dest='timestep', type='float', default = 0.5, help="time step (yr)", metavar="TSTEP") -optparser.add_option("-r", "--res", dest='resolution', type='int', default = 2000, help="grid resolution (m)", metavar="RES") -optparser.add_option("-v", "--vlevel", dest='vertlevels', type='int', default = 3, help="no. 
of vertical levels", metavar="VLEVEL") -optparser.add_option('-a', '--approx', dest='approximation', type='string', default = 'DIVA', help='Stokes approximation (SSA, DIVA, BP)') -optparser.add_option('-b', '--basal', dest='basalFriction', type='string', default='Schoof', help='basal friction law (Schoof, Tsai, powerlaw)') -optparser.add_option('-y', '--year', dest='yearsSpinup', type='int', default = 20000, help='length of spinup run (yr)') -#optparser.add_option('-n', '--parallel', dest='parallel', type='int', help='Number of processors: if specified then run in parallel', metavar="NUMPROCS") +optparser.add_option('-c', '--config', dest='configfile', type='string', default='mismip+.config.template', help="config file template", metavar="FILE") +optparser.add_option('-e', '--exec', dest='executable', type='string', default='cism_driver', help="path to the CISM executable") +optparser.add_option('-x', '--expt', dest='experiment', type='string', default= 'all', help="MISMIP+ experiment(s) to set up", metavar="EXPT") +optparser.add_option('-t', '--tstep', dest='timestep', type='float', default= 0.5, help="time step (yr)", metavar="TSTEP") +optparser.add_option('-r', '--res', dest='resolution', type='int', default= 2000, help="grid resolution (m)", metavar="RES") +optparser.add_option('-v', '--vlevel', dest='vertlevels', type='int', default= 3, help="no. of vertical levels", metavar="VLEVEL") +optparser.add_option('-a', '--approx', dest='approximation', type='string', default= 'DIVA', help="Stokes approximation (SSA, DIVA, BP)") +optparser.add_option('-b', '--basal', dest='basalFriction', type='string', default='Schoof', help="basal friction law (Schoof, Tsai, powerlaw)") +optparser.add_option('-y', '--year', dest='yearsSpinup', type='int', default= 20000, help="length of spinup run (yr)") optparser.add_option @@ -40,6 +91,15 @@ if options.experiment == 'all': experiments = ['Spinup', 'Ice0', 'Ice1r', 'Ice1ra', 'Ice1rr', 'Ice1rax', 'Ice1rrx', 'Ice2r', 'Ice2ra', 'Ice2rr', 'Ice2rax', 'Ice2rrx'] print 'Setting up all the MISMIP+ experiments' +elif options.experiment == 'allIce': + experiments = ['Ice0', 'Ice1r', 'Ice1ra', 'Ice1rr', 'Ice2r', 'Ice2ra', 'Ice2rr', 'Ice1rax', 'Ice1rrx', 'Ice2rax', 'Ice2rrx'] + print 'Run all the MISMIP+ experiments, excluding Spinup' +elif options.experiment == 'Ice1': + experiments = ['Ice1r', 'Ice1ra', 'Ice1rr', 'Ice1rax', 'Ice1rrx'] + print 'Run the MISMIP+ Ice1 experiments' +elif options.experiment == 'Ice2': + experiments = ['Ice2r', 'Ice2ra', 'Ice2rr', 'Ice2rax', 'Ice2rrx'] + print 'Run the MISMIP+ Ice2 experiments' elif options.experiment in ['Spinup', 'Ice0', 'Ice1r', 'Ice1ra', 'Ice1rr', 'Ice1rax', 'Ice1rrx', 'Ice2r', 'Ice2ra', 'Ice2rr', 'Ice2rax', 'Ice2rrx']: experiments = [options.experiment] print 'Setting up experiment', options.experiment @@ -47,14 +107,14 @@ sys.exit('Please specify experiment(s) from this list: all, Spinup, Ice0, Ice1r, Ice1ra, Ice1rr, Ice1rax, Ice1rrx, Ice2r, Ice2ra, Ice2rr, Ice2rax, Ice2rrx') # If there is not already a link to cism_driver in the main directory, then make one. -# Each subdirectory will link to cism_driver in the main directory +# Each subdirectory will link to cism_driver in the main directory. if options.executable != 'cism_driver': - # Remove the existing link, if present + # Remove the existing link, if present. os.unlink('cism_driver') - # Make the new link + # Make the new link. os.symlink(options.executable, 'cism_driver') -# Set grid resolution +# Set grid resolution. 
if options.resolution == 8000: dx = 8000.0 dy = 8000.0 @@ -84,10 +144,8 @@ print 'MISMIP+ grid resolution (m) =', options.resolution print 'Number of vertical levels =', nz -# Set number of grid cells in each direction -# Include a few extra cells in the x direction to handle boundary conditions -xDomain = 640000.0 -yDomain = 80000.0 +# Set number of grid cells in each direction. +# Include a few extra cells in the x direction to handle boundary conditions. nx = int(xDomain/dx) + 4 ny = int(yDomain/dy) @@ -112,13 +170,7 @@ config.set('grid', 'dew', dx) config.set('grid', 'dns', dy) -# Set the time step in the msster config file -# Set the diagnostic interval to the same value (not necessary, but helpful for debugging). - -config.set('time', 'dt', options.timestep) -config.set('time', 'dt_diag', options.timestep) - -# Set Stokes approximation in config file +# Set Stokes approximation in config file. if options.approximation == 'SSA': which_ho_approx = 1 print 'Using SSA velocity solver' @@ -134,7 +186,7 @@ config.set('ho_options', 'which_ho_approx', which_ho_approx) -# Config settings related to basal friction law +# Config settings related to basal friction law. # Note: Each of these friction laws is associate with certain basal parameters. # The desired parameters should be set in the config template. if options.basalFriction == 'Schoof': @@ -155,15 +207,14 @@ yearsSpinup = float(options.yearsSpinup) config.set('time', 'tend', yearsSpinup) -# Write to the master config file +# Write to the master config file. with open(masterConfigFile, 'w') as configfile: config.write(configfile) print 'years of spinup =', yearsSpinup -restartfreqSpinup = 1000. # default is 1000; user can change the value here if needed print 'spinup restart frequency =', restartfreqSpinup -# Create the netCDF input file, +# Create the netCDF input file. try: parser = ConfigParser() parser.read(options.configfile) @@ -174,19 +225,19 @@ print 'Creating input file', initfile ncfile = Dataset(initfile, 'w') -# Create dimensions -# Note: (x0,y0) = staggered (velocity) grid -# (x1,y1) = unstaggered (scalar) grid +# Create dimensions. +# Note: (x0,y0) = staggered (velocity) grid. +# (x1,y1) = unstaggered (scalar) grid. ncfile.createDimension('time', 1) -ncfile.createDimension('x1', nx) -ncfile.createDimension('y1', ny) -ncfile.createDimension('x0', nx-1) -ncfile.createDimension('y0', ny-1) -ncfile.createDimension('level', nz) -ncfile.createDimension('staglevel', nz-1) +ncfile.createDimension('x1', nx) +ncfile.createDimension('y1', ny) +ncfile.createDimension('x0', nx-1) +ncfile.createDimension('y0', ny-1) +ncfile.createDimension('level', nz) +ncfile.createDimension('staglevel', nz-1) ncfile.createDimension('stagwbndlevel', nz+1) -# Create time and grid variables +# Create time and grid variables. # Note: (x1,y1) are loadable and need to be in the input file. # (x0,y0) are not loadable, but are derived in CISM from (x1,y1). May not be needed. @@ -197,8 +248,6 @@ y0 = ncfile.createVariable('y0','f4',('y0',)) # Create 2D input fields -#WHL - Reverse order of indices? - thk = ncfile.createVariable('thk', 'f4', ('time','y1','x1')) topg = ncfile.createVariable('topg', 'f4', ('time','y1','x1')) acab = ncfile.createVariable('acab', 'f4', ('time','y1','x1')) @@ -222,47 +271,21 @@ x0[:] = x[:-1] - dx/2. # x0 = -dx/2, dx/2, 3*dx/2, ..., (nx-2)*dx y0[:] = y[:-1] + dy # y0 = dy, 2*dy, ..., (ny-1)*dy -# Initialize thickness -xCalve = 640000. # calving front location (m) -initThickness = 100. +# Initialize thickness. thk[:,:,:] = 0. 
for i in range(nx): if x1[i] <= xCalve: thk[:,:,i] = initThickness -#This function computes the MISMIP+ bed according to Asay-Davis et al. (2016). -def computeBed(x,y): - x = x/1.e3 # km - y = y/1.e3 # km - X = np.size(x) - Y = np.size(y) - b = np.zeros((X,Y)) - B0 = -150. # m - B2 = -728.8 # m - B4 = 343.91 # m - B6 = -50.57 # m - x_bar = 300. # km - x_tilde = x/x_bar - dc = 500. # m - fc = 4. # km - wc = 24. # km - Ly = 80. # km - Bmax = -720. # m - B_x = B0 + B2*x_tilde**2 + B4*x_tilde**4 + B6*x_tilde**6 - B_y = dc / (1 + np.exp(-2*(y-Ly/2-wc)/fc)) + dc / (1 + np.exp(2*(y-Ly/2+wc)/fc)) - Bsum = B_x + B_y - B = np.maximum(Bsum, Bmax) # B >= Bmax - return B -# Set bed topography +# Set bed topography. for i in range(nx): for j in range(ny): topg[:,j,i] = computeBed(x1[i], y1[j]) -# Set the surface mass balance +# Set the surface mass balance. # Uniform accumulation, but prescribe a large negative rate beyond the calving front. # WHL - The large negative rate may not be needed, but setting it just in case. -accum = 0.3 # uniform accumulation (m/yr) acab[:,:,:] = accum for i in range(nx): if x1[i] > xCalve: @@ -277,34 +300,33 @@ def computeBed(x,y): # Where kinbcmask = 1, the velocity is fixed at its initial value. # Note: Although there is no ice on the RHS of the domain, we need kinbcmask =1 there # to preserve symmetry with the LHS (since east-west BCs are formally periodic). -kinbcmask[:,:,:] = 0 # initialize to 0 everywhere -kinbcmask[:,:,0] = 1 # mask out left-most column -kinbcmask[:,:,-1] = 1 # mask out right-most column +kinbcmask[:,:,:] = 0 # initialize to 0 everywhere +kinbcmask[:,:,0] = 1 # mask out left-most column +kinbcmask[:,:,-1] = 1 # mask out right-most column ncfile.close() print 'Experiments:', experiments -# Loop through experiments +# Loop through experiments. for expt in experiments: - # For each experiment, make a suitable config file and set up a subdirectory + # For each experiment, make a suitable config file and set up a subdirectory. print 'Creating config file for experiment', expt # Make a copy of the mismip+Init config file. # Below, this copy will be tailored for the chosen MISMIP+ experiment, # without changing the settings used for spin-up. -# print 'Master config file:', masterConfigFile newConfigFile = 'mismip+' + expt + '.config' print 'Config file for this experiment:', newConfigFile shutil.copy(masterConfigFile, newConfigFile) - # Read the new config file + # Read the new config file. config = ConfigParser() config.read(newConfigFile) - # Experiment-specific settings + # Experiment-specific settings. # Note: The standard experiments are Ice0, Ice1r, Ice1ra, Ice1rr, Ice2r, Ice2ra and Ice2rr. # Experiments Ice1ra, Ice1rr, Ice2ra and Ice2rr are assumed to end at year 200. # Experiments Ice1rax, Ice1rrx, Ice2rax and Ice2rrx are the optional extensions @@ -314,117 +336,129 @@ def computeBed(x,y): # rather than restart experiments (restart = 1, starting from the 'CF restart' file). 
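Before the per-experiment settings below, one aside on the bed function defined near the top of this script: a quick standalone spot check of the Asay-Davis et al. (2016) formula gives B of roughly -150 m at x = 0 on the y = 40 km centerline and roughly -625 m at x = 450 km. The condensed, broadcasting form below (and the name bed) exists only for this sketch; the script's computeBed is the version actually used.

```python
# Standalone spot check of the MISMIP+ bed formula (Asay-Davis et al., 2016) used in this script.
import numpy as np

def bed(x, y):                 # x, y in metres
    x = x/1.e3                 # -> km
    y = y/1.e3
    B0, B2, B4, B6 = -150., -728.8, 343.91, -50.57   # m
    x_tilde = x/300.           # x_bar = 300 km
    dc, fc, wc, Ly = 500., 4., 24., 80.              # m, km, km, km
    B_x = B0 + B2*x_tilde**2 + B4*x_tilde**4 + B6*x_tilde**6
    B_y = dc/(1. + np.exp(-2.*(y - Ly/2. - wc)/fc)) + dc/(1. + np.exp(2.*(y - Ly/2. + wc)/fc))
    return np.maximum(B_x + B_y, -720.)              # Bmax = -720 m

print(bed(np.array([0.0, 450.e3]), np.array([40.e3, 40.e3])))   # roughly [-150., -625.] m
```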
if (expt == 'Spinup'): - tstart = 0.0 - tend = yearsSpinup - inputdir = '../' - inputfile = initfile - inputslice = 1 - outputfreq = 1000.0 - restartfreq = restartfreqSpinup + tstart = 0.0 + tend = yearsSpinup + inputdir = '../' + inputfile = initfile + inputslice = 1 + outputfreq = min(1000.0, yearsSpinup) + restartfreq = min(restartfreqSpinup, yearsSpinup) elif expt == 'Ice0': - tstart = 0.0 - tend = 100.0 - inputdir = '../Spinup/' - inputfile = 'mismip+Spinup.restart.nc' - inputslice = int(yearsSpinup/restartfreqSpinup) - outputfreq = 10.0 + tstart = 0.0 + tend = 100.0 + inputdir = '../Spinup/' + inputfile = 'mismip+Spinup.restart.nc' + inputslice = int(yearsSpinup/restartfreqSpinup) + outputfreq = 10.0 restartfreq = 100.0 elif expt == 'Ice1r': config.set('options', 'bmlt_float', 1) - tstart = 0.0 - tend = 100.0 - inputdir = '../Spinup/' - inputfile = 'mismip+Spinup.restart.nc' - inputslice = int(yearsSpinup/restartfreqSpinup) - outputfreq = 10.0 + tstart = 0.0 + tend = 100.0 + inputdir = '../Spinup/' + inputfile = 'mismip+Spinup.restart.nc' + inputslice = int(yearsSpinup/restartfreqSpinup) + outputfreq = 10.0 restartfreq = 100.0 elif expt == 'Ice1ra': - tstart = 100.0 - tend = 200.0 - inputdir = '../Ice1r/' - inputfile = 'mismip+Ice1r.restart.nc' - inputslice = 1 - outputfreq = 10.0 + tstart = 100.0 + tend = 200.0 + inputdir = '../Ice1r/' + inputfile = 'mismip+Ice1r.restart.nc' + inputslice = 1 + outputfreq = 10.0 restartfreq = 100.0 elif expt == 'Ice1rax': - tstart = 200.0 - tend = 1000.0 - inputdir = '../Ice1ra/' - inputfile = 'mismip+Ice1ra.restart.nc' - inputslice = 1 - outputfreq = 100.0 + tstart = 200.0 + tend = 1000.0 + inputdir = '../Ice1ra/' + inputfile = 'mismip+Ice1ra.restart.nc' + inputslice = 1 + outputfreq = 100.0 restartfreq = 800.0 elif expt == 'Ice1rr': config.set('options', 'bmlt_float', 1) - tstart = 100.0 - tend = 200.0 - inputdir = '../Ice1r/' - inputfile = 'mismip+Ice1r.restart.nc' - inputslice = 1 - outputfreq = 10.0 + tstart = 100.0 + tend = 200.0 + inputdir = '../Ice1r/' + inputfile = 'mismip+Ice1r.restart.nc' + inputslice = 1 + outputfreq = 10.0 restartfreq = 100.0 elif expt == 'Ice1rrx': config.set('options', 'bmlt_float', 1) - tstart = 200.0 - tend = 1000.0 - inputdir = '../Ice1rr/' - inputfile = 'mismip+Ice1rr.restart.nc' - inputslice = 1 - outputfreq = 100.0 + tstart = 200.0 + tend = 1000.0 + inputdir = '../Ice1rr/' + inputfile = 'mismip+Ice1rr.restart.nc' + inputslice = 1 + outputfreq = 100.0 restartfreq = 800.0 elif expt == 'Ice2r': config.set('options', 'bmlt_float', 2) - tstart = 0.0 - tend = 100.0 - inputdir = '../Spinup/' - inputfile = 'mismip+Spinup.restart.nc' - inputslice = int(yearsSpinup/restartfreqSpinup) - outputfreq = 10.0 + tstart = 0.0 + tend = 100.0 + inputdir = '../Spinup/' + inputfile = 'mismip+Spinup.restart.nc' + inputslice = int(yearsSpinup/restartfreqSpinup) + outputfreq = 10.0 restartfreq = 100.0 elif expt == 'Ice2ra': - tstart = 100.0 - tend = 200.0 - inputdir = '../Ice2r/' - inputfile = 'mismip+Ice2r.restart.nc' - inputslice = 1 - outputfreq = 10.0 + tstart = 100.0 + tend = 200.0 + inputdir = '../Ice2r/' + inputfile = 'mismip+Ice2r.restart.nc' + inputslice = 1 + outputfreq = 10.0 restartfreq = 100.0 elif expt == 'Ice2rax': - tstart = 200.0 - tend = 1000.0 - inputdir = '../Ice2ra/' - inputfile = 'mismip+Ice2ra.restart.nc' - inputslice = 1 - outputfreq = 100.0 + tstart = 200.0 + tend = 1000.0 + inputdir = '../Ice2ra/' + inputfile = 'mismip+Ice2ra.restart.nc' + inputslice = 1 + outputfreq = 100.0 restartfreq = 800.0 elif expt == 
'Ice2rr': config.set('options', 'bmlt_float', 2) - tstart = 100.0 - tend = 200.0 - inputdir = '../Ice2r/' - inputfile = 'mismip+Ice2r.restart.nc' - inputslice = 1 - outputfreq = 10.0 + tstart = 100.0 + tend = 200.0 + inputdir = '../Ice2r/' + inputfile = 'mismip+Ice2r.restart.nc' + inputslice = 1 + outputfreq = 10.0 restartfreq = 100.0 elif expt == 'Ice2rrx': config.set('options', 'bmlt_float', 2) - tstart = 200.0 - tend = 1000.0 - inputdir = '../Ice2rr/' - inputfile = 'mismip+Ice2rr.restart.nc' - inputslice = 1 - outputfreq = 100.0 + tstart = 200.0 + tend = 1000.0 + inputdir = '../Ice2rr/' + inputfile = 'mismip+Ice2rr.restart.nc' + inputslice = 1 + outputfreq = 100.0 restartfreq = 800.0 - # Set the start and end times + + # Set the time step in the master config file. + # Set the diagnostic interval to the same value (not necessary, but helpful for debugging). + # Note: this step is necessary when running at resolution coarser that 4 km as the output files + # needs to be written every 10 years to satisfy plotting criteria. + if expt != 'Spinup': + config.set('time', 'dt', min(options.timestep, 2.)) + config.set('time', 'dt_diag', min(options.timestep, 2.)) + else: + config.set('time', 'dt', options.timestep) + config.set('time', 'dt_diag', options.timestep) + + # Set the start and end times. config.set('time', 'tstart', tstart) - config.set('time', 'tend', tend) + config.set('time', 'tend', tend) - # Change the default comment + # Change the default comment. comment = 'MISMIP+ experiment ' + expt config.set('CF default', 'comment', comment) - # Set input file and time slice in the section '[CF input]' + # Set input file and time slice in the section '[CF input]'. # Note: This method may not be robust for Spinup runs that start and restart. # For this reason, the script mismip+Run.py makes sure the 'time' entry # in [CF input] corresponds to the final time slice. @@ -432,23 +466,23 @@ def computeBed(x,y): config.set('CF input', 'name', inputfile) config.set('CF input', 'time', inputslice) - # Set the output filename in the section '[CF output]' + # Set the output filename in the section '[CF output]'. outputfile = 'mismip+' + expt + '.out.nc' print 'Output file:', outputfile - config.set('CF output', 'name', outputfile) + config.set('CF output', 'name', outputfile) config.set('CF output', 'frequency', outputfreq) # Set restart info in the section '[CF restart]'. # Note: Each experiment (except Spinup) writes only one time slice to a restart file. restartfile = 'mismip+' + expt + '.restart.nc' print 'Restart file:', restartfile - config.set('CF restart', 'name', restartfile) + config.set('CF restart', 'name', restartfile) config.set('CF restart', 'variables', 'restart') - config.set('CF restart', 'frequency', restartfreq) - config.set('CF restart', 'xtype', 'double') + config.set('CF restart', 'frequency', restartfreq) + config.set('CF restart', 'xtype', 'double') config.set('CF restart', 'write_init', False) - # Write to the new config file + # Write to the new config file. with open(newConfigFile, 'w') as configfile: config.write(configfile) @@ -461,21 +495,21 @@ def computeBed(x,y): os.chdir(expt) - # Move the config file from the parent directory to the subdirectory + # Move the config file from the parent directory to the subdirectory. shutil.move('../' + newConfigFile, newConfigFile) print 'Created config file', newConfigFile - # Link to the cism_driver executable in the parent directory + # Link to the cism_driver executable in the parent directory. 
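One aside on the try/except symlink pattern used just below (and for the input-file links above): it silently ignores any error, including a link that points at a stale target. A slightly more defensive helper is sketched here; make_link is a hypothetical name introduced only for this sketch, not something the scripts define.

```python
# Sketch of a more defensive variant of the symlink pattern used in these setup scripts.
import os

def make_link(target, linkname):
    if os.path.islink(linkname):
        os.unlink(linkname)            # drop a pre-existing (possibly stale) link
    if not os.path.exists(linkname):   # don't clobber a real file of the same name
        os.symlink(target, linkname)

make_link('../cism_driver', 'cism_driver')
```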
try: os.symlink('../cism_driver', 'cism_driver') except: pass # link to cism_driver already exists - # Link to the input file in the appropriate directory + # Link to the input file in the appropriate directory. try: os.symlink(inputdir + inputfile, inputfile) except: pass # link to the input file already exists - # Go back to the parent directory and continue + # Go back to the parent directory and continue. os.chdir('..') diff --git a/tests/higher-order/dome/LOCAsettings_trilinosOptions.xml b/tests/dome/LOCAsettings_trilinosOptions.xml similarity index 100% rename from tests/higher-order/dome/LOCAsettings_trilinosOptions.xml rename to tests/dome/LOCAsettings_trilinosOptions.xml diff --git a/tests/higher-order/dome/README.md b/tests/dome/README.md similarity index 100% rename from tests/higher-order/dome/README.md rename to tests/dome/README.md diff --git a/tests/higher-order/dome/README.restart b/tests/dome/README.restart similarity index 100% rename from tests/higher-order/dome/README.restart rename to tests/dome/README.restart diff --git a/tests/higher-order/dome/dome-forcing.config b/tests/dome/dome-forcing.config similarity index 100% rename from tests/higher-order/dome/dome-forcing.config rename to tests/dome/dome-forcing.config diff --git a/tests/higher-order/dome/dome.config b/tests/dome/dome.config similarity index 100% rename from tests/higher-order/dome/dome.config rename to tests/dome/dome.config diff --git a/tests/higher-order/dome/netCDF.py b/tests/dome/netCDF.py similarity index 100% rename from tests/higher-order/dome/netCDF.py rename to tests/dome/netCDF.py diff --git a/tests/higher-order/dome/runDome.py b/tests/dome/runDome.py similarity index 100% rename from tests/higher-order/dome/runDome.py rename to tests/dome/runDome.py diff --git a/tests/higher-order/dome/trilinosOptions.xml b/tests/dome/trilinosOptions.xml similarity index 100% rename from tests/higher-order/dome/trilinosOptions.xml rename to tests/dome/trilinosOptions.xml diff --git a/tests/glint-example/README.md b/tests/glint-example/README.md index 78adc0ae..dc94a756 100644 --- a/tests/glint-example/README.md +++ b/tests/glint-example/README.md @@ -16,12 +16,7 @@ You will also need a 20-km Greenland input file: * `gland20.input.nc` -These three netCDF files are part of a tar file, `glint-example.1.0.0.tar.gz`, -that can be downloaded from the CISM website: - - - -These files are also available in the CESM inputdata repository (account +These three netCDF files are available in the CESM inputdata repository (account required), here: diff --git a/tests/glint-example/glint_example.config.pdd b/tests/glint-example/glint_example.config.pdd index 80a98525..e4721610 100644 --- a/tests/glint-example/glint_example.config.pdd +++ b/tests/glint-example/glint_example.config.pdd @@ -3,7 +3,6 @@ days_in_year = 360 total_years = 10 climate_tstep = 6 # hours -diurnal_cycle = 0.0 gcm_smb = .false. [GLEX precip] diff --git a/tests/glint-example/glint_example.config.smb b/tests/glint-example/glint_example.config.smb index 09bb9172..19deb89a 100644 --- a/tests/glint-example/glint_example.config.smb +++ b/tests/glint-example/glint_example.config.smb @@ -3,7 +3,6 @@ days_in_year = 360 total_years = 10 climate_tstep = 6 # hours -diurnal_cycle = 0.0 gcm_smb = .true. 
[GLEX precip] diff --git a/tests/halfar/README.md b/tests/halfar/README.md index 4243b3dd..09429c87 100644 --- a/tests/halfar/README.md +++ b/tests/halfar/README.md @@ -13,7 +13,7 @@ You will need to either copy your executable into this directory, have the executable in your path, or create a symbolic link to your executable, using: ```sh -ln -s ../../../builds/platform-compiler/cism_driver/cism_driver ./ +ln -s ../../builds/platform-compiler/cism_driver/cism_driver ./ ``` runHalfar.py @@ -34,7 +34,7 @@ but advanced commandline options are available. For details, execute: The test should only take a few seconds for the default run duration of 200 years. -The script create a netCDF input file for CISM and then runs CISM, generating a +The script creates a netCDF input file for CISM and then runs CISM, generating a netCDF output file. Input and output `*.nc` files are written into the `output` subdirectory, which is controlled by the `-o/--output-dir` option. @@ -86,8 +86,8 @@ Glissade dycore. To do so, use the `halfar.HO.config` file: In this case, the model output should not be expected to match the analytic solution, but results will be reasonably close (using a no-slip basal boundary -condition, anyway). The test should take a few minutes for the default run -duration of 200 years. +condition, anyway). The test should take no more than a few minutes for the +default run duration of 200 years. PREREQUISITES: diff --git a/tests/halfar/halfar-HO.config b/tests/halfar/halfar-HO.config index 5e9e014c..88f053cc 100644 --- a/tests/halfar/halfar-HO.config +++ b/tests/halfar/halfar-HO.config @@ -43,6 +43,6 @@ time = 1 [CF output] variables = restart -frequency = 1 +frequency = 5. name = halfarHO.out.nc diff --git a/tests/halfar/halfar.config b/tests/halfar/halfar.config index cedb7812..df8de519 100644 --- a/tests/halfar/halfar.config +++ b/tests/halfar/halfar.config @@ -43,6 +43,6 @@ time = 1 [CF output] variables = restart # Note: 'restart' auto-expands to the important state variables -frequency = 1 +frequency = 5. name = halfar.out.nc diff --git a/tests/higher-order/mismip/README b/tests/higher-order/mismip/README deleted file mode 100755 index 57ac248f..00000000 --- a/tests/higher-order/mismip/README +++ /dev/null @@ -1,42 +0,0 @@ -MISMIP test case -======== -This directory contains two Python scripts for running the MISMIP experiments on a fixed domain on a linearly tilted bed topography. -(WHL: Just one python script for now) -To run the experiment, enter the following on a terminal command line: - -For a serial job: ./mismipInit.py - -For a parallel job: ./mismipInit.py -m number_of_processors, for example: - -./mismipInit.py -m 8 - -... will run the test case on 8 processors. - -Execute: ./mismipInit.py --help -for a list of all options available. - -To run with a specific configuration file: ./mismipInit.py -c mismip_config_filename - -If no config file is specified, the default is mismipInit.config. - -To run with a specific executable: ./mismipInit.py -e cism_executable_name - -If no executable is specified, the default is cism_driver. -You can either copy the cism_driver executable to this directory or create a link, e.g.: - -ln -s ../../../builds/mac-gnu/cism_driver/cism_driver cism_driver - -The default configuration file is set to use the higher-order Glissade dycore. -The current option is the DIVA solver (which_ho_approx = 3), based on Goldberg (2011). - -The script performs the following steps: -1. Create a netCDF input file for CISM. -2. 
Run CISM, creating a netCDF output file. - -PREREQUISITES: -In order to use the mismip.py script, you will need to have Python and one of the -following Python modules installed: -Scientific.IO.NetCDF, netCDF4, or pycdf - -To view the results use ncview or another utility for viewing netCDF files. - diff --git a/tests/higher-order/mismip/mismipInit.config b/tests/higher-order/mismip/mismipInit.config deleted file mode 100644 index 5cd16d23..00000000 --- a/tests/higher-order/mismip/mismipInit.config +++ /dev/null @@ -1,87 +0,0 @@ -[DOME-TEST] - -[grid] -upn = 3 -#ewn = 661 -ewn = 1321 -#ewn = 2641 -nsn = 5 -dew = 1600 -dns = 1600 - -[time] -tstart = 0. -#tend = 15000. -tend = 1.0 -dt = 0.25 -dt_diag = 0.25 -#idiag = 286 -idiag = 567 -#idiag = 1130 -jdiag = 3 - -[options] -dycore = 2 # 0 = glide, 1 = glam, 2=glissade -evolution = 4 # 3 = remapping, 4 = first order upwind -flow_law = 0 # 0 = constant, 2 = Paterson-Budd -temperature = 0 # 1 = prognostic, 3 = enthalpy -marine_margin = 4 -basal_water = 4 # 4 = ocean penetration parameterization -#restart = 1 - -[ho_options] -which_ho_babc = 11 # 4 = no-slip at bed, 10 = Coulomb friction law, 11 = Coulomb w/const basal flwa -which_ho_efvs = 2 # 0 = constant, 2 = nonlinear eff. visc. w/ n=3 -which_ho_sparse = 3 # 1 = SLAP GMRES, 3 = Fortran PCG, 4 = Trilinos for linear solver -which_ho_nonlinear = 0 # 0 = Picard, 1 = JFNK -which_ho_precond = 1 # 1 = diagonal precond, 2 = shallow ice preconditioner -which_ho_approx = 1 # 2 = Blatter-Pattyn, 3 = L1L2, 4 = DIVA -which_ho_gradient_margin = 2 -which_ho_gradient = 0 #0 = centered, 1 = upstream -which_ho_assemble_beta = 1 #0 = standard FE, 1 = local -which_ho_assemble_taud = 1 #0 = standard FE, 1 = local -which_ho_ground = 1 #0 = no GLP, 1 = GLP -which_ho_resid = 4 #3 = absolute, 4 = relative -glissade_maxiter = 50 -restart_extend_velo = 1 # 1 = write uvel/vvel on extended grid - -[parameters] -rhoi = 900.0 # CISM default = 910 -rhoo = 1000.0 # CISM default = 1028 -grav = 9.80 # CISM default = 9.81 -flow_factor = 1. -ice_limit = 1. ; min thickness (m) for dynamics -default_flwa = 14.648e-17 -#default_flwa = 31.558e-18 -marine_limit = -1280 ; depth past which ice is lost -coulomb_c = 0.1778 -coulomb_bump_max_slope = 0.5 ; maximum bed obstacle slope -#coulomb_bump_wavelength = 6.33753723303 ; bedrock's wavelength -#coulomb_bump_wavelength = 1.365374112507 -coulomb_bump_wavelength = 2.0 -flwa_basal = 1.0e-16 -#p_ocean_penetration = 1. ; p values for coulomb friction law -p_ocean_penetration = 0. ; p values for coulomb friction law - - -[CF default] -comment = created with mismipINit.py -title = MISMIP experiment using CISM2 - -[CF input] -name = mismipInit.nc -#name = Exp1_A_3.1558e-17_3200.0km_Restart.out.nc -time = 1 - -[CF output] -#variables = thk usurf uvel vvel velnorm temp beta topg flwa -variables = restart effec_press beta resid_u resid_v f_ground f_pattyn btractx btracty taudx taudy tau_xx -frequency = 1 -#name = Exp1_A_1.4648e-16_1600.0km.out.nc -name = mismipInit.out.nc - -[CF output] -variables = restart -xtype = double -frequency = 1000 -name = mismip.restart.nc diff --git a/tests/higher-order/mismip/mismipInit.py b/tests/higher-order/mismip/mismipInit.py deleted file mode 100755 index 9f7dc5dc..00000000 --- a/tests/higher-order/mismip/mismipInit.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python2 -# This script runs the mismip experiment on a linear downward sloping bed. -# Files are written in the "output" subdirectory. 
- -#### TO BE REWRITTEN -# The script performs the following two steps: -# 1. Created an initial profie for the mismip experiment. -# 2. Create a netCDF input file for Glimmer. - -# Parse options -from optparse import OptionParser -optparser = OptionParser() -optparser.add_option("-c", "--config", dest="configfile", type='string', default='mismipInit.config', help="Name of .config file to use to setup and run the mismip experiment", metavar="FILE") -optparser.add_option('-m','--parallel',dest='parallel',type='int', help='Number of processors to run the model with: if specified then execute run in parallel', metavar="NUMPROCS") -optparser.add_option('-e','--exec',dest='executable',default='./cism_driver',help='Set path to the CISM executable') -for option in optparser.option_list: - if option.default != ("NO", "DEFAULT"): - option.help += (" " if option.help else "") + "[default: %default]" -options, args = optparser.parse_args() - -import sys, os, numpy -from netCDF import * -from math import sqrt -from ConfigParser import ConfigParser - - - -# ===================================== -# Create a netCDF file according to the information in the config file. -try: - parser = ConfigParser() - parser.read(options.configfile) - nx = int(parser.get('grid','ewn')) - ny = int(parser.get('grid','nsn')) - nz = int(parser.get('grid','upn')) - dx = float(parser.get('grid','dew')) - dy = float(parser.get('grid','dns')) - A = float(parser.get('parameters','default_flwa')) -# C = float(parser.get('parameters','coulomb_c')) - p = float(parser.get('parameters','p_ocean_penetration')) - filename = parser.get('CF input', 'name') -except: - sys.exit('Error parsing ' + options.configfile) - -print 'Writing', filename -try: - netCDFfile = NetCDFFile(filename,'w',format='NETCDF3_CLASSIC') -except TypeError: - netCDFfile = NetCDFFile(filename,'w') - -netCDFfile.createDimension('time',1) -netCDFfile.createDimension('x1',nx) -netCDFfile.createDimension('y1',ny) -netCDFfile.createDimension('level',nz) -netCDFfile.createDimension('staglevel',nz-1) -netCDFfile.createDimension('stagwbndlevel',nz+1) -netCDFfile.createDimension('x0',nx-1) # staggered grid -netCDFfile.createDimension('y0',ny-1) - -x = dx*numpy.arange(nx,dtype='float32') -y = dx*numpy.arange(ny,dtype='float32') - -netCDFfile.createVariable('time','f',('time',))[:] = [0] -netCDFfile.createVariable('x1','f',('x1',))[:] = x -netCDFfile.createVariable('y1','f',('y1',))[:] = y -netCDFfile.createVariable('x0','f',('x0',))[:] = dx/2 + x[:-1] # staggered grid -netCDFfile.createVariable('y0','f',('y0',))[:] = dy/2 + y[:-1] - -thk = numpy.zeros([1,ny,nx],dtype='float32') -topg = numpy.zeros([1,ny,nx],dtype='float32') -beta = numpy.zeros([1,ny-1,nx-1],dtype='float32') -acab = numpy.zeros([1,ny,nx],dtype='float32') -uvel = numpy.zeros([1,nz,ny-1,nx-1],dtype='float32') -vvel = numpy.zeros([1,nz,ny-1,nx-1],dtype='float32') -kinbcmask = numpy.zeros([1,ny-1,nx-1],dtype='int32') -effecpress = numpy.zeros([1,ny,nx],dtype='float32') -tau_b = numpy.zeros([1,ny-1,nx-1],dtype='float32') - -print 'p_ocean=', p - -# Calculating the linear bed topography from Schoof 2007 -def computeBedLinear(x): - xBar = 1.e6 - HBar = 1.e3 - schoofx = 750000. - slope = -778.5 - b0 = 720. - - eps_b = 1e-10 - abs_x = numpy.sqrt(x**2 + eps_b**2) - xprime = abs_x*xBar/schoofx - b = -(b0 + slope*xprime)/HBar - - return b - - -deltaX = dx/1.e6 -xc = (nx-1)*deltaX -xu = numpy.linspace(0,xc,nx) -xH = xu - 0.5*deltaX -bed = computeBedLinear(xH) - - -# We are using non-dimensionalized equations at first. 
We will re-dimensionalized at when saving for input netcd file -# parameters for initialization (maybe to read from config file?) -sPerY = 365.25*24.*3600. -HBar = 1000. -xBar = 1000000. -aBar = .3/sPerY -uBar = aBar*xBar/HBar -tBar = xBar/uBar - -rho_i = 910. -rho_w = 1028. -delta = 1.0 - rho_i/rho_w -g = 9.81 -n = 3.0 -a = 1.0 # (non-dimensionalized accumulation) -Ab = 3.1688e-24 # ice temperature at the bed. -A = 4.6416e-24 # first MISMIP exp value. -Cs = 7.624e6 # C-schoof 2007. - -lambda_0 = 2. -m_0 = 0.5 -linearSlope = 778.5 - -NBar = rho_i*g*HBar -taudBar = NBar*HBar/xBar -taubBar = Cs*uBar**(1./n) -Kappa = (m_0*uBar)/(lambda_0*Ab)/(NBar**n) -gamma = taubBar/taudBar - -taulBar = (A**(-1./n))*(uBar/xBar)**(1./n)*HBar/xBar -epsilon = taulBar/(2*taudBar) - -print "kappa=", Kappa -print "gamma=", gamma -print "Epsilon=", epsilon - -toleranceInner = 1e-3 - -xg_init = 0.9 - - -# Creating initial profile using shallow ice approximation (balance between tau_b and tau_d) - -glIndex = numpy.argmin(numpy.abs(xH-xg_init)) -xg = xH[glIndex] -Hxg = bed[glIndex]/(1.0-delta) - - -print "dx=", deltaX -print "xH(xg)=", xH[glIndex] -print "Hxg=",Hxg - -print "xg=",xg - -if (Hxg <= 0.): - raise ValueError("Hf(xg) <= 0. Cannot produce H profile") - print "xg = ",xg - print "Hxg =",Hxg - -uxg = a*xg/Hxg -c1 = (delta*a/(8*epsilon))**n -#operand = numpy.maximum(c1*(xH**(n+1) - xg**(n+1)),0.0) + uxg**(n+1) -operand = c1*(xH**(n+1) - xg**(n+1)) + uxg**(n+1) - - -HShelf = a*xH*(operand)**(-1/(n+1)) -uShelf = a*xH/HShelf - -H = HShelf.copy() -u = uShelf.copy() - -#Hip1 = H[glIndex+1] -#for xIndex in range(glIndex,-1,-1): -# Hi = Hip1 -# deltaB = (bed[glIndex+1]-bed[glIndex]) -# for iterIndex in range(100): -# Hmid = 0.5*(Hi+Hip1) -# umid = a*xu[xIndex]/Hmid -# taub = -gamma*umid**(1./n) -# HPrev = Hi -# Hi = 0.5*(-deltaB + numpy.sqrt(deltaB**2-4*(-Hip1**2 +Hip1*deltaB + 2*deltaX*taub))) -# deltaH = numpy.abs(Hi-HPrev) -# if(deltaH < toleranceInner): -# break -# #print "deltaH:", Hi-HPrev, Hi, Hip1 -# Hip1 = Hi -# H[xIndex] = Hi -# u[xIndex] = umid - - -for xIndex in range(glIndex,-1,-1): - Hip1 = H[xIndex+1] - xx = xH[xIndex+1] - H[xIndex] = -(bed[xIndex+1]-bed[xIndex])+deltaX*gamma*a*xx*numpy.abs(a*xx)**(1/n-1)/Hip1**(1/n+1)+Hip1 - -Hf = numpy.max(bed/(1-delta),0) -Fp = numpy.max((1-Hf/H),0)**p -N_effec = H*Fp -tauB = gamma*u**(1/n)*(N_effec**n/(Kappa*u + N_effec**n))**(1/n) - - -# Re-dimensionalize the variables for output file writing -H = H*HBar -u = u*uBar -xH = xH*HBar -xu = xu*HBar -bed_topo = bed*HBar -N_effec = N_effec*NBar -accumulation = 0.3 -tauB = tauB*taubBar - -# Calculating the ice thickness and velocity and the bed topo over the entire -for j in range(ny): - topg[0,j,:] = -bed_topo # takes into account the silly sign convention from Schoof 2007 -# effecpress[0,j,:] = N_effec - thk[0,j,3:-3] = H[:-6] # leaving 3 cells on both ends to be empty - effecpress[0,j,3:-3] = N_effec[:-6] - acab[0,j,:] = accumulation -# tau_b[0,j,:] = tauB[:-1] - -acab[:,:,0:3] = 0.0 # no ice accumulates in the three first cells -acab[:,:,-3:] = 0.0 # no ice accumulates in the three last cells -kinbcmask[:,:,2] = 1 - - -# Calculate tempstag and beta values, if desired. See lines below to enable them being written to file. -tau_b[:] = tauB[:-1] - - -# Create the required variables in the netCDF file. 
-netCDFfile.createVariable('thk', 'f',('time','y1','x1'))[:] = thk -netCDFfile.createVariable('topg','f',('time','y1','x1'))[:] = topg -netCDFfile.createVariable('acab', 'f',('time','y1','x1'))[:] = acab -netCDFfile.createVariable('uvel','f',('time','level','y0','x0'))[:] = uvel[:] -netCDFfile.createVariable('vvel','f',('time','level','y0','x0'))[:] = vvel[:] -netCDFfile.createVariable('kinbcmask','i',('time','y0','x0'))[:] = kinbcmask[:] -netCDFfile.createVariable('effecpress','f',('time','y1','x1'))[:] = effecpress -netCDFfile.createVariable('tau_b','f',('time','y0','x0'))[:] = tau_b - - -# Optional fields that could be added to the initial condition file. -# Uncomment these lines (and modify their values above), if you want to include them -#netCDFfile.createVariable('beta','f',('time','y0','x0'))[:] = beta - -netCDFfile.close() - - -# ===================================== -# Run CISM -print 'Running CISM' -print '============\n' -if options.parallel == None: - # Perform a serial run - os.system(options.executable + ' ' + options.configfile) -else: - # Perform a parallel run - if options.parallel <= 0: - sys.exit( 'Error: Number of processors specified for parallel run is <=0.' ) - else: - # These calls to os.system will return the exit status: 0 for success (the command exists), some other integer for failure - if os.system('which openmpirun > /dev/null') == 0: - mpiexec = 'openmpirun -np ' + str(options.parallel) - elif os.system('which mpirun > /dev/null') == 0: - mpiexec = 'mpirun -np ' + str(options.parallel) - elif os.system('which aprun > /dev/null') == 0: - mpiexec = 'aprun -n ' + str(options.parallel) - elif os.system('which mpirun.lsf > /dev/null') == 0: - # mpirun.lsf does NOT need the number of processors (options.parallel) - mpiexec = 'mpirun.lsf' - else: - sys.exit('Unable to execute parallel run. Please edit the script to use your MPI run command, or run the model manually with something like: mpirun -np 4 ./cism_driver mismip_init.config') - runstring = mpiexec + ' ' + options.executable + ' ' + options.configfile - print 'Executing parallel run with: ' + runstring + '\n\n' - os.system(runstring) # Here is where the parallel run is actually executed! - - diff --git a/tests/higher-order/mismip/netCDF.py b/tests/higher-order/mismip/netCDF.py deleted file mode 100755 index c9da3a7f..00000000 --- a/tests/higher-order/mismip/netCDF.py +++ /dev/null @@ -1,124 +0,0 @@ -# This script allows use of any of three python netCDF modules: -# Scientific.IO.NetCDF, netCDF4, or pycdf -# To use whichever netCDF module you might have installed put -# from netCDF import * -# in your script. -# Programs should use the Scientific.IO.NetCDF syntax; -# Generally, netCDF4 matches the Scientific.IO.NetCDF syntax and functionality. However there are some differences which require knowing which module is in use to properly call methods. The variable netCDF_module is provided to accomplish this. -# If the pycdf module is to be used, an appropriate "translation" of the method calls is provided. -# Written March 16, 2010 by Glen Granzow - -try: - from Scientific.IO.NetCDF import NetCDFFile - netCDF_module = 'Scientific.IO.NetCDF' -except ImportError: - try: - from netCDF4 import Dataset as NetCDFFile - netCDF_module = 'netCDF4' - except ImportError: - try: - import pycdf - netCDF_module = 'pycdf' - except ImportError: - print 'Unable to import any of the following python modules:' - print ' Scientific.IO.NetCDF \n netcdf4 \n pycdf' - print 'One of them must be installed.' 
- raise ImportError('No netCDF module found') - - def NCtype(value): - if isinstance(value,int): return pycdf.NC.INT - if isinstance(value,float): return pycdf.NC.FLOAT - if isinstance(value,str): return pycdf.NC.CHAR - - class NetCDFFile(object): - def __init__(self,filename,mode): - if mode == 'w': - self.FILE = pycdf.CDF(filename, pycdf.NC.WRITE | pycdf.NC.CREATE | pycdf.NC.TRUNC) - if mode == 'r': - self.FILE = pycdf.CDF(filename, pycdf.NC.NOWRITE) - if mode == 'a': - self.FILE = pycdf.CDF(filename, pycdf.NC.WRITE) - self.FILE.automode() - - def __setattr__(self,name,value): - if name == 'FILE': - object.__setattr__(self,name,value) - else: # Used to assign global attributes - self.FILE.attr(name).put(NCtype(value),value) - - def __getattr__(self,name): - if name == 'dimensions': - return self.FILE.dimensions() - if name == 'variables': - dictionary = dict() - for variable in self.FILE.variables().keys(): - dictionary[variable] = NetCDFvariable(self.FILE.var(variable),None,None,None) - return dictionary - global_attributes = self.FILE.attributes() - if name in global_attributes: - return global_attributes[name] - return object.__getattribute__(self,name) - - def __dir__(self): - return self.FILE.attributes().keys() - - def hasattr(self,name): - return name in dir() - - def createDimension(self,name,size): - self.FILE.def_dim(name,size) - - def createVariable(self,name,datatype,dimensions): - dictNC = {'f':pycdf.NC.FLOAT, 'd':pycdf.NC.DOUBLE, 'i':pycdf.NC.INT, 'c':pycdf.NC.CHAR, 'b':pycdf.NC.BYTE} - return NetCDFvariable(self.FILE,name,dictNC[datatype],dimensions) - - def sync(self): - self.FILE.sync() - - def close(self): - self.FILE.close() - - class NetCDFvariable(object): - def __init__(self,FILE,name,datatype,dimensions): - if isinstance(FILE,pycdf.pycdf.CDFVar): - # FILE is an already defined netCDF variable (not a file) - self.VARIABLE = FILE - else: - # Create a new variable in the netCDF file - self.VARIABLE = FILE.def_var(name,datatype,dimensions) - self.shape = self.VARIABLE.shape() - - def __setitem__(self,elem,data): - self.VARIABLE[elem] = data - - def __getitem__(self,elem): - return self.VARIABLE[elem] - - def __setattr__(self,name,value): - if name in ('VARIABLE','shape'): - object.__setattr__(self,name,value) - else: # Used to assign variable attributes - self.VARIABLE.attr(name).put(NCtype(value),value) - - def __getattr__(self,name): - if name == 'dimensions': - return self.VARIABLE.dimensions() - variable_attributes = self.VARIABLE.attributes() - if name in variable_attributes: - return variable_attributes[name] - return object.__getattribute__(self,name) - - def assignValue(self,value): - self.VARIABLE.put(value) - - def getValue(self): - return self.VARIABLE.get() - - def __dir__(self): - return self.VARIABLE.attributes().keys() - - def typecode(self): - NCdict = {pycdf.NC.FLOAT:'f', pycdf.NC.DOUBLE:'d', pycdf.NC.INT:'i', pycdf.NC.CHAR:'c', pycdf.NC.BYTE:'b'} - return NCdict[self.VARIABLE.inq_type()] - -print 'Using',netCDF_module,'for netCDF file I/O' diff --git a/tests/ismip-hom/.DS_Store b/tests/ismip-hom/.DS_Store new file mode 100644 index 00000000..2ecc83d8 Binary files /dev/null and b/tests/ismip-hom/.DS_Store differ diff --git a/tests/higher-order/ismip-hom/README.md b/tests/ismip-hom/README.md similarity index 97% rename from tests/higher-order/ismip-hom/README.md rename to tests/ismip-hom/README.md index 0321a29a..661c2f97 100644 --- a/tests/higher-order/ismip-hom/README.md +++ b/tests/ismip-hom/README.md @@ -8,7 +8,7 @@ physics. 
For more information, see The python scripts provided here (`runISMIP_HOM.py` and `plotISMIP_HOM.py`, referred to in the following as the ISMIP-HOM scripts) were created to run the -experiments using Glimmer/CISM and compare the results with results from other +experiments using CISM and compare the results with results from other models. PREREQUISITES: @@ -77,6 +77,13 @@ runs experiments a and c using the default domain sizes. Combine these as ``` to run experiments a and c with domain sizes 40, 80, and 160 km. +The experiment can also be run in parallel. For example, + +```sh +./runISMIP_HOM.py -n 4 +``` + +runs the default settings on 4 processors. CISM may not converge for every experiment with the default values of the grid size and other parameters. Some additional parameters can be changed using diff --git a/tests/higher-order/ismip-hom/ismip-hom.config b/tests/ismip-hom/ismip-hom.config similarity index 100% rename from tests/higher-order/ismip-hom/ismip-hom.config rename to tests/ismip-hom/ismip-hom.config diff --git a/tests/higher-order/ismip-hom/netCDF.py b/tests/ismip-hom/netCDF.py similarity index 100% rename from tests/higher-order/ismip-hom/netCDF.py rename to tests/ismip-hom/netCDF.py diff --git a/tests/higher-order/ismip-hom/plotISMIP_HOM.py b/tests/ismip-hom/plotISMIP_HOM.py similarity index 99% rename from tests/higher-order/ismip-hom/plotISMIP_HOM.py rename to tests/ismip-hom/plotISMIP_HOM.py index 8c1f49d0..9e98c919 100755 --- a/tests/higher-order/ismip-hom/plotISMIP_HOM.py +++ b/tests/ismip-hom/plotISMIP_HOM.py @@ -1,7 +1,7 @@ #!/usr/bin/env python2 """ -This script plots the results of running ISMIP-HOM experiments using Glimmer. +This script plots the results of running ISMIP-HOM experiments using CISM. Before running this script, run runISMIP_HOM.py to generate the results. runISMIP_HOM.py generates a set of output files that will follow the pattern: ismip-hom-?[-MOD].RESO.[pPROC.]out.nc, where `?` is a POSIX metacharacter, diff --git a/tests/higher-order/ismip-hom/runISMIP_HOM.py b/tests/ismip-hom/runISMIP_HOM.py similarity index 96% rename from tests/higher-order/ismip-hom/runISMIP_HOM.py rename to tests/ismip-hom/runISMIP_HOM.py index 6b540864..2667f738 100755 --- a/tests/higher-order/ismip-hom/runISMIP_HOM.py +++ b/tests/ismip-hom/runISMIP_HOM.py @@ -21,7 +21,7 @@ import netCDF from math import tan, sin, pi, exp -defaultSizes=[20,80] +defaultSizes=[5,10,20,40,80,160] defaultExperiments=['a','c'] # Parse the command line options @@ -226,13 +226,16 @@ def main(): offset = float(size)*1000.0 * tan(3.0 * pi/180.0) config_parser.set('parameters', 'periodic_offset_ew', str(offset)) - if experiment in ('c','d'): - # These tests have beta passed in from the input file, so change option accordingly. +# if experiment in ('c' 'd'): + if experiment in ('d'): + # This test has beta passed in from the input file, so change option accordingly. config_parser.set('ho_options', 'which_ho_babc', '5') - ##Optional: if doing experiment C, one can alternatively use the ho_babc option setup for this test case rather than passing in a beta - #if experiment in ('c'): - # config_parser.set('ho_options', 'which_ho_babc', '8') + # If doing experiment C, it is more accurate to use which_ho_babc = 8 than to pass in beta. + # If beta is read in on the global ice grid, there will be interpolation errors when it is + # averaged to the velocity grid. Constructing beta on the velocity grid at runtime avoids these errors.
+ if experiment in ('c'): + config_parser.set('ho_options', 'which_ho_babc', '8') # For test case F we need to make a few additional adjustments to the config if experiment in ('f'): diff --git a/tests/higher-order/ismip-hom/trilinosOptions.xml b/tests/ismip-hom/trilinosOptions.xml similarity index 100% rename from tests/higher-order/ismip-hom/trilinosOptions.xml rename to tests/ismip-hom/trilinosOptions.xml diff --git a/tests/regression/README.md b/tests/regression/README.md index aca0d9d1..e9d0e38b 100644 --- a/tests/regression/README.md +++ b/tests/regression/README.md @@ -22,7 +22,7 @@ External Packages * [HDF5 1.8.6](https://www.hdfgroup.org/HDF5/) If you have a working install of CISM, and you installed the suggested packages -in the [CISM](http://oceans11.lanl.gov/cism/documentation.html) users manual, +in the [CISM](https://github.com/CISM/cism-documentation) users manual, you'll likely already have everything you need. If you haven't previously built CISM on your machine, we suggest following the installation instructions as they are laid out in the users manual first. diff --git a/tests/regression/util/dicts.py b/tests/regression/util/dicts.py index 1c51fd3b..ef7bc989 100644 --- a/tests/regression/util/dicts.py +++ b/tests/regression/util/dicts.py @@ -19,7 +19,7 @@ # -o/--output-dir, -m/--modifier, or -s/--setup-only options because the # build_and_test script will include those automatically. -# The higher-order/dome test +# dome test # -------------------------- # for tests with -n N for N < 16 dome_perf_small = { @@ -40,7 +40,7 @@ 'b4': '--scale 4 -n 256', } -# The higher-order/shelf tests +# shelf tests # ---------------------------- # NOTE: empty dict because no performance testing for confined shelf. Leaving # here for possible future expansion. @@ -67,12 +67,12 @@ # This is the main dictionary that describes what tests to run. # Each dictionary item should consist of key-value pairs like: # key: 'path_to_test_from_$CISM/tests SIZE(optional) CASE' -# example: 'higher-order/dome' +# example: 'dome' # NOTE: key can be a space separated list with the first entry the path to # the test directory, the last entry is the specific test case, and # the rest of the list used to define uniqueness. This is useful for # tests that have multiple run scripts like shelf or ISMIP-HOM. -# example: 'higher-order/ismip-hom 20 a' +# example: 'ismip-hom 20 a' # # value: tuple of (run_script, perf_dict) where run_script is the test run # script that can be found within the directory specified by the key @@ -87,19 +87,19 @@ # size option in the run command reflects the slip ratio not the domain size # (like in the other ISMIP-HOM tests). 
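The dicts.py comments above describe the dictionary layout: each key is a space-separated string whose first entry is the test path under `$CISM/tests` (now without the `higher-order/` level) and whose last entry names the specific case, and each value pairs a run script with a performance dictionary. The build_and_test driver that consumes these dictionaries is not part of this diff, so the following is only a minimal sketch, under that assumption, of how one entry (abbreviated from the full dictionaries in the hunk that continues below) could be interpreted:

```python
# Hedged sketch: how a harness might consume one test_dict entry after this rename.
# The real build_and_test driver is not shown in this diff; the loop below is illustrative only.
test_dict = {
    'ismip-hom 20 ismip-hom-a': ('runISMIP_HOM.py -r a --size 20', {}),
}

for key, (run_script, perf_opts) in test_dict.items():
    parts = key.split()
    test_dir = parts[0]    # path under $CISM/tests, e.g. 'ismip-hom' (was 'higher-order/ismip-hom')
    case = parts[-1]       # unique case label, e.g. 'ismip-hom-a'
    print('cd tests/%s && ./%s   # case: %s' % (test_dir, run_script, case))
```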
test_dict = { - 'higher-order/dome dome': ('runDome.py', dome_perf_small), - 'higher-order/shelf shelf-confined': ('runShelfConfined.py', shelfConfined_perf_small), - 'higher-order/shelf shelf-circular': ('runShelfCircular.py', shelfConfined_perf_small), - 'higher-order/ismip-hom 20 ismip-hom-a': ('runISMIP_HOM.py -r a --size 20', ismip_perf_small), - 'higher-order/ismip-hom 20 ismip-hom-c': ('runISMIP_HOM.py -r c --size 20', keep_empty), - 'higher-order/ismip-hom 80 ismip-hom-a': ('runISMIP_HOM.py -r a --size 80', keep_empty), - 'higher-order/ismip-hom 80 ismip-hom-c': ('runISMIP_HOM.py -r c --size 80', keep_empty), - 'higher-order/ismip-hom 0 ismip-hom-f': ('runISMIP_HOM.py -r f --size 0', keep_empty), - 'higher-order/stream stream': ('runStream.py', stream_perf_small), + 'dome dome': ('runDome.py', dome_perf_small), + 'shelf shelf-confined': ('runShelfConfined.py', shelfConfined_perf_small), + 'shelf shelf-circular': ('runShelfCircular.py', shelfConfined_perf_small), + 'ismip-hom 20 ismip-hom-a': ('runISMIP_HOM.py -r a --size 20', ismip_perf_small), + 'ismip-hom 20 ismip-hom-c': ('runISMIP_HOM.py -r c --size 20', keep_empty), + 'ismip-hom 80 ismip-hom-a': ('runISMIP_HOM.py -r a --size 80', keep_empty), + 'ismip-hom 80 ismip-hom-c': ('runISMIP_HOM.py -r c --size 80', keep_empty), + 'ismip-hom 0 ismip-hom-f': ('runISMIP_HOM.py -r f --size 0', keep_empty), + 'stream stream': ('runStream.py', stream_perf_small), } perf_dict = { - 'higher-order/dome dome': ('runDome.py', dome_perf_large), + 'dome dome': ('runDome.py', dome_perf_large), } # HPC PLATFORM DICTIONARIES diff --git a/tests/higher-order/ross/README.md b/tests/ross/README.md similarity index 100% rename from tests/higher-order/ross/README.md rename to tests/ross/README.md diff --git a/tests/higher-order/ross/data/111by147Grid.dat b/tests/ross/data/111by147Grid.dat similarity index 100% rename from tests/higher-order/ross/data/111by147Grid.dat rename to tests/ross/data/111by147Grid.dat diff --git a/tests/higher-order/ross/data/inlets.dat b/tests/ross/data/inlets.dat similarity index 100% rename from tests/higher-order/ross/data/inlets.dat rename to tests/ross/data/inlets.dat diff --git a/tests/higher-order/ross/data/kbc.dat b/tests/ross/data/kbc.dat similarity index 100% rename from tests/higher-order/ross/data/kbc.dat rename to tests/ross/data/kbc.dat diff --git a/tests/higher-order/ross/data/readme.riggs_clean b/tests/ross/data/readme.riggs_clean similarity index 100% rename from tests/higher-order/ross/data/readme.riggs_clean rename to tests/ross/data/readme.riggs_clean diff --git a/tests/higher-order/ross/data/readme.txt b/tests/ross/data/readme.txt similarity index 100% rename from tests/higher-order/ross/data/readme.txt rename to tests/ross/data/readme.txt diff --git a/tests/higher-order/ross/data/riggs_clean.dat b/tests/ross/data/riggs_clean.dat similarity index 100% rename from tests/higher-order/ross/data/riggs_clean.dat rename to tests/ross/data/riggs_clean.dat diff --git a/tests/higher-order/ross/netCDF.py b/tests/ross/netCDF.py similarity index 100% rename from tests/higher-order/ross/netCDF.py rename to tests/ross/netCDF.py diff --git a/tests/higher-order/ross/plotRoss.py b/tests/ross/plotRoss.py similarity index 100% rename from tests/higher-order/ross/plotRoss.py rename to tests/ross/plotRoss.py diff --git a/tests/higher-order/ross/ross.config b/tests/ross/ross.config similarity index 100% rename from tests/higher-order/ross/ross.config rename to tests/ross/ross.config diff --git a/tests/higher-order/ross/runRoss.py 
b/tests/ross/runRoss.py similarity index 100% rename from tests/higher-order/ross/runRoss.py rename to tests/ross/runRoss.py diff --git a/tests/higher-order/ross/trilinosOptions.xml b/tests/ross/trilinosOptions.xml similarity index 100% rename from tests/higher-order/ross/trilinosOptions.xml rename to tests/ross/trilinosOptions.xml diff --git a/tests/higher-order/shelf/README.md b/tests/shelf/README.md similarity index 100% rename from tests/higher-order/shelf/README.md rename to tests/shelf/README.md diff --git a/tests/higher-order/shelf/netCDF.py b/tests/shelf/netCDF.py similarity index 100% rename from tests/higher-order/shelf/netCDF.py rename to tests/shelf/netCDF.py diff --git a/tests/higher-order/shelf/runShelfCircular.py b/tests/shelf/runShelfCircular.py similarity index 100% rename from tests/higher-order/shelf/runShelfCircular.py rename to tests/shelf/runShelfCircular.py diff --git a/tests/higher-order/shelf/runShelfConfined.py b/tests/shelf/runShelfConfined.py similarity index 100% rename from tests/higher-order/shelf/runShelfConfined.py rename to tests/shelf/runShelfConfined.py diff --git a/tests/higher-order/shelf/shelf-circular.config b/tests/shelf/shelf-circular.config similarity index 100% rename from tests/higher-order/shelf/shelf-circular.config rename to tests/shelf/shelf-circular.config diff --git a/tests/higher-order/shelf/shelf-confined.config b/tests/shelf/shelf-confined.config similarity index 100% rename from tests/higher-order/shelf/shelf-confined.config rename to tests/shelf/shelf-confined.config diff --git a/tests/higher-order/shelf/trilinosOptions.xml b/tests/shelf/trilinosOptions.xml similarity index 100% rename from tests/higher-order/shelf/trilinosOptions.xml rename to tests/shelf/trilinosOptions.xml diff --git a/tests/higher-order/slab/README.md b/tests/slab/README.md similarity index 100% rename from tests/higher-order/slab/README.md rename to tests/slab/README.md diff --git a/tests/higher-order/slab/netCDF.py b/tests/slab/netCDF.py similarity index 100% rename from tests/higher-order/slab/netCDF.py rename to tests/slab/netCDF.py diff --git a/tests/higher-order/slab/plotSlab.py b/tests/slab/plotSlab.py similarity index 100% rename from tests/higher-order/slab/plotSlab.py rename to tests/slab/plotSlab.py diff --git a/tests/higher-order/slab/runSlab.py b/tests/slab/runSlab.py similarity index 100% rename from tests/higher-order/slab/runSlab.py rename to tests/slab/runSlab.py diff --git a/tests/higher-order/slab/slab.config b/tests/slab/slab.config similarity index 100% rename from tests/higher-order/slab/slab.config rename to tests/slab/slab.config diff --git a/tests/higher-order/slab/trilinosOptions.xml b/tests/slab/trilinosOptions.xml similarity index 100% rename from tests/higher-order/slab/trilinosOptions.xml rename to tests/slab/trilinosOptions.xml diff --git a/tests/higher-order/stream/README.md b/tests/stream/README.md similarity index 100% rename from tests/higher-order/stream/README.md rename to tests/stream/README.md diff --git a/tests/higher-order/stream/netCDF.py b/tests/stream/netCDF.py similarity index 100% rename from tests/higher-order/stream/netCDF.py rename to tests/stream/netCDF.py diff --git a/tests/higher-order/stream/plotStream.py b/tests/stream/plotStream.py similarity index 100% rename from tests/higher-order/stream/plotStream.py rename to tests/stream/plotStream.py diff --git a/tests/higher-order/stream/runStream.py b/tests/stream/runStream.py similarity index 100% rename from tests/higher-order/stream/runStream.py rename to 
tests/stream/runStream.py diff --git a/tests/higher-order/stream/stream.config b/tests/stream/stream.config similarity index 100% rename from tests/higher-order/stream/stream.config rename to tests/stream/stream.config diff --git a/tests/higher-order/stream/trilinosOptions.xml b/tests/stream/trilinosOptions.xml similarity index 100% rename from tests/higher-order/stream/trilinosOptions.xml rename to tests/stream/trilinosOptions.xml
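The remaining hunks simply move the ross, shelf, slab, and stream tests up one directory level (tests/higher-order/* to tests/*). As the halfar README change earlier in this patch illustrates, relative links to the executable lose one `../` after the move. Below is a minimal sketch of refreshing such a link from a relocated test directory, following the try/except symlink idiom used by the run scripts in this patch; 'platform-compiler' stands in for an actual build directory such as mac-gnu:

```python
# Hedged sketch: refresh the cism_driver link from a relocated test directory
# (e.g. tests/dome rather than tests/higher-order/dome), where the relative path
# to the build tree is now one level shorter.  'platform-compiler' is a placeholder.
import os

target = '../../builds/platform-compiler/cism_driver/cism_driver'
link = 'cism_driver'

if os.path.islink(link):
    os.remove(link)      # drop a link that may still point at the old ../../../ layout
try:
    os.symlink(target, link)
except OSError:
    pass                 # an ordinary file named cism_driver is already present; keep it
```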