From 0ef1ff61b89b39b7846492440bdda7153736b1a3 Mon Sep 17 00:00:00 2001 From: TillRasmussen Date: Fri, 15 Dec 2023 22:43:42 +0000 Subject: [PATCH 1/7] removal of unused variables. --- cicecore/cicedyn/infrastructure/ice_grid.F90 | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index ef2db8a11..78448f5ca 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -167,13 +167,6 @@ module ice_grid ! xyyav, & ! mean T-cell value of xyy ! yyyav ! mean T-cell value of yyy - real (kind=dbl_kind), & - dimension (:,:,:,:,:), allocatable, public :: & - mne, & ! matrices used for coordinate transformations in remapping - mnw, & ! ne = northeast corner, nw = northwest, etc. - mse, & - msw - ! masks real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) @@ -288,10 +281,6 @@ subroutine alloc_grid latn_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for N point lone_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for E point late_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for E point - mne (2,2,nx_block,ny_block,max_blocks), & ! matrices used for coordinate transformations in remapping - mnw (2,2,nx_block,ny_block,max_blocks), & ! ne = northeast corner, nw = northwest, etc. - mse (2,2,nx_block,ny_block,max_blocks), & - msw (2,2,nx_block,ny_block,max_blocks), & stat=ierr) if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory1') From 32e7e26533655c8121025599189809af37d2190e Mon Sep 17 00:00:00 2001 From: TillRasmussen Date: Fri, 15 Dec 2023 23:20:31 +0000 Subject: [PATCH 2/7] moved xav to transport. Could remove commented code. Could remove xav and yav as they are zero --- .../cicedyn/dynamics/ice_transport_remap.F90 | 279 +++++++++++++++--- cicecore/cicedyn/infrastructure/ice_grid.F90 | 17 -- 2 files changed, 232 insertions(+), 64 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 index dd59efc87..fbbac83f5 100644 --- a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 @@ -63,8 +63,32 @@ module ice_transport_remap ! if false, area flux is determined internally ! and is passed out +! REMOVE? I + ! geometric quantities used for remapping transport +! real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & +! xav , & ! mean T-cell value of x +! yav , & ! mean T-cell value of y +! xxav , & ! mean T-cell value of xx +! xyav , & ! mean T-cell value of xy +! yyav , & ! mean T-cell value of yy +! yyav ! mean T-cell value of yy +! xxxav, & ! mean T-cell value of xxx +! xxyav, & ! mean T-cell value of xxy +! xyyav, & ! mean T-cell value of xyy +! yyyav ! mean T-cell value of yyy + + real (kind=dbl_kind), parameter :: xav=c0 + real (kind=dbl_kind), parameter :: yav=c0 + real (kind=dbl_kind), parameter :: xxav=c1/c12 + real (kind=dbl_kind), parameter :: yyav=c1/c12 + logical (kind=log_kind), parameter :: bugcheck = .false. + interface limited_gradient + module procedure limited_gradient_cn_dbl, & + limited_gradient_cn_scalar + end interface + !======================================================================= ! Here is some information about how the incremental remapping scheme ! works in CICE and how it can be adapted for use in other models. 
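For reference, the parameter values introduced above (xav = yav = c0, xxav = yyav = c1/c12) are the exact cell averages over the scaled unit cell with dx = dy = 1 in which the remapping scheme works; on a physical rectangular cell the corresponding values would be dxT**2/12 and dyT**2/12, as the commented formulas in init_remap note. A short check of the constants:

\[
\overline{x} = \int_{-1/2}^{1/2} x \, dx = 0 , \qquad
\overline{x^2} = \int_{-1/2}^{1/2} x^2 \, dx = \frac{1}{12} ,
\]

and likewise the mean of y is 0 and the mean of y**2 is 1/12, which is why the xav and yav arrays can be replaced by the constant c0, and xxav, yyav by c1/c12.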
@@ -261,13 +285,13 @@ module ice_transport_remap subroutine init_remap - use ice_domain, only: nblocks - use ice_grid, only: xav, yav, xxav, yyav +! use ice_domain, only: nblocks +! use ice_grid, only: xav, yav, xxav, yyav ! dxT, dyT, xyav, & ! xxxav, xxyav, xyyav, yyyav - integer (kind=int_kind) :: & - i, j, iblk ! standard indices +! integer (kind=int_kind) :: & +! i, j, iblk ! standard indices character(len=*), parameter :: subname = '(init_remap)' @@ -277,27 +301,27 @@ subroutine init_remap ! Note: On a rectangular grid, the integral of any odd function ! of x or y = 0. - !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - xav(i,j,iblk) = c0 - yav(i,j,iblk) = c0 +! !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) +! do iblk = 1, nblocks +! do j = 1, ny_block +! do i = 1, nx_block +! xav(i,j,iblk) = c0 +! yav(i,j,iblk) = c0 !!! These formulas would be used on a rectangular grid !!! with dimensions (dxT, dyT): !!! xxav(i,j,iblk) = dxT(i,j,iblk)**2 / c12 !!! yyav(i,j,iblk) = dyT(i,j,iblk)**2 / c12 - xxav(i,j,iblk) = c1/c12 - yyav(i,j,iblk) = c1/c12 +! xxav(i,j,iblk) = c1/c12 +! yyav(i,j,iblk) = c1/c12 ! xyav(i,j,iblk) = c0 ! xxxav(i,j,iblk) = c0 ! xxyav(i,j,iblk) = c0 ! xyyav(i,j,iblk) = c0 ! yyyav(i,j,iblk) = c0 - enddo - enddo - enddo - !$OMP END PARALLEL DO +! enddo +! enddo +! enddo +! !$OMP END PARALLEL DO !------------------------------------------------------------------- ! Set logical l_fixed_area depending of the grid type. @@ -356,8 +380,8 @@ subroutine horizontal_remap (dt, ntrace, & use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_remap use ice_blocks, only: block, get_block, nghost use ice_grid, only: HTE, HTN, dxu, dyu, & - earea, narea, tarear, hm, & - xav, yav, xxav, yyav + earea, narea, tarear, hm!, & +! xav, yav, xxav, yyav ! xyav, xxxav, xxyav, xyyav, yyyav use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound @@ -519,9 +543,10 @@ subroutine horizontal_remap (dt, ntrace, & tracer_type, depend, & has_dependents, icellsnc(0,iblk), & indxinc(:,0), indxjnc(:,0), & - hm (:,:,iblk), xav (:,:,iblk), & - yav (:,:,iblk), xxav (:,:,iblk), & - yyav (:,:,iblk), & + hm (:,:,iblk), & +! xav (:,:,iblk), & +! yav (:,:,iblk), xxav (:,:,iblk), & +! yyav (:,:,iblk), & ! xyav (:,:,iblk), & ! xxxav (:,:,iblk), xxyav (:,:,iblk), & ! xyyav (:,:,iblk), yyyav (:,:,iblk), & @@ -539,9 +564,10 @@ subroutine horizontal_remap (dt, ntrace, & tracer_type, depend, & has_dependents, icellsnc (n,iblk), & indxinc (:,n), indxjnc(:,n), & - hm (:,:,iblk), xav (:,:,iblk), & - yav (:,:,iblk), xxav (:,:,iblk), & - yyav (:,:,iblk), & + hm (:,:,iblk), & +! xav (:,:,iblk), & +! yav (:,:,iblk), xxav (:,:,iblk), & +! yyav (:,:,iblk), & ! xyav (:,:,iblk), & ! xxxav (:,:,iblk), xxyav (:,:,iblk), & ! xyyav (:,:,iblk), yyyav (:,:,iblk), & @@ -1052,9 +1078,10 @@ subroutine construct_fields (nx_block, ny_block, & tracer_type, depend, & has_dependents, icells, & indxi, indxj, & - hm, xav, & - yav, xxav, & - yyav, & + hm, & +! xav, & +! yav, xxav, & +! yyav, & ! xyav, & ! xxxav, xxyav, & ! xyyav, yyyav, & @@ -1084,9 +1111,9 @@ subroutine construct_fields (nx_block, ny_block, & indxj real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - hm , & ! land/boundary mask, thickness (T-cell) - xav, yav , & ! mean T-cell values of x, y - xxav, yyav ! mean T-cell values of xx, yy + hm !, & ! land/boundary mask, thickness (T-cell) +! xav, yav , & ! mean T-cell values of x, y +! xxav, yyav ! 
mean T-cell values of xx, yy
! xyav, , & ! mean T-cell values of xy
! xxxav,xxyav,xyyav,yyyav ! mean T-cell values of xxx, xxy, xyy, yyy
@@ -1231,11 +1258,12 @@ subroutine construct_fields (nx_block, ny_block, &
! center of mass (mxav,myav) for each cell
! echmod: xyav = 0
- mxav(i,j) = (mx(i,j)*xxav(i,j) &
- + mc(i,j)*xav (i,j)) / mm(i,j)
- myav(i,j) = (my(i,j)*yyav(i,j) &
- + mc(i,j)*yav(i,j)) / mm(i,j)
-
+ mxav(i,j) = (mx(i,j)*xxav & !(i,j) &
+! + mc(i,j)*xav (i,j)) / mm(i,j)
+ + mc(i,j)*xav ) / mm(i,j)
+ myav(i,j) = (my(i,j)*yyav &!(i,j) &
+! + mc(i,j)*yav(i,j)) / mm(i,j)
+ + mc(i,j)*yav) / mm(i,j)
! mxav(i,j) = (mx(i,j)*xxav(i,j) &
! + my(i,j)*xyav(i,j) &
! + mc(i,j)*xav (i,j)) / mm(i,j)
@@ -1287,9 +1315,11 @@ subroutine construct_fields (nx_block, ny_block, &
! w6 = my(i,j)*ty(i,j,nt)
w7 = c1 / (mm(i,j)*tm(i,j,nt))
! echmod: grid arrays = 0
- mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) &
+! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) &
+ mtxav(i,j,nt) = (w1*xav + w2*xxav) &
* w7
- mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) &
+ mtyav(i,j,nt) = (w1*yav + w3*yyav) &
+! mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) &
* w7
! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) &
! + w3*xyav (i,j) + w4*xxxav(i,j) &
! + w5*xxyav(i,j) + w6*xyyav(i,j)) &
@@ -1365,12 +1395,12 @@ end subroutine construct_fields
! authors William H. Lipscomb, LANL
! John R. Baumgardner, LANL
- subroutine limited_gradient (nx_block, ny_block, &
- ilo, ihi, jlo, jhi, &
- nghost, &
- phi, phimask, &
- cnx, cny, &
- gx, gy)
+ subroutine limited_gradient_cn_dbl (nx_block, ny_block, &
+ ilo, ihi, jlo, jhi, &
+ nghost, &
+ phi, phimask, &
+ cnx, cny, &
+ gx, gy)
integer (kind=int_kind), intent(in) :: &
nx_block, ny_block, & ! block dimensions
@@ -1379,12 +1409,14 @@ subroutine limited_gradient (nx_block, ny_block, &
real (kind=dbl_kind), dimension (nx_block,ny_block), intent (in) :: &
phi , & ! input tracer field (mean values in each grid cell)
- cnx , & ! x-coordinate of phi relative to geometric center of cell
- cny , & ! y-coordinate of phi relative to geometric center of cell
phimask ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise.
! For instance, aice has no physical meaning in land cells,
! and hice no physical meaning where aice = 0.
+ real (kind=dbl_kind), dimension (nx_block,ny_block), intent (in) :: &
+ cnx , & ! x-coordinate of phi relative to geometric center of cell
+ cny ! y-coordinate of phi relative to geometric center of cell
+
real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: &
gx , & ! limited x-direction gradient
gy ! limited y-direction gradient
@@ -1410,7 +1442,7 @@ subroutine limited_gradient (nx_block, ny_block, &
puny , & !
gxtmp, gytmp ! temporary term for x- and y- limited gradient
- character(len=*), parameter :: subname = '(limited_gradient)'
+ character(len=*), parameter :: subname = '(limited_gradient_cn_dbl)'
call icepack_query_parameters(puny_out=puny)
call icepack_warnings_flush(nu_diag)
@@ -1508,7 +1540,160 @@ subroutine limited_gradient (nx_block, ny_block, &
enddo ! ij
- end subroutine limited_gradient
+ end subroutine limited_gradient_cn_dbl
+
+!=======================================================================
+! Part 2 of the interface that allow cnx and cny to either matrices or scalars
+! Based on limited_gradient_cn_dbl
+! Updated by Till Rasmussen, DMI
+
+ subroutine limited_gradient_cn_scalar (nx_block, ny_block, &
+ ilo, ihi, jlo, jhi, &
+ nghost, &
+ phi, phimask, &
+ cnx, cny, &
+ gx, gy)
+
+ integer (kind=int_kind), intent(in) :: &
+ nx_block, ny_block, & ! block dimensions
+ ilo,ihi,jlo,jhi , & ! 
beginning and end of physical domain + nghost ! number of ghost cells + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent (in) :: & + phi , & ! input tracer field (mean values in each grid cell) + phimask ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise. + ! For instance, aice has no physical meaning in land cells, + ! and hice no physical meaning where aice = 0. + + real (kind=dbl_kind), intent (in) :: & + cnx , & ! x-coordinate of phi relative to geometric center of cell + cny ! y-coordinate of phi relative to geometric center of cell + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + gx , & ! limited x-direction gradient + gy ! limited y-direction gradient + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij , & ! standard indices + icells ! number of cells to limit + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! combined i/j horizontal indices + + real (kind=dbl_kind) :: & + phi_nw, phi_n, phi_ne , & ! values of phi in 8 neighbor cells + phi_w, phi_e , & + phi_sw, phi_s, phi_se , & + qmn, qmx , & ! min and max value of phi within grid cell + pmn, pmx , & ! min and max value of phi among neighbor cells + w1, w2, w3, w4 ! work variables + + real (kind=dbl_kind) :: & + puny , & ! + gxtmp, gytmp ! temporary term for x- and y- limited gradient + + character(len=*), parameter :: subname = '(limited_gradient_cn_scalar)' + + call icepack_query_parameters(puny_out=puny) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + gx(:,:) = c0 + gy(:,:) = c0 + + ! For nghost = 1, loop over physical cells and update ghost cells later + ! For nghost = 2, loop over a layer of ghost cells and skip the update + + icells = 0 + do j = jlo-nghost+1, jhi+nghost-1 + do i = ilo-nghost+1, ihi+nghost-1 + if (phimask(i,j) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! phimask > puny + enddo + enddo + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! Store values of phi in the 8 neighbor cells. + ! Note: phimask = 1. or 0. If phimask = 1., use the true value; + ! if phimask = 0., use the home cell value so that non-physical + ! values of phi do not contribute to the gradient. + phi_nw = phimask(i-1,j+1) * phi(i-1,j+1) & + + (c1-phimask(i-1,j+1))* phi(i ,j ) + phi_n = phimask(i ,j+1) * phi(i ,j+1) & + + (c1-phimask(i ,j+1))* phi(i ,j ) + phi_ne = phimask(i+1,j+1) * phi(i+1,j+1) & + + (c1-phimask(i+1,j+1))* phi(i ,j ) + phi_w = phimask(i-1,j ) * phi(i-1,j ) & + + (c1-phimask(i-1,j ))* phi(i ,j ) + phi_e = phimask(i+1,j ) * phi(i+1,j ) & + + (c1-phimask(i+1,j ))* phi(i ,j ) + phi_sw = phimask(i-1,j-1) * phi(i-1,j-1) & + + (c1-phimask(i-1,j-1))* phi(i ,j ) + phi_s = phimask(i ,j-1) * phi(i ,j-1) & + + (c1-phimask(i ,j-1))* phi(i ,j ) + phi_se = phimask(i+1,j-1) * phi(i+1,j-1) & + + (c1-phimask(i+1,j-1))* phi(i ,j ) + + ! unlimited gradient components + ! (factors of two cancel out) + + gxtmp = (phi_e - phi_w) * p5 + gytmp = (phi_n - phi_s) * p5 + + ! minimum and maximum among the nine local cells + pmn = min (phi_nw, phi_n, phi_ne, phi_w, phi(i,j), & + phi_e, phi_sw, phi_s, phi_se) + pmx = max (phi_nw, phi_n, phi_ne, phi_w, phi(i,j), & + phi_e, phi_sw, phi_s, phi_se) + + pmn = pmn - phi(i,j) + pmx = pmx - phi(i,j) + + ! minimum and maximum deviation of phi within the cell + ! 
minimum and maximum deviation of phi within the cell + w1 = (p5 - cnx) * gxtmp & + + (p5 - cny) * gytmp + w2 = (p5 - cnx) * gxtmp & + - (p5 + cny) * gytmp + w3 = -(p5 + cnx) * gxtmp & + - (p5 + cny) * gytmp + w4 = (p5 - cny) * gytmp & + - (p5 + cnx) * gxtmp + + qmn = min (w1, w2, w3, w4) + qmx = max (w1, w2, w3, w4) + + ! the limiting coefficient + if ( abs(qmn) > abs(pmn) ) then ! 'abs(qmn) > puny' not sufficient + w1 = max(c0, pmn/qmn) + else + w1 = c1 + endif + + if ( abs(qmx) > abs(pmx) ) then + w2 = max(c0, pmx/qmx) + else + w2 = c1 + endif + + w1 = min(w1, w2) + + ! Limit the gradient components + gx(i,j) = w1 * gxtmp + gy(i,j) = w1 * gytmp + + enddo ! ij + + end subroutine limited_gradient_cn_scalar !======================================================================= ! diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index 78448f5ca..bebfc7298 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -154,19 +154,6 @@ module ice_grid lone_bounds, & ! longitude of gridbox corners for E point late_bounds ! latitude of gridbox corners for E point - ! geometric quantities used for remapping transport - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - xav , & ! mean T-cell value of x - yav , & ! mean T-cell value of y - xxav , & ! mean T-cell value of xx -! xyav , & ! mean T-cell value of xy -! yyav , & ! mean T-cell value of yy - yyav ! mean T-cell value of yy -! xxxav, & ! mean T-cell value of xxx -! xxyav, & ! mean T-cell value of xxy -! xyyav, & ! mean T-cell value of xyy -! yyyav ! mean T-cell value of yyy - ! masks real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) @@ -255,10 +242,6 @@ subroutine alloc_grid cxm (nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS dxhy (nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) dyhx (nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) - xav (nx_block,ny_block,max_blocks), & ! mean T-cell value of x - yav (nx_block,ny_block,max_blocks), & ! mean T-cell value of y - xxav (nx_block,ny_block,max_blocks), & ! mean T-cell value of xx - yyav (nx_block,ny_block,max_blocks), & ! mean T-cell value of yy hm (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) bm (nx_block,ny_block,max_blocks), & ! task/block id uvm (nx_block,ny_block,max_blocks), & ! 
land/boundary mask, velocity (U-cell) - water in case of all water point From 6eac1a60832bade7e18a7fd0cd40c9112a6425c4 Mon Sep 17 00:00:00 2001 From: TillRasmussen Date: Sun, 17 Dec 2023 13:16:45 +0000 Subject: [PATCH 3/7] Move derived parameters and only allocate if needed --- cicecore/cicedyn/dynamics/ice_dyn_eap.F90 | 4 +- cicecore/cicedyn/dynamics/ice_dyn_evp.F90 | 54 ++++++++++++-- cicecore/cicedyn/dynamics/ice_dyn_shared.F90 | 74 +++++++++++++++++-- cicecore/cicedyn/dynamics/ice_dyn_vp.F90 | 29 +++----- cicecore/cicedyn/infrastructure/ice_grid.F90 | 77 ++------------------ 5 files changed, 132 insertions(+), 106 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 index e240fc8f1..a8ac62797 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 @@ -109,7 +109,7 @@ subroutine eap (dt) seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & stack_fields, unstack_fields, iceTmask, iceUmask, & - fld2, fld3, fld4 + fld2, fld3, fld4, dxhy, dyhx, cxp, cyp, cxm, cym use ice_flux, only: rdg_conv, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -118,7 +118,7 @@ subroutine eap (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & + use ice_grid, only: tmask, umask, dxT, dyT, & tarear, uarear, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 index ee832e447..1f73f8467 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 @@ -84,6 +84,12 @@ module ice_dyn_evp emass (:,:,:) , & ! total mass of ice and snow (E grid) emassdti (:,:,:) ! mass of E-cell/dte (kg/m^2 s) + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) + ratiodyE , & ! - dyE(i ,j+1) / dyE(i,j) + ratiodxNr , & ! 1 / ratiodxN + ratiodyEr ! 1 / ratiodyE + real (kind=dbl_kind), allocatable :: & strengthU(:,:,:) , & ! strength averaged to U points divergU (:,:,:) , & ! div array on U points, differentiate from divu @@ -119,9 +125,10 @@ module ice_dyn_evp ! Elastic-viscous-plastic dynamics driver ! subroutine init_evp - use ice_blocks, only: nx_block, ny_block, nghost - use ice_domain_size, only: max_blocks, nx_global, ny_global - use ice_grid, only: grid_ice, dyT, dxT, uarear, tmask, G_HTE, G_HTN + use ice_blocks, only: get_block, nx_block, ny_block, nghost, block + use ice_domain_size, only: max_blocks + use ice_domain, only: nblocks, blocks_ice + use ice_grid, only: grid_ice, dyT, dxT, uarear, tmask, G_HTE, G_HTN, dxN, dyE use ice_calendar, only: dt_dyn use ice_dyn_shared, only: init_dyn_shared, evp_algorithm use ice_dyn_evp1d, only: dyn_evp1d_init @@ -131,6 +138,14 @@ subroutine init_evp character(len=*), parameter :: subname = '(init_evp)' + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: & + i, j, iblk , & ! block index + ilo,ihi,jlo,jhi ! 
beginning and end of physical domain + + call init_dyn_shared(dt_dyn) if (evp_algorithm == "shared_mem_1d" ) then @@ -197,6 +212,33 @@ subroutine init_evp stat=ierr) if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory E evp') + allocate( & + ratiodxN (nx_block,ny_block,max_blocks), & + ratiodyE (nx_block,ny_block,max_blocks), & + ratiodxNr(nx_block,ny_block,max_blocks), & + ratiodyEr(nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory ratio') + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + ratiodxN (i,j,iblk) = - dxN(i+1,j ,iblk) / dxN(i,j,iblk) + ratiodyE (i,j,iblk) = - dyE(i ,j+1,iblk) / dyE(i,j,iblk) + ratiodxNr(i,j,iblk) = c1 / ratiodxN(i,j,iblk) + ratiodyEr(i,j,iblk) = c1 / ratiodyE(i,j,iblk) + enddo + enddo + enddo ! iblk + !$OMP END PARALLEL DO + endif end subroutine init_evp @@ -219,7 +261,7 @@ subroutine evp (dt) ice_HaloDestroy, ice_HaloUpdate_stress use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_dyn - use ice_domain_size, only: max_blocks, ncat, nx_global, ny_global + use ice_domain_size, only: max_blocks, ncat use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -238,8 +280,6 @@ subroutine evp (dt) stresspU, stressmU, stress12U use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & - ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & - dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, grid_average_X2Y, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv @@ -250,7 +290,7 @@ subroutine evp (dt) ice_timer_start, ice_timer_stop, timer_evp use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & - strain_rates_U, & + strain_rates_U, dxhy, dyhx, cxp, cyp, cxm, cym, & iceTmask, iceUmask, iceEmask, iceNmask, & dyn_haloUpdate, fld2, fld3, fld4 use ice_dyn_evp1d, only: dyn_evp1d_run diff --git a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 index 9dbeaf1a7..4c8a5f1b5 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 @@ -11,7 +11,7 @@ module ice_dyn_shared use ice_kinds_mod use ice_communicate, only: my_task, master_task, get_num_procs - use ice_constants, only: c0, c1, c2, c3, c4, c6 + use ice_constants, only: c0, c1, c2, c3, c4, c6, c1p5 use ice_constants, only: omega, spval_dbl, p01, p001, p5 use ice_blocks, only: nx_block, ny_block use ice_domain_size, only: max_blocks @@ -119,6 +119,14 @@ module ice_dyn_shared real (kind=dbl_kind), allocatable, public :: & DminTarea(:,:,:) ! deltamin * tarea (m^2/s) + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) + cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) + cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) + cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) + dxhy , & ! 
0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) + dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) + ! ice isotropic tensile strength parameter real (kind=dbl_kind), public :: & Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) @@ -192,6 +200,18 @@ subroutine alloc_dyn_shared stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') + if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then + allocate( & + cyp (nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW + cxp (nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS + cym (nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW + cxm (nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS + dxhy (nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) + dyhx (nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) + stat=ierr) + if (ierr/=0) call abort_ice(subname//': Out of memory') + endif + if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate( & uvelE_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep @@ -214,8 +234,9 @@ end subroutine alloc_dyn_shared subroutine init_dyn_shared (dt) - use ice_blocks, only: nx_block, ny_block - use ice_domain, only: nblocks, halo_dynbundle + use ice_blocks, only: block, get_block + use ice_boundary, only: ice_halo, ice_haloUpdate + use ice_domain, only: nblocks, halo_dynbundle, blocks_ice, halo_info use ice_domain_size, only: max_blocks use ice_flux, only: & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -224,7 +245,8 @@ subroutine init_dyn_shared (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN - use ice_grid, only: ULAT, NLAT, ELAT, tarea + use ice_grid, only: ULAT, NLAT, ELAT, tarea, HTE, HTN + use ice_constants, only: field_loc_center, field_type_vector real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -232,9 +254,13 @@ subroutine init_dyn_shared (dt) ! local variables integer (kind=int_kind) :: & - i, j , & ! indices - nprocs, & ! number of processors - iblk ! block index + i, j , & ! indices + ilo, ihi, jlo, jhi, & !min and max index for interior of blocks + nprocs, & ! number of processors + iblk ! block index + + type (block) :: & + this_block ! block information for current block character(len=*), parameter :: subname = '(init_dyn_shared)' @@ -333,6 +359,40 @@ subroutine init_dyn_shared (dt) enddo ! iblk !$OMP END PARALLEL DO + if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) + dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) + enddo + enddo + + do j = jlo, jhi+1 + do i = ilo, ihi+1 + cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) + cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) + ! 
match order of operations in cyp, cxp for tripole grids + cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) + cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) + enddo + enddo + enddo + call ice_HaloUpdate (dxhy, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + call ice_HaloUpdate (dyhx, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + + endif + end subroutine init_dyn_shared !======================================================================= diff --git a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 index 58589f8d7..e7edb53ee 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 @@ -43,16 +43,17 @@ module ice_dyn_vp field_type_scalar, field_type_vector use ice_constants, only: c0, p027, p055, p111, p166, & p222, p25, p333, p5, c1 - use ice_domain, only: nblocks, distrb_info + use ice_domain, only: nblocks, distrb_info, halo_info use ice_domain_size, only: max_blocks use ice_dyn_shared, only: dyn_prep1, dyn_prep2, dyn_finish, & cosw, sinw, fcor_blk, uvel_init, vvel_init, & seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & - seabed_stress, Ktens, stack_fields, unstack_fields, fld2, fld3, fld4 + seabed_stress, Ktens, stack_fields, unstack_fields, fld2, fld3, fld4, & + dxhy, dyhx, cxp, cyp, cxm, cym use ice_fileunits, only: nu_diag use ice_flux, only: fmU use ice_global_reductions, only: global_sum - use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear + use ice_grid, only: dxT, dyT, uarear use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_ice_strength, icepack_query_parameters @@ -120,19 +121,9 @@ subroutine init_vp use ice_boundary, only: ice_HaloUpdate use ice_constants, only: c1, & field_loc_center, field_type_scalar - use ice_domain, only: blocks_ice, halo_info + use ice_domain, only: blocks_ice use ice_calendar, only: dt_dyn use ice_dyn_shared, only: init_dyn_shared -! use ice_grid, only: tarea - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - type (block) :: & - this_block ! 
block information for current block call init_dyn_shared(dt_dyn) @@ -167,7 +158,8 @@ subroutine implicit_solver (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, halo_info, maskhalo_dyn use ice_domain_size, only: max_blocks, ncat - use ice_dyn_shared, only: deformations, iceTmask, iceUmask + use ice_dyn_shared, only: deformations, iceTmask, iceUmask, & + cxp, cyp, cxm, cym use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -176,7 +168,7 @@ subroutine implicit_solver (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & + use ice_grid, only: tmask, umask, dxT, dyT, & tarear, grid_type, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & @@ -686,9 +678,8 @@ subroutine anderson_solver (icellT , icellU , & use ice_domain, only: maskhalo_dyn, halo_info use ice_domain_size, only: max_blocks use ice_flux, only: fmU, TbU - use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & - uarear - use ice_dyn_shared, only: DminTarea + use ice_grid, only: dxT, dyT, uarear + use ice_dyn_shared, only: DminTarea, dxhy, dyhx, cxp, cyp, cxm, cym use ice_state, only: uvel, vvel, strength use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index bebfc7298..e57092d24 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -115,20 +115,6 @@ module ice_grid G_HTE , & ! length of eastern edge of T-cell (global ext.) G_HTN ! length of northern edge of T-cell (global ext.) - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) - cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) - cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) - cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) - dxhy , & ! 0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) - dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) - ratiodyE , & ! - dyE(i ,j+1) / dyE(i,j) - ratiodxNr , & ! 1 / ratiodxN - ratiodyEr ! 1 / ratiodyE - ! grid dimensions for rectangular grid real (kind=dbl_kind), public :: & dxrect, & ! user_specified spacing (cm) in x-direction (uniform HTN) @@ -236,12 +222,6 @@ subroutine alloc_grid ANGLET (nx_block,ny_block,max_blocks), & ! ANGLE converted to T-cells bathymetry(nx_block,ny_block,max_blocks),& ! ocean depth, for grounding keels and bergs (m) ocn_gridcell_frac(nx_block,ny_block,max_blocks),& ! only relevant for lat-lon grids - cyp (nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW - cxp (nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS - cym (nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW - cxm (nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS - dxhy (nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) - dyhx (nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) hm (nx_block,ny_block,max_blocks), & ! 
land/boundary mask, thickness (T-cell) bm (nx_block,ny_block,max_blocks), & ! task/block id uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) - water in case of all water point @@ -267,16 +247,6 @@ subroutine alloc_grid stat=ierr) if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory1') - if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate( & - ratiodxN (nx_block,ny_block,max_blocks), & - ratiodyE (nx_block,ny_block,max_blocks), & - ratiodxNr(nx_block,ny_block,max_blocks), & - ratiodyEr(nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory2') - endif - if (save_ghte_ghtn) then if (my_task == master_task) then allocate( & @@ -571,34 +541,6 @@ subroutine init_grid2 enddo enddo - do j = jlo, jhi - do i = ilo, ihi - dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) - dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) - enddo - enddo - - do j = jlo, jhi+1 - do i = ilo, ihi+1 - cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) - cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) - ! match order of operations in cyp, cxp for tripole grids - cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) - cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) - enddo - enddo - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - do j = jlo, jhi - do i = ilo, ihi - ratiodxN (i,j,iblk) = - dxN(i+1,j ,iblk) / dxN(i,j,iblk) - ratiodyE (i,j,iblk) = - dyE(i ,j+1,iblk) / dyE(i,j,iblk) - ratiodxNr(i,j,iblk) = c1 / ratiodxN(i,j,iblk) - ratiodyEr(i,j,iblk) = c1 / ratiodyE(i,j,iblk) - enddo - enddo - endif - enddo ! iblk !$OMP END PARALLEL DO @@ -614,13 +556,6 @@ subroutine init_grid2 call ice_timer_start(timer_bound) - call ice_HaloUpdate (dxhy, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - call ice_HaloUpdate (dyhx, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - ! Update just on the tripole seam to ensure bit-for-bit symmetry across seam call ice_HaloUpdate (tarea, halo_info, & field_loc_center, field_type_scalar, & @@ -1325,12 +1260,12 @@ subroutine latlongrid dyN (i,j,iblk) = 1.e36_dbl_kind dxE (i,j,iblk) = 1.e36_dbl_kind dyE (i,j,iblk) = 1.e36_dbl_kind - dxhy (i,j,iblk) = 1.e36_dbl_kind - dyhx (i,j,iblk) = 1.e36_dbl_kind - cyp (i,j,iblk) = 1.e36_dbl_kind - cxp (i,j,iblk) = 1.e36_dbl_kind - cym (i,j,iblk) = 1.e36_dbl_kind - cxm (i,j,iblk) = 1.e36_dbl_kind +!TILL dxhy (i,j,iblk) = 1.e36_dbl_kind +!TILL dyhx (i,j,iblk) = 1.e36_dbl_kind +!TILL cyp (i,j,iblk) = 1.e36_dbl_kind +!TILL cxp (i,j,iblk) = 1.e36_dbl_kind +!TILL cym (i,j,iblk) = 1.e36_dbl_kind +!TILL cxm (i,j,iblk) = 1.e36_dbl_kind enddo enddo enddo From 18e71613dcbd12d1483f70f32c72821f2ff560d3 Mon Sep 17 00:00:00 2001 From: TillRasmussen Date: Tue, 19 Dec 2023 22:35:59 +0000 Subject: [PATCH 4/7] bugfixes for cxp, cyp... --- cicecore/cicedyn/dynamics/ice_dyn_shared.F90 | 74 ++++++++++++-------- cicecore/cicedyn/dynamics/ice_dyn_vp.F90 | 7 +- 2 files changed, 47 insertions(+), 34 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 index 4c8a5f1b5..79eb8ae4e 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 @@ -200,12 +200,17 @@ subroutine alloc_dyn_shared stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') - if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then +! 
if (evp_algorithm == "standard_2d") then allocate( & cyp (nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW cxp (nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS cym (nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW cxm (nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS + stat=ierr) + if (ierr/=0) call abort_ice(subname//': Out of memory') + + if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then + allocate( & dxhy (nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) dyhx (nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) stat=ierr) @@ -360,39 +365,46 @@ subroutine init_dyn_shared (dt) !$OMP END PARALLEL DO if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) - dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) - enddo - enddo - - do j = jlo, jhi+1 - do i = ilo, ihi+1 - cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) - cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) - ! match order of operations in cyp, cxp for tripole grids - cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) - cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) - enddo - enddo - enddo - call ice_HaloUpdate (dxhy, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - call ice_HaloUpdate (dyhx, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) + dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) + enddo + enddo + enddo + call ice_HaloUpdate (dxhy, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + call ice_HaloUpdate (dyhx, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) endif + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi+1 + do i = ilo, ihi+1 + cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) + cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) + ! 
match order of operations in cyp, cxp for tripole grids + cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) + cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) + enddo + enddo + enddo + end subroutine init_dyn_shared !======================================================================= diff --git a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 index e7edb53ee..477d19515 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 @@ -43,13 +43,12 @@ module ice_dyn_vp field_type_scalar, field_type_vector use ice_constants, only: c0, p027, p055, p111, p166, & p222, p25, p333, p5, c1 - use ice_domain, only: nblocks, distrb_info, halo_info + use ice_domain, only: nblocks, distrb_info use ice_domain_size, only: max_blocks use ice_dyn_shared, only: dyn_prep1, dyn_prep2, dyn_finish, & cosw, sinw, fcor_blk, uvel_init, vvel_init, & seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & - seabed_stress, Ktens, stack_fields, unstack_fields, fld2, fld3, fld4, & - dxhy, dyhx, cxp, cyp, cxm, cym + seabed_stress, Ktens, stack_fields, unstack_fields, fld2, fld3, fld4 use ice_fileunits, only: nu_diag use ice_flux, only: fmU use ice_global_reductions, only: global_sum @@ -2744,6 +2743,7 @@ subroutine fgmres (zetax2 , etax2 , & use ice_boundary, only: ice_HaloUpdate use ice_domain, only: maskhalo_dyn, halo_info use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + use ice_dyn_shared, only: dxhy, dyhx, cxp, cyp, cxm, cym real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) @@ -3145,6 +3145,7 @@ subroutine pgmres (zetax2 , etax2 , & use ice_boundary, only: ice_HaloUpdate use ice_domain, only: maskhalo_dyn, halo_info use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + use ice_dyn_shared, only: dyhx, dxhy, cxp, cyp, cxm, cym real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! 
zetax2 = 2*zeta (bulk viscosity) From 1cc2b2471b0370f745324285df132d02e00a3a75 Mon Sep 17 00:00:00 2001 From: TillRasmussen Date: Mon, 1 Jan 2024 13:39:12 +0000 Subject: [PATCH 5/7] fix index and remove commented code in ice_grid --- cicecore/cicedyn/dynamics/ice_dyn_evp.F90 | 13 ++-- cicecore/cicedyn/dynamics/ice_dyn_shared.F90 | 66 +++++++++---------- .../cicedyn/dynamics/ice_transport_remap.F90 | 45 +++++++------ cicecore/cicedyn/infrastructure/ice_grid.F90 | 6 -- 4 files changed, 65 insertions(+), 65 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 index 1f73f8467..6a71d6a14 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 @@ -212,13 +212,12 @@ subroutine init_evp stat=ierr) if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory E evp') - allocate( & - ratiodxN (nx_block,ny_block,max_blocks), & - ratiodyE (nx_block,ny_block,max_blocks), & - ratiodxNr(nx_block,ny_block,max_blocks), & - ratiodyEr(nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory ratio') + allocate( ratiodxN (nx_block,ny_block,max_blocks), & + ratiodyE (nx_block,ny_block,max_blocks), & + ratiodxNr(nx_block,ny_block,max_blocks), & + ratiodyEr(nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory ratio') !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks diff --git a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 index 79eb8ae4e..d3f819f20 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 @@ -200,19 +200,18 @@ subroutine alloc_dyn_shared stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') -! if (evp_algorithm == "standard_2d") then - allocate( & - cyp (nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW - cxp (nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS - cym (nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW - cxm (nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS - stat=ierr) - if (ierr/=0) call abort_ice(subname//': Out of memory') + allocate( & + cyp(nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW + cxp(nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS + cym(nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW + cxm(nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS + stat=ierr) + if (ierr/=0) call abort_ice(subname//': Out of memory') if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then allocate( & - dxhy (nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) - dyhx (nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) + dxhy(nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) + dyhx(nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') endif @@ -364,27 +363,28 @@ subroutine init_dyn_shared (dt) enddo ! iblk !$OMP END PARALLEL DO - if (grid_ice == 'B' .and. 
evp_algorithm == "standard_2d") then - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) - dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) - enddo - enddo - enddo - call ice_HaloUpdate (dxhy, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - call ice_HaloUpdate (dyhx, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) + if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) + dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) + enddo + enddo + enddo + + call ice_HaloUpdate (dxhy, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + call ice_HaloUpdate (dyhx, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) endif @@ -401,8 +401,8 @@ subroutine init_dyn_shared (dt) ! match order of operations in cyp, cxp for tripole grids cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) - enddo - enddo + enddo + enddo enddo end subroutine init_dyn_shared diff --git a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 index fbbac83f5..e3bdcf7cf 100644 --- a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 @@ -63,8 +63,7 @@ module ice_transport_remap ! if false, area flux is determined internally ! and is passed out -! REMOVE? I - ! geometric quantities used for remapping transport +! geometric quantities used for remapping transport ! real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & ! xav , & ! mean T-cell value of x ! yav , & ! mean T-cell value of y @@ -85,7 +84,7 @@ module ice_transport_remap logical (kind=log_kind), parameter :: bugcheck = .false. interface limited_gradient - module procedure limited_gradient_cn_dbl, & + module procedure limited_gradient_cn_2d, & limited_gradient_cn_scalar end interface @@ -1385,22 +1384,24 @@ subroutine construct_fields (nx_block, ny_block, & end subroutine construct_fields !======================================================================= -! ! Compute a limited gradient of the scalar field phi in scaled coordinates. ! "Limited" means that we do not create new extrema in phi. For ! instance, field values at the cell corners can neither exceed the ! maximum of phi(i,j) in the cell and its eight neighbors, nor fall ! below the minimum. ! +! Part 1 of the interface limited_gradient that allows cnx and cny to be 2d arrays. +! This is identical to the original limited_gradient subroutine ! authors William H. Lipscomb, LANL ! John R. Baumgardner, LANL +! Updated to interface by Till Rasmussen, DMI - subroutine limited_gradient_cn_dbl (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, & - phi, phimask, & - cnx, cny, & - gx, gy) + subroutine limited_gradient_cn_2d (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + phi, phimask, & + cnx, cny, & + gx, gy) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! 
block dimensions @@ -1442,7 +1443,7 @@ subroutine limited_gradient_cn_dbl (nx_block, ny_block, & puny , & ! gxtmp, gytmp ! temporary term for x- and y- limited gradient - character(len=*), parameter :: subname = '(limited_gradient_cn_dbl)' + character(len=*), parameter :: subname = '(limited_gradient_cn_2d)' call icepack_query_parameters(puny_out=puny) call icepack_warnings_flush(nu_diag) @@ -1540,19 +1541,25 @@ subroutine limited_gradient_cn_dbl (nx_block, ny_block, & enddo ! ij - end subroutine limited_gradient_cn_dbl + end subroutine limited_gradient_cn_2d !======================================================================= -! Part 2 of the interface that allow cnx and cny to either matrices or scalars -! Based on limited_gradient_cn_dbl +! Compute a limited gradient of the scalar field phi in scaled coordinates. +! "Limited" means that we do not create new extrema in phi. For +! instance, field values at the cell corners can neither exceed the +! maximum of phi(i,j) in the cell and its eight neighbors, nor fall +! below the minimum. +! +! Part 2 of the interface limited_gradient that allows cnx and cny to be scalars +! Based on limited_gradient_cn_2d (previously limited_gradient) ! Updated by Till Rasmussen, DMI subroutine limited_gradient_cn_scalar (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, & - phi, phimask, & - cnx, cny, & - gx, gy) + ilo, ihi, jlo, jhi, & + nghost, & + phi, phimask, & + cnx, cny, & + gx, gy) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index e57092d24..1cc3540ca 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -1260,12 +1260,6 @@ subroutine latlongrid dyN (i,j,iblk) = 1.e36_dbl_kind dxE (i,j,iblk) = 1.e36_dbl_kind dyE (i,j,iblk) = 1.e36_dbl_kind -!TILL dxhy (i,j,iblk) = 1.e36_dbl_kind -!TILL dyhx (i,j,iblk) = 1.e36_dbl_kind -!TILL cyp (i,j,iblk) = 1.e36_dbl_kind -!TILL cxp (i,j,iblk) = 1.e36_dbl_kind -!TILL cym (i,j,iblk) = 1.e36_dbl_kind -!TILL cxm (i,j,iblk) = 1.e36_dbl_kind enddo enddo enddo From a618aef83cefc32c52d3d0dedd1d74afb011039c Mon Sep 17 00:00:00 2001 From: TillRasmussen Date: Mon, 1 Jan 2024 16:33:45 +0000 Subject: [PATCH 6/7] new version of transport_remap. xav, yav array where needed. xxav, yyav parameter --- .../cicedyn/dynamics/ice_transport_remap.F90 | 236 +++--------------- 1 file changed, 30 insertions(+), 206 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 index e3bdcf7cf..7aa38a281 100644 --- a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 @@ -63,31 +63,8 @@ module ice_transport_remap ! if false, area flux is determined internally ! and is passed out -! geometric quantities used for remapping transport -! real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & -! xav , & ! mean T-cell value of x -! yav , & ! mean T-cell value of y -! xxav , & ! mean T-cell value of xx -! xyav , & ! mean T-cell value of xy -! yyav , & ! mean T-cell value of yy -! yyav ! mean T-cell value of yy -! xxxav, & ! mean T-cell value of xxx -! xxyav, & ! mean T-cell value of xxy -! xyyav, & ! mean T-cell value of xyy -! yyyav ! 
mean T-cell value of yyy - - real (kind=dbl_kind), parameter :: xav=c0 - real (kind=dbl_kind), parameter :: yav=c0 - real (kind=dbl_kind), parameter :: xxav=c1/c12 - real (kind=dbl_kind), parameter :: yyav=c1/c12 - logical (kind=log_kind), parameter :: bugcheck = .false. - interface limited_gradient - module procedure limited_gradient_cn_2d, & - limited_gradient_cn_scalar - end interface - !======================================================================= ! Here is some information about how the incremental remapping scheme ! works in CICE and how it can be adapted for use in other models. @@ -1140,9 +1117,14 @@ subroutine construct_fields (nx_block, ny_block, & ij ! combined i/j horizontal index real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + xav , & ! mean T-cell values of x + yav , & ! mean T-cell values of y mxav , & ! x coordinate of center of mass myav ! y coordinate of center of mass + real (kind=dbl_kind), parameter :: xxav=c1/c12 ! mean T-cell values of xx + real (kind=dbl_kind), parameter :: yyav=c1/c12 ! mean T-cell values of yy + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace) :: & mtxav , & ! x coordinate of center of mass*tracer mtyav ! y coordinate of center of mass*tracer @@ -1203,9 +1185,11 @@ subroutine construct_fields (nx_block, ny_block, & do j = 1, ny_block do i = 1, nx_block - mc(i,j) = c0 - mx(i,j) = c0 - my(i,j) = c0 + xav(i,j) = c0 + yav(i,j) = c0 + mc(i,j) = c0 + mx(i,j) = c0 + my(i,j) = c0 mxav(i,j) = c0 myav(i,j) = c0 enddo @@ -1257,12 +1241,13 @@ subroutine construct_fields (nx_block, ny_block, & ! center of mass (mxav,myav) for each cell ! echmod: xyav = 0 - mxav(i,j) = (mx(i,j)*xxav & !(i,j) & +! mxav(i,j) = (mx(i,j)*xxav & !(i,j) & ! + mc(i,j)*xav (i,j)) / mm(i,j) - + mc(i,j)*xav ) / mm(i,j) - myav(i,j) = (my(i,j)*yyav &!(i,j) & +! + mc(i,j)*xav ) / mm(i,j) + mxav(i,j) = mx(i,j)*xxav / mm(i,j) +! myav(i,j) = (my(i,j)*yyav &!(i,j) & ! + mc(i,j)*yav(i,j)) / mm(i,j) - + mc(i,j)*yav) / mm(i,j) + myav(i,j) = my(i,j)*yyav / mm(i,j) ! mxav(i,j) = (mx(i,j)*xxav(i,j) & ! + my(i,j)*xyav(i,j) & ! + mc(i,j)*xav (i,j)) / mm(i,j) @@ -1303,7 +1288,7 @@ subroutine construct_fields (nx_block, ny_block, & if (tmask(i,j,nt) > puny) then ! center of area*tracer - w1 = mc(i,j)*tc(i,j,nt) +! w1 = mc(i,j)*tc(i,j,nt) w2 = mc(i,j)*tx(i,j,nt) & + mx(i,j)*tc(i,j,nt) w3 = mc(i,j)*ty(i,j,nt) & @@ -1315,12 +1300,13 @@ subroutine construct_fields (nx_block, ny_block, & w7 = c1 / (mm(i,j)*tm(i,j,nt)) ! echmod: grid arrays = 0 ! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) & - mtxav(i,j,nt) = (w1*xav + w2*xxav) & - * w7 - mtyav(i,j,nt) = (w1*yav + w3*yyav) & +! mtxav(i,j,nt) = (w1*xav + w2*xxav) & +! * w7 + mtxav(i,j,nt) = w2*xxav *w7 +! mtyav(i,j,nt) = (w1*yav + w3*yyav) & ! mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) & - * w7 - +! * w7 + mtyav(i,j,nt) = w3*yyav * w7 ! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) & ! + w3*xyav (i,j) + w4*xxxav(i,j) & ! + w5*xxyav(i,j) + w6*xyyav(i,j)) & @@ -1390,18 +1376,15 @@ end subroutine construct_fields ! maximum of phi(i,j) in the cell and its eight neighbors, nor fall ! below the minimum. ! -! Part 1 of the interface limited_gradient that allows cnx and cny to be 2d arrays. -! This is identical to the original limited_gradient subroutine ! authors William H. Lipscomb, LANL ! John R. Baumgardner, LANL -! 
Updated to interface by Till Rasmussen, DMI - subroutine limited_gradient_cn_2d (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, & - phi, phimask, & - cnx, cny, & - gx, gy) + subroutine limited_gradient (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + phi, phimask, & + cnx, cny, & + gx, gy) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1443,7 +1426,7 @@ subroutine limited_gradient_cn_2d (nx_block, ny_block, & puny , & ! gxtmp, gytmp ! temporary term for x- and y- limited gradient - character(len=*), parameter :: subname = '(limited_gradient_cn_2d)' + character(len=*), parameter :: subname = '(limited_gradient)' call icepack_query_parameters(puny_out=puny) call icepack_warnings_flush(nu_diag) @@ -1541,166 +1524,7 @@ subroutine limited_gradient_cn_2d (nx_block, ny_block, & enddo ! ij - end subroutine limited_gradient_cn_2d - -!======================================================================= -! Compute a limited gradient of the scalar field phi in scaled coordinates. -! "Limited" means that we do not create new extrema in phi. For -! instance, field values at the cell corners can neither exceed the -! maximum of phi(i,j) in the cell and its eight neighbors, nor fall -! below the minimum. -! -! Part 2 of the interface limited_gradient that allows cnx and cny to be scalars -! Based on limited_gradient_cn_2d (previously limited_gradient) -! Updated by Till Rasmussen, DMI - - subroutine limited_gradient_cn_scalar (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, & - phi, phimask, & - cnx, cny, & - gx, gy) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - ilo,ihi,jlo,jhi , & ! beginning and end of physical domain - nghost ! number of ghost cells - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent (in) :: & - phi , & ! input tracer field (mean values in each grid cell) - phimask ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise. - ! For instance, aice has no physical meaning in land cells, - ! and hice no physical meaning where aice = 0. - - real (kind=dbl_kind), intent (in) :: & - cnx , & ! x-coordinate of phi relative to geometric center of cell - cny ! y-coordinate of phi relative to geometric center of cell - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - gx , & ! limited x-direction gradient - gy ! limited y-direction gradient - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij , & ! standard indices - icells ! number of cells to limit - - integer (kind=int_kind), dimension(nx_block*ny_block) :: & - indxi, indxj ! combined i/j horizontal indices - - real (kind=dbl_kind) :: & - phi_nw, phi_n, phi_ne , & ! values of phi in 8 neighbor cells - phi_w, phi_e , & - phi_sw, phi_s, phi_se , & - qmn, qmx , & ! min and max value of phi within grid cell - pmn, pmx , & ! min and max value of phi among neighbor cells - w1, w2, w3, w4 ! work variables - - real (kind=dbl_kind) :: & - puny , & ! - gxtmp, gytmp ! temporary term for x- and y- limited gradient - - character(len=*), parameter :: subname = '(limited_gradient_cn_scalar)' - - call icepack_query_parameters(puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - gx(:,:) = c0 - gy(:,:) = c0 - - ! For nghost = 1, loop over physical cells and update ghost cells later - ! 
For nghost = 2, loop over a layer of ghost cells and skip the update
-
-      icells = 0
-      do j = jlo-nghost+1, jhi+nghost-1
-      do i = ilo-nghost+1, ihi+nghost-1
-         if (phimask(i,j) > puny) then
-            icells = icells + 1
-            indxi(icells) = i
-            indxj(icells) = j
-         endif                  ! phimask > puny
-      enddo
-      enddo
-
-      do ij = 1, icells
-         i = indxi(ij)
-         j = indxj(ij)
-
-         ! Store values of phi in the 8 neighbor cells.
-         ! Note: phimask = 1. or 0.  If phimask = 1., use the true value;
-         ! if phimask = 0., use the home cell value so that non-physical
-         ! values of phi do not contribute to the gradient.
-         phi_nw = phimask(i-1,j+1) * phi(i-1,j+1) &
-                + (c1-phimask(i-1,j+1))* phi(i  ,j  )
-         phi_n  = phimask(i  ,j+1) * phi(i  ,j+1) &
-                + (c1-phimask(i  ,j+1))* phi(i  ,j  )
-         phi_ne = phimask(i+1,j+1) * phi(i+1,j+1) &
-                + (c1-phimask(i+1,j+1))* phi(i  ,j  )
-         phi_w  = phimask(i-1,j  ) * phi(i-1,j  ) &
-                + (c1-phimask(i-1,j  ))* phi(i  ,j  )
-         phi_e  = phimask(i+1,j  ) * phi(i+1,j  ) &
-                + (c1-phimask(i+1,j  ))* phi(i  ,j  )
-         phi_sw = phimask(i-1,j-1) * phi(i-1,j-1) &
-                + (c1-phimask(i-1,j-1))* phi(i  ,j  )
-         phi_s  = phimask(i  ,j-1) * phi(i  ,j-1) &
-                + (c1-phimask(i  ,j-1))* phi(i  ,j  )
-         phi_se = phimask(i+1,j-1) * phi(i+1,j-1) &
-                + (c1-phimask(i+1,j-1))* phi(i  ,j  )
-
-         ! unlimited gradient components
-         ! (factors of two cancel out)
-
-         gxtmp = (phi_e - phi_w) * p5
-         gytmp = (phi_n - phi_s) * p5
-
-         ! minimum and maximum among the nine local cells
-         pmn = min (phi_nw, phi_n,  phi_ne, phi_w, phi(i,j), &
-                    phi_e,  phi_sw, phi_s,  phi_se)
-         pmx = max (phi_nw, phi_n,  phi_ne, phi_w, phi(i,j), &
-                    phi_e,  phi_sw, phi_s,  phi_se)
-
-         pmn = pmn - phi(i,j)
-         pmx = pmx - phi(i,j)
-
-         ! minimum and maximum deviation of phi within the cell
-         ! minimum and maximum deviation of phi within the cell
-         w1 = (p5 - cnx) * gxtmp &
-            + (p5 - cny) * gytmp
-         w2 = (p5 - cnx) * gxtmp &
-            - (p5 + cny) * gytmp
-         w3 = -(p5 + cnx) * gxtmp &
-            -  (p5 + cny) * gytmp
-         w4 = (p5 - cny) * gytmp &
-            - (p5 + cnx) * gxtmp
-
-         qmn = min (w1, w2, w3, w4)
-         qmx = max (w1, w2, w3, w4)
-
-         ! the limiting coefficient
-         if ( abs(qmn) > abs(pmn) ) then ! 'abs(qmn) > puny' not sufficient
-            w1 = max(c0, pmn/qmn)
-         else
-            w1 = c1
-         endif
-
-         if ( abs(qmx) > abs(pmx) ) then
-            w2 = max(c0, pmx/qmx)
-         else
-            w2 = c1
-         endif
-
-         w1 = min(w1, w2)
-
-         ! Limit the gradient components
-         gx(i,j) = w1 * gxtmp
-         gy(i,j) = w1 * gytmp
-
-      enddo                     ! ij
-
-      end subroutine limited_gradient_cn_scalar
+      end subroutine limited_gradient

 !=======================================================================
 !

From f848367767b492ec41ba176e091818aef83ce6ea Mon Sep 17 00:00:00 2001
From: TillRasmussen
Date: Sat, 6 Jan 2024 00:27:45 +0000
Subject: [PATCH 7/7] Removed comments from ice_transport_remap and arrays for nonuniform grids

---
 .../cicedyn/dynamics/ice_transport_remap.F90 | 121 ++----------------
 1 file changed, 11 insertions(+), 110 deletions(-)

diff --git a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 b/cicecore/cicedyn/dynamics/ice_transport_remap.F90
index 7aa38a281..11521a0fa 100644
--- a/cicecore/cicedyn/dynamics/ice_transport_remap.F90
+++ b/cicecore/cicedyn/dynamics/ice_transport_remap.F90
@@ -25,6 +25,7 @@
 ! can be specified (following an idea of Mats Bentsen)
 ! 2010: ECH removed unnecessary grid arrays and optional arguments from
 !       horizontal_remap
+! 2023: TAR, DMI removed commented code and unnecessary arrays

 module ice_transport_remap

@@ -253,52 +254,15 @@ module ice_transport_remap
 !
 ! Grid quantities used by the remapping transport scheme
 !
-! Note: the arrays xyav, xxxav, etc are not needed for rectangular grids
-! but may be needed in the future for other nonuniform grids.  They have
-! been commented out here to save memory and flops.
+! Note: Arrays needed for nonuniform grids have been deleted.
+! They can be found in version 6.5 and earlier.
 !
 ! author William H. Lipscomb, LANL

       subroutine init_remap

-!      use ice_domain, only: nblocks
-!      use ice_grid, only: xav, yav, xxav, yyav
-!                          dxT, dyT, xyav, &
-!                          xxxav, xxyav, xyyav, yyyav
-
-!      integer (kind=int_kind) :: &
-!           i, j, iblk           ! standard indices

       character(len=*), parameter :: subname = '(init_remap)'

-    ! Compute grid cell average geometric quantities on the scaled
-    ! rectangular grid with dx = 1, dy = 1.
-    !
-    ! Note: On a rectangular grid, the integral of any odd function
-    ! of x or y = 0.
-
-!      !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime)
-!      do iblk = 1, nblocks
-!      do j = 1, ny_block
-!      do i = 1, nx_block
-!         xav(i,j,iblk) = c0
-!         yav(i,j,iblk) = c0
-!!! These formulas would be used on a rectangular grid
-!!! with dimensions (dxT, dyT):
-!!!         xxav(i,j,iblk) = dxT(i,j,iblk)**2 / c12
-!!!         yyav(i,j,iblk) = dyT(i,j,iblk)**2 / c12
-!         xxav(i,j,iblk) = c1/c12
-!         yyav(i,j,iblk) = c1/c12
-!         xyav(i,j,iblk) = c0
-!         xxxav(i,j,iblk) = c0
-!         xxyav(i,j,iblk) = c0
-!         xyyav(i,j,iblk) = c0
-!         yyyav(i,j,iblk) = c0
-!      enddo
-!      enddo
-!      enddo
-!      !$OMP END PARALLEL DO
-
     !-------------------------------------------------------------------
     ! Set logical l_fixed_area depending of the grid type.
     !
@@ -356,9 +320,7 @@ subroutine horizontal_remap (dt, ntrace, &
       use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_remap
       use ice_blocks, only: block, get_block, nghost
       use ice_grid, only: HTE, HTN, dxu, dyu, &
-                          earea, narea, tarear, hm!, &
-!                          xav, yav, xxav, yyav
-!                          xyav, xxxav, xxyav, xyyav, yyyav
+                          earea, narea, tarear, hm
      use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound

      real (kind=dbl_kind), intent(in) :: &
@@ -520,12 +482,6 @@ subroutine horizontal_remap (dt, ntrace, &
                              has_dependents,    icellsnc(0,iblk), &
                              indxinc(:,0),      indxjnc(:,0), &
                              hm   (:,:,iblk), &
-!                              xav  (:,:,iblk), &
-!                              yav  (:,:,iblk),  xxav  (:,:,iblk), &
-!                              yyav (:,:,iblk), &
-!                              xyav (:,:,iblk), &
-!                              xxxav (:,:,iblk), xxyav (:,:,iblk), &
-!                              xyyav (:,:,iblk), yyyav (:,:,iblk), &
                              mm   (:,:,0,iblk), mc (:,:,0,iblk), &
                              mx   (:,:,0,iblk), my (:,:,0,iblk), &
                              mmask(:,:,0) )
@@ -541,12 +497,6 @@ subroutine horizontal_remap (dt, ntrace, &
                                 has_dependents,   icellsnc (n,iblk), &
                                 indxinc (:,n),    indxjnc(:,n), &
                                 hm    (:,:,iblk), &
-!                                 xav  (:,:,iblk), &
-!                                 yav  (:,:,iblk),  xxav  (:,:,iblk), &
-!                                 yyav (:,:,iblk), &
-!                                 xyav (:,:,iblk), &
-!                                 xxxav (:,:,iblk), xxyav (:,:,iblk), &
-!                                 xyyav (:,:,iblk), yyyav (:,:,iblk), &
                                 mm    (:,:,n,iblk), mc (:,:,n,iblk), &
                                 mx    (:,:,n,iblk), my (:,:,n,iblk), &
                                 mmask (:,:,n), &
@@ -1055,12 +1005,6 @@ subroutine construct_fields (nx_block, ny_block, &
                                   has_dependents,  icells, &
                                   indxi,           indxj, &
                                   hm, &
-!                                   xav, &
-!                                   yav,             xxav, &
-!                                   yyav, &
-!                                   xyav, &
-!                                   xxxav,           xxyav, &
-!                                   xyyav,           yyyav, &
                                   mm,              mc, &
                                   mx,              my, &
                                   mmask, &
@@ -1087,11 +1031,7 @@ subroutine construct_fields (nx_block, ny_block, &
         indxj

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
-         hm !, &           ! land/boundary mask, thickness (T-cell)
-!         xav, yav  , &     ! mean T-cell values of x, y
-!         xxav, yyav        ! mean T-cell values of xx, yy
-!         xyav, , &         ! mean T-cell values of xy
-!         xxxav,xxyav,xyyav,yyyav ! mean T-cell values of xxx, xxy, xyy, yyy
+         hm                !
land/boundary mask, thickness (T-cell) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & mm , & ! mean value of mass field @@ -1131,7 +1071,7 @@ subroutine construct_fields (nx_block, ny_block, & real (kind=dbl_kind) :: & puny, & - w1, w2, w3, w7 ! work variables + w2, w3, w7 ! work variables character(len=*), parameter :: subname = '(construct_fields)' @@ -1223,12 +1163,8 @@ subroutine construct_fields (nx_block, ny_block, & j = indxj(ij) ! mass field at geometric center - ! echmod: xav = yav = 0 mc(i,j) = mm(i,j) -! mc(i,j) = mm(i,j) - xav(i,j)*mx(i,j) & -! - yav(i,j)*my(i,j) - enddo ! ij ! tracers @@ -1240,20 +1176,10 @@ subroutine construct_fields (nx_block, ny_block, & j = indxj(ij) ! center of mass (mxav,myav) for each cell - ! echmod: xyav = 0 -! mxav(i,j) = (mx(i,j)*xxav & !(i,j) & -! + mc(i,j)*xav (i,j)) / mm(i,j) -! + mc(i,j)*xav ) / mm(i,j) + mxav(i,j) = mx(i,j)*xxav / mm(i,j) -! myav(i,j) = (my(i,j)*yyav &!(i,j) & -! + mc(i,j)*yav(i,j)) / mm(i,j) - myav(i,j) = my(i,j)*yyav / mm(i,j) -! mxav(i,j) = (mx(i,j)*xxav(i,j) & -! + my(i,j)*xyav(i,j) & -! + mc(i,j)*xav (i,j)) / mm(i,j) -! myav(i,j) = (mx(i,j)*xyav(i,j) & -! + my(i,j)*yyav(i,j) & -! + mc(i,j)*yav(i,j)) / mm(i,j) + myav(i,j) = my(i,j)*yyav / mm(i,j) + enddo do nt = 1, ntrace @@ -1288,33 +1214,14 @@ subroutine construct_fields (nx_block, ny_block, & if (tmask(i,j,nt) > puny) then ! center of area*tracer -! w1 = mc(i,j)*tc(i,j,nt) w2 = mc(i,j)*tx(i,j,nt) & + mx(i,j)*tc(i,j,nt) w3 = mc(i,j)*ty(i,j,nt) & + my(i,j)*tc(i,j,nt) -! w4 = mx(i,j)*tx(i,j,nt) -! w5 = mx(i,j)*ty(i,j,nt) & -! + my(i,j)*tx(i,j,nt) -! w6 = my(i,j)*ty(i,j,nt) w7 = c1 / (mm(i,j)*tm(i,j,nt)) ! echmod: grid arrays = 0 -! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) & -! mtxav(i,j,nt) = (w1*xav + w2*xxav) & -! * w7 - mtxav(i,j,nt) = w2*xxav *w7 -! mtyav(i,j,nt) = (w1*yav + w3*yyav) & -! mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) & -! * w7 - mtyav(i,j,nt) = w3*yyav * w7 -! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) & -! + w3*xyav (i,j) + w4*xxxav(i,j) & -! + w5*xxyav(i,j) + w6*xyyav(i,j)) & -! * w7 -! mtyav(i,j,nt) = (w1*yav(i,j) + w2*xyav (i,j) & -! + w3*yyav(i,j) + w4*xxyav(i,j) & -! + w5*xyyav(i,j) + w6*yyyav(i,j)) & -! * w7 + mtxav(i,j,nt) = w2*xxav *w7 + mtyav(i,j,nt) = w3*yyav * w7 endif ! tmask enddo ! ij @@ -1357,8 +1264,6 @@ subroutine construct_fields (nx_block, ny_block, & j = indxj(ij) tc(i,j,nt) = tm(i,j,nt) -! tx(i,j,nt) = c0 ! already initialized to 0. -! ty(i,j,nt) = c0 enddo ! ij endif ! tracer_type @@ -3118,10 +3023,6 @@ subroutine locate_triangles (nx_block, ny_block, & write(nu_diag,*) '' write(nu_diag,*) 'WARNING: xp =', xp(i,j,nv,ng) write(nu_diag,*) 'm, i, j, ng, nv =', my_task, i, j, ng, nv -! write(nu_diag,*) 'yil,xdl,xcl,ydl=',yil,xdl,xcl,ydl -! write(nu_diag,*) 'yir,xdr,xcr,ydr=',yir,xdr,xcr,ydr -! write(nu_diag,*) 'ydm=',ydm -! stop endif if (abs(yp(i,j,nv,ng)) > p5+puny) then write(nu_diag,*) ''
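Editor's note (not part of the patch series): the constants that replace the old grid arrays in these patches are the moments of the scaled T-cell, a unit square with x, y in [-1/2, 1/2]. The odd moments vanish, hence xav = yav = c0, and the mean of x**2 or y**2 over the cell is 1/12, hence xxav = yyav = c1/c12. The standalone program below is only an illustrative sketch of that arithmetic; the program name and the quadrature resolution n are invented for this note and are not taken from the CICE code.

      program check_cell_moments
         ! Sketch: confirm the scaled-cell moments behind xav/yav/xxav/yyav
         ! by midpoint-rule quadrature over x in [-1/2, 1/2].
         implicit none
         integer, parameter :: dbl_kind = selected_real_kind(13)
         integer, parameter :: n = 1000      ! quadrature intervals (illustrative choice)
         integer :: i
         real (kind=dbl_kind) :: x, dx, xav_num, xxav_num

         dx       = 1.0_dbl_kind / real(n, dbl_kind)
         xav_num  = 0.0_dbl_kind
         xxav_num = 0.0_dbl_kind
         do i = 1, n
            x        = -0.5_dbl_kind + (real(i, dbl_kind) - 0.5_dbl_kind)*dx  ! sub-interval midpoint
            xav_num  = xav_num  + x*dx       ! approximates the integral of x   over the cell
            xxav_num = xxav_num + x*x*dx     ! approximates the integral of x*x over the cell
         enddo

         print *, 'mean of x   over the unit cell: ', xav_num,  ' (expected 0)'
         print *, 'mean of x*x over the unit cell: ', xxav_num, ' (expected 1/12 = ', &
                  1.0_dbl_kind/12.0_dbl_kind, ')'
      end program check_cell_moments

By symmetry the y moments are identical, which is why the same two constants serve for yav and yyav; on a physical grid cell of width dxT the second moment would instead be dxT**2/12, as the commented-out formulas deleted above indicate.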