Commit af25b9fe authored by Dr Jeff Polton

Updates for restructured dir

parent ec13b89c
# compiler options for Archer CRAY XC-30 (using intel compiler)
#
# NCDF_HOME root directory containing lib and include subdirectories for netcdf4
# HDF5_HOME root directory containing lib and include subdirectories for HDF5
# XIOS_HOME root directory containing lib for XIOS
# OASIS_HOME root directory containing lib for OASIS
#
# NCDF_INC netcdf4 include file
# NCDF_LIB netcdf4 library
# XIOS_INC xios include file (taken into account only if key_iomput is activated)
# XIOS_LIB xios library (taken into account only if key_iomput is activated)
# OASIS_INC oasis include file (taken into account only if key_oasis3 is activated)
# OASIS_LIB oasis library (taken into account only if key_oasis3 is activated)
#
# FC Fortran compiler command
# FCFLAGS Fortran compiler flags
# FFLAGS Fortran 77 compiler flags
# LD linker
# LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries
# FPPFLAGS pre-processing flags
# AR archiver
# ARFLAGS archiver flags
# MK make
# USER_INC complete list of include files
# USER_LIB complete list of libraries to pass to the linker
# CC C compiler used to compile conv for AGRIF
# CFLAGS compiler flags used with CC
#
# Note that:
# - unix variables "$..." are accepted and will be evaluated before calling fcm.
# - fcm variables start with a % (not a $)
#
%NCDF_HOME $NETCDF_DIR
%HDF5_HOME $HDF5_DIR
%XIOS_HOME /work/n01/n01/$USER/XIOS
#OASIS_HOME
%NCDF_INC -I%NCDF_HOME/include -I%HDF5_HOME/include
%NCDF_LIB -L%HDF5_HOME/lib -L%NCDF_HOME/lib -lnetcdff -lnetcdf -lhdf5_hl -lhdf5 -lz
%XIOS_INC -I%XIOS_HOME/inc
%XIOS_LIB -L%XIOS_HOME/lib -lxios
#OASIS_INC -I%OASIS_HOME/build/lib/mct -I%OASIS_HOME/build/lib/psmile.MPI1
#OASIS_LIB -L%OASIS_HOME/lib -lpsmile.MPI1 -lmct -lmpeu -lscrip
%CPP cpp
%FC ftn
%FCFLAGS -integer-size 32 -real-size 64 -g -O3 -fp-model source -zero -fpp -warn all
%FFLAGS -integer-size 32 -real-size 64 -g -O3 -fp-model source -zero -fpp -warn all
%LD CC -Wl,"--allow-multiple-definition"
%FPPFLAGS -P -C -traditional
%LDFLAGS
%AR ar
%ARFLAGS -r
%MK gmake
%USER_INC %XIOS_INC %NCDF_INC
%USER_LIB %XIOS_LIB %NCDF_LIB
#USER_INC %XIOS_INC %OASIS_INC %NCDF_INC
#USER_LIB %XIOS_LIB %OASIS_LIB %NCDF_LIB
%CC cc
%CFLAGS -O0
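#
# A minimal usage sketch (assumptions: this file is saved as
# arch-XC_ARCHER_INTEL.fcm under NEMOGCM/ARCH, and MY_CONFIG is a
# hypothetical configuration name):
#
#   ./makenemo -m XC_ARCHER_INTEL -n MY_CONFIG
#
# makenemo substitutes the %... fcm variables defined above into the build,
# while the "$..." unix variables ($NETCDF_DIR, $HDF5_DIR) are evaluated from
# the environment (e.g. set by the system's netcdf/hdf5 modules) before fcm
# is called.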
MODULE dommsk
!!======================================================================
!! *** MODULE dommsk ***
!! Ocean initialization : domain land/sea mask
!!======================================================================
!! History : OPA ! 1987-07 (G. Madec) Original code
!! 6.0 ! 1993-03 (M. Guyon) symmetrical conditions
!! 7.0 ! 1996-01 (G. Madec) suppression of common work arrays
!! - ! 1996-05 (G. Madec) mask computed from tmask
!! 8.0 ! 1997-02 (G. Madec) mesh information put in domhgr.F
!! 8.1 ! 1997-07 (G. Madec) modification of kbat and fmask
!! - ! 1998-05 (G. Roullet) free surface
!! 8.2 ! 2000-03 (G. Madec) no slip accurate
!! - ! 2001-09 (J.-M. Molines) Open boundaries
!! NEMO 1.0 ! 2002-08 (G. Madec) F90: Free form and module
!! - ! 2005-11 (V. Garnier) Surface pressure gradient organization
!! 3.2 ! 2009-07 (R. Benshila) Suppression of rigid-lid option
!! 3.6 ! 2015-05 (P. Mathiot) ISF: add wmask,wumask and wvmask
!! 4.0 ! 2016-06 (G. Madec, S. Flavoni) domain configuration / user defined interface
!!----------------------------------------------------------------------
!!----------------------------------------------------------------------
!! dom_msk : compute land/ocean mask
!!----------------------------------------------------------------------
USE oce ! ocean dynamics and tracers
USE dom_oce ! ocean space and time domain
USE usrdef_fmask ! user defined fmask
USE bdy_oce
USE in_out_manager ! I/O manager
USE iom
USE lbclnk ! ocean lateral boundary conditions (or mpp link)
USE lib_mpp ! Massively Parallel Processing library
USE wrk_nemo ! Memory allocation
USE timing ! Timing
IMPLICIT NONE
PRIVATE
PUBLIC dom_msk ! routine called by inidom.F90
! !!* Namelist namlbc : lateral boundary condition *
REAL(wp) :: rn_shlat ! type of lateral boundary condition on velocity
LOGICAL, PUBLIC :: ln_vorlat ! consistency of vorticity boundary condition
! with analytical eqs.
!! * Substitutions
# include "vectopt_loop_substitute.h90"
!!----------------------------------------------------------------------
!! NEMO/OPA 3.2 , LODYC-IPSL (2009)
!! $Id: dommsk.F90 7753 2017-03-03 11:46:59Z mocavero $
!! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
!!----------------------------------------------------------------------
CONTAINS
SUBROUTINE dom_msk( k_top, k_bot )
!!---------------------------------------------------------------------
!! *** ROUTINE dom_msk ***
!!
!! ** Purpose : Compute land/ocean mask arrays at tracer points, horizontal
!! velocity points (u & v), and vorticity points (f).
!!
!! ** Method : The ocean/land mask at t-point is deduced from ko_top
!! and ko_bot, the indices of the first and last ocean t-levels which
!! are either defined in usrdef_zgr or read in zgr_read.
!! The velocity masks (umask, vmask, wmask, wumask, wvmask)
!! are deduced from a product of the two neighboring tmask.
!! The vorticity mask (fmask) is deduced from tmask taking
!! into account the choice of lateral boundary condition (rn_shlat) :
!! rn_shlat = 0, free slip (no shear along the coast)
!! rn_shlat = 2, no slip (specified zero velocity at the coast)
!! 0 < rn_shlat < 2, partial slip | non-linear velocity profile
!! 2 < rn_shlat, strong slip | in the lateral boundary layer
!!
!! tmask_i : interior ocean mask at t-point, i.e. excluding duplicated
!! rows/lines due to cyclic or North Fold boundaries as well
!! as MPP halos.
!! tmask_h : halo mask at t-point, i.e. excluding duplicated rows/lines
!! due to cyclic or North Fold boundaries as well as MPP halos.
!!
!! ** Action : tmask, umask, vmask, wmask, wumask, wvmask : land/ocean mask
!! at t-, u-, v-, w-, wu-, and wv-points (=0. or 1.)
!! fmask : land/ocean mask at f-point (=0., or =1., or
!! =rn_shlat along lateral boundaries)
!! tmask_i : interior ocean mask
!! tmask_h : halo mask
!! ssmask , ssumask, ssvmask, ssfmask : 2D ocean mask
!!----------------------------------------------------------------------
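!! A worked example of the rn_shlat rule above (illustrative, not part of
!! the original code): at a coastal f-point where the free-slip fmask is 0
!! and the four neighbouring f-values are zwf = (1, 0, 0, 0), the code
!! below sets fmask = rn_shlat * MIN( 1._wp, MAX(zwf) ) = rn_shlat,
!! so no slip (rn_shlat = 2) stores 2 there, partial slip (e.g. rn_shlat =
!! 0.5) stores 0.5, while interior ocean f-points keep fmask = 1.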
INTEGER, DIMENSION(:,:), INTENT(in) :: k_top, k_bot ! first and last ocean level
!
INTEGER :: ji, jj, jk ! dummy loop indices
INTEGER :: iif, iil ! local integers
INTEGER :: ijf, ijl ! - -
INTEGER :: iktop, ikbot ! - -
INTEGER :: ios, inum
REAL(wp), POINTER, DIMENSION(:,:) :: zwf ! 2D workspace
!!
NAMELIST/namlbc/ rn_shlat, ln_vorlat
NAMELIST/nambdy/ ln_bdy ,nb_bdy, ln_coords_file, cn_coords_file, &
& ln_mask_file, cn_mask_file, cn_dyn2d, nn_dyn2d_dta, &
& cn_dyn3d, nn_dyn3d_dta, cn_tra, nn_tra_dta, &
& ln_tra_dmp, ln_dyn3d_dmp, rn_time_dmp, rn_time_dmp_out, &
& cn_ice_lim, nn_ice_lim_dta, &
& rn_ice_tem, rn_ice_sal, rn_ice_age, &
& ln_vol, nn_volctl, nn_rimwidth, nb_jpk_bdy
!!---------------------------------------------------------------------
!
IF( nn_timing == 1 ) CALL timing_start('dom_msk')
!
REWIND( numnam_ref ) ! Namelist namlbc in reference namelist : Lateral momentum boundary condition
READ ( numnam_ref, namlbc, IOSTAT = ios, ERR = 901 )
901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namlbc in reference namelist', lwp )
REWIND( numnam_cfg ) ! Namelist namlbc in configuration namelist : Lateral momentum boundary condition
READ ( numnam_cfg, namlbc, IOSTAT = ios, ERR = 902 )
902 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namlbc in configuration namelist', lwp )
IF(lwm) WRITE ( numond, namlbc )
IF(lwp) THEN ! control print
WRITE(numout,*)
WRITE(numout,*) 'dommsk : ocean mask '
WRITE(numout,*) '~~~~~~'
WRITE(numout,*) ' Namelist namlbc'
WRITE(numout,*) ' lateral momentum boundary cond. rn_shlat = ',rn_shlat
WRITE(numout,*) ' consistency with analytical form ln_vorlat = ',ln_vorlat
ENDIF
IF ( rn_shlat == 0. ) THEN ; IF(lwp) WRITE(numout,*) ' ocean lateral free-slip '
ELSEIF ( rn_shlat == 2. ) THEN ; IF(lwp) WRITE(numout,*) ' ocean lateral no-slip '
ELSEIF ( 0. < rn_shlat .AND. rn_shlat < 2. ) THEN ; IF(lwp) WRITE(numout,*) ' ocean lateral partial-slip '
ELSEIF ( 2. < rn_shlat ) THEN ; IF(lwp) WRITE(numout,*) ' ocean lateral strong-slip '
ELSE
WRITE(ctmp1,*) ' rn_shlat is negative = ', rn_shlat
CALL ctl_stop( ctmp1 )
ENDIF
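! An illustrative namlbc block as it could appear in namelist_cfg
! (values are examples only, not taken from this commit):
! &namlbc
!    rn_shlat  = 0.       ! free slip
!    ln_vorlat = .false.  ! no vorticity consistency enforcement
! /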
! Ocean/land mask at t-point (computed from ko_top and ko_bot)
! ----------------------------
!
tmask(:,:,:) = 0._wp
DO jj = 1, jpj
DO ji = 1, jpi
iktop = k_top(ji,jj)
ikbot = k_bot(ji,jj)
IF( iktop /= 0 ) THEN ! water in the column
tmask(ji,jj,iktop:ikbot ) = 1._wp
ENDIF
END DO
END DO
!SF add lbc_lnk here: bug still not understood: arises now that the domain configuration is read !
!!gm I don't understand why...
CALL lbc_lnk( tmask , 'T', 1._wp ) ! Lateral boundary conditions
! Mask corrections for bdy (read in mppini2)
REWIND( numnam_ref ) ! Namelist nambdy in reference namelist :Unstructured open boundaries
READ ( numnam_ref, nambdy, IOSTAT = ios, ERR = 903)
903 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nambdy in reference namelist', lwp )
REWIND( numnam_cfg ) ! Namelist nambdy in configuration namelist :Unstructured open boundaries
READ ( numnam_cfg, nambdy, IOSTAT = ios, ERR = 904 )
904 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nambdy in configuration namelist', lwp )
! ------------------------
IF ( ln_bdy .AND. ln_mask_file ) THEN
CALL iom_open( cn_mask_file, inum )
CALL iom_get ( inum, jpdom_data, 'bdy_msk', bdytmask(:,:) )
CALL iom_close( inum )
DO jk = 1, jpkm1
DO jj = 1, jpj
DO ji = 1, jpi
tmask(ji,jj,jk) = tmask(ji,jj,jk) * bdytmask(ji,jj)
END DO
END DO
END DO
ENDIF
! Ocean/land mask at u-, v-, and f-points (computed from tmask)
! ----------------------------------------
! NB: at this point, fmask is designed for free slip lateral boundary condition
DO jk = 1, jpk
DO jj = 1, jpjm1
DO ji = 1, fs_jpim1 ! vector loop
umask(ji,jj,jk) = tmask(ji,jj ,jk) * tmask(ji+1,jj ,jk)
vmask(ji,jj,jk) = tmask(ji,jj ,jk) * tmask(ji ,jj+1,jk)
END DO
DO ji = 1, jpim1 ! NO vector opt.
fmask(ji,jj,jk) = tmask(ji,jj ,jk) * tmask(ji+1,jj ,jk) &
& * tmask(ji,jj+1,jk) * tmask(ji+1,jj+1,jk)
END DO
END DO
END DO
CALL lbc_lnk( umask , 'U', 1._wp ) ! Lateral boundary conditions
CALL lbc_lnk( vmask , 'V', 1._wp )
CALL lbc_lnk( fmask , 'F', 1._wp )
! Ocean/land mask at wu-, wv- and w points (computed from tmask)
!-----------------------------------------
wmask (:,:,1) = tmask(:,:,1) ! surface
wumask(:,:,1) = umask(:,:,1)
wvmask(:,:,1) = vmask(:,:,1)
DO jk = 2, jpk ! interior values
wmask (:,:,jk) = tmask(:,:,jk) * tmask(:,:,jk-1)
wumask(:,:,jk) = umask(:,:,jk) * umask(:,:,jk-1)
wvmask(:,:,jk) = vmask(:,:,jk) * vmask(:,:,jk-1)
END DO
! Ocean/land column mask at t-, u-, and v-points (i.e. at least 1 wet cell in the vertical)
! ----------------------------------------------
ssmask (:,:) = MAXVAL( tmask(:,:,:), DIM=3 )
ssumask(:,:) = MAXVAL( umask(:,:,:), DIM=3 )
ssvmask(:,:) = MAXVAL( vmask(:,:,:), DIM=3 )
! Interior domain mask (used for global sum)
! --------------------
!
iif = jpreci ; iil = nlci - jpreci + 1
ijf = jprecj ; ijl = nlcj - jprecj + 1
!
! ! halo mask : 0 on the halo and 1 elsewhere
tmask_h(:,:) = 1._wp
tmask_h( 1 :iif, : ) = 0._wp ! first columns
tmask_h(iil:jpi, : ) = 0._wp ! last columns (including mpp extra columns)
tmask_h( : , 1 :ijf) = 0._wp ! first rows
tmask_h( : ,ijl:jpj) = 0._wp ! last rows (including mpp extra rows)
!
! ! north fold mask
tpol(1:jpiglo) = 1._wp
fpol(1:jpiglo) = 1._wp
IF( jperio == 3 .OR. jperio == 4 ) THEN ! T-point pivot
tpol(jpiglo/2+1:jpiglo) = 0._wp
fpol( 1 :jpiglo) = 0._wp
IF( mjg(nlej) == jpjglo ) THEN ! only half of the nlcj-1 row for tmask_h
DO ji = iif+1, iil-1
tmask_h(ji,nlej-1) = tmask_h(ji,nlej-1) * tpol(mig(ji))
END DO
ENDIF
ENDIF
!
IF( jperio == 5 .OR. jperio == 6 ) THEN ! F-point pivot
tpol( 1 :jpiglo) = 0._wp
fpol(jpiglo/2+1:jpiglo) = 0._wp
ENDIF
!
! ! interior mask : 2D ocean mask x halo mask
tmask_i(:,:) = ssmask(:,:) * tmask_h(:,:)
! Lateral boundary conditions on velocity (modify fmask)
! ---------------------------------------
IF( rn_shlat /= 0 ) THEN ! Not free-slip lateral boundary condition
!
CALL wrk_alloc( jpi,jpj, zwf )
!
DO jk = 1, jpk
zwf(:,:) = fmask(:,:,jk)
DO jj = 2, jpjm1
DO ji = fs_2, fs_jpim1 ! vector opt.
IF( fmask(ji,jj,jk) == 0._wp ) THEN
fmask(ji,jj,jk) = rn_shlat * MIN( 1._wp , MAX( zwf(ji+1,jj), zwf(ji,jj+1), &
& zwf(ji-1,jj), zwf(ji,jj-1) ) )
ENDIF
END DO
END DO
DO jj = 2, jpjm1
IF( fmask(1,jj,jk) == 0._wp ) THEN
fmask(1 ,jj,jk) = rn_shlat * MIN( 1._wp , MAX( zwf(2,jj), zwf(1,jj+1), zwf(1,jj-1) ) )
ENDIF
IF( fmask(jpi,jj,jk) == 0._wp ) THEN
fmask(jpi,jj,jk) = rn_shlat * MIN( 1._wp , MAX( zwf(jpi,jj+1), zwf(jpim1,jj), zwf(jpi,jj-1) ) )
ENDIF
END DO
DO ji = 2, jpim1
IF( fmask(ji,1,jk) == 0._wp ) THEN
fmask(ji, 1 ,jk) = rn_shlat * MIN( 1._wp , MAX( zwf(ji+1,1), zwf(ji,2), zwf(ji-1,1) ) )
ENDIF
IF( fmask(ji,jpj,jk) == 0._wp ) THEN
fmask(ji,jpj,jk) = rn_shlat * MIN( 1._wp , MAX( zwf(ji+1,jpj), zwf(ji-1,jpj), zwf(ji,jpjm1) ) )
ENDIF
END DO
END DO
!
CALL wrk_dealloc( jpi,jpj, zwf )
!
CALL lbc_lnk( fmask, 'F', 1._wp ) ! Lateral boundary conditions on fmask
!
! CAUTION : The fmask may be further modified in dyn_vor_init ( dynvor.F90 ) depending on ln_vorlat
!
ENDIF
! User defined alteration of fmask (used to reduce ocean transport in specified straits)
! --------------------------------
!
CALL usr_def_fmask( cn_cfg, nn_cfg, fmask )
!
!
IF( nn_timing == 1 ) CALL timing_stop('dom_msk')
!
END SUBROUTINE dom_msk
!!======================================================================
END MODULE dommsk
MODULE dtatsd
!!======================================================================
!! *** MODULE dtatsd ***
!! Ocean data : read ocean Temperature & Salinity Data from gridded data
!!======================================================================
!! History : OPA ! 1991-03 () Original code
!! - ! 1992-07 (M. Imbard)
!! 8.0 ! 1999-10 (M.A. Foujols, M. Imbard) NetCDF FORMAT
!! NEMO 1.0 ! 2002-06 (G. Madec) F90: Free form and module
!! 3.3 ! 2010-10 (C. Bricaud, S. Masson) use of fldread
!! 3.4 ! 2010-11 (G. Madec, C. Ethe) Merge of dtatem and dtasal + suppression of CPP keys
!!----------------------------------------------------------------------
!!----------------------------------------------------------------------
!! dta_tsd : read and time interpolated ocean Temperature & Salinity Data
!!----------------------------------------------------------------------
USE oce ! ocean dynamics and tracers
USE dom_oce ! ocean space and time domain
USE fldread ! read input fields
USE in_out_manager ! I/O manager
USE phycst ! physical constants
USE lib_mpp ! MPP library
USE wrk_nemo ! Memory allocation
USE timing ! Timing
USE iom
IMPLICIT NONE
PRIVATE
PUBLIC dta_tsd_init ! called by opa.F90
PUBLIC dta_tsd ! called by istate.F90 and tradmp.90
LOGICAL , PUBLIC :: ln_tsd_init !: T & S data flag
LOGICAL , PUBLIC :: ln_tsd_interp !: vertical interpolation flag
LOGICAL , PUBLIC :: ln_tsd_tradmp !: internal damping toward input data flag
TYPE(FLD), ALLOCATABLE, DIMENSION(:) :: sf_tsd ! structure of input T & S (file information, fields read)
INTEGER :: jpk_init, inum_dta
INTEGER :: id, linum ! local integers
INTEGER :: zdim(4)
!!----------------------------------------------------------------------
!! NEMO/OPA 3.3 , NEMO Consortium (2010)
!! $Id: dtatsd.F90 7753 2017-03-03 11:46:59Z mocavero $
!! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
!!----------------------------------------------------------------------
CONTAINS
SUBROUTINE dta_tsd_init( ld_tradmp )
!!----------------------------------------------------------------------
!! *** ROUTINE dta_tsd_init ***
!!
!! ** Purpose : initialisation of T & S input data
!!
!! ** Method : - Read namtsd namelist
!! - allocates T & S data structure
!!----------------------------------------------------------------------
LOGICAL, INTENT(in), OPTIONAL :: ld_tradmp ! force the initialization when tradmp is used
!
INTEGER :: ios, ierr0, ierr1, ierr2, ierr3, ierr4, ierr5 ! local integers
!!
CHARACTER(len=100) :: cn_dir ! Root directory for location of ssr files
TYPE(FLD_N), DIMENSION(jpts+2):: slf_i ! array of namelist information on the fields to read
TYPE(FLD_N) :: sn_tem, sn_sal, sn_dep, sn_msk
!!
NAMELIST/namtsd/ ln_tsd_init, ln_tsd_interp, ln_tsd_tradmp, cn_dir, sn_tem, sn_sal, sn_dep, sn_msk
!!----------------------------------------------------------------------
!
IF( nn_timing == 1 ) CALL timing_start('dta_tsd_init')
!
! Initialisation
ierr0 = 0 ; ierr1 = 0 ; ierr2 = 0 ; ierr3 = 0 ; ierr4 = 0 ; ierr5 = 0
!
REWIND( numnam_ref ) ! Namelist namtsd in reference namelist :
READ ( numnam_ref, namtsd, IOSTAT = ios, ERR = 901)
901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namtsd in reference namelist', lwp )
REWIND( numnam_cfg ) ! Namelist namtsd in configuration namelist : Parameters of the run
READ ( numnam_cfg, namtsd, IOSTAT = ios, ERR = 902 )
902 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namtsd in configuration namelist', lwp )
IF(lwm) WRITE ( numond, namtsd )
IF( PRESENT( ld_tradmp ) ) ln_tsd_tradmp = .TRUE. ! forces the initialization when tradmp is used
IF(lwp) THEN ! control print
WRITE(numout,*)
WRITE(numout,*) 'dta_tsd_init : Temperature & Salinity data '
WRITE(numout,*) '~~~~~~~~~~~~ '
WRITE(numout,*) ' Namelist namtsd'
WRITE(numout,*) ' Initialisation of ocean T & S with T & S input data ln_tsd_init = ', ln_tsd_init
WRITE(numout,*) ' Interpolation of initial conditions in the vertical ln_tsd_interp = ', ln_tsd_interp
WRITE(numout,*) ' damping of ocean T & S toward T & S input data ln_tsd_tradmp = ', ln_tsd_tradmp
WRITE(numout,*)
IF( .NOT.ln_tsd_init .AND. .NOT.ln_tsd_tradmp ) THEN
WRITE(numout,*)
WRITE(numout,*) ' T & S data not used'
ENDIF
ENDIF
!
IF( ln_rstart .AND. ln_tsd_init ) THEN
CALL ctl_warn( 'dta_tsd_init: ocean restart and T & S data initialisation, ', &
& 'we keep the restart T & S values and set ln_tsd_init to FALSE' )
ln_tsd_init = .FALSE.
ENDIF
IF( ln_tsd_interp .AND. ln_tsd_tradmp ) THEN
CALL ctl_stop( 'dta_tsd_init: Tracer damping and vertical interpolation not yet configured' ) ; RETURN
ENDIF
IF( ln_tsd_interp .AND. LEN(TRIM(sn_msk%wname)) > 0 ) THEN
CALL ctl_stop( 'dta_tsd_init: Using vertical interpolation and weights files not recommended' ) ; RETURN
ENDIF
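! An illustrative namtsd block (flag names from the NAMELIST declaration
! above; values are examples only, not taken from this commit):
! &namtsd
!    ln_tsd_init   = .true.    ! initialise ocean T & S from input data
!    ln_tsd_interp = .false.   ! no vertical interpolation of the input
!    ln_tsd_tradmp = .false.   ! no damping toward the input data
!    cn_dir        = './'      ! root directory of the input files
! /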
!
! ! allocate the arrays (if necessary)
IF( ln_tsd_init .OR. ln_tsd_tradmp ) THEN
!
IF( ln_tsd_interp ) THEN
ALLOCATE( sf_tsd(jpts+2), STAT=ierr0 ) ! to carry the additional depth information
ELSE
ALLOCATE( sf_tsd(jpts ), STAT=ierr0 )
ENDIF
IF( ierr0 > 0 ) THEN
CALL ctl_stop( 'dta_tsd_init: unable to allocate sf_tsd structure' ) ; RETURN
ENDIF
!
IF( ln_tsd_interp ) THEN
CALL iom_open ( trim(cn_dir) // trim(sn_dep%clname), inum_dta )
id = iom_varid( inum_dta, sn_dep%clvar, zdim )
jpk_init = zdim(3)
IF(lwp) WRITE(numout,*) 'Dimension of vertical coordinate in ICs: ', jpk_init
CALL iom_close( inum_dta ) ! Close the input file
!
ALLOCATE( sf_tsd(jp_tem)%fnow(jpi,jpj,jpk_init ) , STAT=ierr0 )
IF( sn_tem%ln_tint ) ALLOCATE( sf_tsd(jp_tem)%fdta(jpi,jpj,jpk_init,2) , STAT=ierr1 )
ALLOCATE( sf_tsd(jp_sal)%fnow(jpi,jpj,jpk_init ) , STAT=ierr2 )
IF( sn_sal%ln_tint ) ALLOCATE( sf_tsd(jp_sal)%fdta(jpi,jpj,jpk_init,2) , STAT=ierr3 )
ALLOCATE( sf_tsd(jp_dep)%fnow(jpi,jpj,jpk_init ) , STAT=ierr4 )
ALLOCATE( sf_tsd(jp_msk)%fnow(jpi,jpj,jpk_init ) , STAT=ierr5 )
ELSE
ALLOCATE( sf_tsd(jp_tem)%fnow(jpi,jpj,jpk) , STAT=ierr0 )
IF( sn_tem%ln_tint ) ALLOCATE( sf_tsd(jp_tem)%fdta(jpi,jpj,jpk,2) , STAT=ierr1 )
ALLOCATE( sf_tsd(jp_sal)%fnow(jpi,jpj,jpk) , STAT=ierr2 )
IF( sn_sal%ln_tint ) ALLOCATE( sf_tsd(jp_sal)%fdta(jpi,jpj,jpk,2) , STAT=ierr3 )
ENDIF ! ln_tsd_interp
!
IF( ierr0 + ierr1 + ierr2 + ierr3 + ierr4 + ierr5 > 0 ) THEN
CALL ctl_stop( 'dta_tsd : unable to allocate T & S data arrays' ) ; RETURN
ENDIF
! ! fill sf_tsd with sn_tem & sn_sal and control print
slf_i(jp_tem) = sn_tem ; slf_i(jp_sal) = sn_sal
IF( ln_tsd_interp ) THEN ; slf_i(jp_dep) = sn_dep ; slf_i(jp_msk) = sn_msk ; ENDIF
CALL fld_fill( sf_tsd, slf_i, cn_dir, 'dta_tsd', 'Temperature & Salinity data', 'namtsd', no_print )
!
ENDIF
!
IF( nn_timing == 1 ) CALL timing_stop('dta_tsd_init')
!
END SUBROUTINE dta_tsd_init
SUBROUTINE dta_tsd( kt, ptsd )
!!----------------------------------------------------------------------
!! *** ROUTINE dta_tsd ***
!!
!! ** Purpose : provides T and S data at kt
!!
!! ** Method : - call fldread routine
!! - ORCA_R2: add some hand-made alterations to the data read
!! - 'key_orca_lev10' interpolates on 10 times more levels
!! - s- or mixed z-s coordinate: vertical interpolation on model mesh
!! - ln_tsd_tradmp=F: deallocates the T-S data structure
!! as T-S data are no longer used
!!
!! ** Action : ptsd T-S data on model mesh and interpolated at time-step kt
!!----------------------------------------------------------------------
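!! A worked example of the vertical interpolation below (illustrative):
!! for a model point at depth zl = 15 m lying between data levels at
!! 10 m and 20 m, the weight is zi = (15-10)/(20-10) = 0.5 and
!! ptsd = T(10 m) + 0.5*( T(20 m) - T(10 m) ); above the first or below
!! the last data level the nearest data value is simply copied.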
INTEGER , INTENT(in ) :: kt ! ocean time-step
REAL(wp), DIMENSION(jpi,jpj,jpk,jpts), INTENT( out) :: ptsd ! T & S data
!
INTEGER :: ji, jj, jk, jl, jk_init ! dummy loop indices
INTEGER :: ik, il0, il1, ii0, ii1, ij0, ij1 ! local integers
REAL(wp):: zl, zi
!!----------------------------------------------------------------------
!
IF( nn_timing == 1 ) CALL timing_start('dta_tsd')
!
CALL fld_read( kt, 1, sf_tsd ) !== read T & S data at kt time step ==!
!
!
!!gm This should be removed from the code ===>>>> T & S files have to be changed
!
! !== ORCA_R2 configuration and T & S damping ==!
IF( cn_cfg == "orca" .AND. nn_cfg == 2 .AND. ln_tsd_tradmp ) THEN ! some hand made alterations
!
ij0 = 101 ; ij1 = 109 ! Reduced T & S in the Alboran Sea
ii0 = 141 ; ii1 = 155
DO jj = mj0(ij0), mj1(ij1)
DO ji = mi0(ii0), mi1(ii1)
sf_tsd(jp_tem)%fnow(ji,jj,13:13) = sf_tsd(jp_tem)%fnow(ji,jj,13:13) - 0.20_wp
sf_tsd(jp_tem)%fnow(ji,jj,14:15) = sf_tsd(jp_tem)%fnow(ji,jj,14:15) - 0.35_wp
sf_tsd(jp_tem)%fnow(ji,jj,16:25) = sf_tsd(jp_tem)%fnow(ji,jj,16:25) - 0.40_wp
!
sf_tsd(jp_sal)%fnow(ji,jj,13:13) = sf_tsd(jp_sal)%fnow(ji,jj,13:13) - 0.15_wp
sf_tsd(jp_sal)%fnow(ji,jj,14:15) = sf_tsd(jp_sal)%fnow(ji,jj,14:15) - 0.25_wp
sf_tsd(jp_sal)%fnow(ji,jj,16:17) = sf_tsd(jp_sal)%fnow(ji,jj,16:17) - 0.30_wp
sf_tsd(jp_sal)%fnow(ji,jj,18:25) = sf_tsd(jp_sal)%fnow(ji,jj,18:25) - 0.35_wp
END DO
END DO
ij0 = 87 ; ij1 = 96 ! Reduced temperature in Red Sea
ii0 = 148 ; ii1 = 160
sf_tsd(jp_tem)%fnow( mi0(ii0):mi1(ii1) , mj0(ij0):mj1(ij1) , 4:10 ) = 7.0_wp
sf_tsd(jp_tem)%fnow( mi0(ii0):mi1(ii1) , mj0(ij0):mj1(ij1) , 11:13 ) = 6.5_wp
sf_tsd(jp_tem)%fnow( mi0(ii0):mi1(ii1) , mj0(ij0):mj1(ij1) , 14:20 ) = 6.0_wp
ENDIF
!!gm end
!
IF( kt == nit000 .AND. lwp )THEN
WRITE(numout,*)
WRITE(numout,*) 'dta_tsd: interpolates T & S data onto current mesh'
ENDIF
!
IF( ln_tsd_interp ) THEN ! probably should use pointers in the following to make more readable
!
DO jk = 1, jpk ! determine the interpolated T-S profiles at each (i,j) point
DO jj= 1, jpj
DO ji= 1, jpi
zl = gdept_0(ji,jj,jk)
IF( zl < sf_tsd(jp_dep)%fnow(ji,jj,1) ) THEN ! above the first level of data
ptsd(ji,jj,jk,jp_tem) = sf_tsd(jp_tem)%fnow(ji,jj,1)
ptsd(ji,jj,jk,jp_sal) = sf_tsd(jp_sal)%fnow(ji,jj,1)
ELSEIF( zl > sf_tsd(jp_dep)%fnow(ji,jj,jpk_init) ) THEN ! below the last level of data
ptsd(ji,jj,jk,jp_tem) = sf_tsd(jp_tem)%fnow(ji,jj,jpk_init)
ptsd(ji,jj,jk,jp_sal) = sf_tsd(jp_sal)%fnow(ji,jj,jpk_init)
ELSE ! in between : vertical interpolation between jk_init & jk_init+1
DO jk_init = 1, jpk_init-1 ! when gdept(jk_init) < zl < gdept(jk_init+1)
IF( sf_tsd(jp_msk)%fnow(ji,jj,jk_init+1) == 0 ) THEN ! if there is no data fill down
sf_tsd(jp_tem)%fnow(ji,jj,jk_init+1) = sf_tsd(jp_tem)%fnow(ji,jj,jk_init)
sf_tsd(jp_sal)%fnow(ji,jj,jk_init+1) = sf_tsd(jp_sal)%fnow(ji,jj,jk_init)
ENDIF
IF( (zl-sf_tsd(jp_dep)%fnow(ji,jj,jk_init)) * (zl-sf_tsd(jp_dep)%fnow(ji,jj,jk_init+1)) <= 0._wp ) THEN
zi = ( zl - sf_tsd(jp_dep)%fnow(ji,jj,jk_init) ) / &
& (sf_tsd(jp_dep)%fnow(ji,jj,jk_init+1)-sf_tsd(jp_dep)%fnow(ji,jj,jk_init))
ptsd(ji,jj,jk,jp_tem) = sf_tsd(jp_tem)%fnow(ji,jj,jk_init) + &
& (sf_tsd(jp_tem)%fnow(ji,jj,jk_init+1)-sf_tsd(jp_tem)%fnow(ji,jj,jk_init)) * zi
ptsd(ji,jj,jk,jp_sal) = sf_tsd(jp_sal)%fnow(ji,jj,jk_init) + &
& (sf_tsd(jp_sal)%fnow(ji,jj,jk_init+1)-sf_tsd(jp_sal)%fnow(ji,jj,jk_init)) * zi
ENDIF
END DO
ENDIF
ENDDO
ENDDO
END DO
!
ptsd(:,:,:,jp_tem) = ptsd(:,:,:,jp_tem) *tmask(:,:,:)
ptsd(:,:,:,jp_sal) = ptsd(:,:,:,jp_sal) *tmask(:,:,:)
ELSE !== z- or zps- coordinate ==!
!
ptsd(:,:,:,jp_tem) = sf_tsd(jp_tem)%fnow(:,:,:) * tmask(:,:,:) ! Mask
ptsd(:,:,:,jp_sal) = sf_tsd(jp_sal)%fnow(:,:,:) * tmask(:,:,:)
!
IF( ln_zps ) THEN ! zps-coordinate (partial steps) interpolation at the last ocean level
DO jj = 1, jpj
DO ji = 1, jpi
ik = mbkt(ji,jj)
IF( ik > 1 ) THEN
zl = ( gdept_1d(ik) - gdept_0(ji,jj,ik) ) / ( gdept_1d(ik) - gdept_1d(ik-1) )
ptsd(ji,jj,ik,jp_tem) = (1.-zl) * ptsd(ji,jj,ik,jp_tem) + zl * ptsd(ji,jj,ik-1,jp_tem)
ptsd(ji,jj,ik,jp_sal) = (1.-zl) * ptsd(ji,jj,ik,jp_sal) + zl * ptsd(ji,jj,ik-1,jp_sal)
ENDIF
ik = mikt(ji,jj)
IF( ik > 1 ) THEN
zl = ( gdept_0(ji,jj,ik) - gdept_1d(ik) ) / ( gdept_1d(ik+1) - gdept_1d(ik) )
ptsd(ji,jj,ik,jp_tem) = (1.-zl) * ptsd(ji,jj,ik,jp_tem) + zl * ptsd(ji,jj,ik+1,jp_tem)
ptsd(ji,jj,ik,jp_sal) = (1.-zl) * ptsd(ji,jj,ik,jp_sal) + zl * ptsd(ji,jj,ik+1,jp_sal)
END IF
END DO
END DO
ENDIF
!
ENDIF
!
IF( .NOT.ln_tsd_tradmp ) THEN !== deallocate T & S structure ==!
! (data used only for initialisation)
IF(lwp) WRITE(numout,*) 'dta_tsd: deallocate T & S arrays as they are only used to initialise the run'
DEALLOCATE( sf_tsd(jp_tem)%fnow ) ! T arrays in the structure
IF( sf_tsd(jp_tem)%ln_tint ) DEALLOCATE( sf_tsd(jp_tem)%fdta )
DEALLOCATE( sf_tsd(jp_sal)%fnow ) ! S arrays in the structure
IF( sf_tsd(jp_sal)%ln_tint ) DEALLOCATE( sf_tsd(jp_sal)%fdta )
IF( ln_tsd_interp ) DEALLOCATE( sf_tsd(jp_dep)%fnow ) ! depth array in the structure
IF( ln_tsd_interp ) DEALLOCATE( sf_tsd(jp_msk)%fnow ) ! mask array in the structure
DEALLOCATE( sf_tsd ) ! the structure itself
ENDIF
!
IF( nn_timing == 1 ) CALL timing_stop('dta_tsd')
!
END SUBROUTINE dta_tsd
!!======================================================================
END MODULE dtatsd
MODULE dynspg
!!======================================================================
!! *** MODULE dynspg ***
!! Ocean dynamics: surface pressure gradient control
!!======================================================================
!! History : 1.0 ! 2005-12 (C. Talandier, G. Madec, V. Garnier) Original code
!! 3.2 ! 2009-07 (R. Benshila) Suppression of rigid-lid option
!!----------------------------------------------------------------------
!!----------------------------------------------------------------------
!! dyn_spg : update the dynamics trend with surface pressure gradient
!! dyn_spg_init: initialization, namelist read, and parameters control
!!----------------------------------------------------------------------
USE oce ! ocean dynamics and tracers variables
USE dom_oce ! ocean space and time domain variables
USE c1d ! 1D vertical configuration
USE phycst ! physical constants
USE sbc_oce ! surface boundary condition: ocean
USE sbcapr ! surface boundary condition: atmospheric pressure
USE dynspg_exp ! surface pressure gradient (dyn_spg_exp routine)
USE dynspg_ts ! surface pressure gradient (dyn_spg_ts routine)
USE sbctide !
USE updtide !
USE trd_oce ! trends: ocean variables
USE trddyn ! trend manager: dynamics
!
USE prtctl ! Print control (prt_ctl routine)
USE in_out_manager ! I/O manager
USE lib_mpp ! MPP library
USE wrk_nemo ! Memory Allocation
USE timing ! Timing
IMPLICIT NONE
PRIVATE
PUBLIC dyn_spg ! routine called by step module
PUBLIC dyn_spg_init ! routine called by opa module
INTEGER :: nspg = 0 ! type of surface pressure gradient scheme defined from lk_dynspg_...
!jth velocity-limiter additions
LOGICAL, PUBLIC :: ln_ulimit ! limit velocities (jth addition)
REAL(wp), PUBLIC :: cn_ulimit, cnn_ulimit ! max inverse Courant number ; multiplier for divergent flow
!
! ! Parameter to control the surface pressure gradient scheme
INTEGER, PARAMETER :: np_TS = 1 ! split-explicit time stepping (Time-Splitting)
INTEGER, PARAMETER :: np_EXP = 0 ! explicit time stepping
INTEGER, PARAMETER :: np_NO =-1 ! no surface pressure gradient, no scheme
!! * Substitutions
# include "vectopt_loop_substitute.h90"
!!----------------------------------------------------------------------
!! NEMO/OPA 3.2 , LODYC-IPSL (2009)
!! $Id: dynspg.F90 7753 2017-03-03 11:46:59Z mocavero $
!! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
!!----------------------------------------------------------------------
CONTAINS
SUBROUTINE dyn_spg( kt )
!!----------------------------------------------------------------------
!! *** ROUTINE dyn_spg ***
!!
!! ** Purpose : compute surface pressure gradient including the
!! atmospheric pressure forcing (ln_apr_dyn=T).
!!
!! ** Method : Two schemes:
!! - explicit : the spg is evaluated at now
!! - split-explicit : a time splitting technique is used
!!
!! ln_apr_dyn=T : the atmospheric pressure forcing is applied
!! as the gradient of the inverse barometer ssh:
!! apgu = - 1/rau0 di[apr] = 0.5*grav di[ssh_ib+ssh_ibb]
!! apgv = - 1/rau0 dj[apr] = 0.5*grav dj[ssh_ib+ssh_ibb]
!! Note that, as for all external forcing, a time average over a two-rdt
!! period is used to prevent the divergence of odd and even time steps.
!!----------------------------------------------------------------------
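!! A sketch of why the two expressions above agree (assuming the inverse
!! barometer definition ssh_ib = - apr / (grav*rau0)):
!! - 1/rau0 di[apr] = grav di[ssh_ib]
!! and averaging ssh_ib over the now and before time levels, to damp the
!! odd/even time-step divergence noted above, yields the coded form
!! 0.5*grav di[ssh_ib+ssh_ibb].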
INTEGER, INTENT(in ) :: kt ! ocean time-step index
!
INTEGER :: ji, jj, jk ! dummy loop indices
REAL(wp) :: z2dt, zg_2, zintp, zgrau0r ! temporary scalar
REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdu, ztrdv
REAL(wp), POINTER, DIMENSION(:,:) :: zpice
!!----------------------------------------------------------------------
!
IF( nn_timing == 1 ) CALL timing_start('dyn_spg')
!
IF( l_trddyn ) THEN ! temporary save of ta and sa trends
CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv )
ztrdu(:,:,:) = ua(:,:,:)
ztrdv(:,:,:) = va(:,:,:)
ENDIF
!
IF( ln_apr_dyn & ! atmos. pressure
.OR. ( .NOT.ln_dynspg_ts .AND. (ln_tide_pot .AND. ln_tide) ) & ! tide potential (no time splitting)
.OR. nn_ice_embd == 2 ) THEN ! embedded sea-ice
!
DO jj = 2, jpjm1
DO ji = fs_2, fs_jpim1 ! vector opt.
spgu(ji,jj) = 0._wp
spgv(ji,jj) = 0._wp
END DO
END DO
!
IF( ln_apr_dyn .AND. .NOT.ln_dynspg_ts ) THEN !== Atmospheric pressure gradient (added later in time-split case) ==!
zg_2 = grav * 0.5
DO jj = 2, jpjm1 ! gradient of Patm using inverse barometer ssh
DO ji = fs_2, fs_jpim1 ! vector opt.
spgu(ji,jj) = spgu(ji,jj) + zg_2 * ( ssh_ib (ji+1,jj) - ssh_ib (ji,jj) &
& + ssh_ibb(ji+1,jj) - ssh_ibb(ji,jj) ) * r1_e1u(ji,jj)
spgv(ji,jj) = spgv(ji,jj) + zg_2 * ( ssh_ib (ji,jj+1) - ssh_ib (ji,jj) &
& + ssh_ibb(ji,jj+1) - ssh_ibb(ji,jj) ) * r1_e2v(ji,jj)
END DO
END DO
ENDIF
!
! !== tide potential forcing term ==!
IF( .NOT.ln_dynspg_ts .AND. ( ln_tide_pot .AND. ln_tide ) ) THEN ! N.B. added directly at sub-time-step in ts-case
!
CALL upd_tide( kt ) ! update tide potential
!
DO jj = 2, jpjm1 ! add tide potential forcing
DO ji = fs_2, fs_jpim1 ! vector opt.
spgu(ji,jj) = spgu(ji,jj) + grav * ( pot_astro(ji+1,jj) - pot_astro(ji,jj) ) * r1_e1u(ji,jj)
spgv(ji,jj) = spgv(ji,jj) + grav * ( pot_astro(ji,jj+1) - pot_astro(ji,jj) ) * r1_e2v(ji,jj)
END DO
END DO
ENDIF
!
IF( nn_ice_embd == 2 ) THEN !== embedded sea ice: Pressure gradient due to snow-ice mass ==!
CALL wrk_alloc( jpi,jpj, zpice )
!
zintp = REAL( MOD( kt-1, nn_fsbc ) ) / REAL( nn_fsbc )
zgrau0r = - grav * r1_rau0
zpice(:,:) = ( zintp * snwice_mass(:,:) + ( 1.- zintp ) * snwice_mass_b(:,:) ) * zgrau0r
DO jj = 2, jpjm1
DO ji = fs_2, fs_jpim1 ! vector opt.
spgu(ji,jj) = spgu(ji,jj) + ( zpice(ji+1,jj) - zpice(ji,jj) ) * r1_e1u(ji,jj)
spgv(ji,jj) = spgv(ji,jj) + ( zpice(ji,jj+1) - zpice(ji,jj) ) * r1_e2v(ji,jj)
END DO
END DO
!
CALL wrk_dealloc( jpi,jpj, zpice )
ENDIF
!
DO jk = 1, jpkm1 !== Add all terms to the general trend ==!
DO jj = 2, jpjm1
DO ji = fs_2, fs_jpim1 ! vector opt.
ua(ji,jj,jk) = ua(ji,jj,jk) + spgu(ji,jj)
va(ji,jj,jk) = va(ji,jj,jk) + spgv(ji,jj)
END DO
END DO
END DO
!
!!gm add here a call to dyn_trd for ice pressure gradient, the surf pressure trends ????
!
ENDIF
!
SELECT CASE ( nspg ) !== surface pressure gradient computed and add to the general trend ==!
CASE ( np_EXP ) ; CALL dyn_spg_exp( kt ) ! explicit
CASE ( np_TS ) ; CALL dyn_spg_ts ( kt ) ! time-splitting
END SELECT
!
IF( l_trddyn ) THEN ! save the surface pressure gradient trends for further diagnostics
ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:)
ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:)
CALL trd_dyn( ztrdu, ztrdv, jpdyn_spg, kt )
CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv )
ENDIF
! ! print mean trends (used for debugging)
IF(ln_ctl) CALL prt_ctl( tab3d_1=ua, clinfo1=' spg - Ua: ', mask1=umask, &
& tab3d_2=va, clinfo2= ' Va: ', mask2=vmask, clinfo3='dyn' )
!
IF( nn_timing == 1 ) CALL timing_stop('dyn_spg')
!
END SUBROUTINE dyn_spg
SUBROUTINE dyn_spg_init
!!---------------------------------------------------------------------
!! *** ROUTINE dyn_spg_init ***
!!
!! ** Purpose : Control the consistency between namelist options for
!! surface pressure gradient schemes
!!----------------------------------------------------------------------
INTEGER :: ioptio, ios ! local integers
!
NAMELIST/namdyn_spg/ ln_dynspg_exp , ln_dynspg_ts, &
& ln_bt_fw, ln_bt_av , ln_bt_auto , &
& nn_baro , rn_bt_cmax, nn_bt_flt,ln_ulimit,cn_ulimit,cnn_ulimit
!!----------------------------------------------------------------------
!
IF( nn_timing == 1 ) CALL timing_start('dyn_spg_init')
!
REWIND( numnam_ref ) ! Namelist namdyn_spg in reference namelist : Free surface
READ ( numnam_ref, namdyn_spg, IOSTAT = ios, ERR = 901)
901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namdyn_spg in reference namelist', lwp )
!
REWIND( numnam_cfg ) ! Namelist namdyn_spg in configuration namelist : Free surface
READ ( numnam_cfg, namdyn_spg, IOSTAT = ios, ERR = 902 )
902 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namdyn_spg in configuration namelist', lwp )
IF(lwm) WRITE ( numond, namdyn_spg )
!
IF(lwp) THEN ! Namelist print
WRITE(numout,*)
WRITE(numout,*) 'dyn_spg_init : choice of the surface pressure gradient scheme'
WRITE(numout,*) '~~~~~~~~~~~'
WRITE(numout,*) ' Explicit free surface ln_dynspg_exp = ', ln_dynspg_exp
WRITE(numout,*) ' Free surface with time splitting ln_dynspg_ts = ', ln_dynspg_ts
WRITE(numout,*) ' Limit velocities ln_ulimit = ', ln_ulimit
WRITE(numout,*) ' Limit velocities: max inverse Courant number cn_ulimit = ', cn_ulimit
WRITE(numout,*) ' Limit velocities: multiplier for divergent flow cnn_ulimit = ', cnn_ulimit
ENDIF
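! An illustrative namdyn_spg block (names from the NAMELIST declaration
! above; values are examples only, not taken from this commit):
! &namdyn_spg
!    ln_dynspg_ts = .true.   ! split-explicit free surface
!    ln_bt_auto   = .true.   ! set the number of barotropic sub-steps from rn_bt_cmax
!    rn_bt_cmax   = 0.8      ! maximum allowed barotropic Courant number
!    ln_ulimit    = .false.  ! jth velocity limiter off
! /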
! ! Control of surface pressure gradient scheme options
nspg = np_NO ; ioptio = 0
IF( ln_dynspg_exp ) THEN ; nspg = np_EXP ; ioptio = ioptio + 1 ; ENDIF
IF( ln_dynspg_ts ) THEN ; nspg = np_TS ; ioptio = ioptio + 1 ; ENDIF
!
IF( ioptio > 1 ) CALL ctl_stop( 'Choose only one surface pressure gradient scheme' )
IF( ioptio == 0 ) CALL ctl_warn( 'NO surface pressure gradient trend in momentum Eqs.' )
IF( ln_dynspg_exp .AND. ln_isfcav ) &
& CALL ctl_stop( ' dynspg_exp not tested with ice shelf cavity ' )
!
IF(lwp) THEN
WRITE(numout,*)
IF( nspg == np_EXP ) WRITE(numout,*) ' ===>> explicit free surface'
IF( nspg == np_TS ) WRITE(numout,*) ' ===>> free surface with time splitting scheme'
IF( nspg == np_NO ) WRITE(numout,*) ' ===>> No surface pressure gradient trend in momentum Eqs.'
ENDIF
!
IF( nspg == np_TS ) THEN ! split-explicit scheme initialisation
CALL dyn_spg_ts_init ! do it first: set nn_baro used to allocate some arrays later on
IF( dyn_spg_ts_alloc() /= 0 ) CALL ctl_stop('STOP', 'dyn_spg_init: failed to allocate dynspg_ts arrays' )
IF( neuler/=0 .AND. ln_bt_fw ) CALL ts_rst( nit000, 'READ' )
ENDIF
!
IF( nn_timing == 1 ) CALL timing_stop('dyn_spg_init')
!
END SUBROUTINE dyn_spg_init
!!======================================================================
END MODULE dynspg
MODULE par_oce
!!======================================================================
!! *** par_oce ***
!! Ocean : set the ocean parameters
!!======================================================================
!! History : OPA ! 1991 (Imbard, Levy, Madec) Original code
!! NEMO 1.0 ! 2004-01 (G. Madec, J.-M. Molines) Free form and module
!! 3.3 ! 2010-09 (C. Ethe) TRA-TRC merge: add jpts, jp_tem & jp_sal
!!----------------------------------------------------------------------
USE par_kind ! kind parameters
IMPLICIT NONE
PUBLIC
!!----------------------------------------------------------------------
!! namcfg namelist parameters
!!----------------------------------------------------------------------
LOGICAL :: ln_read_cfg !: (=T) read the domain configuration file, (=F) do not
CHARACTER(lc) :: cn_domcfg !: filename of the configuration file to be read
LOGICAL :: ln_write_cfg !: (=T) create the domain configuration file
CHARACTER(lc) :: cn_domcfg_out !: filename of the configuration file to be written
!
LOGICAL :: ln_use_jattr !: input file read offset
! ! Use file global attribute: open_ocean_jstart to determine start j-row
! ! when reading input from those netcdf files that have the
! ! attribute defined. This is designed to enable input files associated
! ! with the extended grids used in the under ice shelf configurations to
! ! be used without redundant rows when the ice shelves are not in use.
!
!!---------------------------------------------------------------------
!! Domain Matrix size
!!---------------------------------------------------------------------
! configuration name & resolution (required only in ORCA family case)
CHARACTER(lc) :: cn_cfg !: name of the configuration
INTEGER :: nn_cfg !: resolution of the configuration
! global domain size !!! * total computational domain *
INTEGER :: jpiglo !: 1st dimension of global domain --> i-direction
INTEGER :: jpjglo !: 2nd - - --> j-direction
INTEGER :: jpkglo !: 3rd - - --> k levels
#if defined key_agrif
!!gm BUG ? I'm surprised by the calculation below of nbcellsx and nbcellsy before jpiglo,jpjglo
!!gm have been assigned values....
!!gm
! global domain size for AGRIF !!! * total AGRIF computational domain *
INTEGER, PUBLIC, PARAMETER :: nbghostcells = 1 !: number of ghost cells
INTEGER, PUBLIC :: nbcellsx = jpiglo - 2 - 2*nbghostcells !: number of cells in i-direction
INTEGER, PUBLIC :: nbcellsy = jpjglo - 2 - 2*nbghostcells !: number of cells in j-direction
#endif
! local domain size !!! * local computational domain *
INTEGER, PUBLIC :: jpi ! = ( jpiglo-2*jpreci + (jpni-1) ) / jpni + 2*jpreci !: first dimension
INTEGER, PUBLIC :: jpj ! = ( jpjglo-2*jprecj + (jpnj-1) ) / jpnj + 2*jprecj !: second dimension
INTEGER, PUBLIC :: jpk ! = jpkglo
INTEGER, PUBLIC :: jpim1 ! = jpi-1 !: inner domain indices
INTEGER, PUBLIC :: jpjm1 ! = jpj-1 !: - - -
INTEGER, PUBLIC :: jpkm1 ! = jpk-1 !: - - -
INTEGER, PUBLIC :: jpij ! = jpi*jpj !: jpi x jpj
!!---------------------------------------------------------------------
!! Active tracer parameters
!!---------------------------------------------------------------------
INTEGER, PUBLIC, PARAMETER :: jpts = 2 !: Number of active tracers (=2, i.e. T & S )
INTEGER, PUBLIC, PARAMETER :: jp_tem = 1 !: index for temperature
INTEGER, PUBLIC, PARAMETER :: jp_sal = 2 !: index for salinity
INTEGER, PUBLIC, PARAMETER :: jp_dep = 3 !: index for depth
INTEGER, PUBLIC, PARAMETER :: jp_msk = 4 !: index for mask
!!----------------------------------------------------------------------
!! Domain decomposition
!!----------------------------------------------------------------------
!! if we do not use a massively parallel computer (jpni=jpnj=1) then jpiglo=jpi and jpjglo=jpj
INTEGER, PUBLIC :: jpni !: number of processors following i
INTEGER, PUBLIC :: jpnj !: number of processors following j
INTEGER, PUBLIC :: jpnij !: nb of local domain = nb of processors ( <= jpni x jpnj )
INTEGER, PUBLIC, PARAMETER :: jpr2di = 0 !: number of columns for extra outer halo
INTEGER, PUBLIC, PARAMETER :: jpr2dj = 0 !: number of rows for extra outer halo
INTEGER, PUBLIC, PARAMETER :: jpreci = 1 !: number of columns for overlap
INTEGER, PUBLIC, PARAMETER :: jprecj = 1 !: number of rows for overlap
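!! A worked example of the jpi formula noted above (illustrative): with
!! jpiglo = 182, jpni = 4 and jpreci = 1,
!! jpi = ( 182 - 2*1 + (4-1) ) / 4 + 2*1 = 183/4 + 2 = 45 + 2 = 47
!! (integer division), i.e. each subdomain holds roughly a quarter of the
!! interior points plus a one-point overlap halo on each side.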
!!----------------------------------------------------------------------
!! NEMO/OPA 4.0 , NEMO Consortium (2016)
!! $Id: par_oce.F90 7646 2017-02-06 09:25:03Z timgraham $
!! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
!!======================================================================
END MODULE par_oce
MODULE step_oce
!!======================================================================
!! *** MODULE step_oce ***
!! Ocean time-stepping : module used in both initialisation phase and time stepping
!!======================================================================
!! History : 3.3 ! 2010-08 (C. Ethe) Original code - reorganisation of the initial phase
!! 3.7 ! 2014-01 (G. Madec) LDF simplification
!!----------------------------------------------------------------------
USE oce ! ocean dynamics and tracers variables
USE dom_oce ! ocean space and time domain variables
USE zdf_oce ! ocean vertical physics variables
USE daymod ! calendar (day routine)
USE sbc_oce ! surface boundary condition: ocean
USE sbcmod ! surface boundary condition (sbc routine)
USE sbcrnf ! surface boundary condition: runoff variables
USE sbccpl ! surface boundary condition: coupled formulation (call send at end of step)
USE sbcapr ! surface boundary condition: atmospheric pressure
USE sbctide ! Tide initialisation
USE sbcwave ! Wave initialisation
USE traqsr ! solar radiation penetration (tra_qsr routine)
USE trasbc ! surface boundary condition (tra_sbc routine)
USE trabbc ! bottom boundary condition (tra_bbc routine)
USE trabbl ! bottom boundary layer (tra_bbl routine)
USE tradmp ! internal damping (tra_dmp routine)
USE traadv ! advection scheme control (tra_adv_ctl routine)
USE traldf ! lateral mixing (tra_ldf routine)
USE trazdf ! vertical mixing (tra_zdf routine)
USE tranxt ! time-stepping (tra_nxt routine)
USE tranpc ! non-penetrative convection (tra_npc routine)
USE eosbn2 ! equation of state (eos_bn2 routine)
USE divhor ! horizontal divergence (div_hor routine)
USE dynadv ! advection (dyn_adv routine)
USE dynbfr ! Bottom friction terms (dyn_bfr routine)
USE dynvor ! vorticity term (dyn_vor routine)
USE dynhpg ! hydrostatic pressure grad. (dyn_hpg routine)
USE dynldf ! lateral momentum diffusion (dyn_ldf routine)
USE dynzdf ! vertical diffusion (dyn_zdf routine)
USE dynspg ! surface pressure gradient (dyn_spg routine)
USE dynnxt ! time-stepping (dyn_nxt routine)
USE stopar ! Stochastic parametrization (sto_par routine)
USE stopts
USE bdy_oce , ONLY: ln_bdy
USE bdydta ! open boundary condition data (bdy_dta routine)
USE bdytra ! bdy cond. for tracers (bdy_tra routine)
USE bdydyn3d ! bdy cond. for baroclinic vel. (bdy_dyn3d routine)
USE sshwzv ! vertical velocity and ssh (ssh_nxt routine)
! (ssh_swp routine)
! (wzv routine)
USE domvvl ! variable vertical scale factors (dom_vvl_sf_nxt routine)
! (dom_vvl_sf_swp routine)
USE ldfslp ! iso-neutral slopes (ldf_slp routine)
USE ldfdyn ! lateral eddy viscosity coef. (ldf_dyn routine)
USE ldftra ! lateral eddy diffusive coef. (ldf_tra routine)
USE zdftmx ! tide-induced vertical mixing (zdf_tmx routine)
USE zdfbfr ! bottom friction (zdf_bfr routine)
USE zdftke ! TKE vertical mixing (zdf_tke routine)
USE zdfgls ! GLS vertical mixing (zdf_gls routine)
USE zdfddm ! double diffusion mixing (zdf_ddm routine)
USE zdfevd ! enhanced vertical diffusion (zdf_evd routine)
USE zdfric ! Richardson vertical mixing (zdf_ric routine)
USE zdfmxl ! Mixed-layer depth (zdf_mxl routine)
USE zdfqiao ! Qiao wave-induced mixing (zdf_qiao routine)
USE step_diu ! Time stepping for diurnal sst
USE diurnal_bulk ! diurnal SST bulk routines (diurnal_sst_takaya routine)
USE cool_skin ! diurnal cool skin correction (diurnal_sst_coolskin routine)
USE sbc_oce ! surface fluxes
USE zpshde ! partial step: hor. derivative (zps_hde routine)
USE diawri ! Standard run outputs (dia_wri routine)
USE diaptr ! poleward transports (dia_ptr routine)
USE diadct ! sections transports (dia_dct routine)
USE diaar5 ! AR5 diagnosics (dia_ar5 routine)
USE diahth ! thermocline depth (dia_hth routine)
USE diahsb ! heat, salt and volume budgets (dia_hsb routine)
USE diaharm
!--- NB for restart harmonic analysis
USE diaharm_fast ! harmonic analysis of tides (harm_ana routine)
!--- END NB -----------------------------------
USE diacfl
USE flo_oce ! floats variables
USE floats ! floats computation (flo_stp routine)
USE crsfld ! Standard output on coarse grid (crs_fld routine)
USE asminc ! assimilation increments (tra_asm_inc routine)
! (dyn_asm_inc routine)
USE asmbkg
USE stpctl ! time stepping control (stp_ctl routine)
USE restart ! ocean restart (rst_wri routine)
USE prtctl ! Print control (prt_ctl routine)
USE diaobs ! Observation operator
USE in_out_manager ! I/O manager
USE iom !
USE lbclnk
USE timing ! Timing
#if defined key_iomput
USE xios
#endif
#if defined key_agrif
USE agrif_opa_sponge ! Momentum and tracers sponges
USE agrif_opa_update ! Update (2-way nesting)
#endif
#if defined key_top
USE trcstp ! passive tracer time-stepping (trc_stp routine)
#endif
!!----------------------------------------------------------------------
!! NEMO/OPA 3.7 , NEMO Consortium (2014)
!! $Id: step_oce.F90 7646 2017-02-06 09:25:03Z timgraham $
!! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
!!======================================================================
END MODULE step_oce