Commit 3934705a authored by James Harle

Adding pynemo code.

parent b2981577
'''
This code has been adapted from the matplotlib polygon interaction example
@author: Mr. Srikanth Nagella
'''
# pylint: disable=E1103
# pylint: disable=no-name-in-module
import numpy as np
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon
from matplotlib.artist import Artist
from matplotlib.mlab import dist_point_to_segment
from matplotlib.widgets import RectangleSelector
polygon_alpha = 0.2
class PolygonEditor(object):
'''
This edits the polygons drawn on the map
'''
show_verts = True
epsilon = 3 #threshold
def __init__(self, axis, canvas):
'''
initialises the editable polygon object
'''
self.axis = axis
self.polygon = None
self.line = None
self.xy_values = np.array([])
self._ind = None
self.background = None #background copying
self._callback_ids = list()
self._callback_ids.append(canvas.mpl_connect('draw_event',
self.draw_callback))
self._callback_ids.append(canvas.mpl_connect('button_press_event',
self.button_press_callback))
self._callback_ids.append(canvas.mpl_connect('button_release_event',
self.button_release_callback))
self._callback_ids.append(canvas.mpl_connect('motion_notify_event',
self.motion_notify_callback))
self.canvas = canvas
def add_point(self, xval, yval):
""" adds an new point to the selection list and redraws the selection tool"""
if self.xy_values.shape[0] == 0:
self.xy_values = np.array([(xval, yval), ])
else:
self.xy_values = np.concatenate((self.xy_values, [[xval, yval], ]), axis=0)
self.refresh()
def refresh(self):
""" This method looks at the list of points available and depending on the number of
points in the list creates a point or line or a polygon and draws them"""
if self.xy_values.shape[0] == 0: # No points available
self.reset_line()
self.reset_polygon()
elif self.xy_values.shape[0] <= 2: # point or line for 1 or 2 points
xval, yval = zip(*self.xy_values)
if self.line == None:
self.line = Line2D(xval, yval, marker='o', markerfacecolor='r', animated=True)
self.axis.add_line(self.line)
else:
self.line.set_data(zip(*self.xy_values))
self.reset_polygon()
else: # more than 2 points if polygon is not created then creates one and draws
if self.polygon == None:
self.polygon = Polygon(self.xy_values, animated=True, alpha=polygon_alpha)
self.polygon.add_callback(self.polygon_changed)
self.axis.add_patch(self.polygon)
else:
self.polygon.xy = self.xy_values
self.line.set_data(zip(*self.xy_values))
self.draw_callback(None)
self.canvas.draw()
def reset_line(self):
""" resets the line i.e removes the line from the axes and resets to None """
if self.line != None:
self.line.remove()
self.line = None
def reset_polygon(self):
""" resets the polygon ie. removes the polygon from the axis and reset to None """
if self.polygon != None:
self.polygon.remove()
self.polygon = None
def draw_line(self):
""" draws the line if available """
if self.line != None:
self.axis.draw_artist(self.line)
def draw_polygon(self):
""" draws polygon if available"""
if self.polygon != None:
self.axis.draw_artist(self.polygon)
def disable(self):
""" disables the events and the selection """
self.set_visibility(False)
for callback_id in self._callback_ids:
self.canvas.mpl_disconnect(callback_id)
self.canvas = None
def enable(self):
""" enables the selection """
self.set_visibility(True)
def set_visibility(self, status):
""" sets the visibility of the selection object """
if self.polygon != None:
self.polygon.set_visible(status)
if self.line != None:
self.line.set_visible(status)
self.canvas.blit(self.axis.bbox)
def draw_callback(self, dummy_event):
""" this method draws the selection object """
self.background = self.canvas.copy_from_bbox(self.axis.bbox)
self.draw_polygon()
self.draw_line()
self.canvas.blit(self.axis.bbox)
def polygon_changed(self, poly):
""" redraws the polygon """
vis = self.line.get_visible()
Artist.update_from(self.line, poly)
self.line.set_visible(vis)
def get_index_under_point(self, event):
""" gets the index of the point under the event (mouse click) """
if self.xy_values.shape[0] == 0:
return None
xy_values = self.xy_values
xt_values, yt_values = xy_values[:, 0], xy_values[:, 1]
dist = np.sqrt((xt_values-event.xdata)**2 + (yt_values-event.ydata)**2)
indseq = np.nonzero(np.equal(dist, np.amin(dist)))[0]
ind = indseq[0]
if dist[ind] >= self.epsilon:
ind = None
return ind
def button_press_callback(self, event):
""" callback to mouse press event """
if not self.show_verts:
return
if event.inaxes == None:
return
if event.button != 1:
return
self._ind = self.get_index_under_point(event)
if self._ind == None:
self.insert_datapoint(event)
def button_release_callback(self, event):
""" callback to mouse release event """
if not self.show_verts:
return
if event.button == 2:
self.insert_datapoint(event)
return
if event.button == 3:
self.delete_datapoint(event)
return
if event.button != 1:
return
self._ind = None
def insert_datapoint(self, event):
""" inserts a new data point between the segment that is closest in polygon """
if self.xy_values.shape[0] <= 2:
self.add_point(event.xdata, event.ydata)
else:
event_point = event.xdata, event.ydata
prev_d = dist_point_to_segment(event_point, self.xy_values[0], self.xy_values[-1])
prev_i = len(self.xy_values)
for i in range(len(self.xy_values)-1):
seg_start = self.xy_values[i]
seg_end = self.xy_values[i+1]
dist_p_s = dist_point_to_segment(event_point, seg_start, seg_end)
if dist_p_s < prev_d:
prev_i = i
prev_d = dist_p_s
self.xy_values = np.array(list(self.xy_values[:prev_i+1]) +
[(event.xdata, event.ydata)] +
list(self.xy_values[prev_i+1:]))
self.refresh()
def delete_datapoint(self, event):
""" deletes the data point under the point in event """
ind = self.get_index_under_point(event)
if ind is not None:
self.xy_values = np.array([tup for i, tup in enumerate(self.xy_values) if i != ind])
self.refresh()
self.canvas.draw()
def motion_notify_callback(self, event):
""" callback for the mouse motion with button press.
this is to move the edge points of the polygon"""
if not self.show_verts:
return
if self._ind is None:
return
if event.inaxes is None:
return
if event.button != 1:
return
xval, yval = event.xdata, event.ydata
self.xy_values[self._ind] = xval, yval
self.refresh()
self.canvas.restore_region(self.background)
self.axis.draw_artist(self.polygon)
self.axis.draw_artist(self.line)
self.canvas.blit(self.axis.bbox)
def reset(self):
""" resets the points in the selection deleting the line and polygon"""
self.xy_values = np.array([])
self.reset_line()
self.reset_polygon()
class BoxEditor(object):
""" Box editor is to select area using rubber band sort of drawing rectangle.
it uses matplotlib RectangleSelector under the hood """
polygon = None
def __init__(self, axes, canvas):
""" initialises class and creates a rectangle selector """
self.axes = axes
self.canvas = canvas
self.rectangle_selector = RectangleSelector(axes, self.line_select_callback, drawtype='box',
useblit=True, button=[1,],
minspanx=5, minspany=5,
spancoords='pixels')
def line_select_callback(self, eclick, erelease):
""" callback to the rectangleselector """
x1_val, y1_val = eclick.xdata, eclick.ydata
x2_val, y2_val = erelease.xdata, erelease.ydata
xy_values = np.array([[x1_val, y1_val, ],
[x1_val, y2_val, ],
[x2_val, y2_val, ],
[x2_val, y1_val, ], ])
self.reset_polygon()
self.polygon = Polygon(xy_values, animated=False, alpha=polygon_alpha)
self.axes.add_patch(self.polygon)
self.canvas.draw()
def enable(self):
""" enable the box selector """
self.rectangle_selector.set_active(True)
def disable(self):
""" disables or removes the box selector """
self.reset_polygon()
self.rectangle_selector.set_active(False)
self.canvas.draw()
def reset_polygon(self):
""" resets rectangle polygon """
if self.polygon != None:
self.polygon.remove()
self.polygon = None
def reset(self):
""" reset the Box selector """
self.reset_polygon()
# if __name__ == '__main__':
# import matplotlib.pyplot as plt
# from matplotlib.patches import Polygon
#
# theta = np.arange(0, 2*np.pi, 0.3)
# r = 1.5
#
# xs = r*np.cos(theta)
# ys = r*np.sin(theta)
#
# fig, ax = plt.subplots()
# # PolygonEditor expects the axes and the canvas; it creates and manages its
# # own polygon from the points added below
# p = PolygonEditor(ax, fig.canvas)
# for xval, yval in zip(xs, ys):
#     p.add_point(xval, yval)
#
# ax.set_title('Click and drag a point to move it')
# ax.set_xlim((-2,2))
# ax.set_ylim((-2,2))
# plt.show()
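# A minimal sketch of driving BoxEditor in the same interactive fashion; the
# figure, axes limits and call sequence below are illustrative assumptions,
# not part of the original demo above.
# if __name__ == '__main__':
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.set_xlim((-2, 2))
#     ax.set_ylim((-2, 2))
#     box = BoxEditor(ax, fig.canvas)
#     box.enable()      # left-click and drag to rubber-band a rectangle
#     plt.show()
#     box.disable()     # removes the rectangle and deactivates the selector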
pynemo/gui/shelf_break.png (857 Bytes)
class DstCoord:
"""
This object is currently empty and has data bound to it externally
Equivalent to Matlab dst_coord.
"""
def __init__(self):
self.bdy_i = None
'''
This generates the NEMO Boundary. Creates indices for t, u and v points, plus rim gradient.
Variable names have been renamed for consistency with Python standards and generalised,
e.g. bdy_i is used instead of bdy_t.
Ported from Matlab code by James Harle
@author: John Kazimierz Farey
@author: Srikanth Nagella
'''
# pylint: disable=E1103
# pylint: disable=no-name-in-module
#External Imports
import numpy as np
import logging
#Local Imports
from utils.nemo_bdy_lib import sub2ind
class Boundary:
# Bearings for overlays
_NORTH = [1,-1,1,-1,2,None,1,-1]
_SOUTH = [1,-1,1,-1,None,-2,1,-1]
_EAST = [1,-1,1,-1,1,-1,2,None]
_WEST = [1,-1,1,-1,1,-1,None,-2]
def __init__(self, boundary_mask, settings, grid):
"""Generates the indices for NEMO Boundary and returns a Grid object with indices
Keyword arguments:
boundary_mask -- boundary mask
settings -- dictionary of setting values
grid -- type of the grid 't', 'u', 'v'
Attributes:
bdy_i -- index
bdy_r -- r index
"""
self.logger = logging.getLogger(__name__)
bdy_msk = boundary_mask
self.settings = settings
rw = self.settings['rimwidth']
rm = rw - 1
self.grid_type = grid.lower()
# Throw an error for wrong grid input type
if grid not in ['t', 'u', 'v', 'f']:
self.logger.error('Grid Type not correctly specified:'+grid)
raise ValueError("""%s is invalid grid grid_type;
must be 't', 'u', 'v' or 'f'""" %grid)
# Configure grid grid_type
if grid is not 't':
# We need to copy this before changing, because the original will be
# needed in calculating later grid boundary types
bdy_msk = boundary_mask.copy()
grid_ind = np.zeros(bdy_msk.shape, dtype=np.bool, order='C')
#NEMO works with a staggered 'C' grid. need to create a grid with staggered points
for fval in [-1, 0]: #-1 mask value, 0 Land, 1 Water/Ocean
if grid == 'u':
grid_ind[:, :-1] = np.logical_and(bdy_msk[:, :-1] == 1,
bdy_msk[:, 1:] == fval)
bdy_msk[grid_ind] = fval
elif grid == 'v':
grid_ind[:-1, :] = np.logical_and(bdy_msk[:-1, :] == 1,
bdy_msk[1:, :] == fval)
bdy_msk[grid_ind] = fval
elif grid == 'f':
grid_ind[:-1, :-1] = np.logical_and(bdy_msk[:-1,:-1] == 1,
bdy_msk[1:, 1:] == fval)
grid_ind[:-1, :] = np.logical_or(np.logical_and(bdy_msk[:-1, :] == 1,
bdy_msk[1:, :] == fval),
grid_ind[:-1, :] == 1)
grid_ind[:, :-1] = np.logical_or(np.logical_and(bdy_msk[:, :-1] == 1,
bdy_msk[:, 1:] == fval),
grid_ind[:, :-1] == 1)
bdy_msk[grid_ind] = fval
# Create padded array for overlays
msk = np.pad(bdy_msk,((1,1),(1,1)), 'constant', constant_values=(-1))
# create index arrays of I and J coords
igrid, jgrid = np.meshgrid(np.arange(bdy_msk.shape[1]), np.arange(bdy_msk.shape[0]))
SBi, SBj = self._find_bdy(igrid, jgrid, msk, self._SOUTH)
NBi, NBj = self._find_bdy(igrid, jgrid, msk, self._NORTH)
EBi, EBj = self._find_bdy(igrid, jgrid, msk, self._EAST)
WBi, WBj = self._find_bdy(igrid, jgrid, msk, self._WEST)
#create a 2D array index for the points that are on border
tij = np.column_stack((np.concatenate((SBi, NBi, WBi, EBi)),np.concatenate((SBj, NBj, WBj, EBj))))
bdy_i = np.tile(tij, (rw, 1, 1))
bdy_i = np.transpose(bdy_i, (1, 2, 0))
bdy_r = np.tile(np.arange(0, rw), (bdy_i.shape[0], 1))
# Add points for relaxation zone over rim width
# Within the relaxation zone, step into the domain up to the rim width and select
# the points: from S head North (0,+1), from N head South (0,-1),
# from W head East (+1,0), from E head West (-1,0)
temp = np.column_stack((np.concatenate((SBi*0, NBi*0, WBi*0+1, EBi*0-1)),
np.concatenate((SBj*0+1, NBj*0-1, WBj*0, EBj*0))))
for i in range(rm):
bdy_i[:, :, i+1] = bdy_i[:, :, i] + temp
bdy_i = np.transpose(bdy_i, (1, 2, 0))
bdy_i = np.reshape(bdy_i,
(bdy_i.shape[0],bdy_i.shape[1]*bdy_i.shape[2]))
bdy_r = bdy_r.flatten(1)
## Remove duplicate and open sea points ##
bdy_i, bdy_r = self._remove_duplicate_points(bdy_i, bdy_r)
bdy_i, bdy_r, nonmask_index = self._remove_landpoints_open_ocean(bdy_msk, bdy_i, bdy_r)
### Fill in any gradients between relaxation zone and internal domain
### bdy_msk matches the Matlab incarnation, r_msk is pythonic
r_msk = bdy_msk.copy()
r_msk[r_msk == 1] = rw
r_msk = np.float16(r_msk)
r_msk[r_msk < 1] = np.NaN
r_msk[bdy_i[:,1],bdy_i[:,0]] = np.float16(bdy_r)
r_msk_orig = r_msk.copy()
r_msk_ref = r_msk[1:-1, 1:-1]
self.logger.debug('Start r_msk bearings loop')
#Removes the shape gradients by smoothing it out
for i in range(rw-1):
# Check each bearing
for b in [self._SOUTH, self._NORTH, self._WEST, self._EAST]:
r_msk,r_msk_ref = self._fill(r_msk, r_msk_ref, b)
self.logger.debug('done loop')
# update bdy_i and bdy_r
new_ind = np.abs(r_msk - r_msk_orig) > 0
# The transposing gets around the Fortran v C ordering thing.
bdy_i_tmp = np.array([igrid.T[new_ind.T], jgrid.T[new_ind.T]])
bdy_r_tmp = r_msk.T[new_ind.T]
bdy_i = np.vstack((bdy_i_tmp.T, bdy_i))
uniqind = self._unique_rows(bdy_i)
bdy_i = bdy_i[uniqind, :]
bdy_r = np.hstack((bdy_r_tmp, bdy_r))
bdy_r = bdy_r[uniqind]
# sort by rimwidth
igrid = np.argsort(bdy_r, kind='mergesort')
bdy_r = bdy_r[igrid]
bdy_i = bdy_i[igrid, :]
self.bdy_i = bdy_i
self.bdy_r = bdy_r
self.logger.debug( 'Final bdy_i: %s', self.bdy_i.shape)
def _remove_duplicate_points(self, bdy_i, bdy_r):
""" Removes the duplicate points in the bdy_i and return the bdy_i and bdy_r
bdy_i -- bdy indexes
bdy_r -- bdy rim values
"""
bdy_i2 = np.transpose(bdy_i, (1, 0))
uniqind = self._unique_rows(bdy_i2)
bdy_i = bdy_i2[uniqind]
bdy_r = bdy_r[uniqind]
return bdy_i, bdy_r
def _remove_landpoints_open_ocean(self, mask, bdy_i, bdy_r):
""" Removes the land points and open ocean points """
unmask_index = mask[bdy_i[:,1],bdy_i[:,0]] != 0
bdy_i = bdy_i[unmask_index, :]
bdy_r = bdy_r[unmask_index]
return bdy_i, bdy_r, unmask_index
def _find_bdy(self, I, J, mask, brg):
"""Finds the border indexes by checking the change from ocean to land.
Returns the i and j index array where the shift happens.
Keyword arguments:
I -- I x direction indexes
J -- J y direction indexes
mask -- mask data
brg -- mask index range
"""
# subtract matrices to find boundaries, set to True
m1 = mask[brg[0]:brg[1], brg[2]:brg[3]]
m2 = mask[brg[4]:brg[5], brg[6]:brg[7]]
overlay = np.subtract(m1,m2)
# Create boolean array of bdy points in overlay
bool_arr = overlay==2
# index I or J to find bdies
bdy_I = I[bool_arr]
bdy_J = J[bool_arr]
return bdy_I, bdy_J
def _fill(self, mask, ref, brg):
""" """
tmp = mask[brg[4]:brg[5], brg[6]:brg[7]]
ind = (ref - tmp) > 1
ref[ind] = tmp[ind] + 1
mask[brg[0]:brg[1], brg[2]:brg[3]] = ref
return mask, ref
def _unique_rows(self, t):
""" This returns unique rows in the 2D array.
Returns indexes of unique rows in the input 2D array
t -- input 2D array
"""
sh = np.shape(t)
if (len(sh)> 2) or (sh[0] ==0) or (sh[1] == 0):
print 'Warning: unexpected shape for 2D array:', sh
tlist = t.tolist()
sortt = []
indx = zip(*sorted([(val, i) for i,val in enumerate(tlist)]))[1]
indx = np.array(indx)
for i in indx:
sortt.append(tlist[i])
del tlist
for i,x in enumerate(sortt):
if x == sortt[i-1]:
indx[i] = -1
# all the rows are identical, set the first as the unique row
if sortt[0] == sortt[-1]:
indx[0] = 0
return indx[indx != -1]
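# A toy, self-contained illustration of the overlay trick used in _find_bdy:
# the mask is padded with -1, then subtracted from a shifted copy of itself, so
# ocean cells (1) sitting next to external cells (-1) give a difference of 2.
# The mask and bearing below are illustrative assumptions, not a real NEMO mask.
if __name__ == '__main__':
    toy_msk = np.zeros((6, 6), dtype=int) - 1   # everything outside the domain
    toy_msk[2:5, 2:5] = 1                       # a small patch of ocean
    padded = np.pad(toy_msk, ((1, 1), (1, 1)), 'constant', constant_values=(-1))
    centre = padded[1:-1, 1:-1]                 # the original mask
    south_of = padded[:-2, 1:-1]                # southern neighbour of each cell
    is_south_bdy = (centre - south_of) == 2     # ocean with -1 immediately south
    print(np.argwhere(is_south_bdy))            # (j, i) indices of the south rim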
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Calculates Grid Angles #
# #
# Written by John Kazimierz Farey, Sep 2012 #
# Port of Matlab code of James Harle #
# #
# The nemo_phycst constants have been replaced with numpy built-ins #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# coord_fname: nemo coordinate file
# i: model zonal indices
# j: model meridional indices
# cd_type: define the nature of pt2d grid points
import numpy as np
from reader.factory import GetFile
import logging
# pylint: disable=E1101
class GridAngle:
# I and J offsets for different grid types
CASES = {'t': [0, 0, 0, -1], 'u': [0, 0, 0,-1],
'v': [0, 0, -1, 0], 'f': [0, 1, 0, 0]}
MAP = {'t': 'v', 'u': 'f', 'v': 'f', 'f': 'u'}
def __init__(self, coord_fname, imin, imax, jmin, jmax, cd_type):
# set case and check validity
self.CD_T = cd_type.lower()
self.logger = logging.getLogger(__name__)
if self.CD_T not in ['t', 'u', 'v', 'f']:
raise ValueError('Unknown grid type %s' %cd_type)
self.M_T = self.MAP[self.CD_T]
self.logger.debug('Grid Angle: %s', self.CD_T)
# open coord file
self.nc = GetFile(coord_fname)
# set constants
self.IMIN, self.IMAX = imin, imax
self.JMIN, self.JMAX = jmin, jmax
ndim = len(self.nc['glamt'].dimensions)
if ndim == 4:
self.DIM_STR = 0, 0
elif ndim == 3:
self.DIM_STR = 0
else:
self.DIM_STR = None
# Get North pole direction and modulus for cd_type
np_x, np_y, np_n = self._get_north_dir()
# Get i or j MAP segment Direction around cd_type
sd_x, sd_y, sd_n = self._get_seg_dir(np_n)
# Get cosine and sine
self.sinval, self.cosval = self._get_sin_cos(np_x, np_y, np_n, sd_x,
sd_y, sd_n)
self.nc.close()
# # # # # # # # # # # # #
# # Functions # # # # # #
# # # # # # # # # # # # #
def _get_sin_cos(self, nx, ny, nn, sx, sy, sn):
# Geographic mesh
i, j, ii, jj = self.CASES[self.CD_T]
var_one = self._get_lam_phi(map=True, i=i, j=j, single=True)
var_two = self._get_lam_phi(map=True, i=ii, j=jj, single=True)
ind = (np.abs(var_one - var_two) % 360) < 1.e-8
# Cosine and sine using scalar/vector products
if self.CD_T == 'v':
sin_val = (nx * sx + ny * sy) / sn
cos_val = -(nx * sy - ny * sx) / sn
else:
sin_val = (nx * sy - ny * sx) / sn
cos_val = (nx * sx + ny * sy) / sn
sin_val[ind] = 0
cos_val[ind] = 1
return sin_val, cos_val
# Finds North pole direction and modulus of some point
def _get_north_dir(self):
zlam, zphi = self._get_lam_phi()
z_x_np = self._trig_eq(-2, 'cos', zlam, zphi)
z_y_np = self._trig_eq(-2, 'sin', zlam, zphi)
z_n_np = np.power(z_x_np,2) + np.power(z_y_np,2)
return z_x_np, z_y_np, z_n_np
# Find segmentation direction of some point
def _get_seg_dir(self, north_n):
i, j, ii, jj = self.CASES[self.CD_T]
zlam, zphi = self._get_lam_phi(map=True, i=i, j=j)
z_lan, z_phh = self._get_lam_phi(map=True, i=ii, j=jj)
z_x_sd = (self._trig_eq(2, 'cos', zlam, zphi) -
self._trig_eq(2, 'cos', z_lan, z_phh))
z_y_sd = (self._trig_eq(2, 'sin', zlam, zphi) -
self._trig_eq(2, 'sin', z_lan, z_phh)) # N
z_n_sd = np.sqrt(north_n * (np.power(z_x_sd, 2) + np.power(z_y_sd, 2)))
z_n_sd[z_n_sd < 1.e-14] = 1.e-14
return z_x_sd, z_y_sd, z_n_sd
# Returns lam/phi in (offset) i/j range for init grid type
# Data must be converted to float64 to prevent loss of precision in later results
def _get_lam_phi(self, map=False, i=0, j=0, single=False):
d = self.DIM_STR
i, ii = self.IMIN + i, self.IMAX + i
j, jj = self.JMIN + j, self.JMAX + j
if j < 0:
jj -= j
j = 0
if i < 0:
ii -= i
i = 0
if map:
case = self.M_T
else:
case = self.CD_T
zlam = np.float64(self.nc['glam' + case][d, j:jj, i:ii]) #.variables['glam' + case][d, j:jj, i:ii])
if single:
return zlam
zphi = np.float64(self.nc['gphi' + case][d, j:jj, i:ii])#.variables['gphi' + case][d, j:jj, i:ii])
return zlam, zphi
# Returns long winded equation of two vars; some lam and phi
def _trig_eq(self, x, eq, z_one, z_two):
if eq == 'cos':
z_one = np.cos(np.radians(z_one))
elif eq == 'sin':
z_one = np.sin(np.radians(z_one))
else:
raise ValueError('eq must be "cos" or "sin"')
z_two = np.tan(np.pi / 4 - np.radians(z_two) / 2)
return x * z_one * z_two
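# A small, self-contained check of the stereographic building block in _trig_eq:
# _trig_eq(-2, 'cos', lam, phi) and _trig_eq(-2, 'sin', lam, phi) are the two
# components used above for the North Pole direction. The sample longitudes and
# latitude below are illustrative values only.
if __name__ == '__main__':
    lam = np.array([0.0, 90.0])   # Greenwich meridian and 90E
    phi = np.array([45.0, 45.0])  # a mid-latitude
    x_np = -2 * np.cos(np.radians(lam)) * np.tan(np.pi / 4 - np.radians(phi) / 2)
    y_np = -2 * np.sin(np.radians(lam)) * np.tan(np.pi / 4 - np.radians(phi) / 2)
    # y_np is ~0 on the Greenwich meridian and x_np is ~0 at 90E, as expected
    print(x_np, y_np)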
#
# Currently empty but gets attributes assigned
#
class BoundaryIce:
def __init__(self):
pass
'''
Creates Nemo Bdy netCDF file ready for population
Written by John Kazimierz Farey, started August 30, 2012
Port of Matlab code of James Harle
'''
# pylint: disable=E1103
# pylint: disable=no-name-in-module
from netCDF4 import Dataset
import datetime
import logging
def CreateBDYNetcdfFile(filename, N, I, J, K, rw, h, orig, fv, calendar, grd):
""" This method creates a template of bdy netcdf files. A common for
T, I, U, V, E grid types.
"""
gridNames = ['T', 'I', 'U', 'V', 'E', 'Z'] # All possible grids
# Dimension Lengths
xb_len = N
yb_len = 1
x_len = I
y_len = J
depth_len = K
# Enter define mode
ncid = Dataset(filename, 'w', clobber=True, format='NETCDF4')
#define dimensions
if grd in gridNames and grd != 'Z': # i.e grid NOT barotropic (Z)
dimztID = ncid.createDimension('z', depth_len)
else:
logging.error('Grid type not known')
dimxbID = ncid.createDimension('xb', xb_len)
dimybID = ncid.createDimension('yb', yb_len)
dimxID = ncid.createDimension('x', x_len)
dimyID = ncid.createDimension('y', y_len)
dimtcID = ncid.createDimension('time_counter', None)
#define variable
vartcID = ncid.createVariable('time_counter', 'f4', ('time_counter', ))
varlonID = ncid.createVariable('nav_lon', 'f4', ('y', 'x', ))
varlatID = ncid.createVariable('nav_lat', 'f4', ('y', 'x', ))
if grd in ['E']:
varztID = ncid.createVariable('deptht', 'f4', ('z', 'yb', 'xb', ))
varmskID = ncid.createVariable('bdy_msk', 'f4', ('y', 'x', ), fill_value=fv)
varN1pID = ncid.createVariable('N1p', 'f4', ('time_counter', 'z', 'yb', 'xb', ),
fill_value=fv)
varN3nID = ncid.createVariable('N3n', 'f4', ('time_counter', 'z', 'yb', 'xb', ),
fill_value=fv)
varN5sID = ncid.createVariable('N5s', 'f4', ('time_counter', 'z', 'yb', 'xb', ),
fill_value=fv)
elif grd in ['T', 'I']:
varztID = ncid.createVariable('deptht', 'f4', ('z', 'yb', 'xb', ))
varmskID = ncid.createVariable('bdy_msk', 'f4', ('y', 'x', ), fill_value=fv)
vartmpID = ncid.createVariable('votemper', 'f4', ('time_counter', 'z', 'yb', 'xb', ),
fill_value=fv)
varsalID = ncid.createVariable('vosaline', 'f4', ('time_counter', 'z', 'yb', 'xb', ),
fill_value=fv)
if grd == 'I':
varildID = ncid.createVariable('ileadfra', 'f4', ('time_counter', 'yb', 'xb',),
fill_value=fv)
variicID = ncid.createVariable('iicethic', 'f4', ('time_counter', 'yb', 'xb',),
fill_value=fv)
varisnID = ncid.createVariable('isnowthi', 'f4', ('time_counter', 'yb', 'xb',),
fill_value=fv)
elif grd == 'U':
varztID = ncid.createVariable('depthu', 'f4', ('z', 'yb', 'xb', ), fill_value=fv)
varbtuID = ncid.createVariable('vobtcrtx', 'f4', ('time_counter', 'yb', 'xb', ),
fill_value=fv)
vartouID = ncid.createVariable('vozocrtx', 'f4', ('time_counter', 'z', 'yb', 'xb', ),
fill_value=fv)
elif grd == 'V':
varztID = ncid.createVariable('depthv', 'f4', ('z', 'yb', 'xb', ))
varbtvID = ncid.createVariable('vobtcrty', 'f4', ('time_counter', 'yb', 'xb', ),
fill_value=fv)
vartovID = ncid.createVariable('vomecrty', 'f4', ('time_counter', 'z', 'yb', 'xb',),
fill_value=fv)
elif grd == 'Z':
varsshID = ncid.createVariable('sossheig', 'f4', ('time_counter', 'yb', 'xb', ),
fill_value=fv)
varmskID = ncid.createVariable('bdy_msk', 'f4', ('y', 'x', ), fill_value=fv)
else:
logging.error("Unknow Grid input")
varnbiID = ncid.createVariable('nbidta', 'i4', ('yb', 'xb', ))
varnbjID = ncid.createVariable('nbjdta', 'i4', ('yb', 'xb', ))
varnbrID = ncid.createVariable('nbrdta', 'i4', ('yb', 'xb', ))
#Global Attributes
ncid.file_name = filename
ncid.creation_date = str(datetime.datetime.now())
ncid.rim_width = rw
ncid.history = h
ncid.institution = 'National Oceanography Centre, Liverpool, U.K.'
#Time axis attributes
vartcID.axis = 'T'
vartcID.standard_name = 'time'
vartcID.units = 'seconds since '+orig
vartcID.title = 'Time'
vartcID.long_name = 'Time axis'
vartcID.time_origin = orig
vartcID.calendar = calendar
#Longitude axis attributes
varlonID.axis = 'Longitude'
varlonID.short_name = 'nav_lon'
varlonID.units = 'degrees_east'
varlonID.long_name = 'Longitude'
#Latitude axis attributes
varlatID.axis = 'Latitude'
varlatID.short_name = 'nav_lat'
varlatID.units = 'degrees_north'
varlatID.long_name = 'Latitude'
#nbidta attributes
varnbiID.short_name = 'nbidta'
varnbiID.units = 'unitless'
varnbiID.long_name = 'Bdy i indices'
#nbjdta attributes
varnbjID.short_name = 'nbjdta'
varnbjID.units = 'unitless'
varnbjID.long_name = 'Bdy j indices'
#nbrdta attributes
varnbrID.short_name = 'nbrdta'
varnbrID.units = 'unitless'
varnbrID.long_name = 'Bdy discrete distance'
if grd == 'E':
varztID.axis = 'Depth'
varztID.short_name = 'deptht'
varztID.units = 'm'
varztID.long_name = 'Depth'
varmskID.short_name = 'bdy_msk'
varmskID.units = 'unitless'
varmskID.long_name = 'Structured boundary mask'
varN1pID.units = 'mmol/m^3'
varN1pID.short_name = 'N1p'
varN1pID.long_name = 'Phosphate'
varN1pID.grid = 'bdyT'
varN3nID.units = 'mmol/m^3'
varN3nID.short_name = 'N3n'
varN3nID.long_name = 'Nitrate'
varN3nID.grid = 'bdyT'
varN5sID.units = 'mmol/m^3'
varN5sID.short_name = 'N5s'
varN5sID.long_name = 'Silicate'
varN5sID.grid = 'bdyT'
if grd in ['T', 'I']:
varztID.axis = 'Depth'
varztID.short_name = 'deptht'
varztID.units = 'm'
varztID.long_name = 'Depth'
varmskID.short_name = 'bdy_msk'
varmskID.units = 'unitless'
varmskID.long_name = 'Structured boundary mask'
vartmpID.units = 'C'
vartmpID.short_name = 'votemper'
vartmpID.long_name = 'Temperature'
vartmpID.grid = 'bdyT'
varsalID.units = 'PSU'
varsalID.short_name = 'vosaline'
varsalID.long_name = 'Salinity'
varsalID.grid = 'bdyT'
if grd == 'I':
varildID.units = '%'
varildID.short_name = 'ildsconc'
varildID.long_name = 'Ice lead fraction'
varildID.grid = 'bdyT'
variicID.units = 'm'
variicID.short_name = 'iicethic'
variicID.long_name = 'Ice thickness'
variicID.grid = 'bdyT'
varisnID.units = 'm'
varisnID.short_name = 'isnowthi'
varisnID.long_name = 'Snow thickness'
varisnID.grid = 'bdyT'
elif grd == 'U':
varztID.axis = 'Depth'
varztID.short_name = 'depthu'
varztID.units = 'm'
varztID.long_name = 'Depth'
varbtuID.units = 'm/s'
varbtuID.short_name = 'vobtcrtx'
varbtuID.long_name = 'Thickness-weighted depth-averaged zonal Current'
varbtuID.grid = 'bdyU'
vartouID.units = 'm/s'
vartouID.short_name = 'vozocrtx'
vartouID.long_name = 'Zonal Current'
vartouID.grid = 'bdyU'
elif grd == 'V':
varztID.axis = 'Depth'
varztID.short_name = 'depthv'
varztID.units = 'm'
varztID.long_name = 'Depth'
varbtvID.units = 'm/s'
varbtvID.short_name = 'vobtcrty'
varbtvID.long_name = 'Thickness-weighted depth-averaged meridional Current'
varbtvID.grid = 'bdyV'
vartovID.units = 'm/s'
vartovID.short_name = 'vomecrty'
vartovID.long_name = 'Meridional Current'
vartovID.grid = 'bdyV'
elif grd == 'Z':
varsshID.units = 'm'
varsshID.short_name = 'sossheig'
varsshID.long_name = 'Sea Surface Height'
varsshID.grid = 'bdyT'
varmskID.short_name = 'bdy_msk'
varmskID.units = 'unitless'
varmskID.long_name = 'Structured boundary mask'
else:
logging.error('Unknown Grid')
ncid.close()
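# A minimal sketch of a call creating a T-grid template; every value below
# (file name, sizes, rim width, fill value, time origin, calendar) is an
# illustrative assumption, not taken from a real configuration.
if __name__ == '__main__':
    CreateBDYNetcdfFile('coordinates.bdy.T.nc',  # hypothetical output file
                        N=1200,                  # number of boundary points (xb)
                        I=360, J=292, K=75,      # domain x, y and depth sizes
                        rw=9,                    # rim width
                        h='pynemo template test',
                        orig='1960-01-01 00:00:00',
                        fv=-1e20,                # fill value
                        calendar='gregorian',
                        grd='T')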
'''
Created on 3 Oct 2014
@author: Mr. Srikanth Nagella
Netcdf writer for the bdy output
'''
# pylint: disable=E1103
# pylint: disable=no-name-in-module
from netCDF4 import Dataset
import numpy as np
def write_data_to_file(filename, variable_name, data):
""" Writes the data to the netcdf templete file.
Keyword arguments:
filename -- output filename
variable_name -- name of the variable into which the data is written.
data -- data that will be written to variable in netcdf.
"""
ncid = Dataset(filename, 'a', clobber=False, format='NETCDF4')
count = data.shape
three_dim_variables = ['votemper', 'vosaline', 'N1p', 'N3n', 'N5s']
two_dim_variables = ['sossheig', 'vobtcrtx', 'vobtcrty', 'iicethic', 'ileadfra', 'isnowthi']
if variable_name in three_dim_variables:
if len(count) == 3:
count += (1L, )
ncid.variables[variable_name][:, :, :, :] = np.reshape(data, count)[:, :, :, :]
elif variable_name in two_dim_variables:
if len(count) == 2:
count += (1L, )
elif len(count) == 1:
count += (1L, 1L, )
ncid.variables[variable_name][:, :, :] = np.reshape(data, count)[:, :, :]
elif variable_name == 'time_counter':
ncid.variables[variable_name][:] = data[:]
else:
if len(count) == 1:
ncid.variables[variable_name][:] = data[:]
elif len(count) == 2:
ncid.variables[variable_name][:, :] = data[:, :]
elif len(count) == 3:
ncid.variables[variable_name][:, :, :] = data[:, :, :]
ncid.close()
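# A minimal sketch of populating two of the template variables; it assumes a
# template like the hypothetical 'coordinates.bdy.T.nc' sketched above already
# exists, and the array shapes simply mirror its (y, x) = (292, 360) dimensions.
# lon = np.tile(np.linspace(-20.0, 15.0, 360), (292, 1))    # (y, x) longitudes
# lat = np.tile(np.linspace(40.0, 65.0, 292), (360, 1)).T   # (y, x) latitudes
# write_data_to_file('coordinates.bdy.T.nc', 'nav_lon', lon)
# write_data_to_file('coordinates.bdy.T.nc', 'nav_lat', lat)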
# This object is initially empty but has data bound to it
# Equivalent to matlab scr_coord
class ScrCoord:
def __init__(self):
pass
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
'''
Created on Wed Sep 12 08:02:46 2012
Parses a file to find out which nemo boundary settings to use
@author John Kazimierz Farey
@author James Harle
$Last commit on:$
'''
# pylint: disable=E1103
# pylint: disable=no-name-in-module
#External imports
from collections import OrderedDict
import os
import logging
class Setup(object):
'''
This class holds the settings information.
Invoke with a text file location; the class init reads and deciphers the variables.
The attribute <settings> is a dict holding all the vars.
'''
def __init__(self, setfile):
""" Constructor, reads the settings file and sets the dictionary with setting name/key and
it's value """
#Logging for class
self.logger = logging.getLogger(__name__)
self.filename = setfile
if not setfile: # debug
self.filename = '../data/namelist.bdy'
self._load_settings()
self.variable_info = self.variable_info_reader(self.filename+'.info')
def refresh(self):
""" Re loads the settings from file """
self._load_settings()
def _load_settings(self):
""" Loads the settings from file """
try:
namelist = open(self.filename, 'r')
except:
self.logger.error("Cannot open the file:"+self.filename)
raise
data = namelist.readlines()
# Dictionary of all the vars in the file and a separate dictionary for boolean values
self.settings, self.bool_settings = _assign(_trim(data))
namelist.close()
def _get_var_name_value(self, line):
""" splits the line into key value pair. """
key_value = line.split("=", 2)
name_prefix = key_value[0][0:2].lower()
name = key_value[0][3:].lower().strip() # 3 -> 0 to keep type info
value_str = key_value[1].strip()
index = '-1'
value = ''
if name_prefix == 'ln':
if value_str.find('true') != -1:
value = True
elif value_str.find('false') != -1:
value = False
else:
raise ValueError('Cannot assign %s to %s, must be boolean' %(value_str, name))
elif name_prefix == 'rn' or name_prefix == 'nn':
if value_str.find('.') > -1 or value_str.find('e') > -1:
try:
value = float(value_str)
except ValueError:
self.logger.error('Cannot assign %s to %s, must be float', value_str, name)
raise
else:
try:
value = int(value_str)
except ValueError:
self.logger.error('Cannot assign %s to %s, must be integer', value_str, name)
raise
elif name_prefix == 'cn' or name_prefix == 'sn':
value = value_str.strip("'")
elif name_prefix == 'cl':
name = key_value[0].split('(')
index = name[1].split(')')
name = name[0].strip()
index = index[0].strip()
value = value_str
else:
raise ValueError('%s data type is ambiguous' %key_value)
return name, index, value
def write(self):
'''
This method writes the variable data back into the file
'''
try:
namelist = open(self.filename, 'r')
except:
self.logger.error("Cannot open the file:"+self.filename+" to write ")
raise
data = namelist.readlines()
for name in self.settings:
values = self.settings[name]
if type(values).__name__ != 'dict':
values = {'-1': values}
for index, value in values.iteritems():
count = -1
for line in data:
# print line
count = count + 1
#find the variable
line_without_comments = strip_comments(line)
if line_without_comments == '':
continue
#print line_without_comments
data_name, data_index, data_value = self._get_var_name_value(line_without_comments)
if data_name == name:
#found the variable line
if data_index == index:
if type(data_value).__name__ == 'bool' \
and \
type(value).__name__ != 'bool':
data[count] = _replace_var_value(line, data_value,\
self.bool_settings[name])
continue
if data_value == value:
break
else:
data[count] = _replace_var_value(line, data_value, value)
break
namelist.close()
namelist = open(self.filename, 'w')
namelist.truncate()
namelist.writelines(data)
namelist.close()
def variable_info_reader(self, filename):
""" This method reads the variable description data from 'variable.info' file in the pynemo installation path
if it can't find the file with the same name as input bdy file with extension .info
Keyword arguments:
filename -- filename of the variables information
returns a dictionary with variable name and its description
"""
variable_info = {}
if filename is None or not os.path.exists(filename):
#Read the default file
file_path, dummy = os.path.split(__file__)
filename = os.path.join(file_path,'variable.info')
try:
namelist = open(filename, 'r')
data = namelist.readlines()
for line in data:
name = _get_var_name(line)
value = line.split("=", 1)[1]
variable_info[name[0]] = value.strip()
except IOError:
self.logger.error("Cannot open the variable file:"+filename)
return variable_info
def _trim(data):
""" Trims the sets of lines removing empty lines/whitespaces and removing comments
which start with ! """
newdata = []
while data:
line = data.pop(0)
line = line.rsplit('!')[0].strip()
if line != '':
newdata.append(line)
return newdata
def _get_val(vars_dictionary, bool_vars_dictionary, line):
""" traverses input string and appends the setting name and its value to dictionary
of settings and also if the setting name holds a boolean value then to the dictionary
of boolean variables. checks the type and raises error for ambiguous values"""
logger = logging.getLogger(__name__)
name_prefix = line[0][0:2].lower()
name = line[0][3:].lower().strip() # 3 -> 0 to keep type info
value = line[1].strip()
if name_prefix == 'ln':
if value.find('true') != -1:
if vars_dictionary.has_key(name) != True:
vars_dictionary[name] = True
bool_vars_dictionary[name] = True
elif value.find('false') != -1:
if vars_dictionary.has_key(name) != True:
vars_dictionary[name] = False
bool_vars_dictionary[name] = False
else:
raise ValueError('Cannot assign %s to %s, must be boolean' %(value, name))
elif name_prefix == 'rn' or name_prefix == 'nn':
if value.find('.') > -1 or value.find('e') > -1:
try:
vars_dictionary[name] = float(value)
except ValueError:
logger.error('Cannot assign %s to %s, must be float', value, name)
raise
else:
try:
vars_dictionary[name] = int(value)
except ValueError:
logger.error('Cannot assign %s to %s, must be integer', value, name)
raise
elif name_prefix == 'cn' or name_prefix == 'sn':
vars_dictionary[name] = value.strip("'")
elif name_prefix == 'cl':
name = line[0].split('(')
index = name[1].split(')')
if name[0].strip() not in vars_dictionary.keys():
vars_dictionary[name[0].strip()] = {}
vars_dictionary[name[0].strip()][index[0].strip()] = value.strip()
else:
raise ValueError('%s data type is ambiguous' %line)
def _replace_var_value(original_line, value, new_value):
""" replaces the variable name value with new_value in the original_line"""
if type(value).__name__ == 'bool':
value = str(value).lower()
new_value = str(new_value).lower()
elif type(value).__name__ == 'str': # an empty string needs to be replaced with ''
print value, new_value
if value == '':
value = '\'\''
new_value = '\''+new_value+'\''
return original_line.replace(str(value), str(new_value), 1)
def _get_var_name(line):
""" parses the line to find the name and if it is part of the array '()'
then returns name of variable and index of the array. if variable is not
array then only variable name and -1 in index"""
name_value = line.split("=", 1)
name_prefix = name_value[0][0:2].lower()
name = name_value[0][3:].lower().strip() # 3 -> 0 to keep type info
if name_prefix in ['ln', 'rn', 'nn', 'cn', 'sn']:
return name, -1
elif name_prefix == 'cl':
name = name_value[0].split('(')
index = name[1].split(')')
return name[0], index[0]
# Returns tidy dictionary of var names and values
def _assign(data):
""" return a dictionary of variables and also special dictionary for boolean variable """
vars_dictionary = OrderedDict({})
bool_vars_dictionary = OrderedDict({})
for line in data:
keyvalue = line.split('=', 1)
_get_val(vars_dictionary, bool_vars_dictionary, keyvalue)
return vars_dictionary, bool_vars_dictionary
def strip_comments(line):
""" strips the comments in the line. removes text after ! """
line = line.rsplit('!')[0].strip()
return line
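# A minimal, self-contained sketch of how the parsing helpers interpret namelist
# lines; the settings shown are illustrative, not a complete namelist.bdy.
if __name__ == '__main__':
    example = ["ln_tra         = .true.            ! ln_ prefix -> boolean",
               "rn_hc          = 150.0             ! rn_/nn_ -> float or int",
               "nn_rimwidth    = 9",
               "cn_dst_hgr     = 'mesh_hgr.nc'     ! cn_/sn_ -> quoted string"]
    settings, bool_settings = _assign(_trim(example))
    print(settings)        # OrderedDict of name -> typed value
    print(bool_settings)   # only the logical (ln_*) entries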
# This object is currently empty but has data bound to it
# Equivalent to matlab src_coord
class SourceCoord:
def __init__(self):
""" This for source coordinates object attributes initialisation """
self.bdy_i = None
##################################################
# Written by John Kazimierz Farey, Sep 2012 #
# Port of Matlab code of James Harle #
# # # #
# Init with source directory for netcdf files #
# Method to generates time/file list information #
# for a particular grid #
##################################################
from os import listdir
from netCDF4 import Dataset, netcdftime
import logging
class SourceTime:
def __init__(self, src_dir):
self.src_dir = src_dir
self.logger = logging.getLogger(__name__)
# returns a list of all the relevant netcdf files
def _get_dir_list(self, grid):
fend = 'd05%s.nc' %grid.upper()
dir_list = listdir(self.src_dir)
for i in range(len(dir_list)):
if dir_list[i][-7:] != fend:
dir_list[i] = ''
else:
dir_list[i] = self.src_dir + dir_list[i]
dir_list.sort()
return filter(None, dir_list)
# Returns list of dicts of date/time info
# I assume there is only one date per file
# Each date is datetime instance. to get day etc use x.day
# They should be in order
# Matlab var dir_list is incorporated into src_time
def get_source_time(self, grid, t_adjust):
dir_list = self._get_dir_list(grid)
src_time = []
for f in range(len(dir_list)):
self.logger.debug('get_source_time: %s', dir_list[f])
nc = Dataset(dir_list[f], 'r')
varid = nc.variables['time_counter']
f_time = {}
f_time['fname'] = dir_list[f]
# First 2 values are in unicode. Pray.
f_time['units'] = varid.units
f_time['calendar'] = varid.calendar
raw_date = varid[0] + t_adjust
f_time['date'] = netcdftime.num2date(raw_date, f_time['units'],
f_time['calendar'])
src_time.append(f_time)
return src_time
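# A minimal sketch of typical use; the source directory and grid letter are
# illustrative assumptions. _get_dir_list keeps only files ending in
# 'd05T.nc' / 'd05U.nc' / 'd05V.nc', and the directory needs a trailing slash
# because the file names are appended to it directly.
#
#     st = SourceTime('/path/to/source/data/')
#     t_list = st.get_source_time('t', t_adjust=0)   # one dict per source file
#     for entry in t_list:
#         print(entry['fname'], entry['date'])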
##############################################
# Generates Depth information #
# # # #
# Written by John Kazimierz Farey, Sep 2012 #
# Port of Matlab code of James Harle #
##############################################
"""
# NOTES:
# I have skipped error check code
# Generates depth points for t, u and v in one loop iteration
Initialise with bdy t, u and v grid attributes (Grid.bdy_i)
and settings dictionary
"""
from reader.factory import GetFile
import numpy as np
import logging
from utils.nemo_bdy_lib import sub2ind
from utils.e3_to_depth import e3_to_depth
# pylint: disable=E1101
# Query name
class Depth:
def __init__(self, bdy_t, bdy_u, bdy_v, settings):
self.logger = logging.getLogger(__name__)
self.logger.debug( 'init Depth' )
hc = settings['hc']
nc = GetFile(settings['dst_zgr'])#Dataset(settings['dst_zgr'], 'r')
mbathy = nc['mbathy'][:,:,:].squeeze() #nc.variables['mbathy'][:,:,:].squeeze()
# numpy requires float dtype to use NaNs
mbathy = np.float16(mbathy)
mbathy[mbathy == 0] = np.NaN
nz = len(nc['nav_lev'][:])#.variables['nav_lev'][:])
# Set up arrays
t_nbdy = len(bdy_t[:,0])
u_nbdy = len(bdy_u[:,0])
v_nbdy = len(bdy_v[:,0])
zp = ['t', 'wt', 'u', 'wu', 'v', 'wv']
self.zpoints = {}
for z in zp:
if 't' in z:
nbdy = t_nbdy
elif 'u' in z:
nbdy = u_nbdy
elif 'v' in z:
nbdy = v_nbdy
self.zpoints[z] = np.zeros((nz, nbdy))
# Check inputs
# FIX ME? Errors for wrong obj arg len. probably better to work around
if settings['sco']:
# hc = ... FIX ME??
# Depth of water column at t-point
hbatt = nc['hbatt'][:,:,:]#nc.variables['hbatt'][:,:,:]
# Replace land with NaN
hbatt[mbathy == 0] = np.NaN
# find bdy indices from subscripts
t_ind = sub2ind(mbathy.shape, bdy_t[:,0], bdy_t[:,1])
u_ind = sub2ind(mbathy.shape, bdy_u[:,0], bdy_u[:,1])
u_ind2 = sub2ind(mbathy.shape, bdy_u[:,0] + 1, bdy_u[:,1])
v_ind = sub2ind(mbathy.shape, bdy_v[:,0], bdy_v[:,1])
v_ind2 = sub2ind(mbathy.shape, bdy_v[:,0], bdy_v[:,1] + 1)
# This is very slow
self.logger.debug( 'starting nc reads loop' )
for k in range(nz):
if settings['sco']:
# sigma coeffs at t-point (1->0 indexed)
gsigt = nc['gsigt'][0,k,:,:]#nc.variables['gsigt'][0,k,:,:]
# sigma coeffs at w-point
gsigw = nc['gsigw'][0,k,:,:]#nc.variables['gsigw'][0,k,:,:]
# NOTE: check size of gsigt SKIPPED
wrk1 = (hbatt - hc) * gsigt[:,:] + (hc * (k + 0.5) / (nz - 1))
wrk2 = (hbatt - hc) * gsigw[:,:] + (hc * (k + 0.5) / (nz - 1))
else:
# jelt: replace 'load gdep[wt] with load e3[tw] and compute gdep[tw]
#wrk1 = nc['gdept'][0,k,:,:]#nc.variables['gdept'][0,k,:,:]
#wrk2 = nc['gdepw'][0,k,:,:]#nc.variables['gdepw'][0,k,:,:]
[wrk1, wrk2] = e3_to_depth(nc['e3t'][0,k,:,:], nc['e3w'][0,k,:,:], nz)
# Replace deep levels that are not used with NaN
wrk2[mbathy + 1 < k + 1] = np.NaN
wrk1[mbathy < k + 1] = np.NaN
# Set u and v grid point depths
zshapes = {}
for p in self.zpoints.keys():
zshapes[p] = self.zpoints[p].shape
wshapes = []
wshapes.append(wrk1.shape)
wshapes.append(wrk2.shape)
wrk1, wrk2 = wrk1.flatten(1), wrk2.flatten(1)
self.zpoints['t'][k,:] = wrk1[t_ind]
self.zpoints['wt'][k,:] = wrk2[t_ind]
self.zpoints['u'][k,:] = 0.5 * (wrk1[u_ind] + wrk1[u_ind2])
self.zpoints['wu'][k,:] = 0.5 * (wrk2[u_ind] + wrk2[u_ind2])
self.zpoints['v'][k,:] = 0.5 * (wrk1[v_ind] + wrk1[v_ind2])
self.zpoints['wv'][k,:] = 0.5 * (wrk2[v_ind] + wrk2[v_ind2])
for p in self.zpoints.keys():
self.zpoints[p] = self.zpoints[p].reshape(zshapes[p])
self.logger.debug( 'Done loop, zpoints: %s ', self.zpoints['t'].shape)
nc.close()
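# A minimal sketch of constructing Depth from boundary index arrays; the
# settings keys ('dst_zgr', 'hc', 'sco') are the ones read above, but the file
# path, values and toy index pairs are illustrative assumptions.
#
#     toy_settings = {'dst_zgr': '/path/to/mesh_zgr.nc',  # vertical grid file
#                     'hc': 150.0,                        # critical depth
#                     'sco': False}                       # True for s-coordinates
#     bdy_t = np.array([[10, 5], [11, 5]])                # [i, j] index pairs
#     bdy_u = np.array([[10, 5]])
#     bdy_v = np.array([[10, 5]])
#     dep = Depth(bdy_t, bdy_u, bdy_v, toy_settings)
#     print(dep.zpoints['t'].shape)                       # (nz, n_t_bdy_points)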
#
# The loop from nemo_bdy_extr_tm3
#
#
#
from calendar import monthrange
from datetime import datetime
import numpy as np
from netCDF4 import Dataset, netcdftime
class Enter:
def __init__(self, settings, sc_time, dir_list, dst_cal_type, year, month):
var_nam = ['votemper', 'vosaline']
sc_fields = source_fields
#self.setup = settings # dict
# define src/dst cals
sf, ed = cal_trans(sc_time, dst_cal_type, year, month)
# W
DstCal = utime('seconds since %d-1-1' %year, dst_cal_type)
dst_start = DstCal.date2num(datetime(year, month, 1))
dst_end = DstCal.date2num(datetime(year, month, ed, 23, 59, 59))
self.S_cal = utime(sc_time[0]['units'], sc_time[0]['calendar'])
self.D_cal = utime('seconds since %d-1-1' %settings['year_000'],
settings['dst_calendar'])
for date in sc_time:
date['date_num'] = DstCal.date2num(date['date']) * sf
# Get first and last date within range
first_date, last_date = None, None
rev_seq = range(len(sc_time))
rev_seq.reverse()
# Multiple values.. might be broken..
for date in rev_seq:
if sc_time[date]['date_num'] < dst_start:
first_date = date #number..
break
for date in range(len(sc_time)):
if sc_time[date]['date_num'] > dst_end:
last_date = date
break
for date in range(first_date, last_date + 1):
nc = Dataset(sc_time[date], 'r')
if key_vec:
pass
#nc_2 = Dataset
# FIX ME
# We shouldn't have to worry about counters
sc_bdy = np.zeros((nvar, sc_z_len, source_ind['ind'].shape[0],
source_ind['ind'].shape[1]))
ind_vec = {}
# distinctive variable name since it gets lost in the huge loop
for shoggoth in range(nvar):
varid = nc.variables[var_nam[shoggoth]]
i, ii = source_ind['imin'], source_ind['imax']
j, jj = source_ind['jmin'], source_ind['jmax']
sc_arrays = []
col_sc_arrays = []
if key_vec:
varid_2 = nc_2.variables[var_nam[shoggoth + 1]]
if not isslab and not key_vec:
# NOTE: 0 v 1 indexing may problemate
sc_arrays.append(varid[i-1:ii, j-1:jj, :sc_z_len, :1])
elif key_vec:
sc_arrays.append(varid[i-2:ii, j-1:jj, :sc_z_len, :1])
sc_arrays.append(varid_2[i-1:ii, j-2:jj, :sc_z_len, :1])
for x in 0,1:
# tidy up src array - replace missing val
for y in 'mv', 'fv':
if not np.isnan(sc_fields[y][x]):
ind_vec[y] = sc_arrays[x] == sc_fields[y][x]
sc_arrays[x][ind_vec[y]] = 0
else:
sc_arrays[x][np.isnan(sc_arrays[x])] = 0
# Adjust for scaling or offsets
if not np.isnan(sc_fields['sf'][x]):
sc_arrays[x] *= sc_fields['sf'][x]
if not np.isnan(sc_fields['os'][x]):
sc_arrays[x] += sc_fields['os'][x]
# Colocate velocity points on T grid prior to rotation
axis = [1, None]
col = 0.5 * (sc_arrays[x][:-axis[0],:-axis[1],:] +
sc_arrays[x][axis[0]:,axis[1]:,:])
col[col == 0] = np.NaN
col_sc_arrays.append(col)
axis.reverse()
# This is a slab
else:
sc_arrays.append(varid[i-1:ii, j-1:jj, :1])
#query var names
if msk and first and shoggoth==0:
pass
# Open another magic file and do stuff
nc3 = Dataset(source_mask, 'r')
varid_3 = nc3.variables['tmaskutil']
msk_array = varid_3[i-1:ii, j-1:jj, :1]
if msk: #must be true for msk array ??...
sc_arrays[0][msk_array == 0] = np.NaN
# Finished reading Source data
#for depth_val in range(sc_z_len):
# tmp_arrays = []
# if not key_vec:
# tmp_arrays.append(sc_arrays[0][:,:depth_val]
def _fv_mv_to_zero(self, scarr, indvec, sc_fields, pos):
for x in 'mv', 'fv':
if not np.isnan(sc_fields[x][pos]):
ind_vec[x] = scarr == sc_fields[x][pos]
scarr[ind_vec[x]] = 0
else:
scarr[np.isnan(scarr)] = 0
return scarr, indvec
# Convert numeric date from source to dest
def convert_date(self, date):
val = self.S_cal.num2date(date)
return self.D_cal.date2num(val)
def cal_trans(self, source, dest, year, month):
vals = {'gregorian': [monthrange(year, month)[1], 31], 'noleap':
[365., 31],'360_day': [360., 30]}
if source not in vals.keys():
raise ValueError('Unknown calendar type: %s' %source)
sf = vals[source][0] / vals[dest][0]
return sf, vals[dest][1]
########################################################
# Creates Nemo bdy indices for t, u, v points #
## ##
# Written by John Kazimierz Farey, started 30 Aug 2012 #
# Port of Matlab code by James Harle #
########################################################
'''
This module combines matlab coord gen and pop.
Initialise with netcdf file name and dictionary containing
all bdy grids (objects)
'''
from datetime import datetime
from netCDF4 import Dataset
import numpy as np
import logging
class Coord:
_grid = ['t','u','v']
# Init with nc fname and dictionary of bdy inds
def __init__(self, fname, bdy_ind):
self.bdy_ind = bdy_ind
self.logger = logging.getLogger(__name__)
self.logger.debug( fname )
if not fname:
print 'need some error handling in here or is this redundant?' # TODO
# Enter define mode
self.ncid = Dataset(fname, 'w', clobber=True, format='NETCDF4')
# Define Dimensions
self.dim_id = self._create_dims()
# Create tidy dictionaries to hold all our pretty variables
self.var_nb_ij_id = self._build_dict(['i', 'j'],
['nb', 'i4', 'unitless', 0])
self.var_nb_r_id = self._build_dict(['r'],
['nb', 'i4', 'unitless', 0])
self.var_g_lamphi_id = self._build_dict(['lam', 'phi'],
['g', 'f4', 'degrees_east', 'longitude'])
self.var_e_12_id = self._build_dict(['1', '2'],
['e', 'f4', 'metres', 'scale factor'])
# Assign Global Attributes
self.ncid.file_name = fname
self.ncid.creation_date = str(datetime.now())
self.ncid.institution = 'National Oceanography Centre, Liverpool, U.K.'
# Leave Define Mode
# # # # # # # # #
# # Functions # #
# # # # # # # # #
def closeme(self):
self.ncid.close()
# Creates dims and returns a dictionary of them
def _create_dims(self):
ret = {'xb':{}}
ret['xb']['t'] = self.ncid.createDimension('xbT',
len(self.bdy_ind['t'].bdy_i))
ret['xb']['u'] = self.ncid.createDimension('xbU',
len(self.bdy_ind['u'].bdy_i))
ret['xb']['v'] = self.ncid.createDimension('xbV',
len(self.bdy_ind['v'].bdy_i))
ret['yb'] = self.ncid.createDimension('yb', 1)
return ret
# Sets up a grid dictionary
def _build_dict(self, dim, units):
ret = {}
for g in self._grid:
ret[g] = {}
for d in dim:
ret[g][d] = self._add_vars(d, g, units)
return ret
# creates a var w/ attributes
def _add_vars(self, dim, grd, unt):
dat = unt[2]
lname = unt[3]
if dim == 'phi':
dat = 'degrees_north'
lname = 'latitude'
elif lname == 0:
lname = 'Bdy %s indices'%dim
lname = lname + ' (%s)'%grd.upper()
#print '%s%s%s'%(unt[0],dim,grd)
#print unt[1]
var = self.ncid.createVariable('%s%s%s'%(unt[0],dim,grd),
unt[1], ('yb', 'xb'+ grd.upper()))
var.short_name = '%s%s%s'%(unt[0],dim,grd)
var.units = dat
var.long_name = lname
return var
def populate(self, ncfname):
self.set_lenvar(self.var_nb_ij_id)
self.set_lenvar(self.var_nb_r_id)
ncid2 = Dataset(ncfname, 'r')
self.set_lenvar(self.var_g_lamphi_id, ncid2, 'g')
self.set_lenvar(self.var_e_12_id, ncid2, 'e')
ncid2.close()
self.closeme()
# sets the len var of each array in the var dictionary fed
# specifying nc and unt pulls data from a secondary file
# Otherwise pull it from the class dict
def set_lenvar(self, vardic, nc=None, unt=None):
for ind in vardic:
x = 0
data = None
for dim in vardic[ind]:
if nc is not None:
data = nc.variables['%s%s%s'%(unt, dim, ind)][:]
self.logger.debug('%s %s %s %s %s %s', ind, self.bdy_ind[ind].bdy_i[:,1], data.shape, dim, unt, ind)
data = data.squeeze()
self.logger.debug('%s %s %s', ind, self.bdy_ind[ind].bdy_i[:,1], data.shape)
data = data[(self.bdy_ind[ind].bdy_i[:,1]),
(self.bdy_ind[ind].bdy_i[:,0])]
elif len(vardic[ind]) == 1:
data = self.bdy_ind[ind].bdy_r[:]
else:
data = self.bdy_ind[ind].bdy_i[:,x]
x = 1
attData = getattr(vardic[ind][dim], 'long_name')
#print 'Just about to add 1 to ', attData
# add 1 to all indices as they are going to be used in
# a Fortran (1-based) environment
# (this line was previously commented out because the generated data
# did not match the output generated from Matlab)
data = data + 1
vardic[ind][dim][:] = data
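# A minimal sketch of how Coord is typically driven; 'grid_t', 'grid_u' and
# 'grid_v' stand for Boundary objects (anything exposing bdy_i and bdy_r), and
# the file names are illustrative assumptions rather than pynemo defaults.
#
#     bdy_ind = {'t': grid_t, 'u': grid_u, 'v': grid_v}
#     coord = Coord('coordinates.bdy.nc', bdy_ind)
#     # populate() pulls glam*/gphi* and e1*/e2* from a NEMO coordinates file,
#     # subsets them at the boundary indices and closes the output file
#     coord.populate('coordinates.nc')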