Commit 6b4a048d authored by iregon

Old files cleaning

parent 8d7cd946
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 09:38:17 2019
Reads source data (file, pandas DataFrame or pd.io.parsers.TextFileReader) to
a pandas DataFrame. The source data model needs to be input to the module as
a named model (included in the module) or as the path to a data model.
Data is validated against its data model after reading, producing a boolean mask.
Calls the schemas, reader and validate modules in the tool to access the data models,
read the data and validate it.
@author: iregon
"""
import os
import sys
import pandas as pd
from mdf_reader.reader import reader as reader
from mdf_reader.validate import validate as validate
import mdf_reader.schemas as schemas
import mdf_reader.properties as properties
import mdf_reader.common.pandas_TextParser_hdlr as pandas_TextParser_hdlr
import logging
import json
def read(source, data_model = None, data_model_path = None, sections = None, chunksize = None,
supp_section = None, supp_model = None, supp_model_path = None,
skiprows = None, out_path = None):
logging.basicConfig(format='%(levelname)s\t[%(asctime)s](%(filename)s)\t%(message)s',
level=logging.INFO,datefmt='%Y%m%d %H:%M:%S',filename=None)
# 0. Make sure min info is available
if not data_model and not data_model_path:
logging.error('A valid data model name or path to data model must be provided')
return
if not isinstance(source,tuple(properties.supported_sources)):
if not source:
logging.error('Data source is empty (first argument to read()) ')
return
elif not os.path.isfile(source):
logging.error('Can\'t reach data source {} as a file'.format(source))
logging.info('Supported in-memory data sources are {}'.format(",".join([str(x) for x in properties.supported_sources])))
return
# 1. Read schema(s) and get file format
logging.info("READING DATA MODEL SCHEMA FILE...")
schema = schemas.read_schema( schema_name = data_model, ext_schema_path = data_model_path)
if not schema:
return
if supp_section:
logging.info("READING SUPPLEMENTAL DATA MODEL SCHEMA FILE...")
supp_schema = schemas.read_schema( schema_name = supp_model, ext_schema_path = supp_model_path)
if not supp_schema:
return
else:
supp_schema = None
# 2. Read data
imodel = data_model if data_model else data_model_path
logging.info("EXTRACTING DATA FROM MODEL: {}".format(imodel))
data, valid = reader.read_model(source,schema, sections = sections, chunksize = chunksize, skiprows = skiprows)
# 3. Read additional format: on error, return what's been read so far...
# TODO: make sure we can mix meta_file_formats, e.g. core ('FIXED_WIDTH') with supp ('DELIMITED')
if supp_section:
i_suppmodel = supp_model if supp_model else supp_model_path
logging.info("EXTRACTING SUPPLEMENTAL DATA FROM MODEL: {}".format(i_suppmodel))
data, valid = reader.add_supplemental(data, supp_section, supp_schema, valid)
if isinstance(data,pd.io.parsers.TextFileReader):
logging.info('...RESTORING DATA PARSER')
data = pandas_TextParser_hdlr.restore(data.f,data.orig_options)
# 4. Create out data attributes
logging.info("CREATING OUTPUT DATA ATTRIBUTES FROM DATA MODEL(S)")
data_columns = [ x for x in data ] if isinstance(data,pd.DataFrame) else data.orig_options['names']
out_atts = schemas.df_schema(data_columns, schema, data_model, supp_section = supp_section, supp_schema = supp_schema, supp_model = supp_model )
# 5. Complete data validation
logging.info("VALIDATING DATA")
valid = validate.validate(data, out_atts, valid, data_model = data_model, data_model_path = data_model_path, supp_section = supp_section, supp_model = supp_model, supp_model_path = supp_model_path)
if isinstance(data,pd.io.parsers.TextFileReader):
logging.info('...RESTORING DATA PARSER')
data = pandas_TextParser_hdlr.restore(data.f,data.orig_options)
if out_path:
logging.info('WRITING DATA TO FILES IN: {}'.format(out_path))
cols = [ x for x in data ]
if isinstance(cols[0],tuple):
header = [":".join(x) for x in cols]
out_atts_json = { ":".join(x):out_atts.get(x) for x in out_atts.keys() }
else:
header = cols
out_atts_json = out_atts
data.to_csv(os.path.join(out_path,'data.csv'), header = header, encoding = 'utf-8',index = True, index_label='index')
valid.to_csv(os.path.join(out_path,'valid_mask.csv'), header = header, encoding = 'utf-8',index = True, index_label='index')
with open(os.path.join(out_path,'atts.json'),'w') as fileObj:
json.dump(out_atts_json,fileObj,indent=4)
return {'data':data,'atts':out_atts,'valid_mask':valid}
if __name__=='__main__':
kwargs = dict(arg.split('=') for arg in sys.argv[2:])
if 'sections' in kwargs.keys():
kwargs.update({ 'sections': [ x.strip() for x in kwargs.get('sections').split(",")] })
read(sys.argv[1],
**kwargs) # kwargs
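# Command-line sketch (argument values are illustrative): keyword arguments are
# parsed from 'key=value' pairs and 'sections' is split on commas, e.g.:
#   python read.py /path/to/source.txt data_model=imma1 sections=core,c1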
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 09:38:17 2019
Reads source data from a data model into a pandas DataFrame.
Optionally reads supplemental data from the same source (from a different
data model) and appends it to the output DataFrame.
Uses the meta_formats generic submodules ('delimited' and 'fixed_width') to
pre-format the data source and read either generic type of data model.
@author: iregon
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
# CAREFUL HERE:
# Note that in Python 3, the io.open function is an alias for the built-in open function.
# The built-in open function only supports the encoding argument in Python 3, not Python 2.
# https://docs.python.org/3.4/library/io.html?highlight=io
from io import StringIO
import sys
import pandas as pd
import numpy as np
import logging
from . import meta_formats
from .. import properties
if sys.version_info[0] >= 3:
py3 = True
else:
py3 = False
from io import BytesIO
# Get pandas dtype for time_stamps
pandas_timestamp_dtype = pd.to_datetime(pd.DataFrame(['20000101'])[0],format='%Y%m%d').dtypes
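# (pandas_timestamp_dtype evaluates to numpy's datetime64[ns], the dtype pandas assigns to parsed timestamps)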
def add_supplemental(data, supp_section, supp_schema, valid):
# Supplemental data needs to have no sectioning: cannot merge dfs with different level depths in the columns...
try:
supp_format = supp_schema['header'].get('format')
if supp_format in properties.supported_meta_file_formats:
TextParser = data if isinstance(data, pd.io.parsers.TextFileReader) else [data]
TextParser_valid = valid if isinstance(valid, pd.io.parsers.TextFileReader) else [valid]
chunksize = data.orig_options['chunksize'] if isinstance(TextParser,pd.io.parsers.TextFileReader) else None
iidx_offset = chunksize if chunksize else 0
output_buffer = StringIO() if py3 else BytesIO()
output_buffer_valid = StringIO() if py3 else BytesIO()
I_CHUNK = 0
for idata,ivalid in zip(TextParser,TextParser_valid):
date_columns = list(np.where(idata.dtypes == pandas_timestamp_dtype)[0])
dtypes = idata.dtypes.to_dict()
supp, supp_valid = read_model(idata[supp_section],supp_schema, idx_offset = I_CHUNK*iidx_offset )
supp_date_columns = list(np.where(supp.dtypes == pandas_timestamp_dtype)[0] + len(idata.columns) - 1 )
date_columns.extend(supp_date_columns)
date_columns = [ int(x) for x in date_columns ] # reader date parser won't take numpy.int64 from np.where as col index
if I_CHUNK == 0:
o_supp_dtypes = supp.dtypes.to_dict()
else:
o_supp_dtypes.update({ i:supp[i].dtype for i in supp if supp[i].dtype in properties.numpy_floats})
supp_elements = supp.columns.to_list()
supp_dtypes = {}
for element in supp_elements:
supp_dtypes[(supp_section,element)] = o_supp_dtypes.get(element)
dtypes.pop((supp_section,idata[supp_section].columns.to_list()[0]), None)
idata.drop(supp_section, axis = 1, inplace = True, level = 0)# Note: with a multiindex, this does not drop the labels from idata.columns
ivalid.drop(supp_section, axis = 1, inplace = True, level = 0)
supp.columns = [ (supp_section,x) for x in supp.columns ]
supp_valid.columns = [ (supp_section,x) for x in supp_valid.columns ]
dtypes.update(supp_dtypes)
supp.index = idata.index
supp_valid.index = ivalid.index
column_names = [ x for x in idata if x[0] != supp_section ]
column_names.extend([ x for x in supp ])
new_dtypes = { x:dtypes.get(x) for x in column_names }
idata = pd.concat([idata,supp],sort = False,axis=1)
ivalid = pd.concat([ivalid,supp_valid],sort = False,axis=1)
idata.to_csv(output_buffer,header=False, mode = 'a', encoding = 'utf-8',index = False)
ivalid.to_csv(output_buffer_valid,header=False, mode = 'a', encoding = 'utf-8',index = False)
I_CHUNK += 1
output_buffer.seek(0)
output_buffer_valid.seek(0)
for element in list(new_dtypes):
if new_dtypes.get(element) == pandas_timestamp_dtype:
new_dtypes[element] = 'object' # converted to datetime64[ns] only on re-read (via parse_dates); 'datetime' is not a valid dtype here
data = pd.read_csv(output_buffer,names = idata.columns, dtype = new_dtypes, chunksize = chunksize, parse_dates = date_columns )
valid = pd.read_csv(output_buffer_valid,names = ivalid.columns, chunksize = chunksize)
return data, valid
else:
logging.error('Supplemental file format not supported: {}'.format(supp_format))
logging.warning('Supplemental data not extracted from supplemental section')
return data, valid
except Exception as e:
logging.warning('Supplemental data not extracted from supplemental section', exc_info=True)
return data, valid
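# Design note: add_supplemental() merges chunks by streaming them to an in-memory
# CSV buffer and re-reading with pd.read_csv, which (when a chunksize is set)
# rebuilds a TextFileReader carrying the combined core and supplemental columns.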
def read_model(source,schema, sections = None, chunksize = None, skiprows = None, idx_offset = 0):
meta_format = schema['header'].get('format')
if meta_format not in properties.supported_meta_file_formats:
logging.error('File format read from input schema not supported: {}'.format(meta_format))
return
meta_reader = ".".join(['meta_formats',meta_format])
# 0. GET META FORMAT SUBCLASS ---------------------------------------------
if schema['header'].get('multiple_reports_per_line'): # must be truthy only when set to True, and must not break when missing or False
format_subclass = '1x'
else:
format_subclass = '11'
# 1. PARSE SCHEMA ---------------------------------------------------------
delimiter = schema['header'].get('delimiter')
parsing_order = schema['header'].get('parsing_order')
# 2. DEFINE OUTPUT --------------------------------------------------------
# 2.1 Sections to read
if not sections:
sections = [ x.get(y) for x in parsing_order for y in x ]
read_sections = [y for x in sections for y in x]
else:
read_sections = sections
multiindex = len(read_sections) > 1 or read_sections[0] != properties.dummy_level
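# e.g. a lone dummy-level section keeps flat names like 'lat', while several
# sections yield tuples like ('core', 'lat') ('core' and 'lat' are illustrative)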
if format_subclass == '1x':
return schema
# 2.2 Element names: same order as declared in the schema, which is the order in which the readers read them...
names = []
if schema['header'].get('date_parser'):
if multiindex:
names.extend([('_datetime','_datetime')])
else:
names.extend(['_datetime'])
for section in read_sections:
if multiindex:
names.extend([ (section,x) for x in schema['sections'][section]['elements'].keys() if not schema['sections'][section]['elements'][x].get('ignore') ])
else:
names.extend([ x for x in schema['sections'][section]['elements'].keys() if not schema['sections'][section]['elements'][x].get('ignore') ])
# 3. GET DATA FROM SOURCE (DF, FILE OR TEXTREADER):------------------------
# SIMPLE STRING PER REPORT/LINE
logging.info("Getting input data from source...")
source_function = getattr(meta_module, "_".join(['source', format_subclass]))
TextParser = source_function(source,schema, chunksize = chunksize, skiprows = skiprows, delimiter = delimiter)
# 4. DO THE ACTUAL READING
reader_function = meta_module.source_to_df
logging.info("Reading data...")
[output_buffer,valid_buffer,dtypes] = reader_function(TextParser, schema, read_sections = read_sections, idx_offset = idx_offset )
# 5. OUTPUT DATA:----------------------------------------------------------
# WE'LL NEED TO POSTPROCESS THIS WHEN READING MULTIPLE REPORTS PER LINE
output_buffer.seek(0)
valid_buffer.seek(0)
logging.info("Wrapping output....")
chunksize = TextParser.orig_options['chunksize'] if isinstance(TextParser,pd.io.parsers.TextFileReader) else None
logging.info('Data')
# 'datetime' is not a valid pandas dtype: datetimes are converted to datetime64[ns] only on re-read (via parse_dates); specifying any kind of 'datetime' here would fail
date_columns = [] # Needs the numeric index of the column: parse_dates does not seem to work with tuples....
for i,element in enumerate(list(dtypes)):
if dtypes.get(element) == 'datetime':
date_columns.append(i)
df_reader = pd.read_csv(output_buffer,names = names, chunksize = chunksize, dtype = dtypes, parse_dates = date_columns)
logging.info('Mask')
valid_reader = pd.read_csv(valid_buffer,names = names, chunksize = chunksize)
return df_reader, valid_reader
{
"0":"Increasing, then decreasing; atmopsheric pressure the same or higher than three hours ago",
"1":"Increasing, then steady; or increasing, then increasing more slowly - Atmospheric pressure now higher than three hours ago",
"2":"Increasing (steadily or unsteadily) - Atmospheric pressure now higher than three hours ago",
"3":"Decreasing or steady, then increasing; or increasing, then increasing more rapidly - Atmospheric pressure now higher than three hours ago",
"4":"Steady; atmopsheric pressure the same as three hours ago",
"5":"Decreasing, then increasing; atmospheric pressure the same ot lower than three hours ago",
"6":"Decreasing, then steady; or decreasing, then decreasing more slowly - Atmospheric pressure now lower than three hours ago",
"7":"Decreasing (steadily or unsteadily) - Atmospheric pressure now lower than three hours ago",
"8":"Steady or increasing, then decreasing; or decreasing, then decreasing more rapidly - Atmospheric pressure now lower than three hours ago"
}
{
"0":"Netherlands",
"1":"Norway",
"2":"US",
"3":"UK",
"4":"France",
"5":"Denmark",
"6":"Italy",
"7":"India",
"8":"Hong Kong",
"9":"New Zealand",
"00":"Netherlands",
"01":"Norway",
"02":"US",
"03":"UK",
"04":"France",
"05":"Denmark",
"06":"Italy",
"07":"India",
"08":"Hong Kong",
"09":"New Zealand",
"10":"Ireland",
"11":"Philippines",
"12":"Egypt",
"13":"Canada",
"14":"Belgium",
"15":"South Africa",
"16":"Australia",
"17":"Japan",
"18":"Pakistan",
"19":"Argentina",
"20":"Sweden",
"21":"Federal Republic of Germany",
"22":"Iceland",
"23":"Israel",
"24":"Malaysia",
"25":"USSR",
"26":"Finland",
"27":"Rep. of Korea",
"28":"New Caledonia",
"29":"Portugal",
"30":"Spain",
"31":"Thailand",
"32":"Yugoslavia",
"33":"Poland",
"34":"Brazil",
"35":"Singapore",
"36":"Kenya",
"37":"Tanzania",
"38":"Uganda",
"39":"Mexico",
"40":"German Democractic Republic",
"AF":"Afghanistan",
"AL":"Albania",
"DZ":"Algeria",
"AD":"Andorra",
"AO":"Angola",
"AG":"Antigua and Barbuda",
"AR":"Argentina",
"AM":"Armenia",
"AW":"Aruba",
"AU":"Australia",
"AT":"Austria",
"AZ":"Azerbaijan",
"BS":"Bahamas",
"BH":"Bahrain",
"BD":"Bangladesh",
"BB":"Barbados",
"BY":"Belarus",
"BE":"Belgium",
"BZ":"Belize",
"BJ":"Benin",
"BT":"Bhutan",
"BO":"Bolivia",
"BA":"Bosnia and Herzegovina",
"BW":"Botswana",
"BR":"Brazil",
"BN":"Brunei Darussalam",
"BG":"Bulgaria",
"BF":"Burkina Faso",
"BI":"Burundi",
"KH":"Cambodia",
"CM":"Cameroon",
"CA":"Canada",
"CV":"Cape Verde",
"CF":"Central African Republic",
"TD":"Chad",
"CL":"Chile",
"CN":"China",
"CO":"Columbia",
"KM":"Comoros",
"CG":"Congo",
"CD":"The Democratic Republic of the Congo",
"CR":"Costa Rica",
"CI":"Cote d'Ivoire",
"HR":"Croatia",
"CU":"Cuba",
"CY":"Cyprus",
"CZ":"Czech Republic",
"DK":"Denmark",
"DJ":"Djibouti",
"DM":"Dominica",
"DO":"Dominican Republic",
"EC":"Ecuador",
"EG":"Egypt",
"SV":"El Salvador",
"GQ":"Equatorial Guinea",
"ER":"Eritrea",
"EE":"Estonia",
"ET":"Ethiopia",
"FJ":"Fiji",
"FI":"Finland",
"FR":"France",
"GA":"Gabon",
"GM":"Gambia",
"GE":"Georgia",
"DE":"Germany",
"GH":"Ghana",
"GR":"Greece",
"GD":"Grenada",
"GT":"Guatemala",
"GN":"Guinea",
"GW":"Guinea Bissau",
"GY":"Guyana",
"HT":"Haiti",
"HN":"Honduras",
"HK":"Hong Kong",
"HU":"Hungary",
"IS":"Iceland",
"IN":"India",
"ID":"Indonesia",
"IR":"Islamic Republic of Iran",
"IQ":"Iraq",
"IE":"Ireland",
"IL":"Israel",
"IT":"Italy",
"JM":"Jamaica",
"JP":"Japan",
"JO":"Jordan",
"KZ":"Kazakhstan",
"KE":"Kenya",
"KI":"Kiribati",
"KR":"Republic of Korea",
"KW":"Kuwait",
"KG":"Kyrgyzstan",
"LA":"Lao Peoples Democratic Republic",
"LV":"Latvia",
"LB":"Lebanon",
"LS":"Lesotho",
"LR":"Liberia",
"LY":"Libyan Arab Jamahiriya",
"LT":"Lithuania",
"LU":"Luxembourg",
"MK":"The Former Yugoslav Republic of Macedonia",
"MG":"Madagascar",
"MW":"Malawi",
"MY":"Malaysia",
"MV":"Maldives",
"ML":"Mali",
"MT":"Malta",
"MH":"Marshal Islands",
"MR":"Mauritania",
"MU":"Mauritius",
"MX":"Mexico",
"FM":"Federated States of Micronesia",
"MD":"Republic of Moldova",
"MC":"Monaco",
"MN":"Mongolia",
"MA":"Morocco",
"MZ":"Mozambique",
"MM":"Myanmar",
"NA":"Namibia",
"NR":"Nauru",
"NP":"Nepal",
"NL":"Netherlands",
"AN":"Netherlands Antilles",
"NZ":"New Zealand",
"NI":"Nicaragua",
"NE":"Niger",
"NG":"Nigeria",
"KP":"Democratic People's Republic of Korea",
"NO":"Norway",
"OM":"Oman",
"PK":"Pakistan",
"PW":"Palau",
"PS":"Occupied Palestinian Territory",
"PA":"Panama",
"PG":"Papua New Guinea",
"PY":"Paraguay",
"PE":"Peru",
"PH":"Philippines",
"PL":"Poland",
"PT":"Portugal",
"QA":"Qatar",
"RO":"Romania",
"RU":"Russian Federation",
"RW":"Rwanda",
"KN":"Saint Kitts and Nevis",
"LC":"Saint Lucia",
"VC":"Saint Vincent and the Grenadines",
"WS":"Samoa",
"SM":"San Marino",
"ST":"Sao Tome And Principe",
"SA":"Saudi Arabia",
"SN":"Senegal",
"CS":"Serbia and Montenegro",
"SC":"Seychelles",
"SL":"Sierra Leone",
"SG":"Singapore",
"SK":"Slovakia",
"SI":"Slovenia",
"SB":"Solomon Islands",
"SO":"Somalia",
"ZA":"South Africa",
"ES":"Spain",
"LK":"Sri Lanka",
"SD":"Sudan",
"SR":"Surinam",
"SZ":"Swaziland",
"SE":"Sweden",
"CH":"Switzerland",
"SY":"Syrian Arab Republic",
"TJ":"Tajikistan",
"TZ":"United Republic of Tanzania",
"TH":"Thailand",
"TL":"Timor - Leste",
"TG":"Togo",
"TO":"Tonga",
"TT":"Trinidad and Tobago",
"TN":"Tunisia",
"TR":"Turkey",
"TM":"Turkmenistan",
"TV":"Tuvala",
"UG":"Uganda",
"UA":"Ukraine",
"AE":"United Arab Emirates",
"GB":"United Kingdom",
"US":"United States",
"UY":"Uruguay",
"UZ":"Uzbekistan",
"VU":"Vanuatu",
"VA":"Vatican City",
"VE":"Venezuela",
"VN":"Viet Nam",
"YE":"Yemen",
"ZM":"Zambia",
"ZW":"Zimbabwe",
"DD":"East Germany",
"CS":"Serbia and Montenegro",
"RU":"Soviet Union",
"NC":"New Caledonia",
"ZY":"None (self recruited)",
"ZZ":"None (third party support)",
"TW":"Taiwan (Province of China)",
"SU":"Soviet Union",
"YU":"Yugoslavia",
"XX":"Multiple recruitment",
"EU":"European Union"
}
{
"0":"No Cirrus, Cirrocumulus or Cirrostratus",
"1":"Cirrus in the form of filaments, strands or hooks, not progressively invading the sky",
"2":"Dense Cirrus, in patches or entangled sheaves, which usually do not increase and sometimes seem to be the remains of the upper part of a Cumulonimbus, or Cirrus with sproutings in the form of small turrets or battlements, or Cirrus having the appearance of cumuliform tufts",
"3":"Dense Cirrus, often in the form of an anvil, being the remains of the upper parts of Cumulonimbus",
"4":"Cirrus in the form of hooks or of filaments, or both, progressively invading the sky; they generally become denser as a whole",
"5":"Cirrus (often in bands converging towards one point or two opposite points of the horizon) and Cirrostratus, or Cirrostratus alone; in either case, they are progressively invading the sky, and generally growing denser as a whole, but the continuous veil does not reach 45 degrees above the horizon.",
"6":"Cirrus (often in bands converging towards one point or two opposite points of the horizon) and Cirrostratus, or Cirrostratus alone; in either case, they are progressively invading the sky, and generally growing denser as a whole; the continuous veil extends more than 45 degrees above the horizon, without the sky being totally covered",
"7":"Veil of Cirrostratus covering the celestial dome",
"8":"Cirrostratus not progressively invading the sky and not completely covering the celestial dome",
"9":"Cirrocumulus alone, or Cirrocumulus accompanied by Cirrus or Cirrostratus, or both, but Cirrocumulus is predominant",
"10":"Cirrus, Cirrocumulus and Cirrostratus invisible owing to darkness, fog, blowing dust or sand, or other similar phenomena, or more often because of the presence of a continuous layer of lower clouds"
}
{
"0":"No Cirrus, Cirrocumulus or Cirrostratus",
"1":"Cirrus in the form of filaments, strands or hooks, not progressively invading the sky",
"2":"Dense Cirrus, in patches or entangled sheaves, which usually do not increase and sometimes seem to be the remains of the upper part of a Cumulonimbus, or Cirrus with sproutings in the form of small turrets or battlements, or Cirrus having the appearance of cumuliform tufts",
"3":"Dense Cirrus, often in the form of an anvil, being the remains of the upper parts of Cumulonimbus",
"4":"Cirrus in the form of hooks or of filaments, or both, progressively invading the sky; they generally become denser as a whole",
"5":"Cirrus (often in bands converging towards one point or two opposite points of the horizon) and Cirrostratus, or Cirrostratus alone; in either case, they are progressively invading the sky, and generally growing denser as a whole, but the continuous veil does not reach 45 degrees above the horizon.",
"6":"Cirrus (often in bands converging towards one point or two opposite points of the horizon) and Cirrostratus, or Cirrostratus alone; in either case, they are progressively invading the sky, and generally growing denser as a whole; the continuous veil extends more than 45 degrees above the horizon, without the sky being totally covered",
"7":"Veil of Cirrostratus covering the celestial dome",
"8":"Cirrostratus not progressively invading the sky and not completely covering the celestial dome",
"9":"Cirrocumulus alone, or Cirrocumulus accompanied by Cirrus or Cirrostratus, or both, but Cirrocumulus is predominant",
"10":"Cirrus, Cirrocumulus and Cirrostratus invisible owing to darkness, fog, blowing dust or sand, or other similar phenomena, or more often because of the presence of a continuous layer of lower clouds"
}
{
"0":"No Altocumulus, Altostratus or Nimbostratus",
"1":"Altostratus, the greater part of which is semitransparent; through this part the sun or moon may be weakly visible, as through ground glass",
"2":"Altostratus, the greater part of which is sufficiently dense to hide the sun or moon, or Nimbostratus",
"3":"Altocumulus, the greater part of which is semitransparent; the various elements of the cloud change only slowly and are all at a single level",
"4":"Patches (often in the form of almonds or fish) of Altocumulus, the greater part of which is semi-transparent; the clouds occur at one or more levels and the elements are continually changing in appearance",
"5":"Altocumulus clouds generally thicken as a whole; Semi-transparent Altocumulus in bands, or Altocumulus, in one or more fairly continuous layer (semi-transparent or opaque), progresively invading the sky; these Altocumulus clouds generally thicken as a whole",
"6":"Altocumulus resulting from the spreading out of Cumulus (or Cumulonimbus)",
"7":"Altocumulus in two or more layers, usually opaque in places, and not progressively invading the sky; or opaque layer of Altocumulus, not progressively invading the sky; or Altocumulus together with Altostratus or Nimbostratus",
"8":"Altocumulus with sproutings in the form of small towers or battlements, or Altocumulus having the appearance of cumuliform tufts",
"9":"Altocumulus of a chaotic sky, generally at several levels",
"10":"Altocumulus, Altostratus and Nimbostratus invisible owing to darkness, fog, blowing dust or sand, or other similar phenomena, or more often because of the presence of a continuous layer of lower clouds"
}
{
"0":"36-point compass",
"1":"32-point compass",
"2":"16 of 36-point compass",
"3":"16 of 32-point compass",
"4":"8-point compass",
"5":"360-point compass",
"6":"high resolution data (e.g., tenths of degrees)"
}
{
"0":"measured",
"1":"computed",
"2":"iced measured",
"3":"iced computed"
}
{
"0":"0",
"1":"45",
"2":"90",
"3":"135",
"4":"180",
"5":"225",
"6":"270",
"7":"315",
"8":"360",
"9":"NULL"
}
{
"0":"0",
"1":"50",
"2":"100",
"3":"200",
"4":"300",
"5":"600",
"6":"1000",
"7":"1500",
"8":"2000",
"9":"2500",
"10":"NULL"
}
{
"0":"estimated",
"1":"measured"
}
{
"0":"ID present, but unknown type",
"1":"ship, Ocean Station Vessel (OSV), or ice station callsign",
"2":"generic ID (e.g., SHIP, BUOY, RIGG, PLAT)",
"3":"WMO 5-digit buoy number",
"4":"other buoy number (e.g., Argos or national buoy number)",
"5":"Coastal-Marine Automated Network (C-MAN) ID (assigned by US NDBC or other organizations)",
"6":"station name or number",
"7":"oceanographic platform/cruise number",
"8":"fishing vessel psuedo-ID",
"9":"national ship number",
"10":"composite information from early ship data",
"11":"7-digit buoy ID (proposed)"
}
{
"0":"version 0 (2010, http://icoads.noaa.gov/e-doc/imma/R2.5-imma.pdf)",
"1":"version 1 (2016)"
}
{
"0":"tenths degC",
"1":"half degC",
"2":"whole degC",
"3":"whole or tenths degC (mixed precision among temperature fields)",
"4":"tenths degF",
"5":"half degF",
"6":"whole degF",
"7":"whole or tenths degF (mixed precision among temperature fields)",
"8":"high resolution data (e.g., hundredths degC)",
"9":"other"
}
{
"0":"degrees and tenths",
"1":"whole degrees",
"2":"mixed precision",
"3":"interpolated",
"4":"degrees and minutes",
"5":"high resolution data (e.g., degrees to seconds)",
"6":"other"
}
{
"0": "",
"1": "",
"2": "",
"3": "",
"4": "",
"5": "",
"6": "",
"7": "",
"8": "",
"9": "",
"10": "",
"11": "",
"12": "",
"13": "",
"14": "",
"15": "",
"16": "",
"17": "",
"18": "",
"19": "",
"20": "",
"21": "",
"22": "",
"23": "",
"24": "",
"25": "",
"26": "",
"27": "",
"28": "",
"29": "",
"30": "",
"31": "",
"32": "",
"33": "",
"34": "",
"35": "",
"36": "",
"37": "",
"38": "",
"39": "",
"40": "",
"41": "",
"42": "",
"43": "",
"44": "",
"45": "",
"46": "",
"47": "",
"48": "",
"49": "",
"50": "",
"51": "",
"52": "",
"53": "",
"54": "",
"55": "",
"56": "",
"57": "",
"58": "",
"59": "",
"60": "",
"61": "",
"62": "",
"63": "",
"64": "",
"65": "",
"66": "",
"67": "",
"68": "",
"69": "",
"70": "",
"71": "",
"72": "",
"73": "",
"74": "",
"75": "",
"76": "",
"77": "",
"78": "",
"79": "",
"80": "",
"81": "",
"82": "",
"83": "",
"84": "",
"85": "",
"86": "",
"87": "",
"88": "",
"89": "",
"90": "",
"91": "",
"92": "",
"93": "",
"94": "",
"95": "",
"96": "",
"97": "",
"98": "",
"99": ""
}
{
"0":"waves from 0 degrees",
"1":"waves from 10",
"2":"waves from 20 degrees",
"3":"waves from 30 degrees",
"4":"waves from 40 degrees",
"5":"waves from 50 degrees",
"6":"waves from 60 degrees",
"7":"waves from 70 degrees",
"8":"waves from 80 degrees",
"9":"waves from 90 degrees",
"10":"waves from 100 degrees",
"11":"waves from 110 degrees",
"12":"waves from 120 degrees",
"13":"waves from 130 degrees",
"14":"waves from 140 degrees",
"15":"waves from 150 degrees",
"16":"waves from 160 degrees",
"17":"waves from 170 degrees",
"18":"waves from 180 degrees",
"19":"waves from 190 degrees",
"20":"waves from 200 degrees",
"21":"waves from 210 degrees",
"22":"waves from 220 degrees",
"23":"waves from 230 degrees",
"24":"waves from 240 degrees",
"25":"waves from 250 degrees",
"26":"waves from 260 degrees",
"27":"waves from 270 degrees",
"28":"waves from 280 degrees",
"29":"waves from 290 degrees",
"30":"waves from 300 degrees",
"31":"waves from 310 degrees",
"32":"waves from 320 degrees",
"33":"waves from 330 degrees",
"34":"waves from 340 degrees",
"35":"waves from 350 degrees",
"36":"waves from 360 degrees",
"37":"waves confused, direction indeterminate (WH ≤ 4.75 m)",
"38":"waves confused, direction indeterminate (WH > 4.75 m; or irrespective of wave height, corresponding to 99 in WMO Code 0877"
}
{
"0":"BU",
"1":"C",
"2":"TT",
"3":"HC",
"4":"HT",
"5":"RAD",
"6":"BTT",
"7":"OT",
"9":"NULL",
"10":"NULL",
"11":"NULL",
"12":"NULL"
}
{
"0":"nearest whole hour",
"1":"hour to tenths",
"2":"hour plus minutes",
"3":"high resolution (e.g., hour to hundredths)",
"4":"Daily (assumed local solar midday)"
}