Commit d8103fdb authored by Woude, Auke van der's avatar Woude, Auke van der
Browse files

Staging commit before paint by number

parent aea4fdc0
......@@ -68,7 +68,21 @@ class EmisModel(object):
self.categories = categories
self.paramdict = rc.read(dacycle.dasystem['paramdict'])
logging.debug('Emismodel has been set-up')
# --- Settings
year = self.startdate.year
ndays = (dtm.datetime(year, 12, 31) - dtm.datetime(year - 1, 12, 31)).days
self.ndays = ndays
times = np.array([dtm.datetime(year, 1, 1, 0, 0, 0) + dtm.timedelta(hours = i) for i in range(ndays*24)])
self.times = times
datapath = '/projects/0/ctdas/RINGO/EmissionInventories/DynEmMod_TimeProfiles'
infilename = '{}/RINGO_ECMWF_DailyMeteo{}.nc'.format(datapath, year)
with nc.Dataset(infilename) as infile:
self.T2myr = infile.variables['T2m' ][:ndays] - 273.15 # T2m K --> oC
self.U10 = infile.variables['U10m' ][:ndays] # u m/s
self.Rinyr = infile.variables['Rsin' ][:ndays] / (24. * 1.0e4) # Radiation (?) J/m2/day --> J/cm2/hr
self.T2myr_av = infile.variables['T2m_avg' ][:ndays] - 273.15 #
self.U10_av = infile.variables['U10m_avg'][:ndays]
self.Rinyr_av = infile.variables['Rsin_avg'][:ndays]
def find_in_state(self, params, station, cat, name=None, return_index=False):
......@@ -77,7 +91,6 @@ class EmisModel(object):
key = station + '.' + cat
else:
key = station + '.' + cat + '.' + name
#print('Looking for {}'.format(key))
if key in self.paramdict:
i_in_state = int(self.paramdict[key])
......@@ -86,7 +99,7 @@ class EmisModel(object):
value = params[i_in_state]
return value
elif return_index: return False
else: return 1
return 1
def get_emis(self, dacycle, samples, indices, do_pseudo):
"""set up emission information for pseudo-obs (do_pseudo=1) and ensemble runs (do_pseudo=0)"""
......@@ -106,7 +119,7 @@ class EmisModel(object):
logging.debug('Succesfully wrote prior emission files')
def get_yearly_emissions(self, params):
yremis = np.zeros((len(self.categories), len(self.countries), len(self.species)), dtype=np.float32)
yremis = np.zeros((len(self.categories), len(self.countries), len(self.species)))
for i_country, country in enumerate(self.countries):
for i_cat, (cat, values) in enumerate(self.categories.items()):
emission_factor = values['emission_factors']
......@@ -144,7 +157,10 @@ class EmisModel(object):
proxy_category_names = proxy['emis_category_names'][:]
proxy_category_names = [b''.join(category_name).strip().decode() for category_name in proxy_category_names]
spatial_distributions = np.zeros((self.nrcat, len(self.countries), *self.area.shape), dtype=np.float32)
proxy_country_names = proxy['country_names'][:]
proxy_country_names = [b''.join(country_name).strip().decode() for country_name in proxy_country_names]
spatial_distributions = np.zeros((self.nrcat, len(self.countries), self.area.shape[0], self.area.shape[1]))
for i, category in enumerate(self.categories):
spatial_name = self.categories[category]['spatial']
cat_index = proxy_category_names.index(spatial_name)
......@@ -158,6 +174,7 @@ class EmisModel(object):
spatial_emissions = spatial_distributions[:, :, None, :, :] * yremis[:, :, :, None, None] # cat, country, species, lat, lon
# Sum over the countries to overlay them.
spatial_emissions = spatial_emissions.sum(axis=1) # Indices: category, species, lat, lon
self.spatial_emissions = spatial_emissions
emissions = []
for i, category in enumerate(self.categories):
......@@ -167,8 +184,7 @@ class EmisModel(object):
temporal_profile = time_profiles[temporal_name]
emissions.append(spatial_emissions[cat_index, :, :, :] * temporal_profile[None, :, :, :])
self.emissions = emissions
emissions = np.array(emissions)
emissions = np.asarray(emissions)
emissions = np.swapaxes(emissions, 0,1) # Indices: species, category, time, lat, lon
# Recalculate spatial emissions to umol/sec/m2
......@@ -203,23 +219,8 @@ class EmisModel(object):
Returns:
dict of np.arrays:
The temporal profiles (one for each hour) for each gridcel and timestep """
# --- Settings
year = self.startdate.year
ndays = (dtm.datetime(year, 12, 31) - dtm.datetime(year - 1, 12, 31)).days
self.ndays = ndays
times = np.array([dtm.datetime(year, 1, 1, 0, 0, 0) + dtm.timedelta(hours = i) for i in range(ndays*24)])
self.times = times
datapath = '/projects/0/ctdas/RINGO/EmissionInventories/DynEmMod_TimeProfiles'
infilename = '{}/RINGO_ECMWF_DailyMeteo{}.nc'.format(datapath, year)
with nc.Dataset(infilename) as infile:
T2myr = infile.variables['T2m' ][:ndays] - 273.15 # T2m K --> oC
U10 = infile.variables['U10m' ][:ndays] # u m/s
Rinyr = infile.variables['Rsin' ][:ndays] / (24. * 1.0e4) # Radiation (?) J/m2/day --> J/cm2/hr
T2myr_av = infile.variables['T2m_avg' ][:ndays] - 273.15 #
U10_av = infile.variables['U10m_avg'][:ndays]
Rinyr_av = infile.variables['Rsin_avg'][:ndays]
ndays, nlat, nlon = T2myr.shape
ndays, nlat, nlon = self.T2myr.shape
# --- calculate degree day sum and corresponding time profiles
fr_cons = 0.2 # constant emission for consumers (cooking, warm water)
fr_gls = 0. # no constant emission for glasshouses
......@@ -238,11 +239,11 @@ class EmisModel(object):
HDC_gas = np.empty((ndays, nlat, nlon)) # Heating Demand Category 4 (Renewable activity)
for i in range(ndays):
HDC_cons[i] = np.fmax(T0_cons - T2myr[i, :, :], 0) # Daily demand for consumers / household heating
HDC_gls [i] = np.fmax(T0_gls - T2myr[i, :, :], 0) # Daily demand for glasshouse heating
HDC_coal[i] = np.fmax(T0_coal - T2myr[i, :, :], 0) # Daily demand for coal-fired powerplant productvity
dum1 = np.fmax(U0_gas - U10[i, :, :], 0) # Wind energy
dum2 = np.fmax(R0_gas - Rinyr[i, :, :], 0) # Solar energy
HDC_cons[i] = np.fmax(T0_cons - self.T2myr[i, :, :], 0) # Daily demand for consumers / household heating
HDC_gls [i] = np.fmax(T0_gls - self.T2myr[i, :, :], 0) # Daily demand for glasshouse heating
HDC_coal[i] = np.fmax(T0_coal - self.T2myr[i, :, :], 0) # Daily demand for coal-fired powerplant productvity
dum1 = np.fmax(U0_gas - self.U10[i, :, :], 0) # Wind energy
dum2 = np.fmax(R0_gas - self.Rinyr[i, :, :], 0) # Solar energy
HDC_gas [i] = dum1 * dum2
HC_cons = HDC_cons.mean(axis=0)
......@@ -380,7 +381,7 @@ class EmisModel(object):
datepoint = self.startdate
enddate = self.enddate
t_ind = np.ones((indices.stop, nlat, nlon), dtype=np.float32)
t_ind = np.ones((nt, nlat, nlon))
# Make container (dict) containing all time profiles
time_profiles = {'t_gas': t_gas[indices].astype(np.float32),
't_coal': t_coal[indices].astype(np.float32),
......
This diff is collapsed.
......@@ -10,7 +10,7 @@ restartdir : ${basepath}/restart/
obs.input.id : obsfiles.csv
! number of observation sites included; number of species included and to be used in inversion
obs.input.nr : 100
obs.spec.nr : 1
obs.spec.nr : 2
obs.dir : obsfiles${strategy}
do.co : 0
do.c14integrated: 0
......@@ -26,16 +26,6 @@ nffparameters : 24
nbioparameters : 6
nparameters : ${nffparameters} + ${nbioparameters}
file.pft : /projects/0/ctdas/RINGO/inversions/Data/SiBPFTs.nc
! Settings for the domain:
domain.lon.start : -5.95
domain.lon.end : 19.95
domain.lat.start : 43.025
domain.lat.end : 54.975
domain.lon.num : 260
domain.lat.num : 240
paramdict : ${datadir}/paramdict.rc
! set fixed seed for random number generator, or use 0 if you want to use any random seed
random.seed : 4385
......@@ -66,7 +56,7 @@ run.obsflag : 0
! back trajectory time of STILT footprints, also applied to OPS (in hours)
run.backtime : 24
biosphere_fluxdir : /projects/0/ctdas/RINGO/inversions/Data/SiBfile.nc
biosphere_fluxdir : /projects/0/ctdas/RINGO/EmissionInventories/True/RINGO_ORCHIDEE_GPP_TER_dC14_old.nc
files_startdate : 2016-01-01 00:00:00
! choose propagation scheme:
......
! CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
! Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
! updates of the code. See also: http://www.carbontracker.eu.
!
! This program is free software: you can redistribute it and/or modify it under the
! terms of the GNU General Public License as published by the Free Software Foundation,
! version 3. This program is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
! FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with this
! program. If not, see <http://www.gnu.org/licenses/>.
! author: Wouter Peters
!
! This is a blueprint for an rc-file used in CTDAS. Feel free to modify it, and please go to the main webpage for further documentation.
!
! Note that rc-files have the convention that commented lines start with an exclamation mark (!), while special lines start with a hashtag (#).
!
! When running the script start_ctdas.sh, this .rc file will be copied to your run directory, and some items will be replaced for you.
! The result will be a nearly ready-to-go rc-file for your assimilation job. The entries and their meaning are explained by the comments below.
!
!
! HISTORY:
!
! Created on August 20th, 2013 by Wouter Peters
!
!
! The time for which to start and end the data assimilation experiment in format YYYY-MM-DD HH:MM:SS
time.start : 2016-01-04 00:00:00
time.finish : 2016-01-20 00:00:00
time.fxstart : 2016-01-01 00:00:00
! Whether to restart the CTDAS system from a previous cycle, or to start the sequence fresh. Valid entries are T/F/True/False/TRUE/FALSE
time.restart : False
! The length of a cycle is given in days, such that the integer 7 denotes the typically used weekly cycle. Valid entries are integers > 1
time.cycle : 2
! The number of cycles of lag to use for a smoother version of CTDAS. CarbonTracker CO2 typically uses 5 weeks of lag. Valid entries are integers > 0
time.nlag : 1
! The directory under which the code, input, and output will be stored. This is the base directory for a run. The final
! folder name in this path will be replaced through the start_ctdas.sh script by a user-specified folder name. DO NOT REPLACE
dir.da_run : /projects/0/ctdas/awoude/develop
! The resources used to complete the data assimilation experiment. This depends on your computing platform.
! The number of cycles per job denotes how many cycles should be completed before starting a new process or job, this
! allows you to complete many cycles before resubmitting a job to the queue and having to wait again for resources.
! Valid entries are integers > 0
da.resources.ncycles_per_job : 1
! The ntasks specifies the number of threads to use for the MPI part of the code, if relevant. Note that the CTDAS code
! itself is not parallelized and the python code underlying CTDAS does not use multiple processors. The chosen observation
! operator though might use many processors, like TM5. Valid entries are integers > 0
da.resources.ntasks : 1
! This specifies the amount of wall-clock time to request for each job. Its value depends on your computing platform and might take
! any form appropriate for your system. Typically, HPC queueing systems allow you a certain number of hours of usage before
! your job is killed, and you are expected to finalize and submit a next job before that time. Valid entries are strings.
da.resources.ntime : 01:30:00
! The resource settings above will cause the creation of a job file in which 1 cycle will be run using 1 task,
! with a requested wall-clock duration of 01:30:00
!
! Info on the DA system used, this depends on your application of CTDAS and might refer to for instance CO2, or CH4 optimizations.
!
da.system : CarbonTracker
! The specific settings for your system are read from a separate rc-file, which points to the data directories, observations, etc
da.system.rc : da/ccffdas/stilt-ops_urbanall.rc
! This flag should probably be moved to the da.system.rc file. It denotes which type of filtering to use in the optimizer
da.system.localization : None
! Info on the observation operator to be used, these keys help to identify the settings for the transport model in this case
da.obsoperator : STILT
!
! The TM5 transport model is controlled by an rc-file as well. The value below refers to the configuration of the TM5 model to
! be used as observation operator in this experiment.
!
da.obsoperator.home : /projects/0/ctdas/awoude/develop/exec/da/rc/stilt/
da.obsoperator.rc : ${da.obsoperator.home}/stilt.rc
!
! The number of ensemble members used in the experiment. Valid entries are integers > 2
!
da.optimizer.nmembers : 2
! Finally, info on the archive task, if any. Archive tasks are run after each cycle to ensure that the results of each cycle are
! preserved, even if you run on scratch space or a temporary disk. Since an experiment can take multiple weeks to complete, moving
! your results out of the way, or backing them up, is usually a good idea. Note that the tasks are commented and need to be uncommented
! to use this feature.
! The following key identifies that two archive tasks will be executed, one called 'alldata' and the other 'resultsonly'.
!task.rsync : alldata onlyresults
! The specifics for the first task.
! 1> Which source directories to back up. Valid entry is a list of folders separated by spaces
! 2> Which destination directory to use. Valid entries are a folder name, or server and folder name in rsync format as below
! 3> Which flags to add to the rsync command
! The settings below will result in an rsync command that looks like:
!
! rsync -auv -e ssh ${dir.da_run} you@yourserver.com:/yourfolder/
!
!task.rsync.alldata.sourcedirs : ${dir.da_run}
!task.rsync.alldata.destinationdir : you@yourserver.com:/yourfolder/
!task.rsync.alldata.flags g -auv -e ssh
! Repeated for rsync task 2, note that we only back up the analysis and output dirs here
!task.rsync.onlyresults.sourcedirs : ${dir.da_run}/analysis ${dir.da_run}/output
!task.rsync.onlyresults.destinationdir : you@yourserver.com:/yourfolder/
!task.rsync.onlyresults.flags : -auv -e ssh
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment