Commit 23d2a72f authored by Hans Roelofsen

updates to dk_advanced, using btA

parent da8b2c49
@@ -10,11 +10,11 @@ nbt_src: path to downscaled beheertypenkaart (bt) (nominal raster)
Method:
1. total pixels per bt per area
-2. reduce NDFF observations to 1 observation per nbt pixel using forward affine transform
-3. couple each NDFF observation to 1 area
-4. couple each NDFF observation to 1 bt
-5. rework tables to output
+2. Average N-Deposition in mol/(ha*year) per area
+3. reduce NDFF observations to 1 observation per nbt pixel using forward affine transform
+4. couple each NDFF observation to 1 area
+5. couple each NDFF observation to 1 bt
+6. rework tables to output
<SPECIES NAME>
AreaName|BT1_pxl-count|BT1_ndff-pxl-count|BT2_pxl-count|BT2_ndff-pxl-count|...|BTn_pxl-count|BTn_ndff-pxl-count
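An aside on the "forward affine transform" in step 3: inverting the raster's affine maps map coordinates to pixel indices, which is presumably what hlp.xy_2_colrow (diffed further down) does. A minimal sketch, assuming a 25 m nbt grid anchored at (0, 625000):

    from affine import Affine

    transform = Affine(25.0, 0.0, 0.0, 0.0, -25.0, 625000.0)  # assumed nbt geotransform
    x, y = 140350.0, 453125.0  # illustrative RD Nieuw coordinates of an NDFF centroid
    col, row = ~transform * (x, y)  # the inverted affine maps (x, y) into pixel space
    print(int(col), int(row))  # 5614 6875: centroids in the same pixel share this key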
@@ -22,6 +22,7 @@ By: Hans Roelofsen, WEnR, 29 June 2020
"""
import argparse
+import numpy as np
import pandas as pd
import geopandas as gp
import rasterio as rio
@@ -31,35 +32,68 @@ from mnp import helpers as hlp
parser = argparse.ArgumentParser()
parser.add_argument('species', type=str, help='species name')
-parser.add_argument('bt', type=str, help='beheertype code Nxx.yy')
+parser.add_argument('--bt', type=str, help='beheertype code Nxx.yy')
+parser.add_argument('--shp', action='store_true', default=False, help='write shapefile?')
# Parse arguments
-#args = parser.parse_args()
-#sp = hlp.Species(args.species)
-sp = hlp.Species('Tapuit')
-#bt = args.bt
+args = parser.parse_args()
+sp = hlp.Species(args.species)
+#sp = hlp.Species('Tapuit')
+bt = args.bt
+shp = args.shp
+# TODO: 20200707: implementing optional arguments in processflow
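With parsing now enabled, a typical invocation would look like this (script name hypothetical):

    python dk_advanced.py Tapuit --bt N12.02 --shp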
# Path to beheertypenkaart
-bt_tiff = r'c:\apps\temp_geodata\nbt\nbt_20190730_111603.tif'
+bt_tiff = r'c:\Users\roelo008\OneDrive - WageningenUR\a_projects\nBT\nbt_20190730_111603.tif'
bt_kaart = rio.open(bt_tiff, mode='r+')
+# Path to NDepkaart
+ndep_src = r'c:\Users\roelo008\OneDrive - WageningenUR\a_projects\MNP\draakgracht\depo_ntot_2020\depo_ntot_BBR2020_1902.asc'
+ndep_kaart = rio.open(ndep_src)
# Table with pixel values, beheertypecodes etc
nbts = hlp.nbts()
-pxl_2_btB = dict(zip(nbts.pxl, nbts.bt_B))
-btA_2_btB = dict(zip(nbts.bt_A, nbts.bt_B))
-'''1. total BT pixels per area'''
-areas = hlp.gen_squares(origin_x=0, origin_y=625000, ncol=28, nrow=33, size=10000)
-bt_stats_prelim = rast.zonal_stats(vectors=areas, raster=bt_kaart.read(1), categorical=True, category_map=pxl_2_btB,
-                                   affine=bt_kaart.affine)
+pxl2btA = dict(zip(nbts.pxl, nbts.bt_A))  # btA: categorisation according to BT codes 20190730
+pxl2btB = dict(zip(nbts.pxl, nbts.bt_B))  # btB: categorisation according to BT codes nBT2016
+btA2btB = dict(zip(nbts.bt_A, nbts.bt_B))
+'''1. total btB pixels per area. NOTE: the values of pxl2btB contain duplicates, i.e. more than one pixel value can
+refer to e.g. N12.02. This causes problems when the values of pxl2btB are used as the KEYS of the rasterstats
+output. Therefore use the btA categories.'''
+areas = hlp.gen_squares(x_ul=0, y_ul=625000, ncol=28, nrow=33, size=10000)
+bt_stats_prelim1 = rast.zonal_stats(vectors=areas, raster=bt_kaart.read(1), categorical=True, category_map=pxl2btA,
+                                    affine=bt_kaart.affine)
+# Reclass btA categories to btB categories
+bt_stats_prelim2 = []
+for stat in bt_stats_prelim1:
+    btB_stat = {}
+    for k, v in stat.items():
+        btB_key = btA2btB[k]
+        try:
+            btB_stat[btB_key] += v
+        except KeyError:
+            btB_stat[btB_key] = v
+    bt_stats_prelim2.append(btB_stat)
+# pixel counts for each area should sum to 10.000^2 / 25^2 = 160.000 or 0 when area is outside the BT-kaart
+assert all([sum(area_dict.values()) in [160000, 0] for area_dict in bt_stats_prelim2])
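The try/except accumulation above is the crux: several btA categories can map to the same btB code, so counts must be summed rather than overwritten. A self-contained illustration of the same pattern using collections.Counter (category names hypothetical):

    from collections import Counter

    btA_counts = {'N12.02a': 100000, 'N12.02b': 60000}  # two btA keys, one btB target
    demo_btA2btB = {'N12.02a': 'N12.02', 'N12.02b': 'N12.02'}
    merged = Counter()
    for cat, count in btA_counts.items():
        merged[demo_btA2btB[cat]] += count  # Counter sums on duplicate keys
    assert merged == Counter({'N12.02': 160000})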
# make sure all btB categories are represented as dict keys
bt_stats = []
-for area_stats in bt_stats_prelim:
+for area_stats in bt_stats_prelim2:
    empty_stats = dict(zip(set(nbts.bt_B), [0] * len(set(nbts.bt_B))))
    # See: https://stackoverflow.com/questions/38987/how-do-i-merge-two-dictionaries-in-a-single-expression-in-python
    bt_stats.append({**empty_stats, **area_stats})
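The {**empty_stats, **area_stats} merge relies on the right-hand dict winning on duplicate keys, so observed counts override the zero defaults while absent categories stay 0:

    defaults = {'N05.01': 0, 'N12.02': 0}  # all btB categories, zero-initialised
    observed = {'N12.02': 160000}          # counts actually present in one area
    assert {**defaults, **observed} == {'N05.01': 0, 'N12.02': 160000}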
-areas = areas.join(pd.DataFrame(data=bt_stats))
+areas = areas.join(pd.DataFrame.from_dict(bt_stats, orient='columns'))
+'''2. Average NDep per area'''
+ndep_stats = rast.zonal_stats(vectors=areas, raster=ndep_kaart.read(1), stats='min max mean', affine=ndep_kaart.affine)
+areas.loc[:, 'ndep_max'] = [x['max'] for x in ndep_stats]
+areas.loc[:, 'ndep_min'] = [x['min'] for x in ndep_stats]
+areas.loc[:, 'ndep_mean'] = [x['mean'] for x in ndep_stats]
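rasterstats returns one stats dict per polygon, so ndep_stats has roughly this shape (values invented; polygons outside the NDep raster yield None):

    # [{'min': 980.4, 'max': 2410.7, 'mean': 1587.2},   # square inside the raster
    #  {'min': None, 'max': None, 'mean': None}, ...]   # square outside the raster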
-'''2. Construct GDF with NDFF observations reduced to 1 observation per BTKaart pixel. Use forward Affine to tie NDFF
+'''3. Construct GDF with NDFF observations reduced to 1 observation per BTKaart pixel. Use forward Affine to tie NDFF
observation centroids to a NDFF pixel'''
ndff_all = hlp.read_ndff_shp(sp.ndff_src)
queries = {'q1': ndff_all.loc[(ndff_all.year >= 2000) & (ndff_all.year.notna())].index,
@@ -73,21 +107,30 @@ ndff = gp.GeoDataFrame({'ndff_id': [1] * len(sel), 'soort': [sp.name] * len(sel)
ndff.loc[:, 'nbt_row_col'] = ndff.geometry.apply(hlp.xy_2_colrow, rio_object=bt_kaart)
ndff.drop_duplicates(subset='nbt_row_col', inplace=True)
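The deduplication works because nbt_row_col is a single hashable key per raster pixel; a toy example, assuming the helper returns keys like '5614_6875' (format hypothetical):

    import pandas as pd

    obs = pd.DataFrame({'nbt_row_col': ['5614_6875', '5614_6875', '5614_6876']})
    obs.drop_duplicates(subset='nbt_row_col', inplace=True)
    assert len(obs) == 2  # two centroids in one 25 m pixel collapse to one observation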
-'''3. Join each NDFF observation to an area'''
+'''4. Join each NDFF observation to an area'''
ndff = gp.sjoin(left_df=ndff, right_df=areas.loc[:, ['ID', 'geometry']], how='left', op='within')
-'''4. Join each NDFF observation to a BT'''
+'''5. Join each NDFF observation to a BT'''
sp_stats = rast.zonal_stats(vectors=ndff, raster=bt_kaart.read(1), categorical=True, all_touched=False,
-                            category_map=pxl_2_btB, affine=bt_kaart.affine)
+                            category_map=pxl2btB, affine=bt_kaart.affine)
# not more than 1 BT per NDFF observation and not more than 1 pixel per NDFF observation
assert all([len(x) == 1 for x in sp_stats])
assert all([item for sublist in [[v == 1 for v in x.values()] for x in sp_stats] for item in sublist]) # sorry..
-ndff.loc[:, 'BT'] = [item for sublist in [[k for k in x.keys()] for x in sp_stats] for item in sublist]
+ndff.loc[:, 'btB'] = [item for sublist in [[k for k in x.keys()] for x in sp_stats] for item in sublist]
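Since the asserts guarantee exactly one {btB: 1} pair per observation, the flattening one-liners have a simpler equivalent (a sketch, not the committed code):

    from itertools import chain

    sp_stats_demo = [{'N12.02': 1}, {'N05.01': 1}]  # shape guaranteed by the asserts
    assert all(v == 1 for v in chain.from_iterable(s.values() for s in sp_stats_demo))
    labels = [next(iter(s)) for s in sp_stats_demo]  # the single btB key of each dict
    assert labels == ['N12.02', 'N05.01']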
-'''5. Pivot NDFF observations to Area ids and sum the number of observations. Then merge with areas df.'''
-assert set(ndff.BT).issubset(areas.columns)
-ndff_per_area = pd.pivot_table(data=ndff, index='ID', columns='BT', values='ndff_id', aggfunc='sum')
+'''6. Pivot NDFF observations to Area ids and sum the number of observations. Then merge with areas df.'''
+assert set(ndff.btB).issubset(areas.columns)
+ndff_per_area = pd.pivot_table(data=ndff, index='ID', columns='btB', values='ndff_id', aggfunc='sum')
ndff_per_area.columns = ['{0}_ndffpxls'.format(x) for x in ndff_per_area.columns.tolist()]
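A toy version of the pivot, showing how per-observation rows become per-area btB counts (data invented):

    import pandas as pd

    ndff_demo = pd.DataFrame({'ID': [7, 7, 7], 'btB': ['N12.02', 'N12.02', 'N05.01'], 'ndff_id': [1, 1, 1]})
    per_area = pd.pivot_table(data=ndff_demo, index='ID', columns='btB', values='ndff_id', aggfunc='sum')
    per_area.columns = ['{0}_ndffpxls'.format(x) for x in per_area.columns.tolist()]
    print(per_area)  # ID 7 -> N05.01_ndffpxls: 1, N12.02_ndffpxls: 2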
-out = pd.merge(left=areas, right=ndff_per_area.fillna(0), left_on='ID', right_index=True, how='left')
-out = out.reindex(sorted(out.columns), axis=1)
\ No newline at end of file
+# Drop btB columns from areas where there are no NDFF observations
+bt_drop = [col for col in areas.columns.difference(set(ndff.btB)).tolist() if col.startswith(('N', 'W', 'A', 'L'))]
+'''Merge NDFF observations df with areas df for full dataframe'''
+out = pd.merge(left=areas, right=ndff_per_area, left_on='ID', right_index=True, how='left')
+out = out.reindex(sorted(out.columns), axis=1)
+# Copy to clipboard dropping areas entirely nodata and BT columns with no matching NDFF observation
+out.loc[out.nodata < 160000, :].drop(labels=['geometry'] + bt_drop, axis=1).fillna(0).to_clipboard(sep=';', index=False)
+print('\n results are on ze clipboard')
+if shp:
+    out.to_file(r'c:\apps\temp_geodata\tapuit_dk_adv.shp')
@@ -137,7 +137,8 @@ def xy_2_colrow(point, rio_object):
def get_mnp_dk(species):
    try:
-        df = pd.read_csv(r'c:\apps\temp_geodata\MNP_DK\03_MNP_versie6_par_density_factors_BT2019_v2.csv', sep=',',
+        df = pd.read_csv(r'c:\Users\roelo008\OneDrive - WageningenUR\a_projects\MNP\paramset\03_MNP_versie6_par_density_factors_BT2019_v2.csv', sep=',',
                         usecols=['Species_code', 'Land_type_code', 'Land_type_quality'])
        df['bt_B'] = np.where(df.Land_type_code.str.slice(7) == '00', df.Land_type_code.str.slice(0, 6),
                              df.Land_type_code)
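Assuming Land_type_code values look like 'N12.02.00' (hypothetical), str.slice(7) isolates the trailing subtype and str.slice(0, 6) keeps the btB part:

    import numpy as np
    import pandas as pd

    codes = pd.Series(['N12.02.00', 'N05.01.02'])
    bt_B = np.where(codes.str.slice(7) == '00', codes.str.slice(0, 6), codes)
    print(bt_B)  # ['N12.02' 'N05.01.02']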