Commit da8b2c49 authored by roelo008

advanced draagkracht model

parent 1701f53d
@@ -11,32 +11,33 @@ nbt_src: path to downscaled beheertypenkaart (bt) (nominal raster)
Method:
1. total pixels per bt per area
2. reduce NDFF observations to 1 observation per nbt pixel using forward affine transform
3. couple each NDFF observation to 1 bt
4. couple each NDFF observation to 1 area
3. couple each NDFF observation to 1 area
4. couple each NDFF observation to 1 bt
5. rework tables to output
<SPECIES NAME>
AreaName|BT1_tot|BT1_presence|BT2_tot|BT2_presence|...|BTn_tot|BTn_presence
AreaName|BT1_pxl-count|BT1_ndff-pxl-count|BT2_pxl-count|BT2_ndff-pxl-count|...|BTn_pxl-count|BTn_ndff-pxl-count
By: Hans Roelofsen, WEnR, 29 June 2020
"""
import argparse
import os
import pandas as pd
import geopandas as gp
import rasterio as rio
import rasterstats as rast
import numpy as np
from mnp import helpers as hlp
# from sample.mnp import helpers as hlp
parser = argparse.ArgumentParser()
parser.add_argument('species', type=str, help='species name')
parser.add_argument('bt', type=str, help='beheertype code Nxx.yy')
# Parse arguments
args = parser.parse_args()
sp = hlp.Species(args.species)
#args = parser.parse_args()
#sp = hlp.Species(args.species)
sp = hlp.Species('Tapuit')
#bt = args.bt
# Path to beheertypenkaart
bt_tiff = r'c:\apps\temp_geodata\nbt\nbt_20190730_111603.tif'
@@ -47,23 +48,46 @@ nbts = hlp.nbts()
pxl_2_btB = dict(zip(nbts.pxl, nbts.bt_B))
btA_2_btB = dict(zip(nbts.bt_A, nbts.bt_B))
# Read NDFF shapefile
ndff = hlp.read_ndff_shp(sp.ndff_src)
'''1. total BT pixels per area'''
areas = hlp.gen_squares(x_ul=0, y_ul=625000, nrow=33, ncol=28, size=10000)  # grid of 10 km squares in EPSG:28992
bt_stats_prelim = rast.zonal_stats(vectors=areas, raster=bt_kaart.read(1), categorical=True, category_map=pxl_2_btB,
affine=bt_kaart.affine)
bt_stats = []
for area_stats in bt_stats_prelim:
empty_stats = dict(zip(set(nbts.bt_B), [0] * len(set(nbts.bt_B))))
# See: https://stackoverflow.com/questions/38987/how-do-i-merge-two-dictionaries-in-a-single-expression-in-python
bt_stats.append({**empty_stats, **area_stats})
areas = areas.join(pd.DataFrame(data=bt_stats))
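# A minimal sketch of the dict merge above: categorical zonal_stats only reports the categories
# that actually occur inside a polygon, so merging into a zero-filled template gives every area
# the full set of BT columns. The BT codes and counts below are toy values, not from the source.
empty = dict.fromkeys(['N01.01', 'N05.01', 'N12.02'], 0)
observed = {'N05.01': 140}            # counts returned by zonal_stats for one area
row = {**empty, **observed}           # -> {'N01.01': 0, 'N05.01': 140, 'N12.02': 0}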
# Read areas shapefile
areas = gp.read_file()
# define queries that isolate NDFF data of interest
queries = {'q1': ndff.loc[(ndff.year >= 2000) & (ndff.year.notna())].index,
'q2': ndff.loc[ndff.nl_name.isin([sp.name])].index,
'q3': ndff.loc[ndff.area < 10000].index}
'''2. Construct GDF with NDFF observations reduced to 1 observation per BTKaart pixel. Use the affine transform of
the beheertypenkaart to tie NDFF observation centroids to an nbt pixel'''
ndff_all = hlp.read_ndff_shp(sp.ndff_src)
queries = {'q1': ndff_all.loc[(ndff_all.year >= 2000) & (ndff_all.year.notna())].index,
'q2': ndff_all.loc[ndff_all.nl_name.isin([sp.name])].index,
'q3': ndff_all.loc[ndff_all.area < 10000].index}
sel = set.intersection(*[set(queries[x]) for x in queries.keys()])  # keep indices that pass all queries
assert len(sel) > 0, 'no observations remaining'
print('Found {0} NDFF observations for {1}'.format(len(sel), sp.name))
assert len(sel) > 0, ' no observations remaining'
print(' found {0} NDFF observations for {1}'.format(len(sel), sp.name))
ndff = gp.GeoDataFrame({'ndff_id': [1] * len(sel), 'soort': [sp.name] * len(sel), 'geometry': ndff_all.loc[sel].centroid})
ndff.loc[:, 'nbt_row_col'] = ndff.geometry.apply(hlp.xy_2_colrow, rio_object=bt_kaart)
ndff.drop_duplicates(subset='nbt_row_col', inplace=True)
ndff_gdf = gp.GeoDataFrame({'ID': [1] * len(sel), 'soort': [sp.name] * len(sel), 'geometry': ndff.loc[sel].centroid})
ndff_gdf.loc[:, 'nbt_row_col'] = ndff_gdf.geometry.apply(hlp.xy_2_colrow, rio_object=bt_kaart)
ndff_gdf.drop_duplicates(subset='nbt_row_col', inplace=True)
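# A minimal sketch of the coordinate-to-pixel step above, assuming hlp.xy_2_colrow applies the
# inverse of the raster's affine transform (the transform itself maps col/row to x/y, so going
# from x/y to col/row needs the inverse). Transform values here are illustrative only.
import affine
example_transform = affine.Affine(25, 0, 0, 0, -25, 625000)   # 25 m pixels, upper-left at (0, 625000)
col, row = ~example_transform * (1310.0, 624005.0)            # inverse affine: coordinates -> fractional indices
int(col), int(row)                                            # -> (52, 39); duplicates per pixel are dropped above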
sp_stats = rast.zonal_stats(vectors=ndff_gdf, raster=bt_kaart.read(1), categorical=True, all_touched=False,
'''3. Join each NDFF observation to an area'''
ndff = gp.sjoin(left_df=ndff, right_df=areas.loc[:, ['ID', 'geometry']], how='left', op='within')
'''4. Join each NDFF observation to a BT'''
sp_stats = rast.zonal_stats(vectors=ndff, raster=bt_kaart.read(1), categorical=True, all_touched=False,
category_map=pxl_2_btB, affine=bt_kaart.affine)
# not more than 1 BT per NDFF observation and not more than 1 pixel per NDFF observation
assert all([len(x) == 1 for x in sp_stats])
assert all([item for sublist in [[v == 1 for v in x.values()] for x in sp_stats] for item in sublist])  # every pixel count equals 1
ndff.loc[:, 'BT'] = [item for sublist in [[k for k in x.keys()] for x in sp_stats] for item in sublist]  # the single BT code per observation
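# The same two checks and the BT extraction written with itertools, arguably easier to read;
# a sketch only, behaviour assumed identical to the comprehensions above.
from itertools import chain
assert all(len(stats) == 1 for stats in sp_stats)                                       # one BT category per observation
assert all(count == 1 for count in chain.from_iterable(s.values() for s in sp_stats))   # each counted on exactly 1 pixel
# ndff.loc[:, 'BT'] = [next(iter(s)) for s in sp_stats]                                 # equivalent to the assignment above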
'''5. Pivot NDFF observations to area IDs and sum the number of observations. Then merge with the areas df.'''
assert set(ndff.BT).issubset(areas.columns)
ndff_per_area = pd.pivot_table(data=ndff, index='ID', columns='BT', values='ndff_id', aggfunc='sum')
ndff_per_area.columns = ['{0}_ndffpxls'.format(x) for x in ndff_per_area.columns.tolist()]
out = pd.merge(left=areas, right=ndff_per_area.fillna(0), left_on='ID', right_index=True, how='left')
out = out.reindex(sorted(out.columns), axis=1)
\ No newline at end of file
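# A toy sketch of the layout `out` ends up with: per area ID, the BT pixel totals from step 1
# next to the <BT>_ndffpxls counts from step 5. BT codes and values below are made up.
import pandas as pd
pd.DataFrame({'ID': ['0_625000', '10000_625000'],
              'N05.01': [140, 0], 'N05.01_ndffpxls': [3.0, 0.0],
              'N12.02': [25, 410], 'N12.02_ndffpxls': [0.0, 12.0]})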
@@ -6,6 +6,8 @@ import os
import numpy as np
import geopandas as gp
import pandas as pd
import shapely
import affine
from datetime import datetime as dt
from matplotlib import pyplot as plt
@@ -98,22 +100,24 @@ class Species:
raise
def colrow_2_xy(colrow, rio_object):
def colrow_2_xy(colrow, rio_object=None, affine_in=None):
"""
https://www.perrygeo.com/python-affine-transforms.html
:param rio_object:
:param col: column index
:param row: row index
:param colrow: column-index_row-index tuple
:param rio_object: rio object
:param affine_in: affine transformation
:return: easting, northing in the raster CRS
"""
try:
a = rio_object.affine
col, row = colrow
x, y = a * (col, row)
return x, y
except AttributeError:
print("{0} is not a valid rasterio object".format(rio_object))
raise
if isinstance(affine_in, affine.Affine):
a = affine_in
else:
raise Exception("{0} is not a valid rasterio object and no Affine alternative supplied".format(rio_object))
col, row = colrow
x, y = a * (col, row)
return x, y
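# A quick worked example of the forward transform above, with made-up raster parameters:
# 25 m pixels anchored at (0, 625000); pixel (col=4, row=2) maps to its upper-left corner.
example_affine = affine.Affine(25, 0, 0, 0, -25, 625000)
colrow_2_xy(colrow=(4, 2), affine_in=example_affine)   # -> (100.0, 624950.0)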
def xy_2_colrow(point, rio_object):
@@ -142,3 +146,41 @@ def get_mnp_dk(species):
except AttributeError:
print('Make sure to provide a Species object')
raise
def gen_squares(x_ul, y_ul, nrow, ncol, size):
"""
Generate geodataframe with square polygons
:param x_ul: easting top-left corner
:param y_ul: northing top-left corner
:param nrow: number of rows
:param ncol: number of cols
:param size: square size in m
:return: gdf with ncol*nrow items
"""
'''
Definition affine.Affine(a, b, c, d, e, f)
a: width of a pixel
b: row rotation (typically zero)
c: x-coordinate of the upper-left of the upper-left pixel
d: column rotation (typically zero)
e: height of a pixel (negative)
f: y-coordinate of the upper-left of the upper-left pixel
https://www.perrygeo.com/python-affine-transforms.html
'''
aff = affine.Affine(size, 0, x_ul, 0, -size, y_ul)
ul_corners_indx = [(col, row) for (row, col) in np.ndindex(nrow, ncol)]  # np.ndindex yields (row, col); colrow_2_xy expects (col, row)
ul_corners_coords = [colrow_2_xy(colrow=corner, affine_in=aff) for corner in ul_corners_indx]
# [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)] == [ll, lr, ur, ul]
all_corner_coords = [[(xmin, ymax-size), (xmin+size, ymax-size), (xmin+size, ymax), (xmin, ymax)] for (xmin, ymax)
in ul_corners_coords]
shapes = [shapely.geometry.Polygon(corner) for corner in all_corner_coords]
ids = ['{0}_{1}'.format(int(x), int(y)) for x,y in ul_corners_coords]
return gp.GeoDataFrame(crs={"init": 'epsg:28992'}, geometry=shapes, data={'ID': ids, 'size': str(size)})
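# A small usage sketch of gen_squares, mirroring the grid the draagkracht script builds:
# 28 columns x 33 rows of 10 km squares anchored at (0, 625000) in RD New (EPSG:28992).
grid = gen_squares(x_ul=0, y_ul=625000, nrow=33, ncol=28, size=10000)
assert len(grid) == 33 * 28
grid.total_bounds   # -> [0., 295000., 280000., 625000.] (minx, miny, maxx, maxy)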