###
## Development for the EdHulPOL model
###
# The EdHulPOL model is an R-based pollen dispersion model.
# The model allows calibration of model parameters
# against known contemporary data and subsequent application
# of these parameters in palaeo-ecological studies
# to assess the sensitivity of lakes to changes in ecosystem composition.
# Novel in this application is the propagation of parameter uncertainties
# and iteration over hypothetical land surfaces. Therefore, robust
# probability-based estimates can be made (i.e. using Monte Carlo approaches).
# EdHulPOL is a modified version of the HULPOL v4 model.
# Bunting & Middleton (2005) Modelling pollen dispersal and deposition using HUMPOL software, including simulating windroses and irregular lakes,
# Review of Palaeobotany & Palynology, vol 134, 184-196
# load needed functions
source("/home/lsmallma/WORK/R/Scripts/WRF-SPA/functions.txt")
## Model options
deposition_model=3 # which deposition model to use: 1 = 1/distance, 2 = 1/distance^2, 3 = Prentice/Sutton kernel
nos_iter=150 # number of Monte Carlo iterations
## INPUTS
nos_species=2 # number of taxa considered
fraction=c(50,50) # fraction (%) of each taxon overall; must sum to 100 (checked below)
weighted_patch=FALSE # if TRUE, after the first iteration patch locations are weighted towards the lake centre
patch_size=1600 # forest clearance patches (radius, m)
lake_size=1600 # radius of lake (m)
lake_centred=TRUE # lake fixed to centre of domain or placed randomly
domain_radius=500*0.5 # radius of domain (pixels)
resolution=200 # resolution / pixel size (m)
fall_speed=c(0.018,0.0250) # fall speed (m.s-1) of pollen; 0 = internal calculation
wind_sp=3 # wind speed (m.s-1)
radius=10e-6 # radius of pollen grain (m)
pollen_productivity=c(1,1.5) # relative pollen productivity of each taxon # runif(1,0,15)
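# sanity check (optional): the taxon fractions above must sum to 100 % before we continue
if (sum(fraction) != 100) {stop("fraction must sum to 100 %")}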
## Parameters
gamma=0.125 # turbulence parameter (used as 2*gamma in the dispersal kernel)
diffusion_coefficient=0.12 # vertical diffusion coefficient (m^1/8); alternative value 0.3
gravity=9.81 # gravity (m s-2)
rho0=2e6 # particle density (g m-3)
rho=1.27e6 # air density (g m-3)
mu=1.8e-2 # dynamic viscosity (g m-1 s-1)
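# Worked check of the internal fall speed calculation (illustrative only): with the
# parameters above, Stokes' law gives the settling velocity used later whenever a
# taxon's fall_speed is set to 0.
stokes_fall_speed=(2*(radius**2)*gravity*(rho0-rho))/(9*mu)
print(paste("Stokes fall speed (m.s-1) = ",round(stokes_fall_speed,digits=4),sep=""))
rm(stokes_fall_speed)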
sum_pol=rep(0,nos_species)
for (iter in seq(1,nos_iter)) {
print(paste("iteration ",iter," of ",nos_iter, sep=""))
stime=proc.time()["elapsed"]
## create my spatial domain
mean_sp_abundance_at_distance=array(1, dim=c(domain_radius*2,domain_radius*2,nos_species))
mean_sp_abundance_at_distance[,,2]=0
## add forest patches to the model
# determine how many forest patches will be needed
total_area=length(as.vector(mean_sp_abundance_at_distance[,,1]))*resolution**2
# how much area in total will become savanna
patch_area=total_area*fraction[2]*1e-2
# over-allocate candidate patch centres (x10) because random placement means patches can overlap
est_nos_patch=ceiling(patch_area/(pi*patch_size**2)*10)
if (iter == 1 | weighted_patch == FALSE) {
# find required number of forest patch locations
patch_centre_i=round(runif(est_nos_patch,1,domain_radius*2), digits=0)
patch_centre_j=round(runif(est_nos_patch,1,domain_radius*2), digits=0)
} else {
patch_centre_i=round(pmax(1,pmin(domain_radius*2,rnorm(est_nos_patch,lake_centre_i,domain_radius/2))), digits=0)
patch_centre_j=round(pmax(1,pmin(domain_radius*2,rnorm(est_nos_patch,lake_centre_j,domain_radius/2))), digits=0)
}
# create vectorised versions of the 2D space
i_extent=rep(1:(domain_radius*2),times=(domain_radius*2))
j_extent=rep(1:(domain_radius*2), each=(domain_radius*2))
# block out these values in our maps
forest_hold=as.vector(mean_sp_abundance_at_distance[,,1])
savanna_hold=as.vector(mean_sp_abundance_at_distance[,,2])
i = 1 ; patch_done = 0
while (patch_done < patch_area) {
if (i %% 500 == 0) {print(paste("...loop ",i,"; area still to find ha = ",((patch_area-patch_done)/10000),sep=""))}
# now work out the distance from each patch
distance_array=sqrt((abs(i_extent-patch_centre_i[i])**2)+(abs(j_extent-patch_centre_j[i])**2))*resolution
# which pixels fall within this patch, assuming circular patches
patch_locations=which(distance_array < patch_size)
forest_hold[patch_locations]=0
savanna_hold[patch_locations]=1
patch_done = length(which(savanna_hold == 1))*resolution**2
i = i + 1 ; if (i > est_nos_patch) {patch_done = patch_area} # exit once the candidate patch centres are exhausted
}
# re-structure for further work
mean_sp_abundance_at_distance=array(c(forest_hold,savanna_hold), dim=c(domain_radius*2,domain_radius*2,nos_species))
# then create lake area
if (iter == 1 & lake_centred) {
lake_centre_i=domain_radius # fixed to the centre of the domain #round(runif(1,1,domain_radius*2), digits=0)
lake_centre_j=domain_radius #round(runif(1,1,domain_radius*2), digits=0)
} else if (iter == 1 & lake_centred == FALSE) {
lake_centre_i=round(runif(1,1,domain_radius*2), digits=0)
lake_centre_j=round(runif(1,1,domain_radius*2), digits=0)
}
# now work out the distance from the lake
distance_array=sqrt((abs(i_extent-lake_centre_i)**2)+(abs(j_extent-lake_centre_j)**2))*resolution
# which of these are within the lake area, assuming circular lake
water_locations=which(distance_array < lake_size)
# keep track of how many
nos_water=length(water_locations)
# block out these values in our maps
forest_hold=as.vector(mean_sp_abundance_at_distance[,,1])
savanna_hold=as.vector(mean_sp_abundance_at_distance[,,2])
forest_hold[water_locations]=0
savanna_hold[water_locations]=0
# re-structure for further work
mean_sp_abundance_at_distance=array(c(forest_hold,savanna_hold), dim=c(domain_radius*2,domain_radius*2,nos_species))
#par(mfrow=c(1,2))
#library(fields)
#image.plot(mean_sp_abundance_at_distance[,,1])
#image.plot(mean_sp_abundance_at_distance[,,2])
print(paste("...% forest in area ",round(length(which(as.vector(mean_sp_abundance_at_distance[,,1]) == 1))/length(as.vector(mean_sp_abundance_at_distance[,,1])),digits=3)*100,sep=""))
# species distance stuff using % covers
#sp_hold=read.table("/home/lsmallma/WORK/EdHumPol/forest_example.txt")[,1]
#sp_hold2=read.table("/home/lsmallma/WORK/EdHumPol/savanna_example.txt")[,1]
##
### Calculate the mean species abundance at a given distance from the lake
# First allocate the distance variables and the nos_species dimension
#mean_sp_abundance_at_distance=array(0, dim=c(length(distance_array),nos_species))
# next we loop through the input file to assess our distance vectorised
#for (i in seq(1, length(sp_hold))) {
# # periodic update
# if (ceiling((i/length(sp_hold))*100) == (i/length(sp_hold))*100) {print(paste("Species maps ",round((i/length(sp_hold))*100,1)," complete %",sep=""))}
# # actual calculation
# mean_sp_abundance_at_distance[which(distance_array < (i*resolution) & distance_array > ((i-1)*resolution)),1] = sp_hold[i]
# mean_sp_abundance_at_distance[which(distance_array < (i*resolution) & distance_array > ((i-1)*resolution)),2] = sp_hold2[i]
#}
# expand the distance array: one distance map per water pixel
distance_array=array(distance_array, dim=c(length(distance_array),(nos_water)))
for (w in seq(1, nos_water)) {
# periodic updates
if (ceiling((w/nos_water)*100) == (w/nos_water)*100) { print(paste("...Distance maps ",round((w/nos_water)*100,1)," % complete",sep="")) }
# locate the new centre, i.e. the water pixel (convert its vector index to i,j coordinates)
centre_i=((water_locations[w]-1)%%(domain_radius*2))+1 ; centre_j=ceiling(water_locations[w]/(domain_radius*2))
# now create multiple distance maps for each of the water locations
distance_array[,w]=sqrt((abs(i_extent-centre_i)**2)+(abs(j_extent-centre_j)**2))*resolution
}
# clean up some
rm(patch_centre_i,patch_centre_j,patch_locations)
# rm(i_extent,j_extent,lake_centre_i,lake_centre_j,centre_i,centre_j,water_locations)
rm(total_area,patch_done,patch_area,est_nos_patch,forest_hold,savanna_hold)
# garbage collection
gc(reset=TRUE, verbose=FALSE)
# ensure a minimum distance (1 m) in distance arrays
distance_array=as.vector(distance_array)
distance_array[which(distance_array == 0)] = 1
# generate spatial structures now
distance_array=array(distance_array, dim=c((domain_radius*2),(domain_radius*2),nos_water))
#mean_sp_abundance_at_distance=array(mean_sp_abundance_at_distance, dim=c((domain_radius*2),(domain_radius*2),nos_species))
# check the result
if (iter == 1) {
par(mfrow=c(2,4)) ; library(fields)
image.plot(distance_array[,,1], main="Distance from centre (m)")
image.plot(mean_sp_abundance_at_distance[,,1], main="Forest species abundance (%)")
image.plot(mean_sp_abundance_at_distance[,,2], main="Savanna species abundance (%)")
} else if (iter == 2) {
image.plot(mean_sp_abundance_at_distance[,,1], main="Forest species abundance (%)")
image.plot(mean_sp_abundance_at_distance[,,2], main="Savanna species abundance (%)")
}
# match matrix size with the species distribution information
pollen_deposition=array(NA, dim=c(dim(mean_sp_abundance_at_distance),nos_water))
if (deposition_model==1) {
for (w in seq(1, nos_water)) { pollen_deposition[,,1:nos_species,w]=distance_array[,,w]**-1 }
} else if (deposition_model==2) {
for (w in seq(1, nos_water)) { pollen_deposition[,,1:nos_species,w]=distance_array[,,w]**-2 }
} else if (deposition_model==3) {
for (i in seq(1, nos_species)) {
#print(paste("Species ",i," of ",nos_species,sep=""))
# Prentice model (Sutton 1953; Prentice 1988)
if (fall_speed[i] == 0) {fall_speed[i]=(2*(radius**2)*gravity*(rho0-rho))/(9*mu)}
beta_sp=(4*fall_speed[i])/((2*gamma)*wind_sp*(pi**0.5)*diffusion_coefficient)
# loop through the water tiles, possibly better to vectorise this also
# for (w in seq(1, nos_water)) {
# # periodic update
# if (ceiling(((w*i)/(nos_water*nos_species))*100) == ((w*i)/(nos_water*nos_species))*100) {
# print(paste("...water pixel = ",w," of ",nos_water,sep=""))
# print(paste("......deposition matrix calculation ", round(((w*i)/(nos_water*nos_species))*100,1)," % complete",sep=""))
# } ### I don't think I actually need this loop...
pollen_deposition[,,i,]=beta_sp*gamma*(distance_array[,,]**(gamma-1))*exp(-beta_sp*distance_array[,,]**gamma)
# pollen_deposition[,,i,w]=beta_sp*gamma*(distance_array[,,w]**(gamma-1))*exp(-beta_sp*distance_array[,,w]**gamma)
# } # water loop
} # species loop
} # all conditions
print("...pollen deposition map completed")
# clean up
distance_array = 0 ; rm(distance_array)
beta_sp = 0 ; rm(beta_sp)
# garbage collection
gc(reset=TRUE, verbose=FALSE)
# check the result
# image.plot(pollen_deposition[,,1,1], main="pollen deposition (Forest)")
# image.plot(pollen_deposition[,,2,1], main="pollen deposition (Savanna)")
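# Illustrative check (sketch; axis labels assumed): visualise how the Prentice/Sutton
# deposition weight for species 1 decays with distance from a single water pixel.
if (iter == 1 & deposition_model == 3) {
    example_distance=seq(resolution,10000,by=resolution)
    example_beta=(4*fall_speed[1])/((2*gamma)*wind_sp*(pi**0.5)*diffusion_coefficient)
    plot(example_distance,example_beta*gamma*(example_distance**(gamma-1))*exp(-example_beta*example_distance**gamma)
        ,type="l",xlab="Distance (m)",ylab="Deposition weight",main="Prentice kernel (sp 1)")
    rm(example_distance,example_beta)
}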
# Prentice-Sugita model (Sugita 1994), modified by Bunting & Middleton (2005): pollen load is linearly related to distance-weighted plant abundance
pollen_load_at_lake_prediction=array(NA, dim=c(nos_species,nos_water))
for (w in seq(1,nos_water)) {
pollen_load_at_lake_prediction[1,w]=sum(pollen_productivity[1]*mean_sp_abundance_at_distance[,,1]*pollen_deposition[,,1,w])
pollen_load_at_lake_prediction[2,w]=sum(pollen_productivity[2]*mean_sp_abundance_at_distance[,,2]*pollen_deposition[,,2,w])
}
# expected pollen load per taxon; summed here across all water pixels (per-pixel mean shown commented)
# print(rowMeans(pollen_load_at_lake_prediction))
# print(rowSums(pollen_load_at_lake_prediction))
sum_pol=rbind(sum_pol,rowSums(pollen_load_at_lake_prediction))
# plot(pollen_load_at_lake_prediction[1,], main="Pollen loads sp 1")
# plot(pollen_load_at_lake_prediction[2,], main="Pollen loads sp 2")
# clean up again
pollen_load_at_lake_prediction = 0 ; rm(pollen_load_at_lake_prediction)
mean_sp_abundance_at_distance = 0 ; rm(mean_sp_abundance_at_distance)
pollen_deposition = 0 ; rm(pollen_deposition)
rm(i,w)
# garbage collection
gc(reset=TRUE, verbose=FALSE)
# keep time
print(paste("Seconds per iteration = ",round(proc.time()["elapsed"]-stime, digits=1),sep=""))
} # end iteration loop
# remove the initialisation row
sum_pol=sum_pol[-1,]
#par(mfrow=c(2,2))
#hist(sum_pol[,1], main="Forest pollen")
#hist(sum_pol[,2], main="Savanna pollen")
#plot(sum_pol[,1], main="Forest pollen")
#plot(sum_pol[,2], main="Savanna pollen")
#plot(sum_pol[,1]~sum_pol[,2], ylab="Forest pollen", xlab="Savanna pollen")
#abline(0,1, lwd=3, col="red")
# species 1 (forest)
summary(sum_pol[,1]/(sum_pol[,1]+sum_pol[,2]))*100
mean(sum_pol[,1]/(sum_pol[,1]+sum_pol[,2]))*100
quantile(sum_pol[,1]/(sum_pol[,1]+sum_pol[,2]), prob=c(0.025,0.5,0.975))*100
sd(sum_pol[,1]/(sum_pol[,1]+sum_pol[,2]))*100
# species 2 (savanna)
summary(sum_pol[,2]/(sum_pol[,1]+sum_pol[,2]))*100
mean(sum_pol[,2]/(sum_pol[,1]+sum_pol[,2]))*100
quantile(sum_pol[,2]/(sum_pol[,1]+sum_pol[,2]), prob=c(0.025,0.5,0.975))*100
sd(sum_pol[,2]/(sum_pol[,1]+sum_pol[,2]))*100
#library(gplots)
#plotCI(seq(1,nos_species),c(mean(sum_pol[,1]),mean(sum_pol[,2])),uiw=c(sd(sum_pol[,1]),sd(sum_pol[,2])))
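# Sketch (output file name is hypothetical): collect the per-iteration pollen percentages
# into a table so the Monte Carlo spread can be re-used, e.g. for later calibration work.
pollen_percent=data.frame(forest=sum_pol[,1]/rowSums(sum_pol)*100,
                          savanna=sum_pol[,2]/rowSums(sum_pol)*100)
summary(pollen_percent)
#write.table(pollen_percent,file="EdHulPOL_pollen_percent.csv",sep=",",row.names=FALSE)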