Modeling each month

Here we modify our first modeling workflow to produce a model for each month. In the previous workflow we produced one model covering observations from all times; we then applied that single model to the various months.

1 Load data

Here we load the observation and background data points. We add a column identifying the month of the year.

source("setup.R")

obs = sf::read_sf(file.path("data", "obs", "obs-covariates.gpkg")) |>
  sf::st_set_geometry("geometry") |>
  dplyr::mutate(month = factor(format(month_id, "%b"), levels = month.abb),
                .before = geometry)

bkg = sf::read_sf(file.path("data", "bkg", "bkg-covariates.gpkg")) |>
  sf::st_set_geometry("geometry") |>
  dplyr::mutate(month = factor(format(month_id, "%b"), levels = month.abb),
                .before = geometry)
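A quick structural peek confirms that the new month column is in place; dplyr::glimpse() is just one convenient way to check (a minimal sketch, not part of the workflow proper).

# a quick look at the observation table and its new month column
dplyr::glimpse(obs)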
2 Do we model every month?
Let’s do a quick check by counting each by month. Note that we drop the spatial info so that we can make simple tallies.
counts = sf::st_drop_geometry(obs) |>
  dplyr::count(month, name = "n_obs") |>
  dplyr::left_join(sf::st_drop_geometry(bkg) |> dplyr::count(month, name = "n_bkg"),
                   by = 'month') |>
  print(n = 12)
# A tibble: 12 × 3
month n_obs n_bkg
<fct> <int> <int>
1 Jan 33 149
2 Feb 40 166
3 Mar 50 1111
4 Apr 341 1898
5 May 541 7058
6 Jun 2137 7209
7 Jul 2108 5617
8 Aug 1698 2352
9 Sep 725 1074
10 Oct 328 1664
11 Nov 494 233
12 Dec 66 101
So the colder months have fewer observations than the warmer months. We already knew that, but it will be interesting to see how that manifests itself in the models.
2.1 Build the monthly models
Since we are building 12 models (rather than one) it is useful to create a function that computes a model for any month, and then iterate through the months of the year.
# A function for making one month's model
#
# @param tbl a data frame of one month's observations
# @param key a data frame that holds the current iteration's month name
# @param bkg a complete data frame of background data (which we filter for the given month)
# @param path the path where the model is saved
# @return a model, which is also saved in "data/model/v2/v2.<monthname>.rds"
model_month = function(tbl, key, bkg = NULL, path = "."){

  bkg = bkg |>
    dplyr::filter(month == key$month) |>
    sf::st_drop_geometry() |>
    dplyr::select(dplyr::all_of(c("sst", "u_wind", "v_wind"))) |>
    na.omit()

  obs = tbl |>
    sf::st_drop_geometry() |>
    dplyr::select(dplyr::all_of(c("sst", "u_wind", "v_wind"))) |>
    na.omit()

  # these are the predictor variables row bound
  x = dplyr::bind_rows(obs, bkg)

  # and the flag indicating presence/background
  flag = c(rep(1, nrow(obs)), rep(0, nrow(bkg)))

  model_path = file.path(path, paste0("v2.", key$month, ".rds"))

  model = maxnet::maxnet(flag, x) |>
    maxnetic::write_maxnet(model_path)

  model
}
path = file.path("data", "model", "v2")
ok = dir.create(path, recursive = TRUE, showWarnings = FALSE)
models = obs |>
  dplyr::group_by(month) |>
  dplyr::group_map(model_month, bkg = bkg, path = path) |>
  rlang::set_names(levels(obs$month))
We can look at the response plots for every month, but for demonstration purposes we’ll just show one month. It is interesting to compare this response to that for the basic model.
plot(models[['Jun']], type = 'cloglog')
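If you do want to scan the response curves for every month rather than just June, a simple loop will page through them one model at a time; a sketch only, and you may prefer a different layout.

# draw the cloglog response curves for each monthly model in turn
for (mon in names(models)) {
  plot(models[[mon]], type = 'cloglog')
}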
3 Predict with rasters
First we load the raster databases as these are lightweight to pass into a function that iterates through the months.
3.1 Load the raster databases (sst, u_wind and v_wind)
We also make sure they are in date order and add a “month” variable to each.
= "data/oisst"
sst_path = oisster::read_database(sst_path) |>
sst_db ::arrange(date) |>
dplyr::mutate(month = format(date, "%b"))
dplyr
= "data/nbs"
nbs_path = nbs::read_database(nbs_path) |>
wind_db ::arrange(date)|>
dplyr::mutate(month = format(date, "%b"))
dplyr
= wind_db |>
u_wind_db ::filter(param == "u_wind")
dplyr
= wind_db |>
v_wind_db ::filter(param == "v_wind") dplyr
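Because we will be matching raster layers to models by month, it can be worth a quick sanity check that each database has entries for all twelve months. A minimal sketch, assuming the databases are ordinary data frames carrying the month column we just added:

# tally the available layers by month (and by parameter for the wind data)
dplyr::count(sst_db, month)
dplyr::count(wind_db, param, month)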
3.2 Iterate through the months making predictions
Now we can build an iterator function that will make a prediction for each month. Let’s narrow our predictions to just those for a particular year, 2019, and read the rasters in all at once.
dates = as.Date(c("2019-01-01", "2019-12-31"))
x = read_predictors(
  sst_db = dplyr::filter(sst_db, dplyr::between(date, dates[1], dates[2])),
  u_wind_db = dplyr::filter(u_wind_db, dplyr::between(date, dates[1], dates[2])),
  v_wind_db = dplyr::filter(v_wind_db, dplyr::between(date, dates[1], dates[2])))
Now we can iterate through the months.
date_sequence = seq(from = dates[1], to = dates[2], by = "month")
pred_rasters = lapply(names(models),
  function(mon){
    ix = which(month.abb %in% mon)
    predict(models[[mon]], dplyr::slice(x, "time", ix, drop = TRUE), type = "cloglog")
  })
pred_rasters = do.call(c, append(pred_rasters, list(along = list(time = date_sequence))))
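Before plotting, it can be reassuring to confirm that the stack came together as intended. A small sketch, assuming pred_rasters is a stars object with a twelve-step time dimension:

# the time dimension should hold one layer per month of 2019
dim(pred_rasters)
stars::st_get_dimension_values(pred_rasters, "time")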
Let’s plot them.
coast = rnaturalearth::ne_coastline(scale = 'large', returnclass = 'sf') |>
  sf::st_geometry() |>
  sf::st_crop(pred_rasters)

plot_coast = function() {
  plot(coast, col = 'green', add = TRUE)
}
plot(pred_rasters, hook = plot_coast)
Let’s see what we can discern about predictive ability. We can extract the predicted values at the observed locations; having those in hand allows us to compute pAUC for each month.
pred_obs = stars::st_extract(pred_rasters,
                             dplyr::filter(obs, dplyr::between(month_id, dates[1], dates[2])),
                             time_column = "month_id") |>
  dplyr::mutate(month = factor(format(month_id, "%b"), levels = month.abb)) |>
  dplyr::group_by(month)

paucs = dplyr::group_map(pred_obs,
  function(x, y) {
    ix = month.abb %in% y$month
    s = dplyr::slice(pred_rasters, "time", ix)
    pauc = maxnetic::pAUC(s, x)
    dplyr::tibble(month = y$month,
                  auc = pauc$area,
                  pauc = list(pauc))
  }) |>
  dplyr::bind_rows() |>
  print(n = 12)
# A tibble: 12 × 3
month auc pauc
<fct> <dbl> <list>
1 Jan 0.675 <pAUC [5]>
2 Feb 0.705 <pAUC [5]>
3 Mar 0.682 <pAUC [5]>
4 Apr 0.694 <pAUC [5]>
5 May 0.662 <pAUC [5]>
6 Jun 0.662 <pAUC [5]>
7 Jul 0.659 <pAUC [5]>
8 Aug 0.686 <pAUC [5]>
9 Sep 0.602 <pAUC [5]>
10 Oct 0.626 <pAUC [5]>
11 Nov 0.619 <pAUC [5]>
12 Dec 0.600 <pAUC [5]>
Note that the last element, pauc, is the result returned by the maxnetic::pAUC() function, which we can plot.
pp = paucs |>
  dplyr::group_by(month) |>
  dplyr::group_map(
    function(tbl, key){
      plot(tbl$pauc[[1]], title = key$month, xlab = "", ylab = "")
    }
  )
patchwork::wrap_plots(pp, ncol = 4)
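One way to take in the seasonal pattern at a glance is to plot the monthly AUC values; a base-graphics sketch using the paucs table computed above:

# AUC for each monthly model applied to the 2019 predictions
plot(as.integer(paucs$month), paucs$auc, type = "b",
     xlab = "month", ylab = "AUC", axes = FALSE)
axis(1, at = 1:12, labels = month.abb)
axis(2)
box()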
Well, it would be easy to become dispirited by this result. It would be reasonable to expect AUC values to improve if we built monthly models rather than applying a single model to every month, but the improvement is not the dramatic one we hoped for. Darn!