From 215a611a30229d80dcfae8211dde307e85f15579 Mon Sep 17 00:00:00 2001 From: Ruth Bowyer <105492883+RuthBowyer@users.noreply.github.com> Date: Tue, 13 Jun 2023 14:09:25 +0100 Subject: [PATCH 001/146] Scripts to run df conversion and averages on infill data, inclusion of infill data for run identifying --- R/misc/Identifying_Runs.RMD | 220 ++++++++++------ R/misc/Identifying_Runs.md | 247 ++++++++++++------ .../figure-gfm/unnamed-chunk-20-1.png | Bin 179516 -> 179025 bytes .../figure-gfm/unnamed-chunk-36-1.png | Bin 190380 -> 189807 bytes R/misc/InfillDataRunIdentyfing.R | 33 +++ R/misc/calc.mean.sd.daily.infill.R | 51 ++++ 6 files changed, 390 insertions(+), 161 deletions(-) create mode 100644 R/misc/InfillDataRunIdentyfing.R create mode 100644 R/misc/calc.mean.sd.daily.infill.R diff --git a/R/misc/Identifying_Runs.RMD b/R/misc/Identifying_Runs.RMD index 0c1b96e9..1d9de8ef 100644 --- a/R/misc/Identifying_Runs.RMD +++ b/R/misc/Identifying_Runs.RMD @@ -26,7 +26,13 @@ These runs will be the focus of initial bias correction focus Data is tasmax runs converted to dataframe using sript 'ConvertingAllCPMdataTOdf.R', with files later renamed.Then daily means for historical periods and future periods were calculated using 'calc.mean.sd.daily.R' and summaries saved as .csv -In retrospect the conversion to df might not have been necessary/the most resource efficient, see comment here:https://tmieno2.github.io/R-as-GIS-for-Economists/turning-a-raster-object-into-a-data-frame.html +In retrospect the conversion to df might not have been necessary/the most resource efficient, see comment here:https://tmieno2.github.io/R-as-GIS-for-Economists/turning-a-raster-object-into-a-data-frame.html -- this was tested and using `terra::global` to calculate the raster-wide mean was less efficient + +**Update 13.05.23** - Adding in infill data, mean to be calculated over the whole time period + +As of June 2023, the tasmax-as-dataframe and tasmax daily means and the df data is located in 
`vmfileshare/Interim/tasmax_dfs/` + +There is an error in the naming convention - Y00_Y20 should be Y01 to reflect the infill data time period (although this does cover a breif period of 2000) - to be updated in future ```{r} @@ -40,25 +46,31 @@ fp <- paste0("/Users/rbowyer/Library/CloudStorage/OneDrive-TheAlanTuringInstitut ``` ```{r} -names <- gsub("df.avs_|.csv|df.", "", files) -names_hist <- names[grepl("hist", names)] -names_y21_40 <- names[grepl("Y21_Y40", names)] -names_y61_80 <- names[grepl("Y61_Y80", names)] -fp_hist <- fp[grepl("_hist", fp)] -fp_y21_40 <- fp[grepl("Y21_Y40", fp)] -fp_y61_80 <- fp[grepl("Y61_Y80", fp)] +# Creating objects for names and filepath for each of the timer periods, for easy loading +names <- gsub("df.avs_|.csv|df.", "", files) +i <- c("hist", "Y00_Y20","Y21_Y40", "Y41_Y60", "Y61_Y80") -dfs_hist <- lapply(fp_hist, read.csv) -names(dfs_hist) <- names_hist +namesL <- lapply(i, function(i){ + n <- names[grepl(i, names)] + }) -dfs_y21_40 <- lapply(fp_y21_40, read.csv) -names(dfs_y21_40) <- names_y21_40 +names(namesL) <- paste0("names_",i) +list2env(namesL, .GlobalEnv) +``` +```{r load data} -dfs_y61_80 <- lapply(fp_y61_80, read.csv) -names(dfs_y61_80) <- names_y61_80 +dfL <- lapply(i, function(i){ + fp <- fp[grepl(i, fp)] + dfs <- lapply(fp, read.csv) + n <- namesL[[paste0("names_",i)]] + names(dfs) <- n + return(dfs) + }) +names(dfL) <- paste0("dfs_", i) +list2env(dfL, .GlobalEnv) ``` ## **2. 
Comparing Runs** @@ -70,7 +82,7 @@ names(dfs_y61_80) <- names_y61_80 Y <- rep(c(1981:2000), each=360) dfs_hist <- lapply(names_hist, function(i){ - df <- dfs_hist[[i]] + df <- dfs_hist[[i]] names(df) <- c("day", "mean", "sd") df$model <- i df$dn <- 1:nrow(df) @@ -83,7 +95,7 @@ historical_means <- dfs_hist %>% reduce(rbind) ``` -### **Time series - daily ** +### **Time series - daily** ```{r fig.height=8} @@ -141,7 +153,7 @@ ggplot(historical_means, aes(sample=mean, colour=factor(model))) + ``` -### **Time series - annual mean ** +### **Time series - annual mean** ```{r message=FALSE} @@ -215,7 +227,7 @@ ggplot(historical_means_y, aes(sample=mean.annual, colour=factor(model))) + ``` -### **Time series - annual max ** +### **Time series - annual max** ```{r message=FALSE} @@ -295,8 +307,8 @@ Max vals are different but based on means then selection would be Run 02 (2nd lo Y <- rep(c(2021:2040), each=360) -dfs_y21_40 <- lapply(names_y21_40, function(i){ - df <- dfs_y21_40[[i]] +dfs_Y21_Y40 <- lapply(names_Y21_Y40, function(i){ + df <- dfs_Y21_Y40[[i]] names(df) <- c("day", "mean", "sd") df$model <- i df$dn <- 1:nrow(df) @@ -304,20 +316,20 @@ dfs_y21_40 <- lapply(names_y21_40, function(i){ return(df) }) -#Create a single df in long form of Runs for the y21_40 period -y21_40_means <- dfs_y21_40 %>% reduce(rbind) +#Create a single df in long form of Runs for the Y21_Y40 period +Y21_Y40_means <- dfs_Y21_Y40 %>% reduce(rbind) ``` -### **Time series - daily ** +### **Time series - daily** ```{r} -ggplot(y21_40_means) + +ggplot(Y21_Y40_means) + geom_line(aes(x=dn, y=mean, group=model, colour=model)) + # Removing sd ribbon for ease of viewing #geom_ribbon(aes(x =dn, ymin = mean - sd, ymax= mean + sd), alpha=0.4) + - theme_bw() + xlab("Day (y21_40 1980 - 2000)") + + theme_bw() + xlab("Daily (1980 - 2000)") + ylab("Daily mean max temp (tasmax) oC") + #scale_fill_brewer(palette = "Paired", name = "") + scale_colour_brewer(palette = "Paired", name = "") + @@ -330,20 +342,20 @@ 
ggplot(y21_40_means) + ``` -### **boxplot - mean y21_40** +### **boxplot - mean Y21_Y40** ```{r} #Create a pallete specific to the runs so when reordered maintain the same colours -y21_40_means$model <- as.factor(y21_40_means$model) +Y21_Y40_means$model <- as.factor(Y21_Y40_means$model) c <- brewer.pal(12, "Paired") -my_colours <- setNames(c, levels(y21_40_means$model)) +my_colours <- setNames(c, levels(Y21_Y40_means$model)) ``` ```{r} -y21_40_means %>% +Y21_Y40_means %>% mutate(model = fct_reorder(model, mean, .fun='median')) %>% ggplot(aes(x=reorder(model, mean), y=mean, fill=model)) + geom_boxplot() + theme_bw() + @@ -358,7 +370,7 @@ y21_40_means %>% ```{r fig.height=8} -ggplot(y21_40_means, aes(sample=mean, colour=factor(model))) + +ggplot(Y21_Y40_means, aes(sample=mean, colour=factor(model))) + stat_qq() + stat_qq_line()+ theme_bw()+ @@ -368,21 +380,21 @@ ggplot(y21_40_means, aes(sample=mean, colour=factor(model))) + ``` -### **Time series - annual mean ** +### **Time series - annual mean** ```{r message= FALSE, warning=FALSE} #Aggregating to year for annual average -y21_40_means$Yf <- as.factor(y21_40_means$Y) +Y21_Y40_means$Yf <- as.factor(Y21_Y40_means$Y) -y21_40_means_y <- y21_40_means %>% +Y21_Y40_means_y <- Y21_Y40_means %>% group_by(Yf, model) %>% dplyr::summarise(mean.annual=mean(mean, na.rm=T), sd.annual=sd(mean, na.rm = T)) ``` ```{r} -ggplot(y21_40_means_y) + +ggplot(Y21_Y40_means_y) + geom_line(aes(x = as.numeric(Yf), y=mean.annual, color=model)) + theme_bw() + xlab("Year (2021 - 2040)") + @@ -397,7 +409,7 @@ ggplot(y21_40_means_y) + ```{r} # Plotting with SDs in geom_ribbon to see if anything wildely different -ggplot(y21_40_means_y) + +ggplot(Y21_Y40_means_y) + geom_ribbon(aes(as.numeric(Yf), y=mean.annual, ymin = mean.annual - sd.annual, ymax= mean.annual + sd.annual, @@ -413,11 +425,11 @@ ggplot(y21_40_means_y) + ``` -### **boxplot - annual mean y21_40** +### **boxplot - annual mean 2021 - 2040** ```{r} -y21_40_means_y %>% +Y21_Y40_means_y %>% 
mutate(model = fct_reorder(model, mean.annual, .fun='median')) %>% ggplot(aes(x=reorder(model, mean.annual), y=mean.annual, fill=model)) + geom_boxplot() + theme_bw() + @@ -433,7 +445,7 @@ y21_40_means_y %>% ```{r fig.height=8} -ggplot(y21_40_means_y, aes(sample=mean.annual, colour=factor(model))) + +ggplot(Y21_Y40_means_y, aes(sample=mean.annual, colour=factor(model))) + stat_qq() + stat_qq_line()+ theme_bw()+ @@ -442,11 +454,11 @@ ggplot(y21_40_means_y, aes(sample=mean.annual, colour=factor(model))) + ``` -### **Time series - annual max ** +### **Time series - annual max** ```{r message=FALSE} -y21_40_max_y <- y21_40_means %>% +Y21_Y40_max_y <- Y21_Y40_means %>% group_by(Yf, model) %>% dplyr::summarise(max=max(mean, na.rm=T)) ``` @@ -454,7 +466,7 @@ y21_40_max_y <- y21_40_means %>% ```{r} -ggplot(y21_40_max_y) + +ggplot(Y21_Y40_max_y) + geom_line(aes(x = as.numeric(Yf), y=max, color=model)) + theme_bw() + xlab("Year (2021 - 2040)") + @@ -472,7 +484,7 @@ ggplot(y21_40_max_y) + ```{r} -y21_40_max_y %>% +Y21_Y40_max_y %>% mutate(model = fct_reorder(model, max, .fun='median')) %>% ggplot(aes(x=reorder(model, max), y=max, fill=model)) + geom_boxplot() + theme_bw() + @@ -490,7 +502,7 @@ Daily means: ```{r} #-1 removes the intercept to compare coefficients of all Runs -av1 <- aov(mean ~ model - 1, y21_40_means) +av1 <- aov(mean ~ model - 1, Y21_Y40_means) av1$coefficients[order(av1$coefficients)] ``` @@ -498,7 +510,7 @@ av1$coefficients[order(av1$coefficients)] Annual means: ```{r} -av2 <- aov(mean.annual ~ model - 1, y21_40_means_y) +av2 <- aov(mean.annual ~ model - 1, Y21_Y40_means_y) av2$coefficients[order(av2$coefficients)] ``` @@ -507,7 +519,7 @@ Max of means ```{r} -av3 <- aov(max ~ model - 1, y21_40_max_y) +av3 <- aov(max ~ model - 1, Y21_Y40_max_y) av3$coefficients[order(av3$coefficients)] ``` @@ -526,8 +538,8 @@ Based on this period, the seelction would be: Run 05, Run 03, Run 08, Run 06 (so Y <- rep(c(2061:2080), each=360) -dfs_y61_80 <- lapply(names_y61_80, 
function(i){ - df <- dfs_y61_80[[i]] +dfs_Y61_Y80 <- lapply(names_Y61_Y80, function(i){ + df <- dfs_Y61_Y80[[i]] names(df) <- c("day", "mean", "sd") df$model <- i df$dn <- 1:nrow(df) @@ -535,20 +547,20 @@ dfs_y61_80 <- lapply(names_y61_80, function(i){ return(df) }) -#Create a single df in long form of Runs for the y61_80 period -y61_80_means <- dfs_y61_80 %>% reduce(rbind) +#Create a single df in long form of Runs for the Y61_Y80 period +Y61_Y80_means <- dfs_Y61_Y80 %>% reduce(rbind) ``` -### **Time series - daily ** +### **Time series - daily** ```{r} -ggplot(y61_80_means) + +ggplot(Y61_Y80_means) + geom_line(aes(x=dn, y=mean, group=model, colour=model)) + # Removing sd ribbon for ease of viewing #geom_ribbon(aes(x =dn, ymin = mean - sd, ymax= mean + sd), alpha=0.4) + - theme_bw() + xlab("Day (y61_80 1980 - 2000)") + + theme_bw() + xlab("Day (2060 - 2080)") + ylab("Daily mean max temp (tasmax) oC") + #scale_fill_brewer(palette = "Paired", name = "") + scale_colour_brewer(palette = "Paired", name = "") + @@ -561,20 +573,20 @@ ggplot(y61_80_means) + ``` -### **boxplot - mean y61_80** +### **boxplot - mean Y61_Y80** ```{r} #Create a pallete specific to the runs so when reordered maintain the same colours -y61_80_means$model <- as.factor(y61_80_means$model) +Y61_Y80_means$model <- as.factor(Y61_Y80_means$model) c <- brewer.pal(12, "Paired") -my_colours <- setNames(c, levels(y61_80_means$model)) +my_colours <- setNames(c, levels(Y61_Y80_means$model)) ``` ```{r} -y61_80_means %>% +Y61_Y80_means %>% mutate(model = fct_reorder(model, mean, .fun='median')) %>% ggplot(aes(x=reorder(model, mean), y=mean, fill=model)) + geom_boxplot() + theme_bw() + @@ -589,7 +601,7 @@ y61_80_means %>% ```{r fig.height=8} -ggplot(y61_80_means, aes(sample=mean, colour=factor(model))) + +ggplot(Y61_Y80_means, aes(sample=mean, colour=factor(model))) + stat_qq() + stat_qq_line()+ theme_bw()+ @@ -599,21 +611,21 @@ ggplot(y61_80_means, aes(sample=mean, colour=factor(model))) + ``` -### **Time 
series - annual mean ** +### **Time series - annual mean** ```{r message= FALSE, warning=FALSE} #Aggregating to year for annual average -y61_80_means$Yf <- as.factor(y61_80_means$Y) +Y61_Y80_means$Yf <- as.factor(Y61_Y80_means$Y) -y61_80_means_y <- y61_80_means %>% +Y61_Y80_means_y <- Y61_Y80_means %>% group_by(Yf, model) %>% dplyr::summarise(mean.annual=mean(mean, na.rm=T), sd.annual=sd(mean, na.rm = T)) ``` ```{r} -ggplot(y61_80_means_y) + +ggplot(Y61_Y80_means_y) + geom_line(aes(x = as.numeric(Yf), y=mean.annual, color=model)) + theme_bw() + xlab("Year (2061 - 2080)") + @@ -628,7 +640,7 @@ ggplot(y61_80_means_y) + ```{r} # Plotting with SDs in geom_ribbon to see if anything wildely different -ggplot(y61_80_means_y) + +ggplot(Y61_Y80_means_y) + geom_ribbon(aes(as.numeric(Yf), y=mean.annual, ymin = mean.annual - sd.annual, ymax= mean.annual + sd.annual, @@ -644,11 +656,11 @@ ggplot(y61_80_means_y) + ``` -### **boxplot - annual mean y61_80** +### **boxplot - annual mean Y61_Y80** ```{r} -y61_80_means_y %>% +Y61_Y80_means_y %>% mutate(model = fct_reorder(model, mean.annual, .fun='median')) %>% ggplot(aes(x=reorder(model, mean.annual), y=mean.annual, fill=model)) + geom_boxplot() + theme_bw() + @@ -664,7 +676,7 @@ y61_80_means_y %>% ```{r fig.height=8} -ggplot(y61_80_means_y, aes(sample=mean.annual, colour=factor(model))) + +ggplot(Y61_Y80_means_y, aes(sample=mean.annual, colour=factor(model))) + stat_qq() + stat_qq_line()+ theme_bw()+ @@ -673,11 +685,11 @@ ggplot(y61_80_means_y, aes(sample=mean.annual, colour=factor(model))) + ``` -### **Time series - annual max ** +### **Time series - annual max** ```{r message=FALSE} -y61_80_max_y <- y61_80_means %>% +Y61_Y80_max_y <- Y61_Y80_means %>% group_by(Yf, model) %>% dplyr::summarise(max=max(mean, na.rm=T)) ``` @@ -685,7 +697,7 @@ y61_80_max_y <- y61_80_means %>% ```{r} -ggplot(y61_80_max_y) + +ggplot(Y61_Y80_max_y) + geom_line(aes(x = as.numeric(Yf), y=max, color=model)) + theme_bw() + xlab("Year (2061 - 2080)") + @@ 
-703,7 +715,7 @@ ggplot(y61_80_max_y) + ```{r} -y61_80_max_y %>% +Y61_Y80_max_y %>% mutate(model = fct_reorder(model, max, .fun='median')) %>% ggplot(aes(x=reorder(model, max), y=max, fill=model)) + geom_boxplot() + theme_bw() + @@ -721,7 +733,7 @@ Daily means: ```{r} #-1 removes the intercept to compare coefficients of all Runs -av1 <- aov(mean ~ model - 1, y61_80_means) +av1 <- aov(mean ~ model - 1, Y61_Y80_means) av1$coefficients[order(av1$coefficients)] ``` @@ -729,7 +741,7 @@ av1$coefficients[order(av1$coefficients)] Annual means: ```{r} -av2 <- aov(mean.annual ~ model - 1, y61_80_means_y) +av2 <- aov(mean.annual ~ model - 1, Y61_Y80_means_y) av2$coefficients[order(av2$coefficients)] ``` @@ -738,7 +750,7 @@ Max of means ```{r} -av3 <- aov(max ~ model - 1, y61_80_max_y) +av3 <- aov(max ~ model - 1, Y61_Y80_max_y) av3$coefficients[order(av3$coefficients)] ``` @@ -751,13 +763,50 @@ Run 3 and 5 suggested above The result per time slice suggest different runs, aside from run 5 +### Add in infill data + +**Update 13.05.23** - Adding in the infill data, and taking the anova result across the whole time period + +```{r infill means} + +Y <- rep(c(2001:2020), each=360) + +dfs_Y00_Y20 <- lapply(names_Y00_Y20, function(i){ + df <- dfs_Y00_Y20[[i]] + names(df) <- c("day", "mean", "sd") + df$model <- i + df$dn <- 1:nrow(df) + df$Y <- Y + df$Yf <- as.factor(df$Y) + return(df) +}) + + +Y <- rep(c(2041:2060), each=360) + +dfs_Y41_Y60 <- lapply(names_Y41_Y60, function(i){ + df <- dfs_Y41_Y60[[i]] + names(df) <- c("day", "mean", "sd") + df$model <- i + df$dn <- 1:nrow(df) + df$Y <- Y + df$Yf <- as.factor(df$Y) + return(df) +}) + + +#Create a single df in long form as above +Y00_Y20_means <- dfs_Y00_Y20 %>% reduce(rbind) +Y41_Y60_means <- dfs_Y41_Y60 %>% reduce(rbind) +``` + Assessing what the combined times slices suggest via anova -Daily means: +#### Daily means: ```{r} #-1 removes the intercept to compare coefficients of all Runs -all.means <- rbind(historical_means, 
y21_40_means, y61_80_means) +all.means <- rbind(historical_means, Y00_Y20_means, Y21_Y40_means, Y41_Y60_means, Y61_Y80_means) x <- as.character(all.means$model) all.means$model <- substr(x, nchar(x)-4, nchar(x)) @@ -767,11 +816,25 @@ av1 <- aov(mean ~ model - 1, all.means) av1$coefficients[order(av1$coefficients)] ``` -Annual means: +#### Annual means: ```{r} +# As above, creating annual means +infill.L <- list(Y00_Y20_means, Y41_Y60_means) -all.means_y <- rbind(historical_means_y, y21_40_means_y, y61_80_means_y) +infill.L_y <- lapply(infill.L, function(x){ + means_y <- x %>% + group_by(Yf, model) %>% + dplyr::summarise(mean.annual=mean(mean, na.rm=T), sd.annual=sd(mean, na.rm = T))}) + +``` + +```{r} +all.means_y <- rbind(historical_means_y, + infill.L_y[[1]], + Y21_Y40_means_y, + infill.L_y[[2]], + Y61_Y80_means_y) x <- as.character(all.means_y$model) all.means_y$model <- substr(x, nchar(x)-4, nchar(x)) @@ -781,5 +844,8 @@ av2$coefficients[order(av2$coefficients)] ``` -Considering all together, suggests: Runs 05, Run03, Run08 and Run12 + +**Updated June 13th 2023 result** + +Considering all together, suggests: Runs 05, Run07, Run08 and Run06 diff --git a/R/misc/Identifying_Runs.md b/R/misc/Identifying_Runs.md index ac3f610d..0aad23b8 100644 --- a/R/misc/Identifying_Runs.md +++ b/R/misc/Identifying_Runs.md @@ -1,7 +1,7 @@ **Identifying Runs for bias correction** ================ Ruth C E Bowyer -2023-05-19 +2023-06-13 ``` r rm(list=ls()) @@ -28,6 +28,18 @@ for historical periods and future periods were calculated using In retrospect the conversion to df might not have been necessary/the most resource efficient, see comment here: +– this was tested and using `terra::global` to calculate the raster-wide +mean was less efficient + +**Update 13.05.23** - Adding in infill data, mean to be calculated over +the whole time period + +As of June 2023, the tasmax-as-dataframe and tasmax daily means and the +df data is located in `vmfileshare/Interim/tasmax_dfs/` + +There is 
an error in the naming convention - Y00_Y20 should be Y01 to +reflect the infill data time period (although this does cover a breif +period of 2000) - to be updated in future ``` r Runs <- c("01", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "15") @@ -39,26 +51,35 @@ fp <- paste0("/Users/rbowyer/Library/CloudStorage/OneDrive-TheAlanTuringInstitut ``` ``` r +# Creating objects for names and filepath for each of the timer periods, for easy loading names <- gsub("df.avs_|.csv|df.", "", files) -names_hist <- names[grepl("hist", names)] -names_y21_40 <- names[grepl("Y21_Y40", names)] -names_y61_80 <- names[grepl("Y61_Y80", names)] +i <- c("hist", "Y00_Y20","Y21_Y40", "Y41_Y60", "Y61_Y80") -fp_hist <- fp[grepl("_hist", fp)] -fp_y21_40 <- fp[grepl("Y21_Y40", fp)] -fp_y61_80 <- fp[grepl("Y61_Y80", fp)] +namesL <- lapply(i, function(i){ + n <- names[grepl(i, names)] + }) -dfs_hist <- lapply(fp_hist, read.csv) -names(dfs_hist) <- names_hist +names(namesL) <- paste0("names_",i) +list2env(namesL, .GlobalEnv) +``` -dfs_y21_40 <- lapply(fp_y21_40, read.csv) -names(dfs_y21_40) <- names_y21_40 + ## +``` r +dfL <- lapply(i, function(i){ + fp <- fp[grepl(i, fp)] + dfs <- lapply(fp, read.csv) + n <- namesL[[paste0("names_",i)]] + names(dfs) <- n + return(dfs) + }) -dfs_y61_80 <- lapply(fp_y61_80, read.csv) -names(dfs_y61_80) <- names_y61_80 +names(dfL) <- paste0("dfs_", i) +list2env(dfL, .GlobalEnv) ``` + ## + ## **2. Comparing Runs** ### **2a. 
Historical figures** @@ -67,7 +88,7 @@ names(dfs_y61_80) <- names_y61_80 Y <- rep(c(1981:2000), each=360) dfs_hist <- lapply(names_hist, function(i){ - df <- dfs_hist[[i]] + df <- dfs_hist[[i]] names(df) <- c("day", "mean", "sd") df$model <- i df$dn <- 1:nrow(df) @@ -79,7 +100,7 @@ dfs_hist <- lapply(names_hist, function(i){ historical_means <- dfs_hist %>% reduce(rbind) ``` -### **Time series - daily ** +### **Time series - daily** ``` r ggplot(historical_means) + @@ -132,7 +153,7 @@ ggplot(historical_means, aes(sample=mean, colour=factor(model))) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-7-1.png) -### **Time series - annual mean ** +### **Time series - annual mean** ``` r #Aggregating to year for annual average @@ -205,7 +226,7 @@ ggplot(historical_means_y, aes(sample=mean.annual, colour=factor(model))) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-12-1.png) -### **Time series - annual max ** +### **Time series - annual max** ``` r historical_max_y <- historical_means %>% @@ -300,8 +321,8 @@ Max vals are different but based on means then selection would be Run 02 Y <- rep(c(2021:2040), each=360) -dfs_y21_40 <- lapply(names_y21_40, function(i){ - df <- dfs_y21_40[[i]] +dfs_Y21_Y40 <- lapply(names_Y21_Y40, function(i){ + df <- dfs_Y21_Y40[[i]] names(df) <- c("day", "mean", "sd") df$model <- i df$dn <- 1:nrow(df) @@ -309,18 +330,18 @@ dfs_y21_40 <- lapply(names_y21_40, function(i){ return(df) }) -#Create a single df in long form of Runs for the y21_40 period -y21_40_means <- dfs_y21_40 %>% reduce(rbind) +#Create a single df in long form of Runs for the Y21_Y40 period +Y21_Y40_means <- dfs_Y21_Y40 %>% reduce(rbind) ``` -### **Time series - daily ** +### **Time series - daily** ``` r -ggplot(y21_40_means) + +ggplot(Y21_Y40_means) + geom_line(aes(x=dn, y=mean, group=model, colour=model)) + # Removing sd ribbon for ease of viewing #geom_ribbon(aes(x =dn, ymin = mean - sd, ymax= mean + sd), alpha=0.4) + - theme_bw() + xlab("Day (y21_40 1980 - 2000)") 
+ + theme_bw() + xlab("Daily (1980 - 2000)") + ylab("Daily mean max temp (tasmax) oC") + #scale_fill_brewer(palette = "Paired", name = "") + scale_colour_brewer(palette = "Paired", name = "") + @@ -338,17 +359,17 @@ ggplot(y21_40_means) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-20-1.png) -### **boxplot - mean y21_40** +### **boxplot - mean Y21_Y40** ``` r #Create a pallete specific to the runs so when reordered maintain the same colours -y21_40_means$model <- as.factor(y21_40_means$model) +Y21_Y40_means$model <- as.factor(Y21_Y40_means$model) c <- brewer.pal(12, "Paired") -my_colours <- setNames(c, levels(y21_40_means$model)) +my_colours <- setNames(c, levels(Y21_Y40_means$model)) ``` ``` r -y21_40_means %>% +Y21_Y40_means %>% mutate(model = fct_reorder(model, mean, .fun='median')) %>% ggplot(aes(x=reorder(model, mean), y=mean, fill=model)) + geom_boxplot() + theme_bw() + @@ -363,7 +384,7 @@ y21_40_means %>% ### **qqplot - daily means** ``` r -ggplot(y21_40_means, aes(sample=mean, colour=factor(model))) + +ggplot(Y21_Y40_means, aes(sample=mean, colour=factor(model))) + stat_qq() + stat_qq_line()+ theme_bw()+ @@ -373,20 +394,20 @@ ggplot(y21_40_means, aes(sample=mean, colour=factor(model))) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-23-1.png) -### **Time series - annual mean ** +### **Time series - annual mean** ``` r #Aggregating to year for annual average -y21_40_means$Yf <- as.factor(y21_40_means$Y) +Y21_Y40_means$Yf <- as.factor(Y21_Y40_means$Y) -y21_40_means_y <- y21_40_means %>% +Y21_Y40_means_y <- Y21_Y40_means %>% group_by(Yf, model) %>% dplyr::summarise(mean.annual=mean(mean, na.rm=T), sd.annual=sd(mean, na.rm = T)) ``` ``` r -ggplot(y21_40_means_y) + +ggplot(Y21_Y40_means_y) + geom_line(aes(x = as.numeric(Yf), y=mean.annual, color=model)) + theme_bw() + xlab("Year (2021 - 2040)") + @@ -401,7 +422,7 @@ ggplot(y21_40_means_y) + ``` r # Plotting with SDs in geom_ribbon to see if anything wildely different -ggplot(y21_40_means_y) + 
+ggplot(Y21_Y40_means_y) + geom_ribbon(aes(as.numeric(Yf), y=mean.annual, ymin = mean.annual - sd.annual, ymax= mean.annual + sd.annual, @@ -418,10 +439,10 @@ ggplot(y21_40_means_y) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-26-1.png) -### **boxplot - annual mean y21_40** +### **boxplot - annual mean 2021 - 2040** ``` r -y21_40_means_y %>% +Y21_Y40_means_y %>% mutate(model = fct_reorder(model, mean.annual, .fun='median')) %>% ggplot(aes(x=reorder(model, mean.annual), y=mean.annual, fill=model)) + geom_boxplot() + theme_bw() + @@ -436,7 +457,7 @@ y21_40_means_y %>% ### **qqplot - annual means** ``` r -ggplot(y21_40_means_y, aes(sample=mean.annual, colour=factor(model))) + +ggplot(Y21_Y40_means_y, aes(sample=mean.annual, colour=factor(model))) + stat_qq() + stat_qq_line()+ theme_bw()+ @@ -446,16 +467,16 @@ ggplot(y21_40_means_y, aes(sample=mean.annual, colour=factor(model))) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-28-1.png) -### **Time series - annual max ** +### **Time series - annual max** ``` r -y21_40_max_y <- y21_40_means %>% +Y21_Y40_max_y <- Y21_Y40_means %>% group_by(Yf, model) %>% dplyr::summarise(max=max(mean, na.rm=T)) ``` ``` r -ggplot(y21_40_max_y) + +ggplot(Y21_Y40_max_y) + geom_line(aes(x = as.numeric(Yf), y=max, color=model)) + theme_bw() + xlab("Year (2021 - 2040)") + @@ -471,7 +492,7 @@ ggplot(y21_40_max_y) + ### **boxplot - annual max** ``` r -y21_40_max_y %>% +Y21_Y40_max_y %>% mutate(model = fct_reorder(model, max, .fun='median')) %>% ggplot(aes(x=reorder(model, max), y=max, fill=model)) + geom_boxplot() + theme_bw() + @@ -489,7 +510,7 @@ Daily means: ``` r #-1 removes the intercept to compare coefficients of all Runs -av1 <- aov(mean ~ model - 1, y21_40_means) +av1 <- aov(mean ~ model - 1, Y21_Y40_means) av1$coefficients[order(av1$coefficients)] ``` @@ -503,7 +524,7 @@ av1$coefficients[order(av1$coefficients)] Annual means: ``` r -av2 <- aov(mean.annual ~ model - 1, y21_40_means_y) +av2 <- aov(mean.annual ~ model - 1, 
Y21_Y40_means_y) av2$coefficients[order(av2$coefficients)] ``` @@ -517,7 +538,7 @@ av2$coefficients[order(av2$coefficients)] Max of means ``` r -av3 <- aov(max ~ model - 1, y21_40_max_y) +av3 <- aov(max ~ model - 1, Y21_Y40_max_y) av3$coefficients[order(av3$coefficients)] ``` @@ -540,8 +561,8 @@ Run 06 (so definetly Run 3 but others to be discussed) Y <- rep(c(2061:2080), each=360) -dfs_y61_80 <- lapply(names_y61_80, function(i){ - df <- dfs_y61_80[[i]] +dfs_Y61_Y80 <- lapply(names_Y61_Y80, function(i){ + df <- dfs_Y61_Y80[[i]] names(df) <- c("day", "mean", "sd") df$model <- i df$dn <- 1:nrow(df) @@ -549,18 +570,18 @@ dfs_y61_80 <- lapply(names_y61_80, function(i){ return(df) }) -#Create a single df in long form of Runs for the y61_80 period -y61_80_means <- dfs_y61_80 %>% reduce(rbind) +#Create a single df in long form of Runs for the Y61_Y80 period +Y61_Y80_means <- dfs_Y61_Y80 %>% reduce(rbind) ``` -### **Time series - daily ** +### **Time series - daily** ``` r -ggplot(y61_80_means) + +ggplot(Y61_Y80_means) + geom_line(aes(x=dn, y=mean, group=model, colour=model)) + # Removing sd ribbon for ease of viewing #geom_ribbon(aes(x =dn, ymin = mean - sd, ymax= mean + sd), alpha=0.4) + - theme_bw() + xlab("Day (y61_80 1980 - 2000)") + + theme_bw() + xlab("Day (2060 - 2080)") + ylab("Daily mean max temp (tasmax) oC") + #scale_fill_brewer(palette = "Paired", name = "") + scale_colour_brewer(palette = "Paired", name = "") + @@ -572,17 +593,17 @@ ggplot(y61_80_means) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-36-1.png) -### **boxplot - mean y61_80** +### **boxplot - mean Y61_Y80** ``` r #Create a pallete specific to the runs so when reordered maintain the same colours -y61_80_means$model <- as.factor(y61_80_means$model) +Y61_Y80_means$model <- as.factor(Y61_Y80_means$model) c <- brewer.pal(12, "Paired") -my_colours <- setNames(c, levels(y61_80_means$model)) +my_colours <- setNames(c, levels(Y61_Y80_means$model)) ``` ``` r -y61_80_means %>% +Y61_Y80_means %>% 
mutate(model = fct_reorder(model, mean, .fun='median')) %>% ggplot(aes(x=reorder(model, mean), y=mean, fill=model)) + geom_boxplot() + theme_bw() + @@ -597,7 +618,7 @@ y61_80_means %>% ### **qqplot - daily means** ``` r -ggplot(y61_80_means, aes(sample=mean, colour=factor(model))) + +ggplot(Y61_Y80_means, aes(sample=mean, colour=factor(model))) + stat_qq() + stat_qq_line()+ theme_bw()+ @@ -607,20 +628,20 @@ ggplot(y61_80_means, aes(sample=mean, colour=factor(model))) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-39-1.png) -### **Time series - annual mean ** +### **Time series - annual mean** ``` r #Aggregating to year for annual average -y61_80_means$Yf <- as.factor(y61_80_means$Y) +Y61_Y80_means$Yf <- as.factor(Y61_Y80_means$Y) -y61_80_means_y <- y61_80_means %>% +Y61_Y80_means_y <- Y61_Y80_means %>% group_by(Yf, model) %>% dplyr::summarise(mean.annual=mean(mean, na.rm=T), sd.annual=sd(mean, na.rm = T)) ``` ``` r -ggplot(y61_80_means_y) + +ggplot(Y61_Y80_means_y) + geom_line(aes(x = as.numeric(Yf), y=mean.annual, color=model)) + theme_bw() + xlab("Year (2061 - 2080)") + @@ -635,7 +656,7 @@ ggplot(y61_80_means_y) + ``` r # Plotting with SDs in geom_ribbon to see if anything wildely different -ggplot(y61_80_means_y) + +ggplot(Y61_Y80_means_y) + geom_ribbon(aes(as.numeric(Yf), y=mean.annual, ymin = mean.annual - sd.annual, ymax= mean.annual + sd.annual, @@ -652,10 +673,10 @@ ggplot(y61_80_means_y) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-42-1.png) -### **boxplot - annual mean y61_80** +### **boxplot - annual mean Y61_Y80** ``` r -y61_80_means_y %>% +Y61_Y80_means_y %>% mutate(model = fct_reorder(model, mean.annual, .fun='median')) %>% ggplot(aes(x=reorder(model, mean.annual), y=mean.annual, fill=model)) + geom_boxplot() + theme_bw() + @@ -670,7 +691,7 @@ y61_80_means_y %>% ### **qqplot - annual means** ``` r -ggplot(y61_80_means_y, aes(sample=mean.annual, colour=factor(model))) + +ggplot(Y61_Y80_means_y, aes(sample=mean.annual, 
colour=factor(model))) + stat_qq() + stat_qq_line()+ theme_bw()+ @@ -680,16 +701,16 @@ ggplot(y61_80_means_y, aes(sample=mean.annual, colour=factor(model))) + ![](Identifying_Runs_files/figure-gfm/unnamed-chunk-44-1.png) -### **Time series - annual max ** +### **Time series - annual max** ``` r -y61_80_max_y <- y61_80_means %>% +Y61_Y80_max_y <- Y61_Y80_means %>% group_by(Yf, model) %>% dplyr::summarise(max=max(mean, na.rm=T)) ``` ``` r -ggplot(y61_80_max_y) + +ggplot(Y61_Y80_max_y) + geom_line(aes(x = as.numeric(Yf), y=max, color=model)) + theme_bw() + xlab("Year (2061 - 2080)") + @@ -705,7 +726,7 @@ ggplot(y61_80_max_y) + ### **boxplot - annual max** ``` r -y61_80_max_y %>% +Y61_Y80_max_y %>% mutate(model = fct_reorder(model, max, .fun='median')) %>% ggplot(aes(x=reorder(model, max), y=max, fill=model)) + geom_boxplot() + theme_bw() + @@ -723,7 +744,7 @@ Daily means: ``` r #-1 removes the intercept to compare coefficients of all Runs -av1 <- aov(mean ~ model - 1, y61_80_means) +av1 <- aov(mean ~ model - 1, Y61_Y80_means) av1$coefficients[order(av1$coefficients)] ``` @@ -737,7 +758,7 @@ av1$coefficients[order(av1$coefficients)] Annual means: ``` r -av2 <- aov(mean.annual ~ model - 1, y61_80_means_y) +av2 <- aov(mean.annual ~ model - 1, Y61_Y80_means_y) av2$coefficients[order(av2$coefficients)] ``` @@ -751,7 +772,7 @@ av2$coefficients[order(av2$coefficients)] Max of means ``` r -av3 <- aov(max ~ model - 1, y61_80_max_y) +av3 <- aov(max ~ model - 1, Y61_Y80_max_y) av3$coefficients[order(av3$coefficients)] ``` @@ -770,13 +791,50 @@ Run 3 and 5 suggested above The result per time slice suggest different runs, aside from run 5 +### Add in infill data + +**Update 13.05.23** - Adding in the infill data, and taking the anova +result across the whole time period + +``` r +Y <- rep(c(2001:2020), each=360) + +dfs_Y00_Y20 <- lapply(names_Y00_Y20, function(i){ + df <- dfs_Y00_Y20[[i]] + names(df) <- c("day", "mean", "sd") + df$model <- i + df$dn <- 1:nrow(df) + df$Y <- Y + 
df$Yf <- as.factor(df$Y) + return(df) +}) + + +Y <- rep(c(2041:2060), each=360) + +dfs_Y41_Y60 <- lapply(names_Y41_Y60, function(i){ + df <- dfs_Y41_Y60[[i]] + names(df) <- c("day", "mean", "sd") + df$model <- i + df$dn <- 1:nrow(df) + df$Y <- Y + df$Yf <- as.factor(df$Y) + return(df) +}) + + +#Create a single df in long form as above +Y00_Y20_means <- dfs_Y00_Y20 %>% reduce(rbind) +Y41_Y60_means <- dfs_Y41_Y60 %>% reduce(rbind) +``` + Assessing what the combined times slices suggest via anova -Daily means: +#### Daily means: ``` r #-1 removes the intercept to compare coefficients of all Runs -all.means <- rbind(historical_means, y21_40_means, y61_80_means) +all.means <- rbind(historical_means, Y00_Y20_means, Y21_Y40_means, Y41_Y60_means, Y61_Y80_means) x <- as.character(all.means$model) all.means$model <- substr(x, nchar(x)-4, nchar(x)) @@ -786,15 +844,34 @@ av1 <- aov(mean ~ model - 1, all.means) av1$coefficients[order(av1$coefficients)] ``` - ## modelRun10 modelRun05 modelRun02 modelRun09 modelRun07 modelRun03 modelRun08 - ## 11.17510 12.48221 12.92077 12.92383 12.93620 13.00464 13.01388 - ## modelRun01 modelRun04 modelRun06 modelRun12 modelRun11 - ## 13.01623 13.02117 13.45437 13.46607 13.56872 + ## modelRun10 modelRun05 modelRun09 modelRun04 modelRun03 modelRun07 modelRun08 + ## 11.12464 12.48165 12.79216 12.89910 12.91685 12.91894 12.95115 + ## modelRun02 modelRun01 modelRun12 modelRun06 modelRun11 + ## 12.95347 12.97947 13.38267 13.40644 13.61157 -Annual means: +#### Annual means: ``` r -all.means_y <- rbind(historical_means_y, y21_40_means_y, y61_80_means_y) +# As above, creating annual means +infill.L <- list(Y00_Y20_means, Y41_Y60_means) + +infill.L_y <- lapply(infill.L, function(x){ + means_y <- x %>% + group_by(Yf, model) %>% + dplyr::summarise(mean.annual=mean(mean, na.rm=T), sd.annual=sd(mean, na.rm = T))}) +``` + + ## `summarise()` has grouped output by 'Yf'. You can override using the `.groups` + ## argument. 
+ ## `summarise()` has grouped output by 'Yf'. You can override using the `.groups` + ## argument. + +``` r +all.means_y <- rbind(historical_means_y, + infill.L_y[[1]], + Y21_Y40_means_y, + infill.L_y[[2]], + Y61_Y80_means_y) x <- as.character(all.means_y$model) all.means_y$model <- substr(x, nchar(x)-4, nchar(x)) @@ -803,9 +880,11 @@ av2 <- aov(mean.annual ~ model - 1, all.means_y) av2$coefficients[order(av2$coefficients)] ``` - ## modelRun10 modelRun05 modelRun02 modelRun09 modelRun07 modelRun03 modelRun08 - ## 11.17510 12.48221 12.92077 12.92383 12.93620 13.00464 13.01388 - ## modelRun01 modelRun04 modelRun06 modelRun12 modelRun11 - ## 13.01623 13.02117 13.45437 13.46607 13.56872 + ## modelRun10 modelRun05 modelRun09 modelRun04 modelRun03 modelRun07 modelRun08 + ## 11.12464 12.48165 12.79216 12.89910 12.91685 12.91894 12.95115 + ## modelRun02 modelRun01 modelRun12 modelRun06 modelRun11 + ## 12.95347 12.97947 13.38267 13.40644 13.61157 + +**Updated June 13th 2023 result** -Considering all together, suggests: Runs 05, Run03, Run08 and Run12 +Considering all together, suggests: Runs 05, Run07, Run08 and Run06 diff --git a/R/misc/Identifying_Runs_files/figure-gfm/unnamed-chunk-20-1.png b/R/misc/Identifying_Runs_files/figure-gfm/unnamed-chunk-20-1.png index 175fdf82e7d64c2b1f9ed16b7efe3f89fc893d55..c9363e14b96a15c4bf75723abf935ccbb437fa26 100644 GIT binary patch delta 8575 zcmY*ebyO8^*QG_wO`qJI-U4Cz@ z_nUuat!K@7&g?U1@3Uv7<>uMs#WVk4#9E~5V5Ha54Y$EauSfvB_lEmO!2#rX zOn*0+;)t+Ba6ChkjmgF^;~zRCJkQ<~qh%&FQXZbV#=iK%< z^O&&EumJ?Vj*-eUpxtXJ6XplKPfWSA^lTX0nOoLwNW4A2Xf*(pT%RhZ;q@+NC zYP3$T-NTFFVz%DXC+$thscat?v zw@{!^Jr5yr7mn7f10I?$X*?uTICvaYaTXGYvc`{KxiG%-9S3VZi#CB~iSM`Hb=wOv z1hmGF!#!~G29eq>){iiwP64v}<;S%xY})AC#PT;Zzc@}ai#rwYPrvou@O5rzTLDL_ zy83LuDF++&nLFClS2RWOUmFE+HpcABtMxNPxH0)(m zQq~BO&hOrrd;yd(-#*iLhN@Zex|NPU`(|s?bBossOx+c~;pXPf_2g8mVIT)O-~{W0K4u>4RA z=wq>Tn^Rl|N%U5}t*SmE-jeWO%K768?A;$naGytI1z;O=SH2ZPXZqk&IKF^^hCT(}{rUzCfzCMY%^>;d?{;St^^R;GXioLL?X6~(t;LM)};LGiz 
z*p-1C_>sjGOI~_BKsymRsylPAj@+u5%hC%R6-_nFtPuiCREeLoD)dDu<=fi{MQY== ze>*Cz?tP^p4dsp}Vq({3os^%UD$eCZm~=MfDMBANz^{@u)B>SqbD7KN-)BAz;C_sCb;V?dYi+EP|P0aTb4D#;LeZD6;TrT;kw?M!!T9Ykw?B#sV8D9MD7MWXO)gaNalXe~KDuegVZ&A3*sn)a`+C17cO5s8 z`fR{oHY1LtxFJ8k$2Y9`f>W#InMRU$2)|BmI9o~Ff@LUCZRk;(@9hYSug}7i6l}DN zrX=t}2z)tE{~n%28{%1QAfSEUUl$!0(DA-2UZy7ftxgKjm=h^5d26mJG^o9?)*~Z7 zbVdg$tHF99pCItX&g%R?DN7?G;l1@h*sk+>7};TFP#!4A&N$7kZYu)$jaHe6mBH|I z_}LHT2R9Rtp>~WiDuI6(myglVcH64Io1nvs@9sKo&^))R0XkbFTyf2Q4jiLHl)JrD`zy9vzpmvL5jyy`i@DN4hcY;aDyeEp^9{^Bg&>>vu%DRAm+r2<$1o%%z-u^B2)5c8GIKF-Z$Aso zS1d+~WLCA9Y_`U?Z#)FU%#EU`dqH7T&MwiKV)EL0dfMbw&hwzotkCU?n_-)ILNS>p zEfc8AR??qSS-ei7FwL~(EZjS&!hD3?VdG5^wRiD=+}1y@z4=(6D`KuOP7nr z%SKhpYg|1*mJPX0?wEAJq06wz62Us2 zWrit1=y+4;HK?Hd*Pv$ipoPTvZu4S#``5#!oayflf9kqH`JrttIZV$`QBhO3rC$qj zr_}&OPJFi|W6K|nj?vods>sWF%%{t$In10od+zKPNoLpyQkAMRFrC#6C2t(@gpK&! zs2_pI1z6Q^(yn4}R?o-J)=%c==ih(gY_h9n;}!$gPoESul{x24y?4u^%YgPD`V2z1 zp=tVLq(zpAm>EWN!-$obHNvansIfJsfq##te$`k-4~>p8ntg7t(*@&%S4&jT4tW4o za-=ydxp&^ki(1-mN{y-h#zd@tG*U;;oa+f{-G6YF%&pRPl=?n^AF{NB#3l||xdKV8 zw7CK|20LPoON}8zW0_KEg4ABC3#&K%WCgLw_f$kbXlO<&+HT&hC0v1og1&*asl$B5 zh~^ll%(kQ4eboK`bY7&VotOakbvD4;OT%@w_!L4{O5f3-m!lprD5rYVo}Ey(1{p!M zbk?kqs@aegnF6k+-Zf}dgrkOAdBF=U!yM1k6lat^o6wH7&b3?bXcvwL#5}pay!>9}bG{RO#_CSv^5XE`sLAhMFKV&V zJ8DZTJ-d=bZO0lkVcKNag%7->Lg6oR|A>;n0_2@#t8k=8mTKy?V)~lf7}9PreWuE^z9WB*Fh8&$v=Y+*2lwvZ}`Utr&fL)S>irZ#?L>)-3RWXnJxAt1Lgzvo5+#5{_WEuwA71JCz zhBLQI9MU`AzxTd)j>q`CIr8azq^n3U^_zl^wwqXke3mSi!-QCc0O==Rb(2lazK2uk>6rqscHxvM#) zXd`LMtE9fpaael$Oc)Jyq*%zRm!-~otF6P6p$Rul-b=kn>Nr`q9UaGZTSS!iQ>;ok z0j2BbUE)e!0RN;iL1?%=+Y4ON^8YPjc)l0gezg95>08p=Zie6max>&@+*M*74WYbSd zoV<5k$oVV|FJf-Cr{E8R@X4a(G{1)DGffs$RNa*ECZFoAxZL`VZbZ8hrIeHVh~^?C z2RgMe|Ke2Tc~>g)^zFS32ILaHepqI`#hbzTenxasTFgLyc;PXSa0mnTy4)bPt=$t& zIB(klG!;oxObYQTt08Vu9c;*binf5h8Zn)~yF;l{1+x-|RXS9u**@7JiA!)XxVlT^ zK3?Lnwl`=-k4*JKUOr<{$AC8^6@St`b9pB6o3MlGY#%GuUWuVBW+5w*eHSV5FWzs1 z`#ec3qBo2fNdLYE$&WK3a{?^1FZdCGGPMswYDo86#S@Gra+OJE?e0|LOjo1o>GvK#K%WLL6oAELJ9U~!q5fx%-^OB 
ztC1o49_wJ^w4oef^?b?Hkck+*7y>fE92g9%&!yPBqKVn z+k#j=gFO;s8=$pn3gJIUvR`1#*pS_gWr#p+D_RTc?dEfnQhm-Q<)NFSX;27g7o4MU zKbjg(kwQbnzUhja0xo*6Vd2D+L6{C4H4$P`mYw=HE|dVY0Le1K2`Uk{q8yp8HYb?; z5fKs4*)jvM4zt$PCI8qcOg;sIrLC;yeJ zv^$i(p_q-Edv2?!ux%%o!r=~q8^w^!1wtp2{}%TeGNWLM7?OOcYczwpYxfm(siv_o zFq5H8rz*3qQuwc;%ykia5!LYz0;SL@g*vVdP1kUc0C;Nm2Gn_ujpY$)W&|91SaiX{ z1aWi=PRON2UZisEbZ}6CRw4O$nHI1P5wd+Oth#rSQXHozU>#H&C^+F}GtE(S-}hvY z8Csx{w$#`egf2hW7~aO`G6EO$`mg zRAZv>lyaKMWS(; zD71#pf2bboKTpyNSWmtD@O9ClS*6aJNYox*yv?JSgp{;k#wJWS6%ce(-7TG?47OjO zn()G9~~M3zc*UKay#({SuaCC?>Mt3e(Kd&f~}{IJf-jVmDXFzcLQ&E zN6ezO%Coa+kA%9uq5;ItE-vT3u^6wz2fA#6ti(k`s_$&*J=YR?CEV`+3yOE?v$$KnoY#hL|fnx9&Yf_33Jp^9VhnwwbT~)OGbT?oUSmGPR8A(+2jR z;nwE6yEK~&q+H1eihAhgW7{P?T2)x7k2JMVZ(s88aM|8me5LNX_iIwDAJL-H$zR%Z zJ+N7omgs_D^CWILkE-#=)LyZ6Z!dk|`9wX{DX6Iw3kE2D`rydsdb~RZL8!PX$D-y4 zhJckge2xu+mz&cil=}eRcp-Gm07m%Sqyt2LeIwaz{WG4lEp+BDjw465n)uGhpF~=? zTnTRGSMZ)%s*Z}+BT*xqVIet6ST?D$xid3yXAboeyS`Gr#P}#rK3a#y?z#ff5=}%jsgot;29(``<^w`Gs zs;W8P^m#&3A|t{e|fUx%dC!x9BKhq-83`o6)m|9m%jlM-{W2~ zu>0W(=BWEsG;k*)b~Yhn(-n;ARs1{aZk-wYp~4t0n%u>GNk4uRQ8+Y8aZ}lm9#sJM zN@90M-8Or_sw`_WqpvR8AAwdcxRQbHjhfxQ+dOV%0p-*_M^a+$wwDLX=jncybhnp> zAEG558Z#ZMv$G}Ztf%{wRbh>_wOTnwIvaM0U-;vEJX@ZV!Ph{T+A24k*-})}N5+>4a zax}zV;Tk6Dnfv}ATPi3$mO`lp1g>xYle1-j?n%>oeNjfdR*f)oyX~@pa?$?#08Fi zw;nkQ{w_GgY5un-d`#l;TJ6#h7fuvfK7x*SFA0HvafT1W-{07M8I*b9bALXU`aEDD z7Ii$HbX*aHxgA9%l8%Us_QvAgrG$aK40%dnwv`3R>#+6GrmNY0@iix&nc$}(V)T5X zKS6ln6Fb~(SGnSaBrTNHN5W*r9y#LuBC&jBT_HAR0NmTV+5we=3+7Lvf)1( zZqA%u+b^td((jf!pzEc8mO^nTYDWT)(a)~?X@I>6HukaR!#0W!!g}HY)=ax~i4~=# zGcV5;Y%8E!S9HTAHDCTn{})jVPj<@Z3(^eK0IyI4Nao}xWm)xQ;PKvJH_P`z57dcD z#!|h_TK$r(8Xscq*nVFR0q#zQmtDjqlO9q9Y>D+4gx(!iv_E*AZ%I7P6ZJF8hLPxD zFEbua2&le%m*KwR34@hXSn{|(!t?*Lj~DC>M?$0BJnwlAk=phrYIZg@S5e3YL1j91 z=<^>52|K5c@WB9shrTB>a*bW&R3lXkIq>UYCQ{!VN#RQXntIk}iZwyk+ida(eV?u1 z-s#Nq@!=lie$5aqO87xIu`pi()8=G-P@h_<>o4SspX^wsn1TCoynz1jKs21X7EEw4 z;5&sx%mzxf%4ExY#RV4?DVW+ZcZ~xNE-HzV9+D_{G~%W3ba@XqbM3G*HHmwzXu%og 
zKD<7DK;yE-3szjX^h-_rT6Woam=oe7c(RVzr)WmGm132-!?dx!{+YMM@7@bF?i1F7 zM=f4{5+siVG;bUCcKP?Sm#L*`u(E~~4GTo5-2PIN=HcjZ(nJ)cpc<=FuagppxArPQuT6PG{k*h&~dCMO`cP4(S9I*1X@| z7$CeyB0nSIOXKLzY4$l-YIC+NFc=3uwHDV?Yh5lxY{FYh8bOv|`t}PMx6t%9Yw!dJ*OsL;b-j)% zNtHH~R%$!2?}rR+z6ZuDSSR-NTd;cQJc;qco9nB%Pe7=jQIgmeX1v zYm9MBJ=~XrZGrNpw_^8a)6dRxnki-R`z<9!>~-`b-S0QJfgK0ab=);ktC8fK{-n5? zA0p)>YwD&@IVyPXgo^jPlc%m-Ol*gv_PRny!V>jtVecO_hR;D>EJV8O6i}<2fmHSi zy?@hd9#W_4lZ|JIs;*S=bM^Icqg+;QtxNWcD0yC`vpXXPtPq>*T~agv$nB z)8!2!WOp)QpC&K-?l+wD0z&9g{b-SPh^p%RVsS{ul`n3)S385mK=f4WiHd;e&N@%U z78$TnorAo!^#(idBRXeP{L?293(}{m?|Mbl!>cZdAWO771Q&4d&GMmNVLw}!DSAG^ z%EmVC+oU&rOo3klC(4YPb=0nKk;v2}^}gI8V)iEJ4VyJ`#8A-0hNp) zvG4j`E*JV;)MV|rxccT%P_OAs`b7f$E&KJ0#G~F|&io0S71%a)A!}@{-*e_~Zf*xl zLMxUY>;6y}w50B5(xwjd3d!6~w4C^PuF6sE*Isf}rK4XaP0l~L*mZ7%w$k%999qwH z%E&mXl0VbbU?d~zRJ{hZ(K#O!nCy?u-IpK8~Nq6 z!_xLgaTVNT;>^+j+*pCHePqJ2SCsUUxSl1p1l7sH>ymlVL7RxZ7MC-vlr9b`%42@x zVwYc_nPRa$MV^v$2%fdk93`sle)p8A;zRM#0~s

ROP+a@Cx{1moUQ^>8&}1b%c34 zzv0+&iav}YpcnNw2x#A6j6HGh#1jA_4 z6j7GI=<&IrknL>gtM`f#0SJ1|=N?8Jw6s!BoFGFyG?^ahmEtFZ>k+mz0#RGH2vz19 zP&L5(F@OXwO62(>3!Nv*D<)UC^IG6xzt(tJ2!B}oC@4Kh3;5-^+fOD5lMcbrIa2&x z?ILyN(Kc673?))>E5fYBNC?|MNhqF(=X)>b{lB(!D$3urvyjTUSRH;J|JaRxB=Id= z5-m+W1Cvjb{qJ%u11$>wevDGclR18}WltxI*H4VNy@|N^-_?97csA>=hHS+D4Tl$| bhTsZtsQ<=Dgp311fFFu-s7d^Oo-J2Bo`^zDRc?jYv00=OLs)N>Wm~kuFJTx#Xo&8YJY>`MZ8K-#7C+ z|D2h3-m`P|+1+RN*{Lh^&O7wrk8q9fw;$nObF|!lgy$v#-s+khBcRfu(V>}BBcSqP zm@8#U;iXFH;7ZBh{rKuxp&M&>{HAiO55HorJ*|0GkFvbU&>mu+Q#}V1%tg;t5LFW; zvB2S%+@GF`U5&S0=lGxcbNP-a*H0LpuKG_b+T5L7tOkPDB2PWvZ7@X{bDOY`Q7wuajtM?S*#^j`d_m^tH~Kwo*MVXm@X%atSI*CTHRRHy+uLa9&079c zxxv%@yfjEJw3_5^J{J0H%s-G@Ab5_{0-8C!2|M&26}Aj;@93Ls<{+HRo@qNWgD8eK z{=(XoDItHijTpa)n%DH*z*MmMEjXm9G2o}sqbW}#@P*RFxgcEHNa|Na;3=c(i*b+N zVVzhKP=67>;fL6VkrN*N^qwVS*26U_lT_`p6f-QH9D=z%3F zKAk4Udgm_DE#r>h{rGorP-!rx)ggyOq4+AmA(1XTa}B2#;+-H(D`b0j66OsI-Fi5ceLj26NV!iL2xNPk?d_Q5o8Z8W|Iuu!ECG$PR%Cg#Lw-gp zvlP(8)8yhYcA(WGnVgX8(T9FwqgA+9RHUBloafTA2x*FZs*Ii)cVeN66MUt$VCF-w zt{*t2Xw6PWOEe?sg+nu9>DVe6%Usijce;KVRCKHDW~`~H62y_vz(D5q@F$qLW?A8r zgN??s>&Y3017A9Y4sxlylQ5uTV!i(Ju?#prVx%g1fw=u+nfhD1ao;Haj0=!GOjJ`y z8eP{!n59696%w_W>yph>T2^>vKqi~5FJT+oG-2?Cl)k^eRbj8dx|_X(lzuwRVxmh$ z)#x(+P~^lCz`G;;@vsn1GIOex&^*h9S$Ub+y|7Qdd^2|V zPH`J9IW;NgW@F9*rtx7c*z|CG`~$dm@8w7$kj+Jy$NOQcDBY4m)xX^(l37nxC_hd1 z2_c@;=IyA4E{ati!tZ$yq^yOBmMiOxZ_`iAc_6yn;Ru93$e9R^a||<}(x7w;2>)6f z#*9U7JVhT@4tPhV)@!Ux;XCBn(|V2ijos{S%g+AknyG@1Gg{K6tiN{iss-?$^p4T( z#v>*E6mE)8!H+^2$&uo~2lg`kdEs$PBc=~3LN}Y#w-{oW6P%-zOYKj1xl&oh z6PK!SIkEk5L`hfaff`G`!Uqe~X{?WV*!xHdaMSrXeyWU)*54K)m;ec&EGi>~)~c zoMpEAXM){7w>{QL5wU8$O7_HWTZ*HX+neu$(+_v4uxB#yl_}=Tt*x2kkW|dYw=8W|yGzkH012MP)=K2Pe3N2Wh4 ztJ422>h;^z<(G3{EMaZvY?C_PxUSo;(Is^!zJ7BNDo|JigfG>qh4k|n2vaAISf!k8qwX^`XPQlddV44 z?T-FsFje542g&7}JB@bE{nm6M-k#OE1lQVkyOw%U3 zn-^V&i52vjN-tl0>pJsgDLo2BSXyoL6~H+$FvJLiU7ap9jV&P$yRX&P^YZ#FIsX<_iU6+8Bb(;FD&5oY>@vQ9*=i_V6X7G8A*-Cuv!Wu9~usB z+u7aEu7%hz>puU4`<*O74sE0hRb4$jwR!J=!00QatP4G$`UB)-wNz`_V!p-TZG*KH 
zIybdMw(LYn$VI>BTV0y#ogTRC$o<50@2ErJ{L!(>(Wp_YQw$06U&}{amAY(km^+$* zy4N{o5eX^B7<>Iq37t}KcCA8=6T0BAd&i&SN4ms#q1?j5B$!|F)mrZjnk@Fzz`19{ zdn4@Ybn(N$Q?R$@*YIWw6h%UUUc!jo~5+eG3+XVfvP4*M82 zvJ!F>pgWvINjpC9dUM&9-T=a^xnHe2S@{Lh>H9{2i(ThCQGXFHGgjqWDIE z25)f&b&v1ii9_D>8>DcgTrs9R0mIa|qe^OHC_a`ui5&u2z{tXAcp4TIr6tgy#Bi_0 z%gXC?O_l4XBl(ik3HB*+6Ngaxp4PyUV_N@r*DHVZflxNaN^FxDeoba}_E{N{df!{& zY}r&GcT(N+0Q)#b=%y*2!CX#FaEh@aE-=XyS!&Gne&=x-RkTUa$Z7WgeP+7g zVky}ofyF(oiMsRkhl3r5h^e8O0Om0PzC>WfF!lIG;ds%cJR<(sA(dxrGY~g3=i=)I zl=po=mx$NO{b9~q2?%({{lkY}qgmd65Op=ZKu#q)noz^Nf#Sm75@8)>IO`9|l~a{M zO)RWB9PHRRCUj%ib!amp8zM+a4d$W0&L$VyJu64Y#$LU*op;o)z>2P4ucaOJsT%-@ z3z$nDmf@FmbYC}`QV}G^?re1BW3SYXp!hF8?gYzhz5R|P@Lrqn{2aaf=EjfU6yyA= z_-JNb>C|neWRaE0hV`-Y&prwm3khS-i za;tOYX|^wHdBsp`87I>EfDB2W%dnAV0Tv(CvkPO|r#e-}pz~a@7J!)}FC<>t>}-v* zXWo=tTJ@@Ue4DJ|po{nNZi$GMspatT{SgKjNx~0*Uqch9|L%+>Yvr; za*z0Zjwn!>(f>iIBqs!Feb)(pY<<+{7=D`(xgXn0> zCyYp?HsA7oONW3Wcg|6z69Et0$^5AhbDhCSH1XMI@#}JkDt`Ka6VC)qsrgtSc9tG) z${%dBE8*q_gO43~s`FBj&f$KX<>MP8L~#qfXHVG}L-z3K%1;(US8-z@ZC`E{;NULv zXoyc1`tRXcGiPH{$a|YJ?xM_edcM!n(iqKh1-v#N7S;SE|CI%)3k-B#D=NtzsH|#D zWNCr3ZkUzIfB7Y`48QzdLXjY_eV^C280Zj-6t4z+bqNoTZj&_Q2;hK+}%ST%6`MgEBv@T9~CYcYR|w z`pVU2&MH6PwD?}EjFQns>Lwrb{5azKkUHKS@V;q;DSgCUB@*|xkwXL5fn=EA;FMiC z<`8jRcJeVJzb(q23#S2}FUwPnJ z@7Bf2?uf(T^ILwGx9w_xFfVsM{LOz7&B*T&?Xd2&*hSRHvC%SFMBACajVx4e+FB8( z?UOh<(J*-9l&})T**_%C#s@!8m&J`>c6v*vm@TPYL~NxUMnrij z%|a2FVO}jK=GY?1mWx21Qxd|#{#@I-rDJ7O9W#aq9&h**W2E+ZM)fPEm?$OPWj3b~ zt*gjm#jZ3OWBE(m2fPgX=wx+VT*T=}5XOA6L@*^~xd`BRS#r_D*_2Gd4tM-UnclSC zFRk747CAw1^5e)Z`jE$!@+b%6mZJv5zT z0f)7?2)8fH7d*^8rMn}sa%Hs=%hdZq$wzetMJTBhgg8TY;&z78>jgaNvVv`l+>-@n zd%>w8iIG6lPbnI#n!I^j&**RKt> z$TONe8ZV_8IMoYd%Q&#i3la20*ilj8$>Lr-q`lOc?-NgHUwDVc45Ah*i8OSZFrEmQ z)ai+=9Uq$qWB-h~jhSr!cEzc|O;4w`Ws`^pH2(qkP%50|TXYLcA_%Bo+YfOub1y~8XjpfBwtQj`ZEktj*Q8Se6NeaWvYD&siwc(Ht_RT9vhTyxlGG@Q)W zqnpBFWOSdwR!yPdMIU1y<=`Y^6=B8j5c3>99 zm=8>T#G;yKJ7&UQL#_^buNXf^Ag>TV?Vn7e_)tONqa>Bs%#(RO(f&Sp$?U-Md3!?A z_XOm*bW89sWl$B 
zHV7>=n3fiXROky_Qnx1oEc7U3RnXB)CUBktdUg}JOHN%it{t7NcBzx&o$`BRyF%Q7 z)}j;AlMv&uxi8~qoBJNhF5y+Tw1Gh1ZCm)Y{<;GP^GX39jxJvjK8@3@`DypuAvU19XCdbBRtojOenDjj02_Nfxwg+}l64vQ~79e@=^}d(rZak0b$|Ac_BkY!cGbb$lRi1(T=T7lC_c+8rHF3|-e$q27Yv7vI>k13zP-1Y7_E_C!QL-<0octf zWcC?foB}GWYObab1E<*#s20U#krqmBo2{9CnKnH7`^-GECRUB3xoSwudeD*GsYQZ# z$y|-e(I6H%KeO7PD!*5=%epgVu}wPKd)3q~n^S6&*{8?UT!}e3v#aCfx7w?y95zFr zs0t1Qk`@;3LyvfW4-Un;?}2lGT!)_#{!VQnOC3Uwa;`f&JLugWkrA`$ORWj^N1gP` zB*yox>Y?x_wc`F)GHcSzg|I2TEQ37ux?9Mh>Rlo2K`fkHXh-~QXI&^AW`zht{&A|f6>zd9!zybJWNFsiXmZnLu&&MQZ@RdtBXPHuvgz0LA@ z`fCd=koFjbgoHFRQqn4cacHuVSw1G(tLoP7VD*I#)N9;a>1x9!CGtE=-rq23QaG&0 z=<>B5791a!t;|HQhS45__I;?0=$e3e!R~^#J_nrL@luoaY1e(tmlzV+Qnj2Fi{Bt* zN$hzKbFQ}NC2E~r>IwG7LwQEA>Th;+#_O%h9oooC8~e%6(~@e_JNOU{Ch9=P`%aI z>~yg*QG~Vs#eM-XyME%XAzzCRV}7Tc|8BF6;s1Phh&syxC);52GozqH^-BqpeW`zq z${{yI4Gb1vF8elvQg*e9ZV!~cDVO~>o&2R}5l}uDjrlH4nD-!;NgHK>(ZuS@mG5CP zgf9imMD`EP`xj-DsiV#wP)^<3+5IM`9>;V#HpB$lx$9}$EjWO9_e6WeFi~*0K z`kT=JsirBouTuL3YB1csX@FD#k-%9g>48NcCVTACup13-d-ro(1 z!V}*fU{0rRPgTc5Lq*Qwz&{)?Cz32a2{RJ5GSU;CR7mi5(8GqGzX${WL3)IaR{CsY zlVSSb$p13{5E&4vtpMET-~W3IyN$>%+$m%Dzn*8IqQX@(1}B^R&oQkq;>Tdd$aDpa z|Ah6gaUB9UIv^i{s8RpF=k$@F1{!Sz&e*^AK^F2GSIaJMfhTAlB$lhI3~=dwY`*&y z%QZVCPm|-7_p#@xyfjw}R-;-7DA;jgL}ys{qinTh>rYoLhmy~?30JB%&}xi4KcV%2 z26BGe#8EK#6Z<^l8RV^5`7+?#p&CykxwiGi1;$2)N%GWFL9Z|{Fm#ujotzf8emy;& z0+sit-DdIB;!7#bPRoK+IFg@lZ*MCC@2_$_CjimNdXzB1i zUCVtiKREqv=%#>p(AJr*Pfbk)*O~WcR~xm+?ZW>4`N-pNc7*QbR<{318#bOmsd}CS z7)Ik1>DT9DTtK9{L@Coqb1FW~*g z1?GLu1EAwVS)hwU$QYe!22(X7&Rmtjma*?nDjLykKh2Xy3O+F>7nirKfB=s9E7byb z;CzHhv818X#BB*2xIa&a1zf7|*82@ugzqBzN*CR|v3ErYH zD?BICNoRlezdmF)YIbx}C_HjpY1O3ZhJr=gX(lA@t0d&ai!!~ylFtEwDp~5PQHuW6 z3D>9VwTk<&q^nNQzI`=b#CYW}U*ichNM0>iv{MyP&AVy7u7dG?`ZtGpPLAizNE(sG zzF6p-k;}nWnFngpE=2fpR=cV50S#6Z)~{&YL3}!V9OL+ihZOLndV`Z}cDN82uO11~ z45gcRWs3ayK>)EU%_)geV16m?m$?|r*{<;YVp_R|-7BiqmP{R%UYMKsK!`SUZ(^RO zEE2U`o=|!Mmaf{cRMhkJ^^Iu^a~83~AhLUTS;r@z+^ovAeh(w-4?5G!*Xw}Us?S#Z 
zA?)uiw;$J^pGwyPpsJp|NF=S&8)Z1sM^Pxi++7|uu>*Edm(4~k&VI4QciaM@!z~hn_{kSZ`HU(V`3c_~+3b`cVK@?H zFvz4O)n9KST5*~Q+fCE2+F<@+3Pq@)n%=LNt2R1fyf~UyH-DY8OqBXbrFD?|r6# zUh56TBt|s&OR|vUWPDlZnFq_^3CI@+5b3){ktRsg$L62cpqO|#@$TMYc8YrAPP=Nc7GjN?HS_d%&z8uC%|AQd z!Qv!UB4t|Od5MC{g=^g%f`gd^ig1FXN2iAn+bS4NiOE-+mk8GxZpRC)O(GH?gHRKJ zww*MiSy%#juddTP?#tg%)(sLw=*!B=8m2u@Qj^~Fw3ai%OMvvM^b2hD&7=q@!uvxI zPzcB8;~n0@IKT0VpLNxduLj4qhym8h{uTxzf5SvVqZ)3RH|o26inK-4Nr|V2J;}F| z#%pdvRQ9*$JBM7rZEUmau16v!pda-rkuLV7G_`Dm^xeWU%KKjFF$IKg2<0D(U&HeU zl7>n`fAU@OJcn$k<%sB}Na&y53q-M}|Gt8OSJ_Peyu$_9jmP+bav$K_GN==kf>Gcx zXAn?qL)C?M2;<@SOHq4B^ll~#mA9Z%paccht4IW;L-|M%&I0+svDlIW-7D~tu+&>67V)@Y(YK;$4WAbt` zxrDZw%F4=@xV*+3fag#0bkJP$~`QkE({`%7@Pv01LTVs@ZUMgq;1-t zth6naVJumb692&!H5lrM5HqH3GoD^Z_UyWHk9jzo8G$>7PGIab(mZUS+!eypVBP#_ z+woWny-7A=l1)3KK99bTaSeXfC1Bb{xZUxREtaN+o0kW0r9LSRcmM5Yi13+CMO@Vc zupv}M=7n>Hg8Mn{w?0A46U2oGaLdL=Rr(yJq0*{ia>(0qh3j z$n@lIRc9F_KCUdl$X(~A&};0Cn%vZu;k2#IE>$K0k29wRtQAGOMED)R&G;J#Tz z%zl*1MYCMWzXS9tEiGcBvunB`teApvgI(nHt1UPgjIS1q$!vJ#xXxr_8O+YJEX)#)F?bH>pU8}9NNizyx>jr zs@6@?po8gRv`Zqr4Sh|!zr(N#1AEtF+7cQOAsr$DtG)}$ql&i%o~+^r4%!#<%KRj{ znR2#G8r3>A|6{r586kt!n?dO5`1u9w OQIu7asd@7*^#1@id_?pB diff --git a/R/misc/Identifying_Runs_files/figure-gfm/unnamed-chunk-36-1.png b/R/misc/Identifying_Runs_files/figure-gfm/unnamed-chunk-36-1.png index c46780423476e95fbe33b04b40a42e818cce32e7..804bec4f27d1056506375e26aec7d153276060a6 100644 GIT binary patch delta 17652 zcmZ5{b8sh7x9u-ZCbm7%Ol)IfYhv3rJDMaj$;7s8+qNdQZS&>3_tkrKZ>@j2`gEP{ zv(M_ayKApqHVJ<*0`C_DRtJuj4Y>3DDYr(RKg(o7dooIZzi8opPDqjF7?{`K%n(wt+uvxvVE8? 
z%6Q`9fwz^#X@6{NX>I8vBbh7kFQUSSR)z0WS8<0eN66uZZp}=XS~z zdbdcfmS6X>k#>ZcL24<;dsbJp#&0?-Pe?AF@A-lRQ?5QJU{Q9r^=_I~>T@tAd5QH>+;@A8KO7FJMu4CChON>?O`{c$4EXaT{o|JCLo44u&T za|*N_l=(gS1p7zLQUG6dq#dWDC|0#q zQPCe1w7RS`iQYfuah7F}aMAB&7gPQ{aoLHs0!*Exy0e=s?uFqa&f};<%Ua3MK~g=W z|KX#u`L6e7>`m=GWbb!_YMjE^W|Lgp-(2Y8y1bl;MKQD$e#45qhA4|S_EGQgA!aL+ zd>3T>zYnljUn%@EB!C#q1rqf*yUxoh9PJY*9fOtmw&p(lYP9seS+ua%eC9OyFTq6~ zm0!Wqe)?J8hdv-I>y?6%OuJs?UXbD!MeOEL8D< z5fQyi4}wLg)EaGxCw@@Y5l&Q9EVvYDT#{X(=CL-=E$?VZIshaKUp;N zdbDumEL-A}T`?N^!RaL+gJ6x4aP>(wWyd-cXY8xD?qTwTPa(^~E1ej<{ymEgJGu2k*YHL1i^_uO1LO!a~Ct!x=LI zED{mKv_Tv09zb%9-vF{eaH5bM9h@K-)^+%@kKaFk*(d=`4G)pmG^o}kdG`nQ*#8={l<%L* zjN>9bBUoOiZS$NYnuG)@we3ak2oJ0Q6KBKy^f0{D>xJg4k?lr?j9~3rY&#jn`iSz z@&LF#bkLS|Ec8U0TWCXfjO)u&$l^7p4(JIs#o>K1vePo7^)8XKJ`ap?5B$`AJx5L) z8o2-9Ec}IXu*!~hFq6F8YpRiV_e>68OcC=ELsqgYzO`RAbqkrROe8cd7WnK!Dj-^ zL;z0vb89JcIgL#%%2YVNTfP9sjatq~Kw_6QJOu;FB3ssbQ>3_DVE6<|lVJ;DOj4Px zq=-(_M_8KLAF5U4GCL55zQSK{x#!*v%qsGY$(_r-De;DYsq^Ceq=&(+StEKPRo#s< zZjj5HKq-ILNgY`ay9(BGcOBbUcWVTe6%n6U%eJbL57q@7Rof`~pTbseK&BZn0Ph}! 
z1}}I+5X1QkHSc2OkBGEnCM;MywV(%MCwD+06~YSe`tB+oYoNq3})a z-Y%l^0J)^uN%;~LOM?rXhg5WaUE&Al)J89hTF~-z6hd*e&(Wd+8BBbLcz8cvqW<7e zw#L5?fN}5yTOK~bd(4`~S`~8u(nWh-Ri`@jbZv=RUY-s+zQKc0%sevX6b6$J_1!C9 zNkwU5p6`AX1Zx7CxMKTHzc;y=s8B&StsG<9P)k36;y@~HP@S(2OePAB#0HP zRf5Ya@&dua##gmqPbWNp1GoH{lp7=~=tmMskERwX=>;i@e*j{U>&d_I-`q5MSwb0% zX6C;{irBSZMY<&T1N4Kcd(wkkEp!oQ@?xv4UGF$fPBm|C;eY?wQ)kJd`Qq|D@&cJJ zMD;{6u%*!?<13bNbPTusxhz=Jzc=VuHkUdlkv+a1Q90v7qVv57;5Oe<*AzTFMkB(G z|4F~?i|&yYy=z~8`#S2h7wrqy5ux}6R`nuhku`rhGrO+|jxk}4G zjYS(JZwsy_ZiLqS2m%i{KB!mVP0>M1!Y4Hlr?rpVt4y=p&7e4R>fR&!yMpYZAJJZv5naIS$g>?w>>szJhoj3wTt7sVdcLv6J0~}h zoR~J&rLZ^D`)5FB^~0~|7e`BCwO}}qjhmlV8AoTkXmFN$-=^|MEt!pnM!+Y|-+H*a zr5ae{etAEO#;Fv}>o%6Pjgt{iqVgjMYHkS0JnI1`XvEI8PeWS?@h(1VR=;^Q*>~#% zP|dr(4L=3{`zz}p^t_dz{v6(yCNJw*jk?38|AU~n3S=3uA&wR@fxFKHH*?D;j&yWz zili>oPo$dUZM`)L*ocA{qs^c4>JhJ~v28sW z?P8x$Ck?dHHeOZ+&+IRb_k^f~Msa5Fo?+6FLJr`%Jm>ltoWT=2N59iJCRTfeB*QOquT z$Jri$pSn%{ye-K+KcV9WX%O4@mS=#J5%OJIIJyK*s-^+1Q0AvDm}<02ew;;wmJTxY zGfdr5LAguVc1nH?u1Jil*P&`h8VT?yp$B<9KV`E^_RB{--GqNnV2t{_yWy$qLuq&Y z&8o(O?_jj%VT2R03#=Oa72oN!uAHzg5521r0-OnYG_r>GMvqHi^Sa=_*zE!&Bp*qs zD&GY7XipMkP1RJf^i&CxGQ|_AItGWMZ97w8(wue+Z!EL^IyaVwj*_qWUWUWrUgh}m z^UBYYUZ&$xiiUiZ$4KX%_V&1Q7BaxfObGn3${XtBq0Kj(-o^%Z7@kb|6d}4FK~bG1 zijg7DyY))_r;Gd#tUS`=@gy3c*giK!g)Svrc!*F)L_=BBLFKanCF9v&&tC{jFgo`G zC#W_9330ni`5z~22qZ%AaUEHa@7i z@GTBA?dmrTESd(#SI~{j;51Z(##?Y{E(vh`rO=P|;yaN`_KN*EfBQ<5%Zhq+<86MT zuBC1J-Mq*^X+_J3w#Q!l{?#BchgqgigGQUX1DTH&L{$Xe&D06|3YN?ef`!bX(+qU{ z42a&`CTI^ymBB{{#9R$9?xtd|iYTH=OoP7k)=qxVQL6`s$Xrx(IQIU0P=FEnqC+R~~9#Xt`m5}{zV==bx&p3OSmF2MUCh5S*!(5%jJ zR7CDh#Kd@?)+`L^0wciwW8}tDM+7dmpS>~8_I*Azwer0*o7@G^q0%HW_zM*}SjG8x zS3lK-lc41bGAAh{t{UvQ&m~%}`O8>d3>g^OYtwz=$4FNRnNdyt{8+se4ACTh2-dZ) zvWd4vJM)07wh?&2(8!NjZ6qanfR3fuKPl(>)Dq|^rw-B-!efPEqWeHcX-=!oSc@a! 
zo%=nH#Y+k1UH=t;X=~wwHQRGo+20?I*$L>RUD9Z=h0G;8z1#z*`P%AY->bpv0H9(= zz1lCfd6=hRq}La`*?MT0$f8g_Xqx=vIULQ|W)W&koqUS|fe>kpstMFDxZj&|pjheE z&cB@^6UBHUGK~-&vGV_Mepc?AJwx-jR^_qE%2ShNA&vk%u6Ds54-06ELl1ZYc{z_s zZG%Qjb=pUfoXmW%BvBnw2vkR7VZYw=gcn{&1sBVo<3Vw)ZiVt{AT5ZG@$f%oSR4x9 z9~Rx=hz3(Ff@dxzjttc!hYzb`ma z*@Po}2gQKx^uZF(sZeUk(c!VwU0S@6W0Nq`#f-j0J>R7?hXGvE@s!0eB(BPi=Ms%N z^XTUN6sxg*Y&~l^?xU|xYc2~m@~DbLepoTObIkT4tH};vUmofC_Ybfxx6*nGK&*jc z_hNBST#awH20WEBVnf6aa0{R>Hym~!OXLZn5N$w4HV@@~e{}1^;cH!RPsvkcXNP45 ze0LeN$g z45o#Z^anDCie6&u+oVso$-kAq!b{zZ-Zg-TTE~j6eJigm4)3rP%4yKO-@piar(9_q%%=t`&6`*;=eVTYK;R6Y&7b&IFNo&T#Xzqj>NCH3CJ zA$pq{Zo?BnyNRqFiNeCu^98>$jPikc-n`i>QL#Hmj}88ej`ZD7?xd7)*vdwOSt%Qz zWzOaqT$xtmg!dM_9^*yFOGWcZMJqst3Mz=Dl>KAdM5A1g#B+-oPdOIw779F*yKfg+V zjGSesUh5j~N37wj6xzA0K%HK1WbEhIfCWD5^^=BcD}wvGDyka^u1I3kO z_oaO+z%KgVUyVA=`!k&eT62s7E@MTV&g&vM%zsxVAvZVsfBo-D8q_rUYJGh`y9SD` z7R`?-#A&VV%}E0o)8F5X88fy5Oik3POA-U zrRyxKE+x_yV~rF9$NCz)8FxVZ5}bzC-KF|a-N)4q8t=IMSGwC7fy9(pDqR}eL~z1N zotF%CW~oz`=>_ao*68DBw@BkHBJidIZLMo-tOq`8z0NH@;|l9v-3>)44MWEY*N2aW z2RJrU^A?HN59cHC-<|dn;>y?%5r?(48rf6#MiWJ&^Ye{lUG7eQAJUFK`0pD=Xt$%ri`*DJq>!Ty#6MRcY-0dXTghh&8OK z_+W1*(0Qr7?;JE&$h|TR2vtd>t5O~RRBceKNSFQdu3Cmeeug{(Um=Zc0k}nD*163V z_>#d8?>VG@`@WDWg?-|0wB-0yNN=^9a?7;V(oPs(Z@UI)^OK(H;oNE`>e68iqvD4%seb*tfRIRe%p#Xo9 z@mygu0FydB2w6V8a^KGSJJ9#;OwEupzV?m0qHH?L~8Ra7hOzB+ycxq4fHG*VLqzjzJ z$pTDieSO*9ch$oz3wlNDZ!+Co`4#9n(pC_GC@hs5Z4~_wW>4zsbslfrqpDo)&VLFO z_MBOJnssk;Mpv@dA803Ui=?kPFIX>aFov@B1kFW4Hignwaqhc1>m55S zxk3kd`tn^2r;6j4r!oV9++O7+AarT5yvO=$0V>#XsHjI1!|L@&-w|&k07V?mmvYel zFu!~{3e{Wo{p*Q*v$?>rYWQ|*8;%H^r^J{U>%x2dipzZa9)&RZ!Sjtc3mZPNC9yho0NchJwSEsv zm2(IjH2%G^1dHJ*OLn;g50jco-9ny7(=+$BienmZ9KydUinU$aUZJFre#A2gQ%o0l z*HrqkivgcA1jE-FS#TZ?g`RFxHrh_$8dyLqb5eGrE zF5SH7;D@~qN7VcxS|zUh-9F+kEH(TB%EoWoLY;3C{=7r6E4}Bj5wamASr-PnM3mw!lnu~qz*XXc(${N|)qx&e;YWDEb!b>YNv!{ZL zW7#6nQJx(Q-d06vMWl_r(4x*`?+ygOJb8{_72EMYQ>AGJsHl$$8TU8|c2OpgR>|Dp zI(k5#aR^WpDmCm|C%kPoj@U}&Ef+A+@Vjwtw9C8mwQ#lM$`FL%3JoQm6DH5ViBoHk 
z8#_y2Vh&d>K_K%^ZHjZ>jIs3RM4g8w>8juZF#@+Q*w)U)ak)1?Rd_O|4S@X+_4 zLJpfykG-QtMDO3nK4W0oB=CAeE_cHr0WQ?&qhO<@8@21ao)6#QKwmn3dm;UHN1<0P z_**f8Ddw|5WJri=RzIV7*e^B&0j@yAW*~tQ_3;IFemI9ng)irN}VvzIapKDI=f|LGCgxp34+}7^aHDl;e;D=a|-QT(&3hML6--wdW{V z=|VIKOAk(>MnfDYwuapApg5xNj)31HKTRqK+tPstI%G=MZBQ6&1w| zSR&eqxf4q(ulGF2>Zo=M8a9zhAuEM?ul>He8eYj}#2z|WvJ~B#!WY1^h==EwAk448 zUAq+;Z#aXiNPAd2Z?>BEX>&`y3~y|ephXygZk)xt^MfK~x|v3@FaK;-RZ9!DrYYS{ zRngC12_^}VMv+7+*RzhQ$HrWC7$++-PZpP$ZY9R&%WRL2XQTzSO?JNpJiFDDlt?Gl z^-SR+alXOx(;h)ZeKv|qC>H^rdU zT@XI8jo7u!!PT%DP`}kv=j7xhu9Ca#mp87X>0Gvi$KG~|@`3=qqnGh^7--4%5K)h9 z^xF$2hUWxJ+<`T$l9Cd%u#1va+QZ22{jaGNMrrw_HHKtZz18%c@OUYvlN@~bG0!Ir zSMBQN?(>X{=SGQP{TPnlO9MON#$5gmyUC146ElZhGMBw%9eO>I zA&J-a2t@-GTL6HD34&5acIaDpCK{K%a5|{OMI-dPaocF?@E5Pd$~Lm6iC}|~z2Z1C zp%pbN%UB=&krjns2TYRxayaNp+mr-$z!=Vz$63zmB2)8`-zzmpy0wbMsy4{T| zn+3ex>mk9QeNR!5^J1eDz*x^f=ROhBMe=YwkrznO+*nr$LO@5O*pp7bUWNjm_ZvTN?0c6|nO(tqgKB94X1okj8UzebsP$ zk&43>z;`&>*_k~YgUUaOP~HUk2-84|#={HN^yWXP&i94VUceH(`N4mKP5sGlt%+uq z@}=J#*$&G$*7BNG_2ZBoPPB;H9&vSnq;G6LO0-CUd8&Ctk?*-VI)=0;QwFgP6B4^6 z&H|C6$RO+>FYh`&>32mWqcz6uhuF*xRfo<&ZGT{%Gp=J^27e^>GD#()cLq$zZC&@* zvNTU+Q?+8RFt!s^5SQ?N;T57^Ru2%k)$(wJTB+ucl}T^~5&s-6jTPU|y1 zQe_~54+=v)%8R_5dp7u|rNZ2OH-6-6?nXMQ2aI3l3*E-V4I*ZvcWqCf61X>Nl10ZSD z_vOs=`S!>r3MWtwb}LN;+b;q`d_AqV2!7Ttw~Y4$iUeDyTmq7F6Ny(d0@^Fo4UBku z)f>-y3YSZDFLIHtVa(hf5jV?VXWKbwR<9s5UM*^@-|DxsZo8-6!OnelIDRe`h$+HX z0C&G34Poc-SRzrjqs+Tvy{ohm4(O5V455BPlU+Z4HtJ>GoE%PaS!+7x8zH9x&8FP? 
z^gV)&F!61>ZrG_GYpQVb5E$h9ngqKHdw%5Up)7Vy;=LAa#zn$paKi0d**v|B`h^UL5@Lb zUrWKZATd8FRffG#iGi&EP5*Zwgo{T}brt(m6p~To?VsZsyZ9PxL6U!fU zTB9=g*XpZ&-78=v6{|}v(wTkEnta`2ocYTt?=4FS4)4++kpvqBnmaTT?(y1rG%sx-over}W zx-4sa0%d@6x#~2@c97;2!F4yMd=_*YE6?A22d=rft1Y$g1FAF5xmOs~U7a;*>x+58 z&0)az3SBeqh>-M**MQ+|hmhG?KLS--D}Bv{){vms^v3l_0u!}i_E?QPmnax9w@fzL zkz-dP8T%a_dt`nDAl7Qz(uMFqGqN@DC{ezP%g6Tl7r(_twq`h=TV+1HE!e8^_f^7C zCdi|c$V-1J8M()AqG`H!5sa|5rL4`|KT=$|BN<)O9f#W+5?^m|YCjxewHxHVwCV^y z=aa+|=h4&KJ9h_r>6AJhUw~_?A#pEQ80hjs&`;&7tM6@_0VFHmBz1{qAAUytAk3nz z`$xwD_qEY_7(!$Y)fud6|5zjkjg=qOFiT;IA>av(aPluzB-2Y@;XQ(jDNoYRO-Xl| z&eZ_O29Biz&SVOSrz0=!O#oiXn~-Z<)*LeY72<5gQDR?-`5zXIMR^qD6=IdDYUWMTapRihBPhm{vP>le?<*>WCU z6TGQ|ati)q=!kw(6P@t#r0LimhJx@Q;{g{?x6;ny{y5zYD{jbUhN5S9-8V3OghV0? zR*FsRL%=+#c_G~n__@1_q*A7ZCC{K?A5U_?{l_b`1q34bDJdfKlWaVl;pc)hKPNNy zXDI{e%U3t#AEXhE*3F@_dQ0xEvBY}dlXPaIz;X`6ceUpoa%k?uhU*r3^|QK)o8xl8 zid6|_j_YRIgG=1kHW{)7%&!-`%o?2d)-mBVAWMLBCV?|d>7 zXo9}kr>rA5CGo2ImssJz0WAwWO5iBe>TV=O-lX<-E-sQ2vZwqPXGcik8#8J9hS$}G z0c}E`f^r*b_^_Ya$DJ!ORNwSCAb;TjbBAM|p**9{W*&rBdl7*iBSsqbVDTy$^xvaK zUlP$koUsn)Qw&^O<%q2!vTutVeMWy@n7(BKxpK{u8#?4Eiz0BRMSib3@HDWE&hyWGxs>N2c?)m~@zW6mtWUCR!SNGdmdn-Jm#@)y$&HD<9;DH|Qjm#;6MwH> zZmsWbQr+PzWtsyT9%v*nlTdA!UpAFylbUE#_nkdMokAMD?&Kjb6GQgQ^^vAdHDg1# zx@PJ%g^~8boW!X|%429S>6?Y$D{Q;<&JjL8;M-uR1?q-kv(ZlWfA3`jf1h)~Jh)@o z?apa3h_>PGEH3+(4^KJ;$>2tkA}Vd(WK8P`+G3N0BB(jSBG)h@G>$O9zLRGtUpC!= zA)mA9T<~f%MMfvo8B2;t5ajIp=6}l|iRSkXtZ{`W9RrHc+fR zz{XKHxd;8@3+) ziR=dZPO??vqS#(b8kHgCrxW?G(Jw<0{-8OLq`Sc^v6}zT{xodKND)+z;0ssE)*{LF zu`r(c<$72~YV;pH&?K$a(D+^aXpg6wy^S{cdGCq<^QSn+?Dc#YI4%XdhyL|4ckr)p zA!!(T6XWNp=NvMjjlNBtcUc-@KM*VTY;GK1PMCH_3*FX0&&5n#{vE6T3-nMa`hM}b z??B=hnlV(SXFOK7r4fap3eIj;*P~evBbsTc(4grMfX;O(5^#>-&zQa z_cZ)*|Kk3yB+D zvaMGwZE93Bc$VdfR35tER$34g$DSeIeB5G*5?a;a$&G+Ul)ZK4>?CDb%*&WO@kKlbt9NUFdOvd%UJ9nns5$A)#vOdqth8c%jW^$1=fQd#4Y3e2(eCetcn#p-k zESpt9=wxSZPYh!lD@ozc^3`27BxVwwn_U;KP%6La*A{i8_`UpCY|yR9laHpahH7&* 
z8qo6ksJ%NZ>;uw&h}Xlq;${(u)M%sY2kTQEWq@a7vCC?u(*jkn6@BLEB6Jt;wzRdc8wHU=hS<93sd~{RAEj> z72gOZyO{XBE!q1SNCKO^m;Oh5^IiPUo@LL~!BQQsG#=U#@#Wzc@}yrBOz@{Iuds=Q zyjYOhFCUquuND~a!h6+ zn)<+R)VCxT?H~;sZmRDFG7$1vHr2?~`}~D+6pr7Wsoy_g&TtbxVtuslfx0N{+R%u6 zj@TuQ1JdqhbjYsS%Fd%NAAi3?j_Za+>bW7)@G#>>4{F@>%O+p&(*WyZt{ zv#%*|u~fNydzyHfRx8;1K4=*h#}u=gg(qG9p&!m} zdZaW+@sgHB@U{_yG&E|}Yd>F&(*!N>{B_mLY}hXC)z$i#e)Fa(^P^uz-M(V!EG|r` z^*YOi&luKuf3VX4dYpHNoqe+9m&h|m_wKHeyedyMm-8UoMP z+$ujGj0{YIF)I>h+MMQeN!aUt9nbZ3%mmYSfjfgi8K@Awsdxk^k@aV$V({kOp#HcN zCi!+vaAkoqJvLEYjya$v75KM$K`qW92WtfG@&*!Kt4s$?l8TR07i)(1FWUg}_>i#t zpY7GyHMysxWcO_-TW7NHhTZ@k;|*c7s};OM8n5vSawk0@kF3P`EnIA053PPXbU=5q zRrUwJ#8}lSA}%_Ro^4AHs&$BH_PX88`$c8-$YG@=GE%Ttsb(ZAMd%)Nr+e!%k%MV^ zNU^Dbt}11vaW0U3A=#aXUX{fHArdAMybE?v z)}v)UPu&eX37R+rvt4%WxH)N9Cp}HRjquV4AkK8<(WPmex$#U4=QPshAmR1@NEJW# zsaohZQZB=zmd6mxsLw)3eypwQNpnd z?3m!Pm`DGNh3oWxoOXb_21e=(6ht0K&Ou#E`-TpB7jZaqO8Hh{VdtX8B)t(}jmFF5 z+G>`#F)vhLSfaphjREqy^iCKKvNaZw$UrwGg1}~y3^alPvBz1v>CpO(BVCrWw}AIK z(?0bUVcmj%a*gJnAiW`|1p%_GqaUsDg{@MAPt{HPyPx(bu`w+Y61q-g(#7Fr$Da$i zGOA-{RhhC=OnY+i)-i%E{s{zGD_IitXgZX|HBKeKq zv$7Y}%GWVA-pNflBWaZytZJ@g4JN&Y;@+TWT@{cbK<>2>Y5KW!GUK@+=ZXSu z>y#cE$O|(g&IEVhG~ht`I*A|=e5izmGD%W!uGQY0g(p8%m#YnfW}+ee<6DpwLsaXh zM#T89k`Ipl@mTsu9#W$UK4{AzMgXNKcK@7{`Mnfd56I_3qk=pO5Au!<`MxqLitncN zf_KQvEjNhl>;)ia8w04)i_&so|L{q0CIzn1jfan1(P_u0%(UR{Bmvk#CB+5FYluY# zX!qzH`=@T^C9E9VKmJb*LmO8&JvD6CQRt54uI8j_z6;32t!s!4PFVRL#n$FLRa3o} zD>|gZ04z!f<|Zaz*x!gDNJ0mrxaMa)Or#ScYO6`N{`nGR1Z9X@O!Bc4<4m3KO1>G& z4Et=ZS8=z75b75p(Bv17S@04VNOE#IjkFcb*y2*0a-f)_<;EBKZ3 z9MDQ=9@TU~HB=H%%qt_^+!Lnx^xnho)T%Dj0!ReDCX4B zka-T(Q@=BlOvGfQv?@XCV&_Kc|M`;e+6%8Ux=x3$M8h(MJ{6}6r3s|& zwbt{EGR(I&m~o|llV-Ri^&2V!y@*PV_3*GD;~KrY@5Hiy?^CM?azPw0ti^sP(xVJGBYP6=){btZAkDmHHP)tO=-L9X2k z$n`gJ3uWpIFY$$)u8PE6iJ|1z|$``4-SilodzCwjgi?T7MT`~3jD$sCe>p8^A|MZX@B zjUBAufyZ^20Kp1=rRl0b1z7!}i~q$n zo;s_cFZ!}YJkeN@`$q#92@aq5$l={S#f9FyZOG)ehBwDc9EX=?>Y_T%Gy$8hhtQ{o zevH;dQ2#MMeK68-6iF|6^dm7a{-eK(%H5!t_b9g37_U1kF7+#2Y5vWTlXz-v2kQb- 
zdMkXPvu@coU^qRRtc=0B(X7X%{^^mh$7TO0J2D*(8&$F(UWEn5P+#+2xgmn~CMmo= zZUbH2#8x&su^QTUcSJ?5Naq_8wJ{wsq5xbXo_jO#M|8hTy%mUYA6VxTMqkjF( zxfUT1S@xPMtE*|HiQ}mpslfx5EH6oLrd#9k!~DB^PESmB7v&V+$NQ3nWjYI%G4y7e z^qFQir`yM_h&KS9=!o_e4x43T!o^zVJw7RP-f+9KYU##+BaXa@wx?w&r`={2V`|c& z-;Ub$%jIohr`M?J3|ike_D&STNJXyXs7ucjRTFg0h}bpD96s11S8slpwj@||+e_Zx zE!CJvPeFUgo5c%r<(qVedEX?Ai#`oq5Gf*^gH5IF1V#dphI`FVwLLGy*vQkuSbIBZ zeyuf>hnEeuYaN~^jxeKS+^_THT4ona>GLU0tEv64lPKrKyzPP^By}v2g-NgT1w#BI z$3x6TVG-8S3l&h!jjd-GHr@5T?bGiIzj7IcHr8+#^66~+k^Z1&WG6V!;bj<_)UhMB zoDP}6-Yo*r%gkD?U_JJ}8FEtz_cI#tV_^LZrDgZuaud^x-R;(Vcg}Th6ybm1J4RDVmJ4*+lOf~Mt+TRHG{2ps2 z%BR>VyT)HlE@vcrR0X&blZ*&z z@6~XjZQUY0N@DI?p2}FL>mS29#?|ugmCQ_T7K6d;WzInVLshnjWU|Pe1@EBUPGPpU;ZH*i*c5^UN8mqw35=B1HW#|Abh*SL$G}TJL-l2 zwx+tw=ksY-T|*a_``wQ)dI9DeqOM<+MMVJPVt+ATv8POZ^R6X=8m<+XuvV2c2A$2B z29~ibRkE-S(-8eq|PGH5PK9LIwzO zU{D$1Ep9u2Wx&&BSlwu1;QnK82jN$@KyR*bXRI}aR99$ZUZyS>JX&wL4!4XJuCTeh zyGDGLE$mlNZ?&*&HRG^g^0E@Zjp-E}6>L_U6v1x2sQ;2}$a|J8%_~f(GcYlabsJ4s zUOUl3PdwKE@@ojva^(aq6Csx+Oan98KgL^2GmPQ|eA9*O6HkYNnRLzl_b%PiD{x7;Fiu`qm{LaI%UBvBXcYTaCyO6~G3geHto%5P~`a~)# z{{N*fx&G-h$cHbvQhFu-@9ck1BxuD7Y=W6v)$Mw;LH|!I%R&(OHKcxPC#eMcAMl?y z?}C_`Hy^%nNBp0tONDlA5|waqaoHbZnH*CAt$J;$X)agG3gg=MD~_p%+fryH3li&f z(3Ss8s(LyxM9?n40GyDa)zlMpTs0K2aqY9GRT|z;4qxye+@&kHHG#WYE+4I$yBO>+ ztC~_h?4~KMy*TCTpT=u8Yd#NardvI~nt7dtXQv-HwEu2toJm~UEV-V!4!8*69_9*! zKHxk@P3O*P{j!oxfK_%-yGvQg^!i^94Y(uLDMI+dcKrQvyVU-_g3$1vAlRmM@HYPM zz~Qrf#tk3yu=an?XPpYo3tD6M@v!|LQ3I>=XSe)}8~e8@vj@D({}15vaFHF+9n@m; ze7&j*@jqcRCpgGGYHhUV?fN;Lzm68f#>e}^_#e#gWE6Ar^C|uiFe&5_oITDL>wJGM ziempW`f1o?+kf1NY!vO9JpJ?_=)b=n2&9q<2PZ+nVJ zENK-<_Bt3B1||vNy(0 z1%LYVX4_t`M@=i+@0njNI)NX%pH8!Jo5nR$7oW>+n8MeGZmo?#WPWoZk6l!)1B2dR zife4E2et)mBTBh6^HEvj>;Le|ANX9;CB*TtRo5pr1B?8#g+s6)4>9-0l9f@We&TT0 zYG>Ef)R2(Wk@yt}m$rQhaL=bnOV|D7tkH(+bGe=HZ!ZrF2*mtt_5-+P7@vhaWgIR? 
zFiH@htjv$s19TU!gLF6ADfI8!LbP7L>+TV> zUH+;3n)Kdhi5{BtId}TybH7COrSoCk3u-C3{PGurclUtKrwJS@uUxZ!BslYPIN(%4 z$^Eo?&>BY&P4Keiq$13CsKWMFZY2M5aR0T1f0nueVAIsxycEzA#?ZmJy)lpOC*rqz z;rr04IG<|QftwnMk>S1#htJ{jeA^?6t@D#a@O62i!&Hd9*F5tyLj^7+BJqR8p)$x}G3Z)P@I7ePe$r8d7aS)Bi^Q}~2{&IwO${H+ zNU=#glE^jbEnZEx+M#H6m@1P1u{aW!Wgs+u7WT5{NZ{jX@~=7?9r7<&Mpi@2dLiqx z(CjmfTGJ)Q24%jpy4jr9v0A!8jty^MmcV&R=(i1p*<^-I)m_va3p2 zPGk|;F0FnqC z*L%NC6f5&X)t^i0B9O&B=3s^-7KIQ5TaD?0jw-J*S{N?Whk4rI%N{>cN+v(Qe4{gzgs3S{0(!PLfl*FdDfvTd?e30vxq#L?{k!q%;F8SEiU?lkwk3e!`!+Sp6fvyI?6=e(H`MqYAG zelf>}f1CVxm7kl}`*>au+h-Ls@FBY)qYM4#@Jyg8q<#mY22A+#m+CBHqq%Ylva^M1 zag!M=y79^MfHxO-cY(WNdEDghmd5HUnnaaN+h0-->KAmS_bMwZIn+06t(R(oR?iSY zReFxY6k@@5<5@z1v>>RddH7-y{5^2SfdAMJS02yofED_d9>W4V?;U*TtPZxbU<7rU ztM1F&S$T5c@V}*}>rv(Mo$^1zyhzFhvZ7QA?PdO*ZK6(Za-Q$8pKLbvnQzS0Vsafo z>Y(t>r*XS7sEbPAt&}i8;HnRuWd$h<(l0@@ss?;&WSiXSqWxhx+ah_w*tnw*%9tCu z0AwHEN;}U~Loyh^KLS0~s~}Jorv49Eun_pI49oa>F_R-2hK41oQDj0XpK)3vy7j54 z8x{I63Z17lqt@Q4$r6Hm>igpS*Qlk^2>*J?!b#m^96){{*nSDi<6pAIiTsrC-w!`G zXOk}Lo#?-ovBe7Nf+?j6iTJpR1aLH}KD(YjTN)V&IyZb@D3*U^@BD(tUM2->{0g6L z4z%D+0%#DrY@*8$zCR8MMEF)M@1qKmYyU8)IQZn5jS#uJB4rolaTeU2A3xJA zFe}S}>!&c+URO64Am0=c(+~V~U8fE{tvw!hRi@_$Uv*5{kNl5rv}XJ#kKR=@+f^@g zAytB@Gcz?_3ll#_*^K_D@8y8PkQ8l(zG)L2K7-L)7Tm8ViVK9fue*=f8AzPKk95Hz zFaq&Nz-AfuG@JPzhetoCz!vhF3_}u4f_tR?2ze<3ZXb;KbCm~J%C^;DveT!b5F)x^ zi4a$p1?zdrrB9mJ`OC3x{+49e9oyt#SjT7hf3{9%&4OL`W-RC2TWI%sSLtgdZ)4-` z)hjYShIzkDlY0?-er4(&`zlaDut;jcxeJKmyx=g4xLs1N&H>wr4ab@8C>|7l@pAcm zxvP;3>5gmW)nC&UzQg|aUHN`(jt!gYOqA6ue_q?ZFI1p=?%&R*{_3jd0{MQJ$d=tm zY(3b_E}k;u{x$Z}L;Q6nj^%et!yVh!ax+=2^HBrw$e7MNT{z9c;F)$BZX9~9PYQOyT zeeL_|YZtBm))zI2FYs=g$em=Ha_!9GHE*`19(u0faYx9t^upJJJI~nnrqujA9lz~> zxI@UZWU-XKj45rx8%?5?zx%+>_56LoAHV;zJj740DlO~(!*F22b+JiDopyfx_tL*U z%k}U@yDzK<`2L>yQ1v7RIO@0TQQRtp%RF31*;oFIarUn>VJ(=L#(-8G~ z-%5%3v)e>J%@FQP{Awd~dWoR=x0%J(6WI^{WoF~)c$W3-WajybE0V=FJ2OsA?Oj-L zCR9{=!_(r+-CKVr%m1EHKleXp{EB&y#`gk7U>RErE@LleGiX1XU;WNf`S2q#m&C7C 
z-Yr+ZMC@pu`g+~$Mbke0`Vzrhpr{(%6(t^W)S+_D;rZY?h)e22_k^<6)o*UUFx}9d z&7hER-uC+z?I*xi+O4omX6|VZ@=ip*jas@cMy;VP!ol{(nflO){=hO%6;yESevuO^&GlUwaeug@B1<eQ-x<7^eZ-fPNPn*cZ%QlosEC*34!Awz4%$ xAA$=6=5iyJY|X delta 18230 zcmZU)WmMfz*EM<$PH`!2g%&C9b~r$Bx3;*uyGyVlrMNrADems>?(W6i?e=-!JMJCN z_l@~4V<&rOW@fFGy=KmaP)f#8d;`HXFxbBVp4Oj#UZhrIR*!ACoTs^3@v&osd@;Wu zw-Wuij-WU!pmeFEeC@p~Li)@F7pEZl(;qEQQbp)M8J55T8+$6@Mp+bebjz z5e#w^!e`MOcu=AG_*N%wD8)MLlqk-f8r#D0Qn4I^$s_c}dI9Gk&0=`-(E18l|g&LAqepj{}sPRfQYBkADu65rv z#?TADL*n{q?3c0gk-toMf_5En8w1ld%n>(j)s_0}iUo{Hb6_jTF-+aX9e62oh>d!F zLfOcLF{)N=F`alNg74sfopW7s%|e|q_RfiX*F%$~c$mK)y%bKR>K+A>@1*1@CF0r#S|z8p!n#waMbAv;zj{L>V}o}FrcHZfvCZ_fFdEE(JiV7D>Z@Vfb$zl{ zS2}<8ll>{AI;ryDUGJdS7NLcx+4nO!h|o3AJ7fl`MRDUR=$Fjwau5k}7Xji4?n*;@ zM$~%yPQNLeH(|)fk0d?U(F*N7zmi9V_WxWFmTkR$u#s1-hK9^#q5ITe#G&t*dt+v- zRWH^I#JUegtk^sQZZpOoykTDhr=EB(E$!a_5LFqEe>@`7HKrHAf zQ07TgO4$G|OPOz79wFAI=?ZUd;mlA`r`9~-0ZIv(vo^WMPeicJx=P}b=L^oNNRp8m zSTCC$wbhNXVR7(n+&e7bs5Xg>eNVj8z>k@a7t=TZGiOaQM%u@^-3Wsmp<%~`pS(qX z#NXG7TPl_yf2!DsLV}`V8uoBtN?I{%HKJpQ_bD2dVG7{W#Xpaa>qFW z2S3=Okr#Y5zb0AgPK0CdKYi*}sVcKGhDi}ukmBU#nI{Y6^Fu1D9JGi#*Y6(R#t?;s zAa)o50!S5@uNsheIY!fVzi;2=A2Z+weuPq6kVZ%F_Y|f7Fp1S81-(1jzl1xFO%Oz51hk6B0 zjNmQ@Rc{Du>G--hG$KIFY7pv$Wlf_i+TLFS>!yjRo;KcWjpaiILZLi9zy9WZKtim1 ziF>+FhT~N7GJQ6DY}MqJO@#^ELI#@=QVC)sDhejU!`Pq! 
zYe}mJd|ULpFlS8m1XlNA+lsgwFzR&*uwNLDc!)hW`(e8|$cJ6PJXaD#mn;J18VEa` z{Tz6cX( znd|vA%#(S85=FO71unVGmbCazE)MVEyp7RA{IOh5mdLYJe}(PTb4%`Bx9 z9^;Z9f#uX|&!QcYP=CGq%I@?)7jqr7DWj-LE0;U&qhpJ!Yn0IBq1v;FzMlDFNUPvA zkp?3T+M!OYbId}N*w9c%BR9OzcMLM&xs};6toS=5Zr!h(X=$Z?9feK3L8Oe<|X>i3il#GJXIfxxFz?U#E4=Z}{_1 z)gFVl);h7AswRWpFR6m2Ys+3b?D``!${VIufR|-iXIhs?!0W!=1^$ zX#uN)4Zcqk=x3)bLODb`v9-3!B}4Vzc8(w3ouQo=n%_-ge_$mK6b=AFYw**ZYG<1q zn#jzHK2UG;dMK;GW=(qbI0{e;f1b0>J_GFt|E2dAJHqck`PmOS>LcGp3=Gw7D%Z)FkD{84N3aJ#$!ef(b3!T}Ja z$FS8&Gb&oE?rf@`ZJ5_~yqiIqZJs-`WW}owfZ$&1muJSZvi* zqcIyy=ym^)?t}GkI5XvQm1iCEd_l6=4a(gm{ju{_%WxsQpAZS)+Nig^IzM5XJFN{A zY=;WLYDkrFm)MxeqD*1b2TqxMob$4nN)0C{1LJi44MtGJIflmYILix`t4HOZ@j49f zm@0Jqd`7Ew#ko>^c%9z+QITArf9uZ?umCTYUZ$dLc#ZDI6`wo$BC2mgF7cbl7cC*2 z&)C5O(`+%oRU8$#RA{6Ss%O^^A%#0X7Z-maMK35vO6$DY5&)`3sP@icrFstDxOWRD zo0(tKu(>TF18}%Wo-zGG8QP+1P(euUbItOFVCIh6IiXmEdck*B1Il2nbCUb?)Fen_ zQ;Ly|s0_i<-@nfVir-Krpli?3zPUd!qJg=2b)BR%HUc5mP8qgXl- zg}xAHgMs~sb*M$#(}m{98~>SyX1#Eww*0wWIQ?Ab+N-5fcYju*kW_$k3e6WjY+_>El#C0IaBX`@s- zA;0?;r*MtbO2MVx7=gR_JW%<-Ms-1fkpfJC(%1rO{Se=7OOD4PeZk$)<2$_FqjZ@o z%O;DB00>3H5Ye?xp?Wt*w`fs1vpPhgAfOI6w|nExk2-Oj`zw~DA3pB-K(xebC0B6) z@O)iUqRExK+T!Ka#`iD2GAVdO#!{{OaHH|y4=NPQ3K3t}XkvZ;<@D6a&T7&V_BgnZ zUMgb5&mJMoq@MASXMe)0_wc~~3wh=k;*SP%%)QG7hZuf$N4yC_c8Muczn*lNY+e~! z7X|*ql8+%BRLCKuo<6S<$yu)!hp|`!fML$x>wPjh@xn?PU|mesqrcXL9Yt$+Mx5Z@ zTOk<1ext~OAVMaHO@kwJ)9IC(7n<6`8GU`k8l`{x#}}vL(~%ALT+c5X4YPONxdj!7 zvTLmUTmGu6u90th(!=Yv$8lCoZt9>ptF0co3BAP@c87Uyb}yT?ijNZ#R#h<-z^zlo zls`M98crC-QbJrW+X1j#`z+^Xejras<_>u6|2M|?_GNY)HO03^S+v;64` ztf86B131es&8LGbWI58qY?SMHCLbG>615)w3G8E`gA7p-xjr!LhDEfCoPhC_EAHTY zhECmKxUL7B*>Xc-Dh#E>=5e@`^2Q_Dy##~=J}`hvDbP}~qxRmAL$t=ej@iiWknh4- zqLJl*+ERuQTbcyUpQaiBYscyPOZ(1DX4}jX;*LTj8m~s7cat`xTJ4cEq|8$=RpjWz z+!4|_*s3y1X9@fCmVa0py?2XUITAkgdJ5O4j=e;IK;ayQ=Fo{X=T}EDG&7B|m2W)c z5b7HugQ(UiGcWAFzY35F7C&9$E}xY6#jJ6TIyK;-&|q{(Fy~1DVm#}FJDuMY?7MY! 
zPKLDQllXUYLZm~6cPcb9L@Crq#_g8Max*V?ij)R|X-J~`P3^pox2USmk{|ty6v{Lr zr1XwEz#s~I$=m4kn|L(kh{;g2%H+D~Fw)^zx%4mUa%Z1&+mjCNi zC%%`sU<4qm_N96$7S6Yi8ET>B{N(nF_R@Q2tp<|O6lgb!A!t67hUJtJ9cwfZM*bLV z%#I^vYhtjF(D*16Q%=nYraP^`v91~3OE?J=p zMaNan(im71<}0^f#LL9B=OqeGw7C5gE0y)?2H-Ul=E2BJ6QD>Gq9=u8YV`Xac=s+KQh0TMw~i>Aiv0pe~QpxEz(k4MCQ6cmuzJIw0vh(Ya}{~yMi~q zRBJg(1HC}L+Do-f(X?s&Gzin-^mu(Rhz4+5&M`P%jdIgm*l7VTH+w0TWl~*P&h(t$ z-nvxEKeiNq7xS%m6;n_rkI6s!-S^GxIH7;~sJkzeQJe|(FP>Z`$6#4ae!i0j$gum6 z(-!$YT8`$&burqHDK&l*grxDkFS#BHpFg2z6jmGbI>2{L;XjwR1FBUY5Rc?Oa619_ z)QhWsbG|v)oDtiKhIg?clFyE*h|(t0$7 z^usmw-DO+_=L!ZLg@=_cS)J6IYqb%erawBC9U{l_k@0$@Ju%s^K%3Gek&>`n{i@E4 zt!dF`@+C|qMY2}JgT{T645ndUL-W!K_e#KOeS_88sMP95XJugu*Wjt*<==B2P9s)T8n=eN<_k?3jtd znG{Rw&G%CG{q!1n0gn<6=Lkx`Y_*A~C~VI>re!C-tY&ODahPd$ln&$abXhfG^_wAn z*P7e5+=D{q{*p#zf09QhrL#tSk&5~v!T~>V{Bx-qtI`sAJpPT0t2tmhNN;h*e}BkAaX30`8Wsq~#VXZqmOtfwTaY5VYqFUA>}lJydA;4% z`||a0kpyCETkUZGB=uMfXuYzj(>Fq)Trt0FnG!F))wt$zUU>fU(MmPS_j9+7W6kzB z9Lq14?bP5;F^a>V?9&0_k=GI2=XWJJj7AXcy-ZE+qcB{GWK!2d3x))ELm_h}mm2j< zOQoe(8f^wYZd3AY{^V>C$Z8$(eP}S1%f&R5yRLjZzX!a>%HyK*1PPU(p(1r#>=md>g6tArTL1uAGjokoDYVM%cbKCHhaz66F zZPKkN4W++dr3nefJKH-qi%8g}tp0w}=fa$pM9>KBeYVqW*o_aJbSHaz2c`Ao7+~eks(XlRmpt);jr>%IExNbn|xEc6_JCYLSgFZK_^+z0LbXMw|WH zuq3aQ+Mi92huuWL*luvWMmDECV@(IEmE;NSPmCOsu~BpQ_dGFJ#FXZ??)%A!aVJ8>VfFaOR~NoZ3A+ z4W1o%kVZ+%AEK6>94_-x3*{`His=l<%r*7Jg<1C12iqbFdy19JJ|Jj*anwky>%4=Uijl6X(l692pm(h}(n&q!ZkE_ZAQ{8W zjakf+-`L#Otg*5V$I|A46{Mp^({MId(3!zd>Pw7>#ln33p$%tMxVeA={*ofR9I($c zBRl7m=-mNIr+-`bUNYj6>=@&iJfQqQK<;IahtK-)*~KCT!rSBpbjHYb}=2;tXi<`J{T>u@^c6uEL57S zv_e#|q6~Cw(wkfrGT&cXt)+QGE4qQv6mQvYv!zBrv}ZX)!Eg{w zau2#y&;%P3?j^H+jhsU@-2$@)!qNPA0;K8=-WY3;+1n?FFVDeYK2}-WO*rpm%R;$o z9Qye??M~*2&FW`%>uNMbUlO1C7c7MXU`D2Xy+7QIQ8lrkpt_krsC#!`{Czs!w!e;J z!3i@EQ?A9e!ou9L<2kgKvBavv26DxA(0lwVy$5@OeHdInsGw&49{y$Sk3H55=RjJ&JX6DrKV$%PQJL%J>T8gldql5Rt!WP72`VOGRl9v)lAsSd7vmMVW6j3K~$5` zTCHEQz?@_b&ZrA(+&gkzWYF=nk8d>~J?-c`z<>HsaBM}^e)egRBOc!J2KJB**gg1! 
z$wl>0OgzEe-F>dCI@3HejN{PTpFyv=7ix-4L|Yo&9mY1ENe&${I%lAt9ycY!{jPc^ zFTRa;6iB`~XS<7LV0}|H#K}s?L%B5q+|eF$hp2t);H&WtWGdRtWc#JG>RK;3 zmMUykC)(zB=PBA7m1%bJkW~(tKZ4)Cf7vA~;w*1_{4?PH!lu6U7JUx726oQ4@5l(4 zZphMerAP_ZRdjwa&a#Cs6aD$~2Rr;sQ!G{wQigkD+ZJISLna*-swmsv^vq;trsWoO zpqd%4TaIR1kla`HsDD7vti_Q(P+!GIBr~YB9Od1AEwIySa$r4A9f z8@h_lhChwUPNurFXq3cEzGNlmCd!`0E%AZGwAr$2=^McVFb~&#g<}HV$=xZ=(UD1U zaA~9W^!hX;lNnxfO;^Nj-TTo$%ghaxm+Qkpn{>ZG{?Fs;{5CILb{kyMi&@o-?kQv) z*HxV#d@{{y_Oa+tR{Xr$-@itznWB;AX&WrZv52}Z4nt&86Fv4jBT47`*Upm$GkUhy zTtTsmEG3+zJnnU)Y!9)OO0mt&+WqEtwBSvq_Yq%y+P}}_{@{duVW$N_H1}L-6%-6XZw%~MNXK*&2n{uVa2^un9 z^B(+V!m8;GXYM{Bdom0iD-0o=5RZhH?_vXoP2c*$=q$T#XCi{bac_h*5^@>he$k76 zLKF)S+qO)Or1RI4^5$pZB-_blqX_kfey}ampZQ_lq4fRK6D(O!Od1M8g!oD9=X8>F zX?Fy(aHk`XbBIT1KNy}y(Jo6mOH-N%1> zc=?y-Gc;o<-~gxYQ&kR_t7VJv_BB|kS;E=)|eEXhR5@l8zyNmcF4{3H-9bx z8Dnaq`uFqhBE<*9fRj6@Km5{ha5jW;ZFKmBcEb4Rc|etctR*CskY?&8Ea148aXpdz zlz5dH))}Fa7}DQsbN*%fFz1>IZkV*H>gK{#UQJ=2#AZDVipY@0io_YQWx4F|Ec(U+K}4?0>obwl+tGShq(I#+AxHOMZCqK$ouMmdT1nxvFadeEOd&E7D3!q zjfGF@5S{FoUAML!22?iIatp9=Cf&1$^h`WRROsP#J$pv+eq_pV;+i|T%|o#PuQ>9r zkdCslO}${e=}3%@EPNJATDw6PL^3JnXk`16#>IyrD4*^yc(RNFxHT+p3ruU*Fmd8@<&L* zYqWhO8>x<)e~%}$cli10&Z#H$MTj09$(h&Yt@`Uo)CU{jp^=<;2h7v`!l8;wOnL{^ zKV@vaC1j)QPMOA(wgS2%P}m@Ck*dCaQo(6Spm-gJeD}QmuZnxH8sGt0u>8EKg0wU* zZ%`+6Z%y}6ibyyy2Ydrdvzj?|B<Oe_s;q=hJ0UVJ{6>DLoeTULI#f)`eHK_K|=!11%N97h&%BbQH)$&^MDF% z=b0^rt!F@RF82G+lW|^C@1?}>WcMyaV}Ua(;^3>*yQfFzKE`TSFJEB=^|Fe;+2p(V zkI@9;=ZJEqz4ae^i*jYbdjAVPEW=ysj9Lv7UN^E2vW2Pws;a1Mu0Cp%@cY!%J5yARb z(no}D&IgV}0VDj{{JXU3uzy2QBpE`S5b*G2@usydtvrlYc<8(()oH7Cv2u~7ed8ew zO?$t*RL=i-XuuyX4;VKzaL~?=Dh}`AK9TMr8cmveeqo?Wa_$B<;qw+?oaG+9A13Rock*@9RGGN>4zRvY9NANorxCSbCNTk09eKtpTPb~gKgLuiO;Pb$mPBJ<3T(qT4 zD_CDjr)t_m6VtTjf>g3i+TVr^D*Ijt;>X)7Rbr=o{~(~ zA$}W^lhodYjHe0XEluExnaB0r0(?&$I&er`;p3jQ>M>hH#oWpMeXP>8ox?u=A;jJ* zc>s+(Qx4r{Y9rb?`8F}$6ioS`)qzqBV&asS9j>w;%L-<3^=N*Jf+*U%t3@E}fkw-? 
z+-qPNv`aZJJNfi7kb1p}jii3aoW`}&Yn~_atL#I=?-Y%L@ysEVl_nW#WJ`sD08vyv zz4>;ZkHNtQZN*GP-IDOrw@=uTihXWg)<>pO^A6|orZA1!TIA%x^jh8*BQP4`kb({S zBUrK(%ciy7%P#L<4JFhvFa}$=oM*)8H12j{WS&Fi1ZHp;%LpvRe$-o%M6%JD_AO1; zgOLI-#)Blfzd3KQgvf?2BH_({0z(Ug;0um4raqBV(0uPwaMig8@y|i!Y}8$JS62TL zEp^=OlQcVl+Mn27)8&smV&`#Qi;nEza>*>(YJ@3w*C@FF>cCl4^3lSP_suP=H%kqA zZ?ZfBId_m>@}4*E3)Tzt9EMF0yzmcmAcO_w3^@+IIXsH8?_Z`|BW+6%fCXZ*OcmV{ z=nKsl2dE%R`9+D_!r!lM^0LxlF7Z`PaBoGFUX#7+jzPSoj!EoN&qknv?mLT6C7*u! zAv%pWPdi>BVUOy|WaWKKF(b%DjZLki{&jZOwwPSu$wllDV#76p*dX3RgAm=H4&y>j zcHWr!6;oVjpvY`RWGNRLa9g1>2)ub;nkG{=>EVjSDipe$utNPYS;M8k^|#bCha{-6 zc+hG2XxfT0k_7ZkLX|QD^v(vGYb7r1R!wEEP&%|DzdZ6yPpYrNJKCNrCw}E7>)A=J zEc0he@Mj~7jKBjIZ#8iDD$6bRw!O0;+2h>q6W)<12E^SaW~8795MP6vUr^h?I+d5`Q4Q={_s@t$qE;$1 znl8lKaK9G|fRz!*1u!zf+q{1mc@w`a>d1LBz;3O@Dn~@gOLl%}BF=&$myL1U^n9MU z&wwYY<>~dv<~VU@!+Is?RLPudwmAibL%JeNp9hijf0YM#w>WHd;$L#Y`(x4h7m3SZ zmBupgUlb9ISG4evCq!L?gdqX@3~X1>TgGj6e+(T4`z-2O+E3Vs(2hBSptb8BhCa7wJW-%C@(XS zkzCF0zSA@5)y{>kOO@N%_IXQubTJ?cgqdK4CF~e(6M425)ys0`)911_QQ)m?8gzs3y{{N%2eu zXEm3R(4on<(LvvqW8z2edF2hFXM$L}%yi23!OP{kqI%zP*s|x;$3chR@^+>63f3kNgJ1alF#Qn9tYL0nunHD6vpn!UPKJJ#Q* zHF4NEJgI2%bxjLXQ97)Wa!xZ?*o!wWAV zn+r^zN-;U+8)|v8M5NJR>v{0qG6GD|{!D*O%_H8k5CsRWaK^A3!A=woE*@ayT7Kof zpu^C%Z3w0dXRq<|B<1ZkuJp+l+;$Ahdxl8D#?IM2E}fyLv5VP2vr&meMKph{Jn8Lw z)9k8oDrQjouGIfV4ZW|d*v_srN$TgAZNw)xQ^Qv+PNA=nfnkB@RnWsjZvf2f)h2(? 
zg&TEbjP5^$U`;7R>HbL`b6^8Ng$9sC+&phjnd`PodW1*N&Sz<(&c}^_V7(eR&uS%5 zJd}|Q=N9v+x*2&P9O^=t(w;#lY&y*R^daN&_gd7Z+k>#C*3Ula+&5l^_|L==iyW$F zR)1V$%XK&cuLRvHOO!-2fqV1*X+eFezGL@>M6k+2u+{}4yF+5H7-trTm!Qd6_vr5~ zey{!37I#j3q~<7@vNi>wY2I5}1=mErDMQE6%76B#U)n9ALK+vWfqMO}JB4Ku$;U}| zr8%<@xP?)F_4I-5AQz{NnMQxopWc7PLiJ2SUDyN`7br+lml`%efaTU7g7GYK$5CIY z>YrlE=WMKEyV~XIZ1IBF{Z(C6{>{;$E7Oqz<9wCkHSD2OW~K>~yY7kK4D93v;Xj7> zeOI7}(u{VeFApHjSrR1udR;yReXJfzRxk%5>UacT^D+@>yZF27gTyfBR9Ntc7F1Ns1UD@cC#NgW;OPE0z8u zGI*;3wk2B^C|bl)&!RH<)zbX-ZFAz(&&#xpz7CI8c zW%gn5dD`ON~~f=Ee%pIqjv(bIhL*0q}lf%D`ctr(2LaD0|P9KVJ!he_-f ztE#}Ft+n?DVXBs_gUHniDL)6Xrw{r8Cye1k*us#)uu=5UWz&|$>X2I;ufc9Dm|Xkn ziAi!by*~MI%W}|K->77__6gGvST2LV>$v4jw0dI$2_o@(5nzf6ZplBRv@G5skSDiy z?}Ot7=!yuw4T*DGsZFWeGBMZY=DDqKXwF5F#>H{^1(l2EZ4E>w_S1SmRAB@l^?95) z89mlWU(#P*NR5NcP0ik&Uo6JliWU8Q+lEi`-urn~$BjS^KRMjg%FCIVQ8maEZVuJn zn85Ckl1}0yFN~O_k|>|L z!_41k9=xbA7IV3Kx@5%TI@-ZV^=@~ut~4V?Us1FoYBuWP5`{rE@U~FJoF9L*s&_GZ z$P=HBU?`Benj-c>qG)-)(LfswZD9x6L-)qpJr=0plQg}obi~l@t{#flEy z)KWv8BQ&m#FR=oj9s^pUwtF4cTE>AiTzC2V1>VlvKYBkfYlMqh{;-tRm3_{HLR-a&6n8kkGm7EK_I6L zy7uet$`|)KZbXZl8NI1BhWg>_zs+U$W(!O|Oo`#GXOC5w%~WB*26|Jbnz=yZzR%b_ z7h$vY!6%8_xnBg^2=#+!>~5KZl5@T;v1kg*#GThO(erC(+Vv~Z)~ih5!n(@F^K)~t zU9kyYIV$Mr`$#u9v!#=)sU;Y*={BIhv9BJM=sigBWr7=nQ3GGJL1Q7KinYMp@Sju8 zZ2e(cea`7Co!sI8a&J2=1{8440Q0<@V4T&$bI9LbeNjO@4}E;%=xA~QjpTs@w`9OF zJI)J)4mOW3%zH+^hdamKls9l%Ccctgb;4Ku(sLV=jTtNEy<2IeNm7B<)~EBia?2mo zEcCujkdAvK(weogL+i_L_f}X4T_)%pN5d4_qr;XxU@OGW11^u=VFKU17#9;_F_u=} zTdzAXS^WU*Hg7jn4(RUf+0SbAcZXs0qcsmyqZ@rGB)Z!>X6(}; z$SlAWyK#|{o{Lk@+HFwIq%XKffmO?7#t%DuZ@$Rc;ZNm_|9reH7kmmRLCp_OrqlUy zO2j(?C-lJ&*9>7gJH*`941*FrcRwBMFnsYy3BA4IK99?tT>)no8BRaR+*yGJ~7nWrP^DvX?sTd?KK85w;(%(_pvBONMHEFF5-sT^Xv!6jh#&h z};L9+U@YDiJF_Ub(f~`2C{HB2`%Ks88_*^iOXzL43BvQ zDMnfsF{13^QUJ31__4@KQ%Y>bF`QZV4?~BAY|^WUq>Bm4yES=?1ddAW$~AH@6KKR- zxYjsCXrp^u+onm0qG25N@&R;x!1E835u7(n>r~%z14uI&M2Ra>cC0st1XFiTjDBV) z8c3)D<%far!SNG1c(GXQnGoBC zBz&Ud-ELo~mshLR+~HBUDH$k(!U}3t2FahCA3Ma?M$nZfa9UvgcXS2Ax~P6E(`HT6 
z?OszvQuVM?f@DdtB!dA1l6Pq?m+sY4Xeu!@tctMQ_0-iry;eac5H##PF%2#6HCkF@ zi)sVtm;4=?vUS~bFVw#@rVCqJqnzT;RjRdisQhW4)J^vWa%^Y|z6$)DZRgyK>aCkf zaNwmwt|&;7t;?(9n3vFW)YP~{wRQyI5)~XCZdrC@adw4sTXZR2^;*iD=!G}kimdaD zl*yEo-7>!lo^6S=+=2Mova`E3a0?=G4?}_3y{=&vZQ~LIRO?PUrI{LMBlQ84Dy>$J zS~_NC=A{I9L^*a5;-|zo{>#T5jEKHonZJqCZ56?U9A=7xbu)L7P$rAVZ`_(0^%Teq zu{cy7i7cpE4B9f?^QD@mXRNEt-bdM1o<+)YQttuJ zi1Tb=VDq)lds02nlNgknBQJb48*44hKanx^TPhn&Naa0SLt67G<+so}ZnGfYmmrXw z)l>C@KXFDa6KWimzSu+X4oHb;*=VQd2{A-$A7gE zfRRF{lFvc!z7W)Uke@p8_Okv)xG+l0Y7jWzMz_^=oyVM9q*9Rb^XE^Kt65ab`7(gB z{c*N@MDXoFf!$<;K;`@Q{G8FDc=}Oh;6cWcyQ-{g?D}k75Jz|9YJ`0}Df8`l-(;=D zoiKrW&8<7A8{Jq=E}W3ZPLG-_BTptVg}%IHYUu|luY*z9r~>pM_hC@M#~{P=lsDl1 z^72xPtQ1Y{-<#d|`gk;I3p{DUE(6gy;^9L^lW8Oqh-TBA^IC>SXzCW9-@gq|MJ8pw z9GjZFK0okD{SQS}rP#it6GHz>eQC5WD9qnCp(|S~?49RvrG))x)GRTi9QL1@J9_>U_@Hf!f~EV&rn?p9sb{e8!}iV89`%*@PafsrKE910=o#M=P! zX9_Dy{0#5gMJ=r&5IEz<;o)J|n&+7|&v^&zzh8U=?}cMTeU*ek=KB4lt_&p3GfLm( zVX|WH4bZ`s@Zg0Xi`Ay4YEhXa)QXrB>O8j#hW~mw*K}T6IGF?IX--p8@ZkI(IPyH5 zdw#blNpksJ6Cr~N^9UH@29=lS=XQ5*=qY?f4y zNmp6Oqr<47{A*y+A@S6E|F}(e9OIRG+}rAGI4Wz#%lPO2{wQbnADLUVVt0h%`~9z| zH9mns6Oq94b}~nK`)dV(l`E$HdLAsufJ(rUb$&U_ ztm!lPRpX7>=OXN5tJyo0r?YlIn8Vi^CTqJtk~z)i@j%Ll{Ar>8or8g^@Io(Tm1HxugZ0k#IW-fxA$^D>s&{WE-OzI9^= zS^>)!!Gh17k4hMmvNX=PqXqCi*{yXKa&mHbfjV}b@N?@4f}ltH?Y{SI7vZ)a2Z5I( z-DQsumz5w+nqV2G#-6>4IZa#j*M4%Kg@UF9m&|Pgd}PZ+CY$ZfNaC`fpCI@Q{~tzH zv5_o=>c7uA6{<#ca8ZM0=?xFRafFBi9KKQ~n{OS6P(4u`b9BLHJ1U_rq`#HZn`fTu zPb2HExAlSe=wPM<8XN*omLK$>F%riSRxY?*B#O}rgTsgabPJ2Wm$I$DoVt!at-twD zr6>#s6ge~CV|FCxVvQCGblvn3+sbxnY-2JO6GFY*{%I)5nmoeMYd2nX#2dI z|K!loN=2kSU2m(a`6CpEW(hcOnk#N}``9HAMEBf$+EmOqie&yD;WR12fPGzlSBNMH z!z?$$Az+YM5fKxQw!gjDE`RmKn4C-A9ZpQ$juc!2rFp+zWpZae@6eoRD1l=CMW`df z$~cXL*L}ga#euKw(R6O9`G5Wlg;3A=%bEB)hl7$qY;2^K^F}@wK$tEE8>f{Y{hWfq z>PKV>JWFZt&e9h)XWO>B*0bbc=JpkTp%N`EBJ+$0&U2iK&k=#U6~kq>%Rw5= zb41^FFm_(YmHj0_;|M&!f*A~^OiXQ%%M~gYL39O{7%(S9WEIkh;ft?P8Gr#@XgTjf zaS@4>J&Rejk(P#7+mX1g*t=AZHtuIw%Jc5xqBMb|g+V%?ky8Y6E72Iv#I-3;d{+Ryp<70rar=UBb90lB2oz#) 
z-GkBqSGY~soh}q+4z^NT&dSk2@Y2@5G}GSb@voX0w@^q8n|JPex%itCd6@M-xozRy zCrr@a?+bLlrT+<|3XbJQ4vi{bwEZgmjKGYk{pG|qwR#-7{Cp7!)bOWGiub@8=>_vh za#bbuhT*4v5D8Lie?rvjEHdr^FX;1;0sOtf6W99hUjNNd*Ia=`eZ(4G63nrm3AClB z^eX$6``i?VSa5?0gu7AXNQZvlVW~_mNWJ~@k!vv0x@mn$veI%0(A)`zhmoBYL7#vl zcNqBa9bqEPpJIL0WqH)%Bly@@&J0Ez3*)3nlZGJ_^l<3(FM6vna&pYxZ&LjRa997? zkrAfP?$3Sk@OS8OHsbiSV&?jkDVf1>Sf}UJI$hxRm*M zsfy)4kB~aL9EJ%}m#!KDN(8YS#zoG!Ai4Zw&ca)*0H>#2YC`@%1au9%8Z&@}`THnU z=n1dkc0{t=a<)ia^&;9KQ!)mtOZ8yVDwo_VyAr431*?+~9sJSIW$e_>^=X`V%idA)Y@+?aiT;{# z&h4eXrbDR{R})sjoh@4POmrxhXwm?Ncs7K^2@J&|-&2Vy)7yG~_p>ygp<3vLT#sNy z+&AkK*@8(Rc~+L%kWLh(Y*&EOYt;c0QNANR2SRZ)w#y|OsK4Ywx;?hDz14XStE)E! z_)uS39SzFhE_5~#y@Q(`05<}o$1X7BG~OF@5KVZ6wv<@yHC?RWGrXi{TR4H0C})M$ z$e1CsF&1ddeqPVxgy{PQF9Tc#>4pxS;vA&c9M9rLW~O^#Fz)uf?6j3DeMqwr@NAOi z&Hv~m8pD?+aB`i@br4>3XH!8T)ERsyiBWi`RZH|>h)8i|$`9(0QEch4hwvF7!M_Tm zrK)J%8iZ)C#s+w?S@QKkARP)8UFT&{uigXRuAED?P%43#w-;Vxw?X?=JGS!Yo)OE; z*^+!^^(|amhsA>DYdi$If0#%3xl2D)3U)SQw`msdc-OdCS9MXGYCyEf!VBps)l)ag zIf@KJ1$ojcr(uji61l?qH+m0@nL2RIe9KKPPgS!J+y+EILsEIMdG8Sx)()q2YU~$r zfQ?Y5e>rfk>A% zP=&TCsyerUSi@UyEP7+zln5aerM8DQ#OD$uNiM^2@>PVV!!3Z5XZBo5eY^l&21mf^ zt=vha$rU=|nmxmiEDB3QGh~I20yI}Hd(_oot$ZzK zUDckLuK;|iHEJ0b1Y892fb9HJxTZ|k=Eu02JuQfWc=f!twMomZ`#^EVte$rIaLcd~ zr82a5V^pcdn0X21>*Mu`9g89IIaKRCQEqzT?qr;BM1iHwT%9swIrlHq zMI;h)fxWMm?HaiQW|j9u;fF-|kEawv8G^n|Q=9{XYFXas!HDEtERNM=rftc1$0GCe zcW>Bd!pfr;)uela#djSX-%f<#wRw_-P9w9OlwalYc9H|nS~s3->>4O`&8^FCjkmWi zN26Kfb#vxD-V;>Yk^OcU${8r{MS9(k*mC&`bTUA97~37CdS?@u!(5gpkEk_2ptLF) z#gQ?s=p;Cj{!`4SnbP?t54xyBC2ml&JN&P#AAo&po@zh{w*m((39O$Dj&xSo3`)sh z6Zp|qKkVP|73c-%_`RayS+s%=LJ5&}kW`(hXlxo7GkH@Qj%382tlcc+>*5D73Sptu zMRu?!*^{X)!$y#~N1Qso$2k`oiKu^6rjC9G5L8UF%WuUQvJhFK-fmMvnER48pD?af|bG;L)ye{vAoedC(sbT0&<% reduce(cbind) + + df.means_hist <- colMeans(df[c(1:7200)], na.rm=T) + df.means_hist <- as.data.frame(df.means_hist) + df.sds_hist <- sapply(df[c(1:7200)], sd, na.rm=T) + df.sds_hist <- as.data.frame(df.sds_hist) + df.avs_hist <- cbind(df.means_hist, df.sds_hist) + + 
r <- Runs[[i]] + fn <- paste0("df.avs_Y00_Y20_Run",i, ".csv") + write.csv(df.avs_hist, fn) + + df.means_Y41_Y60 <- colMeans(df[c(7201:14400)], na.rm=T) + df.means_Y41_Y60 <- as.data.frame(df.means_Y41_Y60) + df.sds_Y41_Y60 <- sapply(df[c(7201:14400)], sd, na.rm=T) + df.sds_Y41_Y60 <- as.data.frame(df.sds_Y41_Y60) + df.avs_Y41_Y60 <- cbind(df.means_Y41_Y60, df.sds_Y41_Y60) + + fn <- paste0("df.Y41_Y60_Run",i, ".csv") + write.csv(df.avs_Y41_Y60, fn) + + remove(dl) + remove(df) + gc() + +} + From 6d994147661393ebc264687e7690e44449136b2f Mon Sep 17 00:00:00 2001 From: Greg Mingas Date: Tue, 18 Jul 2023 10:31:30 +0100 Subject: [PATCH 002/146] Minor changes to README.md --- README.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 7115aedd..d5acd7b9 100644 --- a/README.md +++ b/README.md @@ -62,13 +62,13 @@ In the `R` subdirectory you can find code for replicating the different data pro ### How to download the data -You can download the raw UKCP2.2 climate data from the CEDA archive using the python script under `python/data_download`: +You can download the raw UKCP2.2 climate data from the CEDA archive. Go [here](https://archive.ceda.ac.uk/), create an account and set up your FTP credentials in "My Account". You can then use the python script under `python/data_download/` to download the data: ``` -python3 ceda_ftp_download.py --input /badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/ --output output_dir --username 'uuu' --psw 'ppp' --change_hierarchy +python3 ceda_ftp_download.py --input /badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/ --output 'output_dir' --username 'uuu' --psw 'ppp' --change_hierarchy ``` -You need to replace `uuu` and `ppp` with your CEDA username and FTP password respectively (you need to first create an account [here](https://archive.ceda.ac.uk/), and `output_dir` with the directory you want to write the data to. 
+You need to replace `uuu` and `ppp` with your CEDA username and FTP password respectively and replace 'output_dir' with the directory you want to write the data to. -Note that the `--change_hierarchy` flag is used when the script is called, which modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes in the hierarch. +Note that the `--change_hierarchy` flag is used, which modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes in the hierarchy. You can download the HADs observational data from the CEDA archive using the same python script, with a different input (note the `change_hierarchy` flag should not be used with HADs data - only applies to UKCP data): ``` @@ -77,13 +77,13 @@ python3 ceda_ftp_download.py --input /badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/H ### Accessing the pre-downloaded/pre-processed data -Datasets used in this project (raw, processed and debiased) have been pre-downloaded/pre-processed and stored in an Azure fileshare set-up for the clim-recal project (https://dymestorage1.file.core.windows.net/vmfileshare). You need to be given access, and register your IP address to the approve list in the following way from the azure portal: +Datasets used in this project (raw, processed and debiased) have been pre-downloaded/pre-processed and stored in an Azure fileshare set-up for the clim-recal project (https://dymestorage1.file.core.windows.net/vmfileshare). You need to be given access, and register your IP address to the approved list in the following way from the azure portal: - Go to dymestorage1 page - Security + networking tab - Add your IP under the Firewall section -Once you have access you can mount the fileshare. 
On a Mac you can do it from a terminal +Once you have access you can mount the fileshare. On a Mac you can do it from a terminal: `open smb://dymestorage1.file.core.windows.net/vmfileshare` @@ -93,7 +93,9 @@ The fileshare will be mounted under `/Volumes/vmfileshare/` -Instructions on how the mount in other operating systems can be found in [the azure how-tos](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-linux?tabs=smb311). Alternatively, you can access the Azure Portal, go to the dymestorage1 fileshare and click the "Connect" button to get an automatically generated script. This script can be used from within an Azure VM to mount the drive. +Instructions on how the mount in other operating systems can be found in [the azure how-tos](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-linux?tabs=smb311). + +Alternatively, you can access the Azure Portal, go to the dymestorage1 fileshare and click the "Connect" button to get an automatically generated script. This script can be used from within an Azure VM to mount the drive. 
### Pre-downloaded/pre-processed data description From 733854ca825506dc6284634bf50ee6c4f44a1786 Mon Sep 17 00:00:00 2001 From: RuthBowyer Date: Tue, 18 Jul 2023 13:56:31 +0000 Subject: [PATCH 003/146] Files to process raster data to df This commit from rebranching of DataProcessingR --- R/misc/Data_Processing_todf.R | 106 +++++++++++++++++ R/misc/read_crop_df_write.fn.R | 208 +++++++++++++++++++++++++++++++++ 2 files changed, 314 insertions(+) create mode 100644 R/misc/Data_Processing_todf.R create mode 100644 R/misc/read_crop_df_write.fn.R diff --git a/R/misc/Data_Processing_todf.R b/R/misc/Data_Processing_todf.R new file mode 100644 index 00000000..3cf595cc --- /dev/null +++ b/R/misc/Data_Processing_todf.R @@ -0,0 +1,106 @@ +## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## +## Data Processing UK HadsGrid CPM data from raster to data.frame + + +## 0. About + +# Many of the methods packages for applying bias correction to climate date take as input vector, matrix or data.frame, +# rather than a spatial file or raster +# This may not be the most effective way of analysising this data + + +rm(list=ls()) + +# libs +library(terra) +library(sf) +library(exactextractr) +library(reshape2) #melt +library(tidyverse) # +library(doParallel) + +#Loaded package versions +x <- c("MBC", "terra", "sf", "exactextractr") +lapply(x,packageVersion) + +#Path is "//vmfileshare/ClimateData +#dd <- "/Volumes/vmfileshare/ClimateData/" +dd <- "/mnt/vmfileshare/ClimateData/" + +#Need to define results directory (rd) in global + +source("R/misc/read_crop_df_write.fn.R") + +## Run cropped to test, then write parallel loop for other variables possibly +f <- paste0(dd,'shapefiles/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom_2022_7279368953270783580/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shp') +UK.shape <-vect(f) + +#Loop over each section of the UK as indicated here +regions <- UK.shape$nuts118nm +regioncd <- 
UK.shape$nuts118cd + +# Run in parallel for 2 regions to check +x <- regioncd[3:length(regioncd)] +rd <- paste0(dd, "Interim/HadsUK/Data_as_df/") + +cores <- detectCores() +cl <- makeCluster(cores[1]-1) +registerDoParallel(cl) + + foreach(x = x, + .packages = c("terra", "tidyverse"), + .errorhandling = 'pass') %dopar% { + + f <- paste0(dd,'shapefiles/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom_2022_7279368953270783580/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shp') + UK.shape <-vect(f) + crop.area <- UK.shape[which(UK.shape$nuts118cd==x)] + + var <- c("rainfall", "tasmax", "tasmin", "tas") + + hads19802010_read_crop_df_write(var = var, + fp = paste0(dd, "Processed/HadsUKgrid/resampled_2.2km/"), + name1 = "HadsUK", + crop=T, + crop.area=crop.area, + cropname=x, + rd=rd) + + } + + stopCluster(cl) + gc() + + +### Processing CPM + + x <- regioncd + + + cores <- detectCores() + cl <- makeCluster(cores[1]-1) + registerDoParallel(cl) + + foreach(x = x, + .packages = c("terra", "tidyverse"), + .errorhandling = 'pass') %dopar% { + + f <- paste0(dd,'shapefiles/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom_2022_7279368953270783580/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shp') + UK.shape <-vect(f) + crop.area <- UK.shape[which(UK.shape$nuts118cd==x)] + + runs <- c("05", "07", "08", "06") + var <- c("tasmax", "tasmin","pr", "tas") + rd <- paste0(dd, "Interim/CPM/Data_as_df") + + cpm_read_crop_df_write(runs=runs, var=var, fp=paste0(dd, "Reprojected/UKCP2.2/"), + year1=2060, year2=2080, #Ran sep for each year segment + name1="CPM", crop = T, + crop.area = crop.area, cropname = x, + rd=paste0(dd, "Interim/CPM/Data_as_df")) + + + } + + stopCluster(cl) + gc() + diff --git a/R/misc/read_crop_df_write.fn.R b/R/misc/read_crop_df_write.fn.R new file mode 100644 index 00000000..b65efd40 --- /dev/null +++ b/R/misc/read_crop_df_write.fn.R @@ -0,0 +1,208 @@ + +write.csv.date <- function(x, y){ + date <- Sys.Date() + date <- gsub("-", ".", date) + fn <- y 
+ rd <- rd + csvFileName <- paste(rd,"/",fn,".",date,".csv",sep="") + write.csv(x, file=csvFileName, row.names = F)} + +# A function to read in specific runs, vars and years + +cpm_read_crop_df_write <- function(runs, #Character vector of selected runs + var, #Character vector of selected variables - this might need changing + fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") + year1, #Numeric, first year of segment + year2, #Numeric, lastyear of segment + name1, #Character - first part of name to be assigned to the returned df- usually the model + crop, #logical + crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work + cropname, #Character - name of crop to be assigned to the returned df - usually the crop area + rd){ # results directory for storing results + + runs <- runs + var <- var + years <- paste0(year1:year2, "1201", collapse="|") + + if(crop == T){ + + bbox <- crop.area + + for(i in runs){ + for(v in var){ + p <- paste0(fp, v, "/", i, "/latest/") + files <- list.files(p) + files <- files[!grepl("aux.xml", files)] + + files.y <- files[grepl(years, files)]# Historical timeslice 2 for calibration + files.y.p <- paste0(p, files.y) + + # Read in 1st runpath as df with xy coords to ensure overlay + p1 <- files.y.p[[1]] + r <- rast(p1) + r_c <- crop(r, bbox, snap="out", mask=T) + rdf1 <- as.data.frame(r_c, xy=T) + + # Load and convert remaining to single col dfs + dfL <- lapply(2:length(files.y.p), function(i){ + p <- files.y.p[[i]] + r <- rast(p) + r_c <- crop(r, bbox, snap="out", mask=T) + rdf <- as.data.frame(r_c) + return(rdf) + }) + + df <- dfL %>% reduce(cbind) + df <- cbind(rdf1, df) + + fn <- paste0(name1, "_", cropname, year1, "_", year2, v, "_Run", i) + + rd <- rd + write.csv.date(df, fn) + gc() + } + } + } else { #for where no crop to be applied + + for(i in runs){ + for(v in var){ + p <- paste0(fp, v, "/", i, "/latest/") + files <- list.files(p) + files <- 
files[!grepl("aux.xml", files)] + + files.y <- files[grepl(years, files)]# Historical timeslice 2 for calibration + files.y.p <- paste0(p, files.y) + + # Read in 1st runpath as df with xy coords to ensure overlay + p1 <- files.y.p[[1]] + r <- rast(p1) + rdf1 <- as.data.frame(r_c, xy=T) + + # Load and convert remaining to single col dfs + dfL <- lapply(2:length(files.y.p), function(i){ + p <- files.y.p[[i]] + r <- rast(p) + rdf <- as.data.frame(r_c) + return(rdf) + }) + + df <- dfL %>% reduce(cbind) + df <- cbind(rdf1, df) + + rd <- rd + + fn <- paste0(name1, "_", cropname, year1, "_", year2, v, "_Run", i) + + write.csv.date(df, fn) + + gc() + } + } + } +} + + +# HADs function + +hads19802010_read_crop_df_write <- function(var, #Character vector of selected variables - this might need changing + fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") + name1, #Character - first part of name to be assigned to the returned df- usually the model + crop, #logical + crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work + cropname, #Character - name of crop to be assigned to the returned df - usually the crop area + rd){ # results directory for storing results + + var <- var + fp <- fp + crop <- crop + + for(v in var){ + + HADs.files <- list.files(paste0(fp, v,"/day/")) + files <- HADs.files[grepl(v, HADs.files)] + Runpaths <- paste0(fp,v,"/day/",files[1:360]) #Subsetting to years 1980-2010 - if we download different data then this would need to be changed + + if(crop == TRUE){ + + bbox <- crop.area + cropname <- cropname + + # Read in 1st runpath as df with xy coords to ensure overlay with CPM data + p <- Runpaths[[1]] + r <- rast(p) + r_c <- crop(r, bbox, snap="out", mask=T) + rdf1 <- as.data.frame(r_c, xy=T) + + #To ensure subset dataframe has useful naming convention - this does not pull it through as such + n <- substr(p, nchar(p)-20, nchar(p)) + n <- gsub(".nc","", n) + names(rdf1) <- 
gsub("_", paste0(n, "_"), names(rdf1)) + + # Load and convert remaining to single col dfs + i <- 2:length(Runpaths) + + dfL <-lapply(i, function(i){ + p <- Runpaths[[i]] + r <- rast(p) + r_c <- crop(r, bbox, snap="out", mask=T) + rdf <- as.data.frame(r_c) + #To ensure subset dataframe has useful naming convention - this does not pull it through as such + n <- substr(p, nchar(p)-20, nchar(p)) + n <- gsub(".nc","", n) + names(rdf) <- gsub("_", paste0(n, "_"), names(rdf)) + return(rdf) + }) + + df <- dfL %>% reduce(cbind) + df <- cbind(rdf1, df) + + rd <- rd + + fn <- paste0(name1,cropname,"1980_2010_", v) + + write.csv.date(df, fn) + + gc() + + } else { + + + # Read in 1st runpath as df with xy coords to ensure overlay with CPM data + p <- Runpaths[[1]] + r <- rast(p) + rdf1 <- as.data.frame(r, xy=T) + + #To ensure subset dataframe has useful naming convention - this does not pull it through as such + n <- substr(p, nchar(p)-20, nchar(p)) + n <- gsub(".nc","", n) + names(rdf1) <- gsub("_", paste0(n, "_"), names(rdf1)) + + # Load and convert remaining to single col dfs + i <- 2:length(Runpaths) + + dfL <-lapply(i, function(i){ + p <- Runpaths[[i]] + r <- rast(p) + rdf <- as.data.frame(r) + #To ensure subset dataframe has useful naming convention - this does not pull it through as such + n <- substr(p, nchar(p)-20, nchar(p)) + n <- gsub(".nc","", n) + names(rdf) <- gsub("_", paste0(n, "_"), names(rdf)) + return(rdf) + }) + + df <- dfL %>% reduce(cbind) + df <- cbind(rdf1, df) + + rd <- rd + + fn <- paste0(name1,"1980_2010_", v) + + write.csv.date(df, fn) + + gc() + + } + } +} + From 3c15245d4b966fcf5fe81cc1eed0f31e6cbabf9e Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Fri, 11 Aug 2023 11:58:14 +0100 Subject: [PATCH 004/146] Revert accidental changes --- R/misc/Data_Processing_todf.R | 106 ----------------- R/misc/read_crop_df_write.fn.R | 208 --------------------------------- 2 files changed, 314 deletions(-) delete mode 100644 R/misc/Data_Processing_todf.R 
delete mode 100644 R/misc/read_crop_df_write.fn.R diff --git a/R/misc/Data_Processing_todf.R b/R/misc/Data_Processing_todf.R deleted file mode 100644 index 3cf595cc..00000000 --- a/R/misc/Data_Processing_todf.R +++ /dev/null @@ -1,106 +0,0 @@ -## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## -## Data Processing UK HadsGrid CPM data from raster to data.frame - - -## 0. About - -# Many of the methods packages for applying bias correction to climate date take as input vector, matrix or data.frame, -# rather than a spatial file or raster -# This may not be the most effective way of analysising this data - - -rm(list=ls()) - -# libs -library(terra) -library(sf) -library(exactextractr) -library(reshape2) #melt -library(tidyverse) # -library(doParallel) - -#Loaded package versions -x <- c("MBC", "terra", "sf", "exactextractr") -lapply(x,packageVersion) - -#Path is "//vmfileshare/ClimateData -#dd <- "/Volumes/vmfileshare/ClimateData/" -dd <- "/mnt/vmfileshare/ClimateData/" - -#Need to define results directory (rd) in global - -source("R/misc/read_crop_df_write.fn.R") - -## Run cropped to test, then write parallel loop for other variables possibly -f <- paste0(dd,'shapefiles/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom_2022_7279368953270783580/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shp') -UK.shape <-vect(f) - -#Loop over each section of the UK as indicated here -regions <- UK.shape$nuts118nm -regioncd <- UK.shape$nuts118cd - -# Run in parallel for 2 regions to check -x <- regioncd[3:length(regioncd)] -rd <- paste0(dd, "Interim/HadsUK/Data_as_df/") - -cores <- detectCores() -cl <- makeCluster(cores[1]-1) -registerDoParallel(cl) - - foreach(x = x, - .packages = c("terra", "tidyverse"), - .errorhandling = 'pass') %dopar% { - - f <- paste0(dd,'shapefiles/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom_2022_7279368953270783580/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shp') - UK.shape 
<-vect(f) - crop.area <- UK.shape[which(UK.shape$nuts118cd==x)] - - var <- c("rainfall", "tasmax", "tasmin", "tas") - - hads19802010_read_crop_df_write(var = var, - fp = paste0(dd, "Processed/HadsUKgrid/resampled_2.2km/"), - name1 = "HadsUK", - crop=T, - crop.area=crop.area, - cropname=x, - rd=rd) - - } - - stopCluster(cl) - gc() - - -### Processing CPM - - x <- regioncd - - - cores <- detectCores() - cl <- makeCluster(cores[1]-1) - registerDoParallel(cl) - - foreach(x = x, - .packages = c("terra", "tidyverse"), - .errorhandling = 'pass') %dopar% { - - f <- paste0(dd,'shapefiles/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom_2022_7279368953270783580/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shp') - UK.shape <-vect(f) - crop.area <- UK.shape[which(UK.shape$nuts118cd==x)] - - runs <- c("05", "07", "08", "06") - var <- c("tasmax", "tasmin","pr", "tas") - rd <- paste0(dd, "Interim/CPM/Data_as_df") - - cpm_read_crop_df_write(runs=runs, var=var, fp=paste0(dd, "Reprojected/UKCP2.2/"), - year1=2060, year2=2080, #Ran sep for each year segment - name1="CPM", crop = T, - crop.area = crop.area, cropname = x, - rd=paste0(dd, "Interim/CPM/Data_as_df")) - - - } - - stopCluster(cl) - gc() - diff --git a/R/misc/read_crop_df_write.fn.R b/R/misc/read_crop_df_write.fn.R deleted file mode 100644 index b65efd40..00000000 --- a/R/misc/read_crop_df_write.fn.R +++ /dev/null @@ -1,208 +0,0 @@ - -write.csv.date <- function(x, y){ - date <- Sys.Date() - date <- gsub("-", ".", date) - fn <- y - rd <- rd - csvFileName <- paste(rd,"/",fn,".",date,".csv",sep="") - write.csv(x, file=csvFileName, row.names = F)} - -# A function to read in specific runs, vars and years - -cpm_read_crop_df_write <- function(runs, #Character vector of selected runs - var, #Character vector of selected variables - this might need changing - fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") - year1, #Numeric, first year of segment - year2, #Numeric, 
lastyear of segment - name1, #Character - first part of name to be assigned to the returned df- usually the model - crop, #logical - crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work - cropname, #Character - name of crop to be assigned to the returned df - usually the crop area - rd){ # results directory for storing results - - runs <- runs - var <- var - years <- paste0(year1:year2, "1201", collapse="|") - - if(crop == T){ - - bbox <- crop.area - - for(i in runs){ - for(v in var){ - p <- paste0(fp, v, "/", i, "/latest/") - files <- list.files(p) - files <- files[!grepl("aux.xml", files)] - - files.y <- files[grepl(years, files)]# Historical timeslice 2 for calibration - files.y.p <- paste0(p, files.y) - - # Read in 1st runpath as df with xy coords to ensure overlay - p1 <- files.y.p[[1]] - r <- rast(p1) - r_c <- crop(r, bbox, snap="out", mask=T) - rdf1 <- as.data.frame(r_c, xy=T) - - # Load and convert remaining to single col dfs - dfL <- lapply(2:length(files.y.p), function(i){ - p <- files.y.p[[i]] - r <- rast(p) - r_c <- crop(r, bbox, snap="out", mask=T) - rdf <- as.data.frame(r_c) - return(rdf) - }) - - df <- dfL %>% reduce(cbind) - df <- cbind(rdf1, df) - - fn <- paste0(name1, "_", cropname, year1, "_", year2, v, "_Run", i) - - rd <- rd - write.csv.date(df, fn) - gc() - } - } - } else { #for where no crop to be applied - - for(i in runs){ - for(v in var){ - p <- paste0(fp, v, "/", i, "/latest/") - files <- list.files(p) - files <- files[!grepl("aux.xml", files)] - - files.y <- files[grepl(years, files)]# Historical timeslice 2 for calibration - files.y.p <- paste0(p, files.y) - - # Read in 1st runpath as df with xy coords to ensure overlay - p1 <- files.y.p[[1]] - r <- rast(p1) - rdf1 <- as.data.frame(r_c, xy=T) - - # Load and convert remaining to single col dfs - dfL <- lapply(2:length(files.y.p), function(i){ - p <- files.y.p[[i]] - r <- rast(p) - rdf <- as.data.frame(r_c) - return(rdf) - }) - - df <- dfL %>% 
reduce(cbind) - df <- cbind(rdf1, df) - - rd <- rd - - fn <- paste0(name1, "_", cropname, year1, "_", year2, v, "_Run", i) - - write.csv.date(df, fn) - - gc() - } - } - } -} - - -# HADs function - -hads19802010_read_crop_df_write <- function(var, #Character vector of selected variables - this might need changing - fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") - name1, #Character - first part of name to be assigned to the returned df- usually the model - crop, #logical - crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work - cropname, #Character - name of crop to be assigned to the returned df - usually the crop area - rd){ # results directory for storing results - - var <- var - fp <- fp - crop <- crop - - for(v in var){ - - HADs.files <- list.files(paste0(fp, v,"/day/")) - files <- HADs.files[grepl(v, HADs.files)] - Runpaths <- paste0(fp,v,"/day/",files[1:360]) #Subsetting to years 1980-2010 - if we download different data then this would need to be changed - - if(crop == TRUE){ - - bbox <- crop.area - cropname <- cropname - - # Read in 1st runpath as df with xy coords to ensure overlay with CPM data - p <- Runpaths[[1]] - r <- rast(p) - r_c <- crop(r, bbox, snap="out", mask=T) - rdf1 <- as.data.frame(r_c, xy=T) - - #To ensure subset dataframe has useful naming convention - this does not pull it through as such - n <- substr(p, nchar(p)-20, nchar(p)) - n <- gsub(".nc","", n) - names(rdf1) <- gsub("_", paste0(n, "_"), names(rdf1)) - - # Load and convert remaining to single col dfs - i <- 2:length(Runpaths) - - dfL <-lapply(i, function(i){ - p <- Runpaths[[i]] - r <- rast(p) - r_c <- crop(r, bbox, snap="out", mask=T) - rdf <- as.data.frame(r_c) - #To ensure subset dataframe has useful naming convention - this does not pull it through as such - n <- substr(p, nchar(p)-20, nchar(p)) - n <- gsub(".nc","", n) - names(rdf) <- gsub("_", paste0(n, "_"), names(rdf)) - return(rdf) - }) - 
- df <- dfL %>% reduce(cbind) - df <- cbind(rdf1, df) - - rd <- rd - - fn <- paste0(name1,cropname,"1980_2010_", v) - - write.csv.date(df, fn) - - gc() - - } else { - - - # Read in 1st runpath as df with xy coords to ensure overlay with CPM data - p <- Runpaths[[1]] - r <- rast(p) - rdf1 <- as.data.frame(r, xy=T) - - #To ensure subset dataframe has useful naming convention - this does not pull it through as such - n <- substr(p, nchar(p)-20, nchar(p)) - n <- gsub(".nc","", n) - names(rdf1) <- gsub("_", paste0(n, "_"), names(rdf1)) - - # Load and convert remaining to single col dfs - i <- 2:length(Runpaths) - - dfL <-lapply(i, function(i){ - p <- Runpaths[[i]] - r <- rast(p) - rdf <- as.data.frame(r) - #To ensure subset dataframe has useful naming convention - this does not pull it through as such - n <- substr(p, nchar(p)-20, nchar(p)) - n <- gsub(".nc","", n) - names(rdf) <- gsub("_", paste0(n, "_"), names(rdf)) - return(rdf) - }) - - df <- dfL %>% reduce(cbind) - df <- cbind(rdf1, df) - - rd <- rd - - fn <- paste0(name1,"1980_2010_", v) - - write.csv.date(df, fn) - - gc() - - } - } -} - From c453ef501aee69dacdf48db223ee68345250b6aa Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 10 Aug 2023 12:40:25 +0100 Subject: [PATCH 005/146] make azure instructions more clear --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d5acd7b9..44866c27 100644 --- a/README.md +++ b/README.md @@ -79,8 +79,8 @@ python3 ceda_ftp_download.py --input /badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/H Datasets used in this project (raw, processed and debiased) have been pre-downloaded/pre-processed and stored in an Azure fileshare set-up for the clim-recal project (https://dymestorage1.file.core.windows.net/vmfileshare). 
You need to be given access, and register your IP address to the approved list in the following way from the azure portal: -- Go to dymestorage1 page -- Security + networking tab +- Go to dymestorage1 page `Home > Storage accounts > dymestorage1` +- Navigate to *Networking* tab under Security + networking - Add your IP under the Firewall section Once you have access you can mount the fileshare. On a Mac you can do it from a terminal: From 62b86f564adcc02d32d53a2e2cad4bfafd96b799 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Fri, 11 Aug 2023 21:23:41 +0100 Subject: [PATCH 006/146] comparing dates before and after resampling --- python/resampling/check_calendar.py | 58 + python/resampling/check_calendar_log.txt | 1886 ++++++++++++++++++++++ 2 files changed, 1944 insertions(+) create mode 100644 python/resampling/check_calendar.py create mode 100644 python/resampling/check_calendar_log.txt diff --git a/python/resampling/check_calendar.py b/python/resampling/check_calendar.py new file mode 100644 index 00000000..26390efd --- /dev/null +++ b/python/resampling/check_calendar.py @@ -0,0 +1,58 @@ +from datetime import datetime +import os +import xarray as xr +import glob +import numpy as np + +path_raw = '/Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day' +path_preproc = '/Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day' +#example file names: +#tasmax_hadukgrid_uk_1km_day_2.2km_resampled_19800101-19800131.ncr +#tasmax_hadukgrid_uk_1km_day_20211201-20211231.nc + +# open log file and write both input paths on top: +with open('check_calendar_log.txt', 'w') as f: + f.write(f"{'*'*20} Comparing raw data: {path_raw} {'*'*20}\n") + f.write(f"{'*'*20} to resampled data: {path_preproc} {'*'*20}\n") + +#iterate through dir at path and loop through files +files = [os.path.basename(f) for f in glob.glob(path_raw + "**/*.nc", recursive=True)] + +for i,file in enumerate(files): + if i%10==0: + print(i) + #separate filename from flag 
'2.2km_resamples' from date + output_name = f"{'_'.join(file.split('_')[:-1])}_2.2km_resampled_{file.split('_')[-1]}" + + raw_f = os.path.join(path_raw, file) + preproc_f = os.path.join(path_preproc, output_name) + #load before and after resampling files + data_raw = xr.open_dataset(raw_f, decode_coords="all") + data_preproc = xr.open_dataset(preproc_f, decode_coords="all") + time_raw = [str(t).split('T')[0] for t in data_raw.coords['time'].values] + time_pre = [str(t).split(' ')[0] for t in data_preproc.coords['time'].values] + + # Use sets to find differences + dates_in_raw_not_in_pre = set(time_raw) - set(time_pre) + dates_in_pre_not_in_raw = set(time_pre) - set(time_raw) + + # check if dates are empty + if dates_in_raw_not_in_pre | dates_in_pre_not_in_raw: + #if date in raw not in pre ends in 31 + if list(dates_in_raw_not_in_pre)[0][-2:]!='31': + # write to log file + with open(os.path.join(path_preproc,'check_calendar_log.txt'), 'a') as f: + f.write(f"File: {file} produced errors:\n") + f.write(f"raw # days: {len(set(time_raw))} - resampled # days: {len(set(time_pre))}\n") + f.write(f"Dates in raw not in resampled: {dates_in_raw_not_in_pre}\n") + f.write(f"Dates in resampled not in raw: {dates_in_pre_not_in_raw}\n") + + + + + + + + + + diff --git a/python/resampling/check_calendar_log.txt b/python/resampling/check_calendar_log.txt new file mode 100644 index 00000000..362bc6a6 --- /dev/null +++ b/python/resampling/check_calendar_log.txt @@ -0,0 +1,1886 @@ +******************** Comparing raw data: /Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day ******************** +******************** to resampled data: /Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day ******************** +File: tasmax_hadukgrid_uk_1km_day_19800101-19800131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1980-01-31'} +Dates in resampled not in raw: set() +File: 
tasmax_hadukgrid_uk_1km_day_19800301-19800331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-03-31'} +Dates in resampled not in raw: {'1980-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19800401-19800430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1980-04-30'} +Dates in resampled not in raw: {'1980-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19800501-19800531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-05-31'} +Dates in resampled not in raw: {'1980-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19800601-19800630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1980-06-30'} +Dates in resampled not in raw: {'1980-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19800701-19800731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-07-31'} +Dates in resampled not in raw: {'1980-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19800801-19800831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-08-31'} +Dates in resampled not in raw: {'1980-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19801001-19801031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-10-31'} +Dates in resampled not in raw: {'1980-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19801201-19801231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-12-31'} +Dates in resampled not in raw: {'1980-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19810101-19810131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-01-31'} +Dates in resampled not in raw: {'1981-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19810201-19810228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: 
{'1981-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19810301-19810331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-03-31', '1981-03-30'} +Dates in resampled not in raw: {'1981-02-29', '1981-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19810401-19810430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1981-04-30', '1981-04-29'} +Dates in resampled not in raw: {'1981-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19810501-19810531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-05-31', '1981-05-30'} +Dates in resampled not in raw: {'1981-04-30', '1981-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19810601-19810630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1981-06-30'} +Dates in resampled not in raw: {'1981-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19810701-19810731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1981-07-31', '1981-07-30'} +Dates in resampled not in raw: {'1981-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19810801-19810831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-08-31'} +Dates in resampled not in raw: {'1981-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19810901-19810930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1981-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19811001-19811031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-10-31'} +Dates in resampled not in raw: {'1981-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19811101-19811130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1981-11-30'} +Dates in resampled not in raw: set() +File: 
tasmax_hadukgrid_uk_1km_day_19811201-19811231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-12-31'} +Dates in resampled not in raw: {'1981-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19820101-19820131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-01-31'} +Dates in resampled not in raw: {'1982-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19820201-19820228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1982-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19820301-19820331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-03-30', '1982-03-31'} +Dates in resampled not in raw: {'1982-02-30', '1982-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19820401-19820430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1982-04-29', '1982-04-30'} +Dates in resampled not in raw: {'1982-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19820501-19820531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-05-31', '1982-05-30'} +Dates in resampled not in raw: {'1982-04-29', '1982-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19820601-19820630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1982-06-30'} +Dates in resampled not in raw: {'1982-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19820701-19820731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1982-07-30', '1982-07-31'} +Dates in resampled not in raw: {'1982-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19820801-19820831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-08-31'} +Dates in resampled not in raw: {'1982-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19820901-19820930.nc produced errors: +raw 
# days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1982-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19821001-19821031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-10-31'} +Dates in resampled not in raw: {'1982-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19821101-19821130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1982-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19821201-19821231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-12-31'} +Dates in resampled not in raw: {'1982-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19830101-19830131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-01-31'} +Dates in resampled not in raw: {'1983-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19830201-19830228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1983-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19830301-19830331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-03-30', '1983-03-31'} +Dates in resampled not in raw: {'1983-02-29', '1983-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19830401-19830430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1983-04-29', '1983-04-30'} +Dates in resampled not in raw: {'1983-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19830501-19830531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-05-30', '1983-05-31'} +Dates in resampled not in raw: {'1983-04-29', '1983-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19830601-19830630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1983-06-30'} +Dates in resampled 
not in raw: {'1983-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19830701-19830731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1983-07-31', '1983-07-30'} +Dates in resampled not in raw: {'1983-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19830801-19830831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-08-31'} +Dates in resampled not in raw: {'1983-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19830901-19830930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1983-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19831001-19831031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-10-31'} +Dates in resampled not in raw: {'1983-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19831101-19831130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1983-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19831201-19831231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-12-31'} +Dates in resampled not in raw: {'1983-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19840101-19840131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1984-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19840301-19840331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-03-31'} +Dates in resampled not in raw: {'1984-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19840401-19840430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1984-04-30'} +Dates in resampled not in raw: {'1984-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19840501-19840531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not 
in resampled: {'1984-05-31'} +Dates in resampled not in raw: {'1984-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19840601-19840630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1984-06-30'} +Dates in resampled not in raw: {'1984-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19840701-19840731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-07-31'} +Dates in resampled not in raw: {'1984-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19840801-19840831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-08-31'} +Dates in resampled not in raw: {'1984-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19841001-19841031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-10-31'} +Dates in resampled not in raw: {'1984-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19841201-19841231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-12-31'} +Dates in resampled not in raw: {'1984-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19850101-19850131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-01-31'} +Dates in resampled not in raw: {'1985-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19850201-19850228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1985-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19850301-19850331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-03-30', '1985-03-31'} +Dates in resampled not in raw: {'1985-02-30', '1985-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19850401-19850430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1985-04-30', '1985-04-29'} +Dates in resampled not in raw: {'1985-03-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19850501-19850531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-05-30', '1985-05-31'} +Dates in resampled not in raw: {'1985-04-30', '1985-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19850601-19850630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1985-06-30'} +Dates in resampled not in raw: {'1985-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19850701-19850731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1985-07-30', '1985-07-31'} +Dates in resampled not in raw: {'1985-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19850801-19850831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-08-31'} +Dates in resampled not in raw: {'1985-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19850901-19850930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1985-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19851001-19851031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-10-31'} +Dates in resampled not in raw: {'1985-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19851101-19851130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1985-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19851201-19851231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-12-31'} +Dates in resampled not in raw: {'1985-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19860101-19860131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-01-31'} +Dates in resampled not in raw: {'1986-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19860201-19860228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw 
not in resampled: {'1986-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19860301-19860331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-03-30', '1986-03-31'} +Dates in resampled not in raw: {'1986-02-30', '1986-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19860401-19860430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1986-04-29', '1986-04-30'} +Dates in resampled not in raw: {'1986-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19860501-19860531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-05-30', '1986-05-31'} +Dates in resampled not in raw: {'1986-04-29', '1986-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19860601-19860630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1986-06-30'} +Dates in resampled not in raw: {'1986-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19860701-19860731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1986-07-31', '1986-07-30'} +Dates in resampled not in raw: {'1986-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19860801-19860831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-08-31'} +Dates in resampled not in raw: {'1986-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19860901-19860930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1986-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19861001-19861031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-10-31'} +Dates in resampled not in raw: {'1986-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19861101-19861130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1986-11-30'} +Dates in resampled not in raw: set() +File: 
tasmax_hadukgrid_uk_1km_day_19861201-19861231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-12-31'} +Dates in resampled not in raw: {'1986-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19870101-19870131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-01-31'} +Dates in resampled not in raw: {'1987-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19870201-19870228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1987-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19870301-19870331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-03-31', '1987-03-30'} +Dates in resampled not in raw: {'1987-02-29', '1987-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19870401-19870430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1987-04-29', '1987-04-30'} +Dates in resampled not in raw: {'1987-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19870501-19870531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-05-30', '1987-05-31'} +Dates in resampled not in raw: {'1987-04-29', '1987-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19870601-19870630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1987-06-30'} +Dates in resampled not in raw: {'1987-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19870701-19870731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1987-07-30', '1987-07-31'} +Dates in resampled not in raw: {'1987-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19870801-19870831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-08-31'} +Dates in resampled not in raw: {'1987-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19870901-19870930.nc produced errors: +raw 
# days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1987-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19871001-19871031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-10-31'} +Dates in resampled not in raw: {'1987-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19871101-19871130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1987-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19871201-19871231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-12-31'} +Dates in resampled not in raw: {'1987-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19880101-19880131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1988-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19880301-19880331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-03-31'} +Dates in resampled not in raw: {'1988-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19880401-19880430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1988-04-30'} +Dates in resampled not in raw: {'1988-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19880501-19880531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-05-31'} +Dates in resampled not in raw: {'1988-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19880601-19880630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1988-06-30'} +Dates in resampled not in raw: {'1988-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19880701-19880731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-07-31'} +Dates in resampled not in raw: {'1988-06-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19880801-19880831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-08-31'} +Dates in resampled not in raw: {'1988-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19881001-19881031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-10-31'} +Dates in resampled not in raw: {'1988-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19881201-19881231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-12-31'} +Dates in resampled not in raw: {'1988-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19890101-19890131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-01-31'} +Dates in resampled not in raw: {'1989-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19890201-19890228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1989-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19890301-19890331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-03-30', '1989-03-31'} +Dates in resampled not in raw: {'1989-02-29', '1989-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19890401-19890430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1989-04-29', '1989-04-30'} +Dates in resampled not in raw: {'1989-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19890501-19890531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-05-31', '1989-05-30'} +Dates in resampled not in raw: {'1989-04-29', '1989-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19890601-19890630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1989-06-30'} +Dates in resampled not in raw: {'1989-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19890701-19890731.nc produced errors: +raw # days: 31 - 
resampled # days: 30 +Dates in raw not in resampled: {'1989-07-30', '1989-07-31'} +Dates in resampled not in raw: {'1989-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19890801-19890831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-08-31'} +Dates in resampled not in raw: {'1989-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19890901-19890930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1989-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19891001-19891031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-10-31'} +Dates in resampled not in raw: {'1989-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19891101-19891130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1989-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19891201-19891231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-12-31'} +Dates in resampled not in raw: {'1989-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19900101-19900131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-01-31'} +Dates in resampled not in raw: {'1990-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19900201-19900228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1990-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19900301-19900331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-03-30', '1990-03-31'} +Dates in resampled not in raw: {'1990-02-29', '1990-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19900401-19900430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1990-04-30', '1990-04-29'} +Dates in resampled not in raw: {'1990-03-30'} 
+File: tasmax_hadukgrid_uk_1km_day_19900501-19900531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-05-31', '1990-05-30'} +Dates in resampled not in raw: {'1990-04-30', '1990-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19900601-19900630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1990-06-30'} +Dates in resampled not in raw: {'1990-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19900701-19900731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1990-07-31', '1990-07-30'} +Dates in resampled not in raw: {'1990-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19900801-19900831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-08-31'} +Dates in resampled not in raw: {'1990-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19900901-19900930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1990-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19901001-19901031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-10-31'} +Dates in resampled not in raw: {'1990-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19901101-19901130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1990-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19901201-19901231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-12-31'} +Dates in resampled not in raw: {'1990-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19910101-19910131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-01-31'} +Dates in resampled not in raw: {'1991-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19910201-19910228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in 
raw not in resampled: {'1991-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19910301-19910331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-03-30', '1991-03-31'} +Dates in resampled not in raw: {'1991-02-29', '1991-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19910401-19910430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1991-04-30', '1991-04-29'} +Dates in resampled not in raw: {'1991-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19910501-19910531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-05-30', '1991-05-31'} +Dates in resampled not in raw: {'1991-04-30', '1991-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19910601-19910630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1991-06-30'} +Dates in resampled not in raw: {'1991-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19910701-19910731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1991-07-30', '1991-07-31'} +Dates in resampled not in raw: {'1991-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19910801-19910831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-08-31'} +Dates in resampled not in raw: {'1991-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19910901-19910930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1991-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19911001-19911031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-10-31'} +Dates in resampled not in raw: {'1991-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19911101-19911130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1991-11-30'} +Dates in resampled not in raw: set() 
+File: tasmax_hadukgrid_uk_1km_day_19911201-19911231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-12-31'} +Dates in resampled not in raw: {'1991-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19920101-19920131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1992-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19920301-19920331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-03-31'} +Dates in resampled not in raw: {'1992-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19920401-19920430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1992-04-30'} +Dates in resampled not in raw: {'1992-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19920501-19920531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-05-31'} +Dates in resampled not in raw: {'1992-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19920601-19920630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1992-06-30'} +Dates in resampled not in raw: {'1992-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19920701-19920731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-07-31'} +Dates in resampled not in raw: {'1992-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19920801-19920831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-08-31'} +Dates in resampled not in raw: {'1992-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19921001-19921031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-10-31'} +Dates in resampled not in raw: {'1992-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19921201-19921231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: 
{'1992-12-31'} +Dates in resampled not in raw: {'1992-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19930101-19930131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-01-31'} +Dates in resampled not in raw: {'1993-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19930201-19930228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1993-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19930301-19930331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-03-31', '1993-03-30'} +Dates in resampled not in raw: {'1993-02-30', '1993-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19930401-19930430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1993-04-30', '1993-04-29'} +Dates in resampled not in raw: {'1993-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19930501-19930531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-05-31', '1993-05-30'} +Dates in resampled not in raw: {'1993-04-30', '1993-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19930601-19930630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1993-06-30'} +Dates in resampled not in raw: {'1993-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19930701-19930731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1993-07-31', '1993-07-30'} +Dates in resampled not in raw: {'1993-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19930801-19930831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-08-31'} +Dates in resampled not in raw: {'1993-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19930901-19930930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1993-09-30'} +Dates in resampled not in raw: set() +File: 
tasmax_hadukgrid_uk_1km_day_19931001-19931031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-10-31'} +Dates in resampled not in raw: {'1993-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19931101-19931130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1993-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19931201-19931231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-12-31'} +Dates in resampled not in raw: {'1993-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19940101-19940131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-01-31'} +Dates in resampled not in raw: {'1994-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19940201-19940228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1994-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19940301-19940331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-03-31', '1994-03-30'} +Dates in resampled not in raw: {'1994-02-30', '1994-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19940401-19940430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1994-04-30', '1994-04-29'} +Dates in resampled not in raw: {'1994-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19940501-19940531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-05-31', '1994-05-30'} +Dates in resampled not in raw: {'1994-04-30', '1994-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19940601-19940630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1994-06-30'} +Dates in resampled not in raw: {'1994-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19940701-19940731.nc produced errors: +raw # days: 31 - resampled 
# days: 30 +Dates in raw not in resampled: {'1994-07-31', '1994-07-30'} +Dates in resampled not in raw: {'1994-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19940801-19940831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-08-31'} +Dates in resampled not in raw: {'1994-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19940901-19940930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1994-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19941001-19941031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-10-31'} +Dates in resampled not in raw: {'1994-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19941101-19941130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1994-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19941201-19941231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-12-31'} +Dates in resampled not in raw: {'1994-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19950101-19950131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-01-31'} +Dates in resampled not in raw: {'1995-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19950201-19950228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1995-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19950301-19950331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-03-31', '1995-03-30'} +Dates in resampled not in raw: {'1995-02-29', '1995-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19950401-19950430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1995-04-30', '1995-04-29'} +Dates in resampled not in raw: {'1995-03-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19950501-19950531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-05-30', '1995-05-31'} +Dates in resampled not in raw: {'1995-04-30', '1995-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19950601-19950630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1995-06-30'} +Dates in resampled not in raw: {'1995-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19950701-19950731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1995-07-30', '1995-07-31'} +Dates in resampled not in raw: {'1995-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19950801-19950831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-08-31'} +Dates in resampled not in raw: {'1995-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19950901-19950930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1995-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19951001-19951031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-10-31'} +Dates in resampled not in raw: {'1995-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19951101-19951130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1995-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19951201-19951231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-12-31'} +Dates in resampled not in raw: {'1995-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19960101-19960131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1996-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19960301-19960331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in 
resampled: {'1996-03-31'} +Dates in resampled not in raw: {'1996-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19960401-19960430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1996-04-30'} +Dates in resampled not in raw: {'1996-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19960501-19960531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-05-31'} +Dates in resampled not in raw: {'1996-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19960601-19960630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1996-06-30'} +Dates in resampled not in raw: {'1996-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19960701-19960731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-07-31'} +Dates in resampled not in raw: {'1996-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19960801-19960831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-08-31'} +Dates in resampled not in raw: {'1996-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19961001-19961031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-10-31'} +Dates in resampled not in raw: {'1996-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19961201-19961231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-12-31'} +Dates in resampled not in raw: {'1996-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19970101-19970131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-01-31'} +Dates in resampled not in raw: {'1997-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19970201-19970228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1997-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19970301-19970331.nc produced errors: +raw # 
days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-03-30', '1997-03-31'} +Dates in resampled not in raw: {'1997-02-29', '1997-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19970401-19970430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1997-04-29', '1997-04-30'} +Dates in resampled not in raw: {'1997-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19970501-19970531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-05-31', '1997-05-30'} +Dates in resampled not in raw: {'1997-04-29', '1997-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19970601-19970630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1997-06-30'} +Dates in resampled not in raw: {'1997-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19970701-19970731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1997-07-30', '1997-07-31'} +Dates in resampled not in raw: {'1997-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19970801-19970831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-08-31'} +Dates in resampled not in raw: {'1997-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19970901-19970930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1997-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19971001-19971031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-10-31'} +Dates in resampled not in raw: {'1997-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19971101-19971130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1997-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19971201-19971231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-12-31'} 
+Dates in resampled not in raw: {'1997-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19980101-19980131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-01-31'} +Dates in resampled not in raw: {'1998-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19980201-19980228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1998-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19980301-19980331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-03-31', '1998-03-30'} +Dates in resampled not in raw: {'1998-02-30', '1998-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19980401-19980430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1998-04-30', '1998-04-29'} +Dates in resampled not in raw: {'1998-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19980501-19980531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-05-30', '1998-05-31'} +Dates in resampled not in raw: {'1998-04-30', '1998-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19980601-19980630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1998-06-30'} +Dates in resampled not in raw: {'1998-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19980701-19980731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1998-07-30', '1998-07-31'} +Dates in resampled not in raw: {'1998-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19980801-19980831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-08-31'} +Dates in resampled not in raw: {'1998-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19980901-19980930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1998-09-30'} +Dates in resampled not in raw: set() +File: 
tasmax_hadukgrid_uk_1km_day_19981001-19981031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-10-31'} +Dates in resampled not in raw: {'1998-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19981101-19981130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1998-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19981201-19981231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-12-31'} +Dates in resampled not in raw: {'1998-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19990101-19990131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-01-31'} +Dates in resampled not in raw: {'1999-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19990201-19990228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1999-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19990301-19990331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-03-30', '1999-03-31'} +Dates in resampled not in raw: {'1999-02-30', '1999-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19990401-19990430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1999-04-30', '1999-04-29'} +Dates in resampled not in raw: {'1999-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19990501-19990531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-05-31', '1999-05-30'} +Dates in resampled not in raw: {'1999-04-30', '1999-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19990601-19990630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1999-06-30'} +Dates in resampled not in raw: {'1999-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19990701-19990731.nc produced errors: +raw # days: 31 - resampled 
# days: 30 +Dates in raw not in resampled: {'1999-07-30', '1999-07-31'} +Dates in resampled not in raw: {'1999-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19990801-19990831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-08-31'} +Dates in resampled not in raw: {'1999-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19990901-19990930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1999-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19991001-19991031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-10-31'} +Dates in resampled not in raw: {'1999-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19991101-19991130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1999-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19991201-19991231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-12-31'} +Dates in resampled not in raw: {'1999-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20000101-20000131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2000-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20000301-20000331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-03-31'} +Dates in resampled not in raw: {'2000-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20000401-20000430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2000-04-30'} +Dates in resampled not in raw: {'2000-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20000501-20000531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-05-31'} +Dates in resampled not in raw: {'2000-04-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20000601-20000630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2000-06-30'} +Dates in resampled not in raw: {'2000-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20000701-20000731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-07-31'} +Dates in resampled not in raw: {'2000-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20000801-20000831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-08-31'} +Dates in resampled not in raw: {'2000-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20001001-20001031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-10-31'} +Dates in resampled not in raw: {'2000-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20001201-20001231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-12-31'} +Dates in resampled not in raw: {'2000-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20010101-20010131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-01-31'} +Dates in resampled not in raw: {'2001-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20010201-20010228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2001-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20010301-20010331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-03-31', '2001-03-30'} +Dates in resampled not in raw: {'2001-02-30', '2001-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20010401-20010430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2001-04-29', '2001-04-30'} +Dates in resampled not in raw: {'2001-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20010501-20010531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates 
in raw not in resampled: {'2001-05-31', '2001-05-30'} +Dates in resampled not in raw: {'2001-04-29', '2001-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20010601-20010630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2001-06-30'} +Dates in resampled not in raw: {'2001-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20010701-20010731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2001-07-30', '2001-07-31'} +Dates in resampled not in raw: {'2001-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20010801-20010831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-08-31'} +Dates in resampled not in raw: {'2001-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20010901-20010930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2001-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20011001-20011031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-10-31'} +Dates in resampled not in raw: {'2001-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20011101-20011130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2001-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20011201-20011231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-12-31'} +Dates in resampled not in raw: {'2001-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20020101-20020131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-01-31'} +Dates in resampled not in raw: {'2002-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20020201-20020228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2002-02-01'} +Dates in resampled not in raw: set() +File: 
tasmax_hadukgrid_uk_1km_day_20020301-20020331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-03-31', '2002-03-30'} +Dates in resampled not in raw: {'2002-02-30', '2002-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20020401-20020430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2002-04-29', '2002-04-30'} +Dates in resampled not in raw: {'2002-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20020501-20020531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-05-31', '2002-05-30'} +Dates in resampled not in raw: {'2002-04-29', '2002-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20020601-20020630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2002-06-30'} +Dates in resampled not in raw: {'2002-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20020701-20020731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2002-07-30', '2002-07-31'} +Dates in resampled not in raw: {'2002-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20020801-20020831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-08-31'} +Dates in resampled not in raw: {'2002-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20020901-20020930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2002-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20021001-20021031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-10-31'} +Dates in resampled not in raw: {'2002-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20021101-20021130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2002-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20021201-20021231.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-12-31'} +Dates in resampled not in raw: {'2002-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20030101-20030131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-01-31'} +Dates in resampled not in raw: {'2003-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20030201-20030228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2003-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20030301-20030331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-03-30', '2003-03-31'} +Dates in resampled not in raw: {'2003-02-29', '2003-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20030401-20030430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2003-04-30', '2003-04-29'} +Dates in resampled not in raw: {'2003-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20030501-20030531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-05-31', '2003-05-30'} +Dates in resampled not in raw: {'2003-04-30', '2003-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20030601-20030630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2003-06-30'} +Dates in resampled not in raw: {'2003-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20030701-20030731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2003-07-30', '2003-07-31'} +Dates in resampled not in raw: {'2003-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20030801-20030831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-08-31'} +Dates in resampled not in raw: {'2003-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20030901-20030930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: 
{'2003-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20031001-20031031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-10-31'} +Dates in resampled not in raw: {'2003-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20031101-20031130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2003-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20031201-20031231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-12-31'} +Dates in resampled not in raw: {'2003-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20040101-20040131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2004-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20040301-20040331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-03-31'} +Dates in resampled not in raw: {'2004-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20040401-20040430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2004-04-30'} +Dates in resampled not in raw: {'2004-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20040501-20040531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-05-31'} +Dates in resampled not in raw: {'2004-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20040601-20040630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2004-06-30'} +Dates in resampled not in raw: {'2004-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20040701-20040731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-07-31'} +Dates in resampled not in raw: {'2004-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20040801-20040831.nc produced errors: +raw # days: 31 - resampled # days: 
31 +Dates in raw not in resampled: {'2004-08-31'} +Dates in resampled not in raw: {'2004-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20041001-20041031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-10-31'} +Dates in resampled not in raw: {'2004-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20041201-20041231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-12-31'} +Dates in resampled not in raw: {'2004-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20050101-20050131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-01-31'} +Dates in resampled not in raw: {'2005-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20050201-20050228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2005-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20050301-20050331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-03-31', '2005-03-30'} +Dates in resampled not in raw: {'2005-02-29', '2005-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20050401-20050430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2005-04-29', '2005-04-30'} +Dates in resampled not in raw: {'2005-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20050501-20050531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-05-31', '2005-05-30'} +Dates in resampled not in raw: {'2005-04-29', '2005-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20050601-20050630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2005-06-30'} +Dates in resampled not in raw: {'2005-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20050701-20050731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2005-07-31', '2005-07-30'} +Dates in resampled 
not in raw: {'2005-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20050801-20050831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-08-31'} +Dates in resampled not in raw: {'2005-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20050901-20050930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2005-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20051001-20051031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-10-31'} +Dates in resampled not in raw: {'2005-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20051101-20051130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2005-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20051201-20051231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-12-31'} +Dates in resampled not in raw: {'2005-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20060101-20060131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-01-31'} +Dates in resampled not in raw: {'2006-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20060201-20060228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2006-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20060301-20060331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-03-30', '2006-03-31'} +Dates in resampled not in raw: {'2006-02-29', '2006-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20060401-20060430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2006-04-29', '2006-04-30'} +Dates in resampled not in raw: {'2006-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20060501-20060531.nc produced errors: +raw # days: 31 - resampled # 
days: 31 +Dates in raw not in resampled: {'2006-05-31', '2006-05-30'} +Dates in resampled not in raw: {'2006-04-29', '2006-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20060601-20060630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2006-06-30'} +Dates in resampled not in raw: {'2006-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20060701-20060731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2006-07-31', '2006-07-30'} +Dates in resampled not in raw: {'2006-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20060801-20060831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-08-31'} +Dates in resampled not in raw: {'2006-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20060901-20060930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2006-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20061001-20061031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-10-31'} +Dates in resampled not in raw: {'2006-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20061101-20061130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2006-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20061201-20061231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-12-31'} +Dates in resampled not in raw: {'2006-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20070101-20070131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-01-31'} +Dates in resampled not in raw: {'2007-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20070201-20070228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2007-02-01'} +Dates in resampled not in raw: set() +File: 
tasmax_hadukgrid_uk_1km_day_20070301-20070331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-03-31', '2007-03-30'} +Dates in resampled not in raw: {'2007-02-29', '2007-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20070401-20070430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2007-04-30', '2007-04-29'} +Dates in resampled not in raw: {'2007-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20070501-20070531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-05-31', '2007-05-30'} +Dates in resampled not in raw: {'2007-04-30', '2007-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20070601-20070630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2007-06-30'} +Dates in resampled not in raw: {'2007-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20070701-20070731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2007-07-31', '2007-07-30'} +Dates in resampled not in raw: {'2007-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20070801-20070831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-08-31'} +Dates in resampled not in raw: {'2007-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20070901-20070930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2007-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20071001-20071031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-10-31'} +Dates in resampled not in raw: {'2007-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20071101-20071130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2007-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20071201-20071231.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-12-31'} +Dates in resampled not in raw: {'2007-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20080101-20080131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2008-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20080301-20080331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-03-31'} +Dates in resampled not in raw: {'2008-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20080401-20080430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2008-04-30'} +Dates in resampled not in raw: {'2008-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20080501-20080531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-05-31'} +Dates in resampled not in raw: {'2008-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20080601-20080630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2008-06-30'} +Dates in resampled not in raw: {'2008-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20080701-20080731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-07-31'} +Dates in resampled not in raw: {'2008-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20080801-20080831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-08-31'} +Dates in resampled not in raw: {'2008-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20081001-20081031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-10-31'} +Dates in resampled not in raw: {'2008-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20081201-20081231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-12-31'} +Dates in resampled not in raw: {'2008-11-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20090101-20090131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-01-31'} +Dates in resampled not in raw: {'2009-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20090201-20090228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2009-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20090301-20090331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-03-31', '2009-03-30'} +Dates in resampled not in raw: {'2009-02-30', '2009-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20090401-20090430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2009-04-30', '2009-04-29'} +Dates in resampled not in raw: {'2009-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20090501-20090531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-05-30', '2009-05-31'} +Dates in resampled not in raw: {'2009-04-30', '2009-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20090601-20090630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2009-06-30'} +Dates in resampled not in raw: {'2009-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20090701-20090731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2009-07-31', '2009-07-30'} +Dates in resampled not in raw: {'2009-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20090801-20090831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-08-31'} +Dates in resampled not in raw: {'2009-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20090901-20090930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2009-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20091001-20091031.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-10-31'} +Dates in resampled not in raw: {'2009-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20091101-20091130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2009-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20091201-20091231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-12-31'} +Dates in resampled not in raw: {'2009-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20100101-20100131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-01-31'} +Dates in resampled not in raw: {'2010-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20100201-20100228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2010-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20100301-20100331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-03-31', '2010-03-30'} +Dates in resampled not in raw: {'2010-02-29', '2010-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20100401-20100430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2010-04-29', '2010-04-30'} +Dates in resampled not in raw: {'2010-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20100501-20100531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-05-30', '2010-05-31'} +Dates in resampled not in raw: {'2010-04-29', '2010-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20100601-20100630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2010-06-30'} +Dates in resampled not in raw: {'2010-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20100701-20100731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2010-07-30', '2010-07-31'} +Dates 
in resampled not in raw: {'2010-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20100801-20100831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-08-31'} +Dates in resampled not in raw: {'2010-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20100901-20100930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2010-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20101001-20101031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-10-31'} +Dates in resampled not in raw: {'2010-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20101101-20101130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2010-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20101201-20101231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-12-31'} +Dates in resampled not in raw: {'2010-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20110101-20110131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-01-31'} +Dates in resampled not in raw: {'2011-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20110201-20110228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2011-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20110301-20110331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-03-30', '2011-03-31'} +Dates in resampled not in raw: {'2011-02-29', '2011-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20110401-20110430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2011-04-30', '2011-04-29'} +Dates in resampled not in raw: {'2011-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20110501-20110531.nc produced errors: +raw # days: 31 - 
resampled # days: 31 +Dates in raw not in resampled: {'2011-05-30', '2011-05-31'} +Dates in resampled not in raw: {'2011-04-30', '2011-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20110601-20110630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2011-06-30'} +Dates in resampled not in raw: {'2011-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20110701-20110731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2011-07-31', '2011-07-30'} +Dates in resampled not in raw: {'2011-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20110801-20110831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-08-31'} +Dates in resampled not in raw: {'2011-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20110901-20110930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2011-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20111001-20111031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-10-31'} +Dates in resampled not in raw: {'2011-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20111101-20111130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2011-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20111201-20111231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-12-31'} +Dates in resampled not in raw: {'2011-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20120101-20120131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2012-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20120301-20120331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-03-31'} +Dates in resampled not in raw: {'2012-02-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20120401-20120430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2012-04-30'} +Dates in resampled not in raw: {'2012-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20120501-20120531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-05-31'} +Dates in resampled not in raw: {'2012-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20120601-20120630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2012-06-30'} +Dates in resampled not in raw: {'2012-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20120701-20120731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-07-31'} +Dates in resampled not in raw: {'2012-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20120801-20120831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-08-31'} +Dates in resampled not in raw: {'2012-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20121001-20121031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-10-31'} +Dates in resampled not in raw: {'2012-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20121201-20121231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-12-31'} +Dates in resampled not in raw: {'2012-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20130101-20130131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-01-31'} +Dates in resampled not in raw: {'2013-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20130201-20130228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2013-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20130301-20130331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-03-30', 
'2013-03-31'} +Dates in resampled not in raw: {'2013-02-30', '2013-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20130401-20130430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2013-04-29', '2013-04-30'} +Dates in resampled not in raw: {'2013-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20130501-20130531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-05-30', '2013-05-31'} +Dates in resampled not in raw: {'2013-04-29', '2013-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20130601-20130630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2013-06-30'} +Dates in resampled not in raw: {'2013-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20130701-20130731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2013-07-31', '2013-07-30'} +Dates in resampled not in raw: {'2013-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20130801-20130831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-08-31'} +Dates in resampled not in raw: {'2013-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20130901-20130930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2013-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20131001-20131031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-10-31'} +Dates in resampled not in raw: {'2013-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20131101-20131130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2013-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20131201-20131231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-12-31'} +Dates in resampled not in raw: {'2013-11-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20140101-20140131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-01-31'} +Dates in resampled not in raw: {'2014-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20140201-20140228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2014-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20140301-20140331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-03-31', '2014-03-30'} +Dates in resampled not in raw: {'2014-02-30', '2014-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20140401-20140430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2014-04-29', '2014-04-30'} +Dates in resampled not in raw: {'2014-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20140501-20140531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-05-31', '2014-05-30'} +Dates in resampled not in raw: {'2014-04-29', '2014-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20140601-20140630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2014-06-30'} +Dates in resampled not in raw: {'2014-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20140701-20140731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2014-07-31', '2014-07-30'} +Dates in resampled not in raw: {'2014-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20140801-20140831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-08-31'} +Dates in resampled not in raw: {'2014-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20140901-20140930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2014-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20141001-20141031.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-10-31'} +Dates in resampled not in raw: {'2014-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20141101-20141130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2014-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20141201-20141231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-12-31'} +Dates in resampled not in raw: {'2014-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20150101-20150131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-01-31'} +Dates in resampled not in raw: {'2015-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20150201-20150228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2015-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20150301-20150331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-03-31', '2015-03-30'} +Dates in resampled not in raw: {'2015-02-29', '2015-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20150401-20150430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2015-04-29', '2015-04-30'} +Dates in resampled not in raw: {'2015-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20150501-20150531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-05-31', '2015-05-30'} +Dates in resampled not in raw: {'2015-04-29', '2015-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20150601-20150630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2015-06-30'} +Dates in resampled not in raw: {'2015-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20150701-20150731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2015-07-31', '2015-07-30'} +Dates 
in resampled not in raw: {'2015-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20150801-20150831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-08-31'} +Dates in resampled not in raw: {'2015-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20150901-20150930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2015-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20151001-20151031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-10-31'} +Dates in resampled not in raw: {'2015-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20151101-20151130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2015-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20151201-20151231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-12-31'} +Dates in resampled not in raw: {'2015-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20160101-20160131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2016-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20160301-20160331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-03-31'} +Dates in resampled not in raw: {'2016-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20160401-20160430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2016-04-30'} +Dates in resampled not in raw: {'2016-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20160501-20160531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-05-31'} +Dates in resampled not in raw: {'2016-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20160601-20160630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in 
resampled: {'2016-06-30'} +Dates in resampled not in raw: {'2016-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20160701-20160731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-07-31'} +Dates in resampled not in raw: {'2016-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20160801-20160831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-08-31'} +Dates in resampled not in raw: {'2016-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20161001-20161031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-10-31'} +Dates in resampled not in raw: {'2016-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20161201-20161231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-12-31'} +Dates in resampled not in raw: {'2016-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20170101-20170131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-01-31'} +Dates in resampled not in raw: {'2017-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20170201-20170228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2017-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20170301-20170331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-03-31', '2017-03-30'} +Dates in resampled not in raw: {'2017-02-29', '2017-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20170401-20170430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2017-04-30', '2017-04-29'} +Dates in resampled not in raw: {'2017-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20170501-20170531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-05-31', '2017-05-30'} +Dates in resampled not in raw: {'2017-04-30', '2017-04-29'} +File: 
tasmax_hadukgrid_uk_1km_day_20170601-20170630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2017-06-30'} +Dates in resampled not in raw: {'2017-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20170701-20170731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2017-07-30', '2017-07-31'} +Dates in resampled not in raw: {'2017-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20170801-20170831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-08-31'} +Dates in resampled not in raw: {'2017-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20170901-20170930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2017-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20171001-20171031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-10-31'} +Dates in resampled not in raw: {'2017-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20171101-20171130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2017-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20171201-20171231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-12-31'} +Dates in resampled not in raw: {'2017-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20180101-20180131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-01-31'} +Dates in resampled not in raw: {'2018-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20180201-20180228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2018-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20180301-20180331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-03-30', 
'2018-03-31'} +Dates in resampled not in raw: {'2018-02-30', '2018-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20180401-20180430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2018-04-29', '2018-04-30'} +Dates in resampled not in raw: {'2018-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20180501-20180531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-05-30', '2018-05-31'} +Dates in resampled not in raw: {'2018-04-29', '2018-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20180601-20180630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2018-06-30'} +Dates in resampled not in raw: {'2018-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20180701-20180731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2018-07-31', '2018-07-30'} +Dates in resampled not in raw: {'2018-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20180801-20180831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-08-31'} +Dates in resampled not in raw: {'2018-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20180901-20180930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2018-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20181001-20181031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-10-31'} +Dates in resampled not in raw: {'2018-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20181101-20181130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2018-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20181201-20181231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-12-31'} +Dates in resampled not in raw: {'2018-11-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20190101-20190131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-01-31'} +Dates in resampled not in raw: {'2019-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20190201-20190228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2019-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20190301-20190331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-03-31', '2019-03-30'} +Dates in resampled not in raw: {'2019-02-29', '2019-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20190401-20190430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2019-04-29', '2019-04-30'} +Dates in resampled not in raw: {'2019-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20190501-20190531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-05-31', '2019-05-30'} +Dates in resampled not in raw: {'2019-04-29', '2019-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20190601-20190630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2019-06-30'} +Dates in resampled not in raw: {'2019-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20190701-20190731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2019-07-30', '2019-07-31'} +Dates in resampled not in raw: {'2019-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20190801-20190831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-08-31'} +Dates in resampled not in raw: {'2019-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20190901-20190930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2019-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20191001-20191031.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-10-31'} +Dates in resampled not in raw: {'2019-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20191101-20191130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2019-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20191201-20191231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-12-31'} +Dates in resampled not in raw: {'2019-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20200101-20200131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2020-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20200301-20200331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-03-31'} +Dates in resampled not in raw: {'2020-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20200401-20200430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2020-04-30'} +Dates in resampled not in raw: {'2020-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20200501-20200531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-05-31'} +Dates in resampled not in raw: {'2020-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20200601-20200630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2020-06-30'} +Dates in resampled not in raw: {'2020-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20200701-20200731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-07-31'} +Dates in resampled not in raw: {'2020-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20200801-20200831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-08-31'} +Dates in resampled not in raw: {'2020-07-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20201001-20201031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-10-31'} +Dates in resampled not in raw: {'2020-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20201201-20201231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-12-31'} +Dates in resampled not in raw: {'2020-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20210101-20210131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-01-31'} +Dates in resampled not in raw: {'2021-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20210201-20210228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2021-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20210301-20210331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-03-31', '2021-03-30'} +Dates in resampled not in raw: {'2021-02-30', '2021-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20210401-20210430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2021-04-29', '2021-04-30'} +Dates in resampled not in raw: {'2021-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20210501-20210531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-05-30', '2021-05-31'} +Dates in resampled not in raw: {'2021-04-29', '2021-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20210601-20210630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2021-06-30'} +Dates in resampled not in raw: {'2021-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20210701-20210731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2021-07-31', '2021-07-30'} +Dates in resampled not in raw: {'2021-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20210801-20210831.nc produced errors: +raw 
# days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-08-31'} +Dates in resampled not in raw: {'2021-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20210901-20210930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2021-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20211001-20211031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-10-31'} +Dates in resampled not in raw: {'2021-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20211101-20211130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2021-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20211201-20211231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-12-31'} +Dates in resampled not in raw: {'2021-11-30'} From e645beb69956ba78355b77c9fcec856d14009c97 Mon Sep 17 00:00:00 2001 From: Ruth Bowyer <105492883+RuthBowyer@users.noreply.github.com> Date: Mon, 14 Aug 2023 09:47:53 +0100 Subject: [PATCH 007/146] Delete WIP_MBC.Rmd --- R/bias-correction-methods/WIP_MBC.Rmd | 139 -------------------------- 1 file changed, 139 deletions(-) delete mode 100644 R/bias-correction-methods/WIP_MBC.Rmd diff --git a/R/bias-correction-methods/WIP_MBC.Rmd b/R/bias-correction-methods/WIP_MBC.Rmd deleted file mode 100644 index 1a672db0..00000000 --- a/R/bias-correction-methods/WIP_MBC.Rmd +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: "WIP MBC in R" -author: "Ruth C E Bowyer" -date: "`r format(Sys.Date())`" -output: - github_document - ---- - -```{r setup, include=FALSE} -knitr::opts_chunk$set(echo = TRUE) -``` - - -## 0. 
About - -Testing Bias Correction methods from the MBC package in R - -Loading data as created in 'DataProcessingMBC.RMD' - -```{r libraries dd} -rm(list=ls()) - -library(MBC) -library(terra) -library(sf) -library(exactextractr) -library(reshape2) #melt -library(data.table) #for fread - -#Loaded package versions -x <- c("MBC", "terra", "sf", "exactextractr") -lapply(x,packageVersion) - -#Path is "//vmfileshare/ClimateData -#dd <- "/Volumes/vmfileshare/ClimateData/" -dd <- "/mnt/vmfileshare/ClimateData/" -``` -## 1. Load data -```{r} - -fp <- paste0(dd, "Interim/NI_cropped_MBCdata/") -files <- list.files(paste0(dd, "Interim/NI_cropped_MBCdata")) - -#HADs grid observational data -obs <- files[grepl("HAD", files)] - -obs.dfs <- lapply(obs, function(x){ - fread(paste0(fp, x)) -}) -names(obs.dfs) <- obs - -#Using 1980 - 2010 as calibration period -cpm.files <- files[grepl("CPM", files)] -cal <- cpm.files[grepl("1980|2000", cpm.files)] - -cal.dfs <- lapply(cal, function(x){ - fread(paste0(fp, x)) -}) -names(cal.dfs) <- cal - -gc() -``` - -```{r} -#R crashed when reading all of this in so for now just doing the projections for the next few decades -proj1 <- cpm.files[grepl("2020", cpm.files)] - -proj.dfs <- lapply(proj1, function(x){ - fread(paste0(fp, x)) -}) - -names(proj.dfs) <- proj1 -``` - - -## 2. Linear scaling - -So the df is data as rows and cells as x - so need to t transform yours etc - -```{r} -library('qmap') -### QM1: Linear transform function - -qm1.fit <- fitQmap(Obs, Mod.Hist, method = "PTF", transfun = "linear", wet.day = -FALSE, cost = "RSS") -qm1.proj <- doQmapPTF(Mod.Hist, qm1.fit) -qm1.hist <- doQmapPTF(Mod.Proj, qm1.fit) -``` - - - -## 3. Univariate quantile mapping - -Following vignette here: https://cran.r-project.org/web/packages/MBC/MBC.pdf - -To really understand - is the distribution sampled from the whole data, or is it cell specific - as assume the former and then in which case the area will matter... 
- -Because its just asking for a vector, is it better to just loop it over by cell obs anyway?? or create a single vector - -```{r} -#Start with tasmax - -obs.tasmax <- as.data.frame(obs.dfs$HADsNI1980_2010_tasmax.2023.06.27.csv) -#Run 5 -cal.tasmax_1 <- as.data.frame(cal.dfs$CPM_NI1980_1999tasmax_Run05.2023.06.27.csv) -cal.tasmax_2 <- as.data.frame(cal.dfs$CPM_NI2000_2009tasmax_Run05.2023.06.27.csv) -proj.tasmax <- as.data.frame(proj.dfs$CPM_NI2020_2039tasmax_Run05.2023.06.27.csv) - - -``` - - -```{r} -library(MBC) - -# Univariate - fit.qdm <- - QDM(o.c=Obs_oc, #vector of observed samples during the calibration period. - m.c=Cal_oc, #vector of model outputs during the calibration period. - m.p=Proj_oc, #vector of model outputs during the projected period. - ratio=FALSE, #logical value indicating if samples are of a ratio quantity (e.g., precipitation) -- false as is temp - trace=Inf) #numeric value indicating the threshold below which values of a ratio quantity (e.g., ratio=TRUE) should be considered exact zeros. 
-- need to read up on this - - -mhat.c <- fit.qdm$mhat.c -mhat.p <- fit.qdm$mhat.p - -``` - - - - -### Multivariate quantile mapping - --- To do -- - -Need to ensure can reproject data as spatial appropariately -Assess methods - split sample approach etc \ No newline at end of file From 05541b7d45746da452bbfccabc2f1a404c4ea84a Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 17 Aug 2023 10:20:09 +0100 Subject: [PATCH 008/146] Allow run_cmethods to handle tiff and nc Hads observation files --- python/debiasing/run_cmethods.py | 19 +++++++++++++++++-- python/load_data/data_loader.py | 5 +++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/python/debiasing/run_cmethods.py b/python/debiasing/run_cmethods.py index e56e213d..34d97b96 100644 --- a/python/debiasing/run_cmethods.py +++ b/python/debiasing/run_cmethods.py @@ -6,6 +6,7 @@ import argparse +import glob import logging import sys import time @@ -87,8 +88,22 @@ def run_debiasing() -> None: ds_simh = \ load_data(contr_fpath, date_range=h_date_period, variable=var, shapefile_path=shape_fpath, extension='tif')[ var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) - ds_obs = load_data(obs_fpath, date_range=h_date_period, variable=var, shapefile_path=shape_fpath)[var].rename( - {"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) + + # find file extensions for observation data + files_obs_nc = glob.glob(f"{obs_fpath}/*.nc", recursive=True) + files_obs_tif = glob.glob(f"{obs_fpath}/*.tif", recursive=True) + + if len(files_obs_nc) > 0 and len(files_obs_tif) == 0: + ext = 'nc' + elif len(files_obs_nc) == 0 and len(files_obs_tif) > 0: + ext = 'tif' + elif len(files_obs_nc) == 0 and len(files_obs_tif) == 0: + raise Exception(f"No observation files found in {obs_fpath} with extensions .nc or .tif") + else: + raise Exception(f"A mix of .nc and .tif observation files found in {obs_fpath}, file extension should be the " + f"same for all files in the 
directory.") + ds_obs = load_data(obs_fpath, date_range=h_date_period, variable=var, shapefile_path=shape_fpath, + extension=ext)[var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) log.info('Historical data Loaded.') # aligning calendars, e.g there might be a few extra days on the scenario data that has to be droped. diff --git a/python/load_data/data_loader.py b/python/load_data/data_loader.py index 8b47161f..eaa9d161 100644 --- a/python/load_data/data_loader.py +++ b/python/load_data/data_loader.py @@ -4,6 +4,7 @@ import os from datetime import datetime + def load_data(input_path, date_range, variable, shapefile_path=None, extension='nc'): ''' This function takes a date range and a variable and loads and merges xarrays based on those parameters. @@ -107,7 +108,7 @@ def reformat_file(file, variable): try: with xr.open_dataset(file, engine='rasterio') as x: - xa = x.rename({"x": "projection_x_coordinate", "y": "projection_y_coordinate", "band": "time",'band_data':variable}) \ + xa = x.rename({"x": "projection_x_coordinate", "y": "projection_y_coordinate", "band": "time", 'band_data': variable}) \ .rio.write_crs('epsg:27700') xa.coords['time'] = time_index @@ -169,7 +170,7 @@ def load_and_merge(date_range, files, variable): x = ds.load() x = x.sel(time=slice(*date_range)) except Exception as e: - x = reformat_file(file,variable).sel(time=slice(*date_range)) + x = reformat_file(file, variable).sel(time=slice(*date_range)) # Select the date range if x.time.size != 0: From 9578f2d8ddfd339ee93c638ff8aef374b2753e1a Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Fri, 18 Aug 2023 18:31:30 +0100 Subject: [PATCH 009/146] print duplicate dates, compare against expected dates --- python/resampling/check_calendar.py | 53 +++-- python/resampling/check_calendar_log.txt | 275 ++++++++++++++--------- 2 files changed, 204 insertions(+), 124 deletions(-) diff --git a/python/resampling/check_calendar.py b/python/resampling/check_calendar.py index 
26390efd..a072f04d 100644 --- a/python/resampling/check_calendar.py +++ b/python/resampling/check_calendar.py @@ -1,14 +1,17 @@ -from datetime import datetime import os import xarray as xr import glob import numpy as np +from collections import Counter + +path_ukcp = '/Volumes/vmfileshare/ClimateData/Raw/UKCP2.2/tasmax/01/latest/tasmax_rcp85_land-cpm_uk_2.2km_01_day_19801201-19811130.nc' +data_raw = xr.open_dataset(path_ukcp, decode_coords="all") path_raw = '/Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day' path_preproc = '/Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day' -#example file names: -#tasmax_hadukgrid_uk_1km_day_2.2km_resampled_19800101-19800131.ncr -#tasmax_hadukgrid_uk_1km_day_20211201-20211231.nc +#example files to be compared : +# after resampling: tasmax_hadukgrid_uk_1km_day_2.2km_resampled_19800101-19800131.ncr +# before resampling: tasmax_hadukgrid_uk_1km_day_20211201-20211231.nc # open log file and write both input paths on top: with open('check_calendar_log.txt', 'w') as f: @@ -18,9 +21,8 @@ #iterate through dir at path and loop through files files = [os.path.basename(f) for f in glob.glob(path_raw + "**/*.nc", recursive=True)] +all_dates = np.array([], dtype='datetime64[ns]') # Specify the correct data type for i,file in enumerate(files): - if i%10==0: - print(i) #separate filename from flag '2.2km_resamples' from date output_name = f"{'_'.join(file.split('_')[:-1])}_2.2km_resampled_{file.split('_')[-1]}" @@ -29,6 +31,8 @@ #load before and after resampling files data_raw = xr.open_dataset(raw_f, decode_coords="all") data_preproc = xr.open_dataset(preproc_f, decode_coords="all") + + #convert to string time_raw = [str(t).split('T')[0] for t in data_raw.coords['time'].values] time_pre = [str(t).split(' ')[0] for t in data_preproc.coords['time'].values] @@ -38,15 +42,34 @@ # check if dates are empty if dates_in_raw_not_in_pre | dates_in_pre_not_in_raw: - #if date in raw not in pre ends in 31 - if 
list(dates_in_raw_not_in_pre)[0][-2:]!='31': - # write to log file - with open(os.path.join(path_preproc,'check_calendar_log.txt'), 'a') as f: - f.write(f"File: {file} produced errors:\n") - f.write(f"raw # days: {len(set(time_raw))} - resampled # days: {len(set(time_pre))}\n") - f.write(f"Dates in raw not in resampled: {dates_in_raw_not_in_pre}\n") - f.write(f"Dates in resampled not in raw: {dates_in_pre_not_in_raw}\n") - + # write to log file + with open('check_calendar_log.txt', 'a') as f: + f.write(f"File: {file} produced errors:\n") + f.write(f"raw # days: {len(set(time_raw))} - resampled # days: {len(set(time_pre))}\n") + f.write(f"Dates in raw not in resampled: {dates_in_raw_not_in_pre}\n") + f.write(f"Dates in resampled not in raw: {dates_in_pre_not_in_raw}\n") + + # save dates for later overall comparison + all_dates = np.concatenate((all_dates, data_preproc.coords['time'].values)) + +# generating expected dates +start = files[0].split('_')[-1].split('-')[0] +stop = files[-1].split('_')[-1].split('-')[1][:-5]+'30' +time_index = xr.cftime_range(start, stop, freq='D', calendar='360_day', inclusive='both') + +# convert to strings +x_dates_str = [f"{date.year}-{date.month:02d}-{date.day:02d}" for date in time_index] +y_dates_str = [f"{date.year}-{date.month:02d}-{date.day:02d}" for date in all_dates] +# compare if all present +not_in_y = [date_x for date_x in x_dates_str if date_x not in y_dates_str] +with open('check_calendar_log.txt', 'a') as f: + f.write(f'______________________________\n') + f.write(f'missing dates: {len(not_in_y)}\n') + # find duplicates + counts = Counter(y_dates_str) + for string, count in counts.items(): + if count > 1: + f.write(f"date '{string}' appears {count} times.\n") diff --git a/python/resampling/check_calendar_log.txt b/python/resampling/check_calendar_log.txt index 362bc6a6..48553625 100644 --- a/python/resampling/check_calendar_log.txt +++ b/python/resampling/check_calendar_log.txt @@ -50,19 +50,19 @@ Dates in raw not in 
resampled: {'1981-03-31', '1981-03-30'} Dates in resampled not in raw: {'1981-02-29', '1981-02-30'} File: tasmax_hadukgrid_uk_1km_day_19810401-19810430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1981-04-30', '1981-04-29'} +Dates in raw not in resampled: {'1981-04-29', '1981-04-30'} Dates in resampled not in raw: {'1981-03-30'} File: tasmax_hadukgrid_uk_1km_day_19810501-19810531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1981-05-31', '1981-05-30'} -Dates in resampled not in raw: {'1981-04-30', '1981-04-29'} +Dates in raw not in resampled: {'1981-05-30', '1981-05-31'} +Dates in resampled not in raw: {'1981-04-29', '1981-04-30'} File: tasmax_hadukgrid_uk_1km_day_19810601-19810630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1981-06-30'} Dates in resampled not in raw: {'1981-05-30'} File: tasmax_hadukgrid_uk_1km_day_19810701-19810731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'1981-07-31', '1981-07-30'} +Dates in raw not in resampled: {'1981-07-30', '1981-07-31'} Dates in resampled not in raw: {'1981-06-30'} File: tasmax_hadukgrid_uk_1km_day_19810801-19810831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -95,14 +95,14 @@ Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19820301-19820331.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'1982-03-30', '1982-03-31'} -Dates in resampled not in raw: {'1982-02-30', '1982-02-29'} +Dates in resampled not in raw: {'1982-02-29', '1982-02-30'} File: tasmax_hadukgrid_uk_1km_day_19820401-19820430.nc produced errors: raw # days: 30 - resampled # days: 29 Dates in raw not in resampled: {'1982-04-29', '1982-04-30'} Dates in resampled not in raw: {'1982-03-30'} File: tasmax_hadukgrid_uk_1km_day_19820501-19820531.nc produced errors: raw # days: 31 - resampled # days: 31 
-Dates in raw not in resampled: {'1982-05-31', '1982-05-30'} +Dates in raw not in resampled: {'1982-05-30', '1982-05-31'} Dates in resampled not in raw: {'1982-04-29', '1982-04-30'} File: tasmax_hadukgrid_uk_1km_day_19820601-19820630.nc produced errors: raw # days: 30 - resampled # days: 30 @@ -110,7 +110,7 @@ Dates in raw not in resampled: {'1982-06-30'} Dates in resampled not in raw: {'1982-05-30'} File: tasmax_hadukgrid_uk_1km_day_19820701-19820731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'1982-07-30', '1982-07-31'} +Dates in raw not in resampled: {'1982-07-31', '1982-07-30'} Dates in resampled not in raw: {'1982-06-30'} File: tasmax_hadukgrid_uk_1km_day_19820801-19820831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -143,15 +143,15 @@ Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19830301-19830331.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'1983-03-30', '1983-03-31'} -Dates in resampled not in raw: {'1983-02-29', '1983-02-30'} +Dates in resampled not in raw: {'1983-02-30', '1983-02-29'} File: tasmax_hadukgrid_uk_1km_day_19830401-19830430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1983-04-29', '1983-04-30'} +Dates in raw not in resampled: {'1983-04-30', '1983-04-29'} Dates in resampled not in raw: {'1983-03-30'} File: tasmax_hadukgrid_uk_1km_day_19830501-19830531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1983-05-30', '1983-05-31'} -Dates in resampled not in raw: {'1983-04-29', '1983-04-30'} +Dates in raw not in resampled: {'1983-05-31', '1983-05-30'} +Dates in resampled not in raw: {'1983-04-30', '1983-04-29'} File: tasmax_hadukgrid_uk_1km_day_19830601-19830630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1983-06-30'} @@ -226,23 +226,23 @@ Dates in raw not in resampled: {'1985-02-01'} 
Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19850301-19850331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1985-03-30', '1985-03-31'} +Dates in raw not in resampled: {'1985-03-31', '1985-03-30'} Dates in resampled not in raw: {'1985-02-30', '1985-02-29'} File: tasmax_hadukgrid_uk_1km_day_19850401-19850430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1985-04-30', '1985-04-29'} +Dates in raw not in resampled: {'1985-04-29', '1985-04-30'} Dates in resampled not in raw: {'1985-03-30'} File: tasmax_hadukgrid_uk_1km_day_19850501-19850531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1985-05-30', '1985-05-31'} -Dates in resampled not in raw: {'1985-04-30', '1985-04-29'} +Dates in raw not in resampled: {'1985-05-31', '1985-05-30'} +Dates in resampled not in raw: {'1985-04-29', '1985-04-30'} File: tasmax_hadukgrid_uk_1km_day_19850601-19850630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1985-06-30'} Dates in resampled not in raw: {'1985-05-30'} File: tasmax_hadukgrid_uk_1km_day_19850701-19850731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'1985-07-30', '1985-07-31'} +Dates in raw not in resampled: {'1985-07-31', '1985-07-30'} Dates in resampled not in raw: {'1985-06-30'} File: tasmax_hadukgrid_uk_1km_day_19850801-19850831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -274,16 +274,16 @@ Dates in raw not in resampled: {'1986-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19860301-19860331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1986-03-30', '1986-03-31'} -Dates in resampled not in raw: {'1986-02-30', '1986-02-29'} +Dates in raw not in resampled: {'1986-03-31', '1986-03-30'} +Dates in resampled not in raw: {'1986-02-29', 
'1986-02-30'} File: tasmax_hadukgrid_uk_1km_day_19860401-19860430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1986-04-29', '1986-04-30'} +Dates in raw not in resampled: {'1986-04-30', '1986-04-29'} Dates in resampled not in raw: {'1986-03-30'} File: tasmax_hadukgrid_uk_1km_day_19860501-19860531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1986-05-30', '1986-05-31'} -Dates in resampled not in raw: {'1986-04-29', '1986-04-30'} +Dates in raw not in resampled: {'1986-05-31', '1986-05-30'} +Dates in resampled not in raw: {'1986-04-30', '1986-04-29'} File: tasmax_hadukgrid_uk_1km_day_19860601-19860630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1986-06-30'} @@ -322,8 +322,8 @@ Dates in raw not in resampled: {'1987-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19870301-19870331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1987-03-31', '1987-03-30'} -Dates in resampled not in raw: {'1987-02-29', '1987-02-30'} +Dates in raw not in resampled: {'1987-03-30', '1987-03-31'} +Dates in resampled not in raw: {'1987-02-30', '1987-02-29'} File: tasmax_hadukgrid_uk_1km_day_19870401-19870430.nc produced errors: raw # days: 30 - resampled # days: 29 Dates in raw not in resampled: {'1987-04-29', '1987-04-30'} @@ -338,7 +338,7 @@ Dates in raw not in resampled: {'1987-06-30'} Dates in resampled not in raw: {'1987-05-30'} File: tasmax_hadukgrid_uk_1km_day_19870701-19870731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'1987-07-30', '1987-07-31'} +Dates in raw not in resampled: {'1987-07-31', '1987-07-30'} Dates in resampled not in raw: {'1987-06-30'} File: tasmax_hadukgrid_uk_1km_day_19870801-19870831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -410,12 +410,12 @@ Dates in raw not in resampled: {'1989-03-30', 
'1989-03-31'} Dates in resampled not in raw: {'1989-02-29', '1989-02-30'} File: tasmax_hadukgrid_uk_1km_day_19890401-19890430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1989-04-29', '1989-04-30'} +Dates in raw not in resampled: {'1989-04-30', '1989-04-29'} Dates in resampled not in raw: {'1989-03-30'} File: tasmax_hadukgrid_uk_1km_day_19890501-19890531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1989-05-31', '1989-05-30'} -Dates in resampled not in raw: {'1989-04-29', '1989-04-30'} +Dates in raw not in resampled: {'1989-05-30', '1989-05-31'} +Dates in resampled not in raw: {'1989-04-30', '1989-04-29'} File: tasmax_hadukgrid_uk_1km_day_19890601-19890630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1989-06-30'} @@ -455,22 +455,22 @@ Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19900301-19900331.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'1990-03-30', '1990-03-31'} -Dates in resampled not in raw: {'1990-02-29', '1990-02-30'} +Dates in resampled not in raw: {'1990-02-30', '1990-02-29'} File: tasmax_hadukgrid_uk_1km_day_19900401-19900430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1990-04-30', '1990-04-29'} +Dates in raw not in resampled: {'1990-04-29', '1990-04-30'} Dates in resampled not in raw: {'1990-03-30'} File: tasmax_hadukgrid_uk_1km_day_19900501-19900531.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'1990-05-31', '1990-05-30'} -Dates in resampled not in raw: {'1990-04-30', '1990-04-29'} +Dates in resampled not in raw: {'1990-04-29', '1990-04-30'} File: tasmax_hadukgrid_uk_1km_day_19900601-19900630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1990-06-30'} Dates in resampled not in raw: {'1990-05-30'} File: 
tasmax_hadukgrid_uk_1km_day_19900701-19900731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'1990-07-31', '1990-07-30'} +Dates in raw not in resampled: {'1990-07-30', '1990-07-31'} Dates in resampled not in raw: {'1990-06-30'} File: tasmax_hadukgrid_uk_1km_day_19900801-19900831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -502,16 +502,16 @@ Dates in raw not in resampled: {'1991-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19910301-19910331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1991-03-30', '1991-03-31'} +Dates in raw not in resampled: {'1991-03-31', '1991-03-30'} Dates in resampled not in raw: {'1991-02-29', '1991-02-30'} File: tasmax_hadukgrid_uk_1km_day_19910401-19910430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1991-04-30', '1991-04-29'} +Dates in raw not in resampled: {'1991-04-29', '1991-04-30'} Dates in resampled not in raw: {'1991-03-30'} File: tasmax_hadukgrid_uk_1km_day_19910501-19910531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1991-05-30', '1991-05-31'} -Dates in resampled not in raw: {'1991-04-30', '1991-04-29'} +Dates in raw not in resampled: {'1991-05-31', '1991-05-30'} +Dates in resampled not in raw: {'1991-04-29', '1991-04-30'} File: tasmax_hadukgrid_uk_1km_day_19910601-19910630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1991-06-30'} @@ -586,16 +586,16 @@ Dates in raw not in resampled: {'1993-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19930301-19930331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1993-03-31', '1993-03-30'} -Dates in resampled not in raw: {'1993-02-30', '1993-02-29'} +Dates in raw not in resampled: {'1993-03-30', '1993-03-31'} +Dates in resampled not in raw: 
{'1993-02-29', '1993-02-30'} File: tasmax_hadukgrid_uk_1km_day_19930401-19930430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1993-04-30', '1993-04-29'} +Dates in raw not in resampled: {'1993-04-29', '1993-04-30'} Dates in resampled not in raw: {'1993-03-30'} File: tasmax_hadukgrid_uk_1km_day_19930501-19930531.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'1993-05-31', '1993-05-30'} -Dates in resampled not in raw: {'1993-04-30', '1993-04-29'} +Dates in resampled not in raw: {'1993-04-29', '1993-04-30'} File: tasmax_hadukgrid_uk_1km_day_19930601-19930630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1993-06-30'} @@ -634,8 +634,8 @@ Dates in raw not in resampled: {'1994-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19940301-19940331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1994-03-31', '1994-03-30'} -Dates in resampled not in raw: {'1994-02-30', '1994-02-29'} +Dates in raw not in resampled: {'1994-03-30', '1994-03-31'} +Dates in resampled not in raw: {'1994-02-29', '1994-02-30'} File: tasmax_hadukgrid_uk_1km_day_19940401-19940430.nc produced errors: raw # days: 30 - resampled # days: 29 Dates in raw not in resampled: {'1994-04-30', '1994-04-29'} @@ -683,15 +683,15 @@ Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19950301-19950331.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'1995-03-31', '1995-03-30'} -Dates in resampled not in raw: {'1995-02-29', '1995-02-30'} +Dates in resampled not in raw: {'1995-02-30', '1995-02-29'} File: tasmax_hadukgrid_uk_1km_day_19950401-19950430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1995-04-30', '1995-04-29'} +Dates in raw not in resampled: {'1995-04-29', '1995-04-30'} Dates in resampled not in raw: 
{'1995-03-30'} File: tasmax_hadukgrid_uk_1km_day_19950501-19950531.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'1995-05-30', '1995-05-31'} -Dates in resampled not in raw: {'1995-04-30', '1995-04-29'} +Dates in resampled not in raw: {'1995-04-29', '1995-04-30'} File: tasmax_hadukgrid_uk_1km_day_19950601-19950630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1995-06-30'} @@ -770,12 +770,12 @@ Dates in raw not in resampled: {'1997-03-30', '1997-03-31'} Dates in resampled not in raw: {'1997-02-29', '1997-02-30'} File: tasmax_hadukgrid_uk_1km_day_19970401-19970430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1997-04-29', '1997-04-30'} +Dates in raw not in resampled: {'1997-04-30', '1997-04-29'} Dates in resampled not in raw: {'1997-03-30'} File: tasmax_hadukgrid_uk_1km_day_19970501-19970531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1997-05-31', '1997-05-30'} -Dates in resampled not in raw: {'1997-04-29', '1997-04-30'} +Dates in raw not in resampled: {'1997-05-30', '1997-05-31'} +Dates in resampled not in raw: {'1997-04-30', '1997-04-29'} File: tasmax_hadukgrid_uk_1km_day_19970601-19970630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1997-06-30'} @@ -818,19 +818,19 @@ Dates in raw not in resampled: {'1998-03-31', '1998-03-30'} Dates in resampled not in raw: {'1998-02-30', '1998-02-29'} File: tasmax_hadukgrid_uk_1km_day_19980401-19980430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1998-04-30', '1998-04-29'} +Dates in raw not in resampled: {'1998-04-29', '1998-04-30'} Dates in resampled not in raw: {'1998-03-30'} File: tasmax_hadukgrid_uk_1km_day_19980501-19980531.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'1998-05-30', '1998-05-31'} -Dates in 
resampled not in raw: {'1998-04-30', '1998-04-29'} +Dates in resampled not in raw: {'1998-04-29', '1998-04-30'} File: tasmax_hadukgrid_uk_1km_day_19980601-19980630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1998-06-30'} Dates in resampled not in raw: {'1998-05-30'} File: tasmax_hadukgrid_uk_1km_day_19980701-19980731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'1998-07-30', '1998-07-31'} +Dates in raw not in resampled: {'1998-07-31', '1998-07-30'} Dates in resampled not in raw: {'1998-06-30'} File: tasmax_hadukgrid_uk_1km_day_19980801-19980831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -862,23 +862,23 @@ Dates in raw not in resampled: {'1999-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_19990301-19990331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1999-03-30', '1999-03-31'} -Dates in resampled not in raw: {'1999-02-30', '1999-02-29'} +Dates in raw not in resampled: {'1999-03-31', '1999-03-30'} +Dates in resampled not in raw: {'1999-02-29', '1999-02-30'} File: tasmax_hadukgrid_uk_1km_day_19990401-19990430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'1999-04-30', '1999-04-29'} +Dates in raw not in resampled: {'1999-04-29', '1999-04-30'} Dates in resampled not in raw: {'1999-03-30'} File: tasmax_hadukgrid_uk_1km_day_19990501-19990531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'1999-05-31', '1999-05-30'} -Dates in resampled not in raw: {'1999-04-30', '1999-04-29'} +Dates in raw not in resampled: {'1999-05-30', '1999-05-31'} +Dates in resampled not in raw: {'1999-04-29', '1999-04-30'} File: tasmax_hadukgrid_uk_1km_day_19990601-19990630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'1999-06-30'} Dates in resampled not in raw: {'1999-05-30'} 
File: tasmax_hadukgrid_uk_1km_day_19990701-19990731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'1999-07-30', '1999-07-31'} +Dates in raw not in resampled: {'1999-07-31', '1999-07-30'} Dates in resampled not in raw: {'1999-06-30'} File: tasmax_hadukgrid_uk_1km_day_19990801-19990831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -994,8 +994,8 @@ Dates in raw not in resampled: {'2002-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20020301-20020331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2002-03-31', '2002-03-30'} -Dates in resampled not in raw: {'2002-02-30', '2002-02-29'} +Dates in raw not in resampled: {'2002-03-30', '2002-03-31'} +Dates in resampled not in raw: {'2002-02-29', '2002-02-30'} File: tasmax_hadukgrid_uk_1km_day_20020401-20020430.nc produced errors: raw # days: 30 - resampled # days: 29 Dates in raw not in resampled: {'2002-04-29', '2002-04-30'} @@ -1010,7 +1010,7 @@ Dates in raw not in resampled: {'2002-06-30'} Dates in resampled not in raw: {'2002-05-30'} File: tasmax_hadukgrid_uk_1km_day_20020701-20020731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2002-07-30', '2002-07-31'} +Dates in raw not in resampled: {'2002-07-31', '2002-07-30'} Dates in resampled not in raw: {'2002-06-30'} File: tasmax_hadukgrid_uk_1km_day_20020801-20020831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1042,7 +1042,7 @@ Dates in raw not in resampled: {'2003-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20030301-20030331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2003-03-30', '2003-03-31'} +Dates in raw not in resampled: {'2003-03-31', '2003-03-30'} Dates in resampled not in raw: {'2003-02-29', '2003-02-30'} File: tasmax_hadukgrid_uk_1km_day_20030401-20030430.nc produced errors: raw # days: 
30 - resampled # days: 29 @@ -1058,7 +1058,7 @@ Dates in raw not in resampled: {'2003-06-30'} Dates in resampled not in raw: {'2003-05-30'} File: tasmax_hadukgrid_uk_1km_day_20030701-20030731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2003-07-30', '2003-07-31'} +Dates in raw not in resampled: {'2003-07-31', '2003-07-30'} Dates in resampled not in raw: {'2003-06-30'} File: tasmax_hadukgrid_uk_1km_day_20030801-20030831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1126,16 +1126,16 @@ Dates in raw not in resampled: {'2005-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20050301-20050331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2005-03-31', '2005-03-30'} -Dates in resampled not in raw: {'2005-02-29', '2005-02-30'} +Dates in raw not in resampled: {'2005-03-30', '2005-03-31'} +Dates in resampled not in raw: {'2005-02-30', '2005-02-29'} File: tasmax_hadukgrid_uk_1km_day_20050401-20050430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'2005-04-29', '2005-04-30'} +Dates in raw not in resampled: {'2005-04-30', '2005-04-29'} Dates in resampled not in raw: {'2005-03-30'} File: tasmax_hadukgrid_uk_1km_day_20050501-20050531.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2005-05-31', '2005-05-30'} -Dates in resampled not in raw: {'2005-04-29', '2005-04-30'} +Dates in resampled not in raw: {'2005-04-30', '2005-04-29'} File: tasmax_hadukgrid_uk_1km_day_20050601-20050630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'2005-06-30'} @@ -1174,23 +1174,23 @@ Dates in raw not in resampled: {'2006-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20060301-20060331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2006-03-30', '2006-03-31'} -Dates 
in resampled not in raw: {'2006-02-29', '2006-02-30'} +Dates in raw not in resampled: {'2006-03-31', '2006-03-30'} +Dates in resampled not in raw: {'2006-02-30', '2006-02-29'} File: tasmax_hadukgrid_uk_1km_day_20060401-20060430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'2006-04-29', '2006-04-30'} +Dates in raw not in resampled: {'2006-04-30', '2006-04-29'} Dates in resampled not in raw: {'2006-03-30'} File: tasmax_hadukgrid_uk_1km_day_20060501-20060531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2006-05-31', '2006-05-30'} -Dates in resampled not in raw: {'2006-04-29', '2006-04-30'} +Dates in raw not in resampled: {'2006-05-30', '2006-05-31'} +Dates in resampled not in raw: {'2006-04-30', '2006-04-29'} File: tasmax_hadukgrid_uk_1km_day_20060601-20060630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'2006-06-30'} Dates in resampled not in raw: {'2006-05-30'} File: tasmax_hadukgrid_uk_1km_day_20060701-20060731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2006-07-31', '2006-07-30'} +Dates in raw not in resampled: {'2006-07-30', '2006-07-31'} Dates in resampled not in raw: {'2006-06-30'} File: tasmax_hadukgrid_uk_1km_day_20060801-20060831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1222,23 +1222,23 @@ Dates in raw not in resampled: {'2007-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20070301-20070331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2007-03-31', '2007-03-30'} -Dates in resampled not in raw: {'2007-02-29', '2007-02-30'} +Dates in raw not in resampled: {'2007-03-30', '2007-03-31'} +Dates in resampled not in raw: {'2007-02-30', '2007-02-29'} File: tasmax_hadukgrid_uk_1km_day_20070401-20070430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in 
resampled: {'2007-04-30', '2007-04-29'} +Dates in raw not in resampled: {'2007-04-29', '2007-04-30'} Dates in resampled not in raw: {'2007-03-30'} File: tasmax_hadukgrid_uk_1km_day_20070501-20070531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2007-05-31', '2007-05-30'} -Dates in resampled not in raw: {'2007-04-30', '2007-04-29'} +Dates in raw not in resampled: {'2007-05-30', '2007-05-31'} +Dates in resampled not in raw: {'2007-04-29', '2007-04-30'} File: tasmax_hadukgrid_uk_1km_day_20070601-20070630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'2007-06-30'} Dates in resampled not in raw: {'2007-05-30'} File: tasmax_hadukgrid_uk_1km_day_20070701-20070731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2007-07-31', '2007-07-30'} +Dates in raw not in resampled: {'2007-07-30', '2007-07-31'} Dates in resampled not in raw: {'2007-06-30'} File: tasmax_hadukgrid_uk_1km_day_20070801-20070831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1322,7 +1322,7 @@ Dates in raw not in resampled: {'2009-06-30'} Dates in resampled not in raw: {'2009-05-30'} File: tasmax_hadukgrid_uk_1km_day_20090701-20090731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2009-07-31', '2009-07-30'} +Dates in raw not in resampled: {'2009-07-30', '2009-07-31'} Dates in resampled not in raw: {'2009-06-30'} File: tasmax_hadukgrid_uk_1km_day_20090801-20090831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1354,16 +1354,16 @@ Dates in raw not in resampled: {'2010-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20100301-20100331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2010-03-31', '2010-03-30'} -Dates in resampled not in raw: {'2010-02-29', '2010-02-30'} +Dates in raw not in resampled: {'2010-03-30', '2010-03-31'} +Dates 
in resampled not in raw: {'2010-02-30', '2010-02-29'} File: tasmax_hadukgrid_uk_1km_day_20100401-20100430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'2010-04-29', '2010-04-30'} +Dates in raw not in resampled: {'2010-04-30', '2010-04-29'} Dates in resampled not in raw: {'2010-03-30'} File: tasmax_hadukgrid_uk_1km_day_20100501-20100531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2010-05-30', '2010-05-31'} -Dates in resampled not in raw: {'2010-04-29', '2010-04-30'} +Dates in raw not in resampled: {'2010-05-31', '2010-05-30'} +Dates in resampled not in raw: {'2010-04-30', '2010-04-29'} File: tasmax_hadukgrid_uk_1km_day_20100601-20100630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'2010-06-30'} @@ -1402,16 +1402,16 @@ Dates in raw not in resampled: {'2011-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20110301-20110331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2011-03-30', '2011-03-31'} -Dates in resampled not in raw: {'2011-02-29', '2011-02-30'} +Dates in raw not in resampled: {'2011-03-31', '2011-03-30'} +Dates in resampled not in raw: {'2011-02-30', '2011-02-29'} File: tasmax_hadukgrid_uk_1km_day_20110401-20110430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'2011-04-30', '2011-04-29'} +Dates in raw not in resampled: {'2011-04-29', '2011-04-30'} Dates in resampled not in raw: {'2011-03-30'} File: tasmax_hadukgrid_uk_1km_day_20110501-20110531.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2011-05-30', '2011-05-31'} -Dates in resampled not in raw: {'2011-04-30', '2011-04-29'} +Dates in resampled not in raw: {'2011-04-29', '2011-04-30'} File: tasmax_hadukgrid_uk_1km_day_20110601-20110630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not 
in resampled: {'2011-06-30'} @@ -1486,23 +1486,23 @@ Dates in raw not in resampled: {'2013-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20130301-20130331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2013-03-30', '2013-03-31'} -Dates in resampled not in raw: {'2013-02-30', '2013-02-29'} +Dates in raw not in resampled: {'2013-03-31', '2013-03-30'} +Dates in resampled not in raw: {'2013-02-29', '2013-02-30'} File: tasmax_hadukgrid_uk_1km_day_20130401-20130430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'2013-04-29', '2013-04-30'} +Dates in raw not in resampled: {'2013-04-30', '2013-04-29'} Dates in resampled not in raw: {'2013-03-30'} File: tasmax_hadukgrid_uk_1km_day_20130501-20130531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2013-05-30', '2013-05-31'} -Dates in resampled not in raw: {'2013-04-29', '2013-04-30'} +Dates in raw not in resampled: {'2013-05-31', '2013-05-30'} +Dates in resampled not in raw: {'2013-04-30', '2013-04-29'} File: tasmax_hadukgrid_uk_1km_day_20130601-20130630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'2013-06-30'} Dates in resampled not in raw: {'2013-05-30'} File: tasmax_hadukgrid_uk_1km_day_20130701-20130731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2013-07-31', '2013-07-30'} +Dates in raw not in resampled: {'2013-07-30', '2013-07-31'} Dates in resampled not in raw: {'2013-06-30'} File: tasmax_hadukgrid_uk_1km_day_20130801-20130831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1535,22 +1535,22 @@ Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20140301-20140331.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2014-03-31', '2014-03-30'} -Dates in resampled not in raw: {'2014-02-30', 
'2014-02-29'} +Dates in resampled not in raw: {'2014-02-29', '2014-02-30'} File: tasmax_hadukgrid_uk_1km_day_20140401-20140430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'2014-04-29', '2014-04-30'} +Dates in raw not in resampled: {'2014-04-30', '2014-04-29'} Dates in resampled not in raw: {'2014-03-30'} File: tasmax_hadukgrid_uk_1km_day_20140501-20140531.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2014-05-31', '2014-05-30'} -Dates in resampled not in raw: {'2014-04-29', '2014-04-30'} +Dates in resampled not in raw: {'2014-04-30', '2014-04-29'} File: tasmax_hadukgrid_uk_1km_day_20140601-20140630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'2014-06-30'} Dates in resampled not in raw: {'2014-05-30'} File: tasmax_hadukgrid_uk_1km_day_20140701-20140731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2014-07-31', '2014-07-30'} +Dates in raw not in resampled: {'2014-07-30', '2014-07-31'} Dates in resampled not in raw: {'2014-06-30'} File: tasmax_hadukgrid_uk_1km_day_20140801-20140831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1590,7 +1590,7 @@ Dates in raw not in resampled: {'2015-04-29', '2015-04-30'} Dates in resampled not in raw: {'2015-03-30'} File: tasmax_hadukgrid_uk_1km_day_20150501-20150531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2015-05-31', '2015-05-30'} +Dates in raw not in resampled: {'2015-05-30', '2015-05-31'} Dates in resampled not in raw: {'2015-04-29', '2015-04-30'} File: tasmax_hadukgrid_uk_1km_day_20150601-20150630.nc produced errors: raw # days: 30 - resampled # days: 30 @@ -1666,8 +1666,8 @@ Dates in raw not in resampled: {'2017-02-01'} Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20170301-20170331.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not 
in resampled: {'2017-03-31', '2017-03-30'} -Dates in resampled not in raw: {'2017-02-29', '2017-02-30'} +Dates in raw not in resampled: {'2017-03-30', '2017-03-31'} +Dates in resampled not in raw: {'2017-02-30', '2017-02-29'} File: tasmax_hadukgrid_uk_1km_day_20170401-20170430.nc produced errors: raw # days: 30 - resampled # days: 29 Dates in raw not in resampled: {'2017-04-30', '2017-04-29'} @@ -1715,7 +1715,7 @@ Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20180301-20180331.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2018-03-30', '2018-03-31'} -Dates in resampled not in raw: {'2018-02-30', '2018-02-29'} +Dates in resampled not in raw: {'2018-02-29', '2018-02-30'} File: tasmax_hadukgrid_uk_1km_day_20180401-20180430.nc produced errors: raw # days: 30 - resampled # days: 29 Dates in raw not in resampled: {'2018-04-29', '2018-04-30'} @@ -1730,7 +1730,7 @@ Dates in raw not in resampled: {'2018-06-30'} Dates in resampled not in raw: {'2018-05-30'} File: tasmax_hadukgrid_uk_1km_day_20180701-20180731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2018-07-31', '2018-07-30'} +Dates in raw not in resampled: {'2018-07-30', '2018-07-31'} Dates in resampled not in raw: {'2018-06-30'} File: tasmax_hadukgrid_uk_1km_day_20180801-20180831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1763,22 +1763,22 @@ Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20190301-20190331.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2019-03-31', '2019-03-30'} -Dates in resampled not in raw: {'2019-02-29', '2019-02-30'} +Dates in resampled not in raw: {'2019-02-30', '2019-02-29'} File: tasmax_hadukgrid_uk_1km_day_20190401-20190430.nc produced errors: raw # days: 30 - resampled # days: 29 -Dates in raw not in resampled: {'2019-04-29', '2019-04-30'} +Dates in raw not in resampled: {'2019-04-30', 
'2019-04-29'} Dates in resampled not in raw: {'2019-03-30'} File: tasmax_hadukgrid_uk_1km_day_20190501-20190531.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2019-05-31', '2019-05-30'} -Dates in resampled not in raw: {'2019-04-29', '2019-04-30'} +Dates in resampled not in raw: {'2019-04-30', '2019-04-29'} File: tasmax_hadukgrid_uk_1km_day_20190601-20190630.nc produced errors: raw # days: 30 - resampled # days: 30 Dates in raw not in resampled: {'2019-06-30'} Dates in resampled not in raw: {'2019-05-30'} File: tasmax_hadukgrid_uk_1km_day_20190701-20190731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2019-07-30', '2019-07-31'} +Dates in raw not in resampled: {'2019-07-31', '2019-07-30'} Dates in resampled not in raw: {'2019-06-30'} File: tasmax_hadukgrid_uk_1km_day_20190801-20190831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1847,14 +1847,14 @@ Dates in resampled not in raw: set() File: tasmax_hadukgrid_uk_1km_day_20210301-20210331.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2021-03-31', '2021-03-30'} -Dates in resampled not in raw: {'2021-02-30', '2021-02-29'} +Dates in resampled not in raw: {'2021-02-29', '2021-02-30'} File: tasmax_hadukgrid_uk_1km_day_20210401-20210430.nc produced errors: raw # days: 30 - resampled # days: 29 Dates in raw not in resampled: {'2021-04-29', '2021-04-30'} Dates in resampled not in raw: {'2021-03-30'} File: tasmax_hadukgrid_uk_1km_day_20210501-20210531.nc produced errors: raw # days: 31 - resampled # days: 31 -Dates in raw not in resampled: {'2021-05-30', '2021-05-31'} +Dates in raw not in resampled: {'2021-05-31', '2021-05-30'} Dates in resampled not in raw: {'2021-04-29', '2021-04-30'} File: tasmax_hadukgrid_uk_1km_day_20210601-20210630.nc produced errors: raw # days: 30 - resampled # days: 30 @@ -1862,7 +1862,7 @@ Dates in raw not in resampled: {'2021-06-30'} Dates in resampled 
not in raw: {'2021-05-30'} File: tasmax_hadukgrid_uk_1km_day_20210701-20210731.nc produced errors: raw # days: 31 - resampled # days: 30 -Dates in raw not in resampled: {'2021-07-31', '2021-07-30'} +Dates in raw not in resampled: {'2021-07-30', '2021-07-31'} Dates in resampled not in raw: {'2021-06-30'} File: tasmax_hadukgrid_uk_1km_day_20210801-20210831.nc produced errors: raw # days: 31 - resampled # days: 31 @@ -1884,3 +1884,60 @@ File: tasmax_hadukgrid_uk_1km_day_20211201-20211231.nc produced errors: raw # days: 31 - resampled # days: 31 Dates in raw not in resampled: {'2021-12-31'} Dates in resampled not in raw: {'2021-11-30'} +______________________________ +missing dates: 0 +date '1980-03-30' appears 2 times. +date '1980-05-30' appears 2 times. +date '1980-07-30' appears 2 times. +date '1980-09-30' appears 2 times. +date '1980-11-30' appears 2 times. +date '1984-03-30' appears 2 times. +date '1984-05-30' appears 2 times. +date '1984-07-30' appears 2 times. +date '1984-09-30' appears 2 times. +date '1984-11-30' appears 2 times. +date '1988-03-30' appears 2 times. +date '1988-05-30' appears 2 times. +date '1988-07-30' appears 2 times. +date '1988-09-30' appears 2 times. +date '1988-11-30' appears 2 times. +date '1992-03-30' appears 2 times. +date '1992-05-30' appears 2 times. +date '1992-07-30' appears 2 times. +date '1992-09-30' appears 2 times. +date '1992-11-30' appears 2 times. +date '1996-03-30' appears 2 times. +date '1996-05-30' appears 2 times. +date '1996-07-30' appears 2 times. +date '1996-09-30' appears 2 times. +date '1996-11-30' appears 2 times. +date '2000-03-30' appears 2 times. +date '2000-05-30' appears 2 times. +date '2000-07-30' appears 2 times. +date '2000-09-30' appears 2 times. +date '2000-11-30' appears 2 times. +date '2004-03-30' appears 2 times. +date '2004-05-30' appears 2 times. +date '2004-07-30' appears 2 times. +date '2004-09-30' appears 2 times. +date '2004-11-30' appears 2 times. +date '2008-03-30' appears 2 times. 
+date '2008-05-30' appears 2 times. +date '2008-07-30' appears 2 times. +date '2008-09-30' appears 2 times. +date '2008-11-30' appears 2 times. +date '2012-03-30' appears 2 times. +date '2012-05-30' appears 2 times. +date '2012-07-30' appears 2 times. +date '2012-09-30' appears 2 times. +date '2012-11-30' appears 2 times. +date '2016-03-30' appears 2 times. +date '2016-05-30' appears 2 times. +date '2016-07-30' appears 2 times. +date '2016-09-30' appears 2 times. +date '2016-11-30' appears 2 times. +date '2020-03-30' appears 2 times. +date '2020-05-30' appears 2 times. +date '2020-07-30' appears 2 times. +date '2020-09-30' appears 2 times. +date '2020-11-30' appears 2 times. From 7ae0b58676310316f306df7cfe63a2f4e60ae595 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Mon, 21 Aug 2023 11:41:07 +0100 Subject: [PATCH 010/146] add calendar correction --- python/resampling/resampling_hads.py | 55 +++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 5 deletions(-) diff --git a/python/resampling/resampling_hads.py b/python/resampling/resampling_hads.py index dcbf157a..b0111190 100644 --- a/python/resampling/resampling_hads.py +++ b/python/resampling/resampling_hads.py @@ -1,4 +1,11 @@ +''' +This script resamples the UKHADS data to match UKCP18 data. +It resamples spatially, from 1km to 2.2km +It resamples temporally to a 360 day calendar. +''' + import argparse +import pandas as pd import xarray as xr import os import glob @@ -8,7 +15,45 @@ import scipy import netCDF4 +def enforce_date_dropping(raw_data: xr.Dataset, converted_data: xr.Dataset) -> xr.Dataset: + """ + Workaround to avoid convert_calendar misbehavior with monthly data files. + + For leap years, the conversion assigns dropped data to the previous date instead of deleting it. + Here we manually delete those dates to avoid duplicates later in the pipeline. + + Args: + raw_data (xr.Dataset): The original data. + converted_data (xr.Dataset): The data after conversion. 
+ + Returns: + xr.Dataset: The converted data with specific dates dropped. + """ + month_day_drop = {(1, 31), (4, 1), (6, 1), (8, 1), (9, 31), (12, 1)} + time_values = pd.DatetimeIndex(raw_data.coords['time'].values) + + # Get the indices of the dates to be dropped + index_to_drop = [i for i, (m, d) in enumerate(zip(time_values.month, time_values.day)) if (m, d) in month_day_drop] + + # Filter indices that are within the bounds of the converted_data + index_to_drop = [i for i in index_to_drop if i < len(converted_data.coords['time'].values)] + + if index_to_drop: + converted_data = converted_data.drop_sel(time=converted_data.coords['time'].values[index_to_drop]) + + return converted_data + def resample_hadukgrid(x): + ''' + Resamples the UKHADs data to match UKCP18 data both spatially and temporally + and saves the resampled data to the output directory. + inputs: + x: list of inputs + x[0]: file to be resampled + x[1]: x_grid + x[2]: y_grid + x[3]: output_dir + ''' try: # due to the multiprocessing implementations inputs come as list file = x[0] @@ -28,13 +73,14 @@ def resample_hadukgrid(x): data = xr.open_dataset(file, decode_coords="all") # convert to 360 day calendar. - data = data.convert_calendar(dim='time', calendar='360_day', align_on='year') + data_360 = data.convert_calendar(dim='time', calendar='360_day', align_on='year') + data_360 = enforce_date_dropping(data,data_360) # the dataset to be resample must have dimensions named projection_x_coordinate and projection_y_coordinate . 
- resampled = data[[variable]].interp(projection_x_coordinate=x_grid, projection_y_coordinate=y_grid, method="linear") + resampled = data_360[[variable]].interp(projection_x_coordinate=x_grid, projection_y_coordinate=y_grid, method="linear") #make sure we keep the original CRS - resampled.rio.write_crs(data.rio.crs,inplace=True) + resampled.rio.write_crs(data_360.rio.crs,inplace=True) # save resampled file resampled.to_netcdf(os.path.join(output_dir,output_name)) @@ -54,9 +100,8 @@ def resample_hadukgrid(x): # Adding arguments parser.add_argument("--input", help="Path where the .nc files to resample is located", required=True, type=str) + parser.add_argument("--grid_data", help="Path where the .nc file with the grid to resample is located", required=False,type=str, default='../../data/rcp85_land-cpm_uk_2.2km_grid.nc') parser.add_argument("--output", help="Path to save the resampled data data", required=False, default=".", type=str) - parser.add_argument("--grid_data", help="Path where the .nc file with the grid to resample is located", required=False, - type=str, default='../../data/rcp85_land-cpm_uk_2.2km_grid.nc') parser_args = parser.parse_args() From 2fe58c803e5f31ba25867cd10ef0c81a548c8830 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Tue, 22 Aug 2023 10:08:26 +0100 Subject: [PATCH 011/146] Add script that removes some dates from the Hads data for leap years - only for rainfall and tasmax for now --- .../debiasing/remove_dates_from_leap_years.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 python/debiasing/remove_dates_from_leap_years.py diff --git a/python/debiasing/remove_dates_from_leap_years.py b/python/debiasing/remove_dates_from_leap_years.py new file mode 100644 index 00000000..b03f6c6f --- /dev/null +++ b/python/debiasing/remove_dates_from_leap_years.py @@ -0,0 +1,62 @@ +# Temporary script to convert Hads observation data (.tif) generated by Ruth's R +# script to 360-day-per-year format. 
The script also renames the files to match the +# names in the file names in the "Processed/HadsUKgrid/resampled_2.2km" folder and saves them +# in .nc format (same as the original Hads format in "Processed/HadsUKgrid/resampled_2.2km") + +import os +import xarray as xr +import glob +import numpy as np + +# input Hads data folder +path = '/Volumes/vmfileshare/ClimateData/Interim/HadsUK/three.cities/' + +# output Hads data folder - NOTE: this is a local path, please change to local or Azure path +path_output = '/debiasing_test/all/' +# path_output = '/Volumes/vmfileshare/ClimateData/Interim/HadsUK/three.cities.greg/' + +# do this for two variables - tasmin is omitted because dates in files are different +for variable in ["tasmax", "rainfall"]: + + # create a list of input and output files + files_in = [] + files_in.extend([f for f in glob.glob(path + "**/*.tif", recursive=True)]) + files_in = [f for f in files_in if variable in f] + files_out = [f"{'_'.join(f.split('_')[:-1])}-{f.split('_')[-1]}" for f in files_in] + files_out = [f.replace("tif", "nc") for f in files_out] + files_out = [f.replace("20091231", "20091230") for f in files_out] + files_out = [f.replace("20191231", "20191230") for f in files_out] + files_out = [f.replace("20100131", "20100130") for f in files_out] + files_out = [f.replace("20200131", "20200130") for f in files_out] + files_out = ["." 
+ f.replace(path, path_output) for f in files_out] + + for i, file_in in enumerate(files_in): + + # these are the lower and upper indexes used to slice the arrays, which change depending on + # whether we process the 1980-2010 or 2010-2020 Hads input + lower_index, upper_index = (0, 10840) if "1980" in file_in else (720, 2890) + + # read the raster data (.tif) + data = xr.open_dataset(file_in) + + # drop the five redundant dates for each leap year (approximate indexes are used) + data = data.drop_sel(band=np.arange(lower_index + 59, upper_index, 1445).tolist()) + data = data.drop_sel(band=np.arange(lower_index + 120, upper_index, 1445).tolist()) + data = data.drop_sel(band=np.arange(lower_index + 211, upper_index, 1445).tolist()) + data = data.drop_sel(band=np.arange(lower_index + 271, upper_index, 1445).tolist()) + data = data.drop_sel(band=np.arange(lower_index + 334, upper_index, 1445).tolist()) + + # create a 360-day time index based on the date range of the file + file_out = os.path.basename(files_out[i]).split('_') + start = file_out[-1].split('-')[0] + stop = file_out[-1].split('-')[1].split('.')[0] + time_index = xr.cftime_range(start, stop, freq='D', calendar='360_day', inclusive='both') + + # rename attributes and data and assign time index + data = data.rename( + {"x": "projection_x_coordinate", "y": "projection_y_coordinate", "band": "time", 'band_data': variable}) \ + .rio.write_crs('epsg:27700') + data.coords['time'] = time_index + + # write to an .nc file + data.to_netcdf(files_out[i]) From e05a578e16f459b77d1f756543494b8ddd952e34 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Wed, 23 Aug 2023 10:11:30 +0100 Subject: [PATCH 012/146] Add script to rename CPM files generated by Ruth so that the dates are parsed correctly by the clim-recal python code --- python/debiasing/edit_cpm_filenames.py | 34 ++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 python/debiasing/edit_cpm_filenames.py diff --git 
a/python/debiasing/edit_cpm_filenames.py b/python/debiasing/edit_cpm_filenames.py new file mode 100644 index 00000000..a4c2b489 --- /dev/null +++ b/python/debiasing/edit_cpm_filenames.py @@ -0,0 +1,34 @@ +# Temporary script to rename the CPM data created by Ruth's code to fit with the format +# expected by the debiasing python code in clim-recal. + +import glob +import shutil +import os +from pathlib import Path + +# input Hads data folder +path = '/Volumes/vmfileshare/ClimateData/Interim/CPM/three.cities' + +# output Hads data folder - NOTE: this is a local path, please change to local or Azure path +path_output = './debiasing_test/scenario' +# path_output = '/Volumes/vmfileshare/ClimateData/Interim/CPM/three.cities.greg/' + +# create a list of input and output files +files_in = [] +files_in.extend([f for f in glob.glob(path + "**/*/*.tif", recursive=True)]) +files_out = [f for f in files_in] +files_out = [f.replace("1980_2000", "19800101-19991230") for f in files_out] +files_out = [f.replace("2000_2010", "20000101-20091230") for f in files_out] +files_out = [f.replace("2010_2020", "20100101-20191230") for f in files_out] +files_out = [f.replace("2020_2040", "20200101-20391230") for f in files_out] +files_out = [f.replace("2040_2060", "20400101-20591230") for f in files_out] +files_out = [f.replace("2060_2080", "20600101-20791230") for f in files_out] +files_out = [f.replace(path, path_output) for f in files_out] + +# copy - including recursive directory creation +for i, file_in in enumerate(files_in): + if not os.path.exists(os.path.dirname(files_out[i])): + path = Path(os.path.dirname(files_out[i])) + path.mkdir(parents=True) + + shutil.copy(file_in, files_out[i]) From 167369f6897907c67f18189748c8ccd2ad54f656 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Wed, 23 Aug 2023 10:12:06 +0100 Subject: [PATCH 013/146] Add two more arguments to the run_cmethods script to parse date ranges for calibration and projection (previously hard coded) --- 
python/debiasing/run_cmethods.py | 36 ++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/python/debiasing/run_cmethods.py b/python/debiasing/run_cmethods.py index 34d97b96..4d51bb70 100644 --- a/python/debiasing/run_cmethods.py +++ b/python/debiasing/run_cmethods.py @@ -13,6 +13,7 @@ import numpy as np import matplotlib.pyplot as plt import os +from datetime import datetime sys.path.insert(1, 'python-cmethods') from cmethods.CMethods import CMethods @@ -38,7 +39,16 @@ parser.add_argument('--contr', '--control', dest='contr_fpath', type=str, help='Path to control datasets') parser.add_argument('--scen', '--scenario', dest='scen_fpath', type=str, help='Path to scenario datasets (data to adjust)') - +parser.add_argument('--calib_dates', '--calibration_date_range', dest='calib_date_range', type=str, + help='Start and end dates for calibration data (historic data used to ' + 'calibrate the debiasing model) - in YYYYMMDD-YYYYMMDD format', + default='19801201-19811130') +parser.add_argument('--proj_dates', '--projection_date_range', dest='proj_date_range', type=str, + help='Start and end dates for future data (data to be projected using the ' + 'calibrated debiasing model) - multiple date ranges can be passed, ' + 'separated by "_", each in YYYYMMDD-YYYYMMDD format e.g., ' + '"20100101-20191231_20200101-20291231"', + default='20201201-20221130_20221201-20231130') parser.add_argument('--shp', '--shapefile', dest='shapefile_fpath', type=str, help='Path to shapefile', default=None) parser.add_argument('--out', '--output', dest='output_fpath', type=str, help='Path to save output files', default='.') parser.add_argument('-m', '--method', dest='method', type=str, help='Correction method', @@ -56,6 +66,8 @@ obs_fpath = params['obs_fpath'] contr_fpath = params['contr_fpath'] scen_fpath = params['scen_fpath'] +calibration_date_range = params['calib_date_range'] +projection_date_range = params['proj_date_range'] shape_fpath = 
params['shapefile_fpath'] out_fpath = params['output_fpath'] @@ -67,14 +79,26 @@ n_quantiles = params['n_quantiles'] n_jobs = params['p'] -h_date_period = ('1980-12-01', '1999-11-30') -future_time_periods = [('2020-12-01', '2030-11-30'), ('2030-12-01', '2040-11-30'), ('2060-12-01', '2070-11-30'), - ('2070-12-01', '2080-11-30')] + +calib_list = calibration_date_range.split('-') +h_date_period = (datetime.strptime(calib_list[0], '%Y%m%d').strftime('%Y-%m-%d'), + datetime.strptime(calib_list[1], '%Y%m%d').strftime('%Y-%m-%d')) +proj_list = projection_date_range.split('_') +future_time_periods = [(p.split('-')[0], p.split('-')[1]) for p in proj_list] +future_time_periods = [(datetime.strptime(p[0], '%Y%m%d').strftime('%Y-%m-%d'), + datetime.strptime(p[1], '%Y%m%d').strftime('%Y-%m-%d')) + for p in future_time_periods] + + +# h_date_period = ('1980-12-01', '1999-11-30') +# future_time_periods = [('2020-12-01', '2030-11-30'), ('2030-12-01', '2040-11-30'), ('2060-12-01', '2070-11-30'), +# ('2070-12-01', '2080-11-30')] # for testing -future_time_periods = [('2020-12-01', '2022-11-30'),('2022-12-01', '2023-11-30')] -h_date_period = ('1980-12-01', '1981-11-30') +# future_time_periods = [('2020-12-01', '2022-11-30'),('2022-12-01', '2023-11-30')] +# h_date_period = ('1980-12-01', '1981-11-30') + # * ----- ----- -----M A I N ----- ----- ----- def run_debiasing() -> None: start = time.time() From d98f60fc1d253476247a867699d0095622c83787 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Wed, 23 Aug 2023 10:12:42 +0100 Subject: [PATCH 014/146] Modify script to create folders before adding files --- python/debiasing/remove_dates_from_leap_years.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/python/debiasing/remove_dates_from_leap_years.py b/python/debiasing/remove_dates_from_leap_years.py index b03f6c6f..5dd24b9a 100644 --- a/python/debiasing/remove_dates_from_leap_years.py +++ b/python/debiasing/remove_dates_from_leap_years.py @@ -7,12 +7,13 @@ 
import xarray as xr import glob import numpy as np +from pathlib import Path # input Hads data folder path = '/Volumes/vmfileshare/ClimateData/Interim/HadsUK/three.cities/' # output Hads data folder - NOTE: this is a local path, please change to local or Azure path -path_output = '/debiasing_test/all/' +path_output = '/debiasing_test/observation/' # path_output = '/Volumes/vmfileshare/ClimateData/Interim/HadsUK/three.cities.greg/' # do this for two variables - tasmin is omitted because dates in files are different @@ -58,5 +59,9 @@ .rio.write_crs('epsg:27700') data.coords['time'] = time_index + if not os.path.exists(os.path.dirname(files_out[i])): + folder_path = Path(os.path.dirname(files_out[i])) + folder_path.mkdir(parents=True) + # write to an .nc file data.to_netcdf(files_out[i]) From 265c053f6cc3622052648af493aca58664543d1b Mon Sep 17 00:00:00 2001 From: Ruth Bowyer <105492883+RuthBowyer@users.noreply.github.com> Date: Thu, 24 Aug 2023 10:28:34 +0100 Subject: [PATCH 015/146] Update Cropping_Rasters_to_three_cities.R Adding missing lib --- R/misc/Cropping_Rasters_to_three_cities.R | 1 + 1 file changed, 1 insertion(+) diff --git a/R/misc/Cropping_Rasters_to_three_cities.R b/R/misc/Cropping_Rasters_to_three_cities.R index 7e0812b5..ad499328 100644 --- a/R/misc/Cropping_Rasters_to_three_cities.R +++ b/R/misc/Cropping_Rasters_to_three_cities.R @@ -7,6 +7,7 @@ source("~/Desktop/clim-recal/clim-recal/R/misc/read_crop.fn.R") library(tidyverse) library(data.table) library(qmap) +library(terra) dd <- "/mnt/vmfileshare/ClimateData/" From 3b97600136be69ee7376512b81d860e7ed8cccdd Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 24 Aug 2023 12:35:23 +0100 Subject: [PATCH 016/146] Modify readme and run_cmethods arguments --- python/README.md | 9 +++++---- python/debiasing/run_cmethods.py | 16 ++++++++-------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/python/README.md b/python/README.md index 878f3e98..298b532f 100644 --- a/python/README.md 
+++ b/python/README.md @@ -66,7 +66,7 @@ cd debiasing git submodule update --init --recursive ``` -The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. +The [run_cmethods.py](debiasing/run_cmethods.py) allows us to adjust climate biases in climate data using the python-cmethods library. It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. @@ -87,6 +87,8 @@ where: - `--obs` specifies the path to the observation datasets - `--contr` specifies the path to the control datasets - `--scen` specifies the path to the scenario datasets (data to adjust) +- `--contr_dates` specifies start and end dates for control and observation data (historic UKCP and HADs data used to calibrate the debiasing model) +- `--scen_dates` specifies start and end dates for scenario data (data to be debiased using the calibrated debiasing model - multiple date ranges can be passed) - `--shp` specifies the path to a shapefile, in case we want to select a smaller region (default: None) - `--out` specifies the path to save the output files (default: current directory) - `--method` specifies the correction method to use (default: quantile_delta_mapping) @@ -104,9 +106,8 @@ python run_cmethods.py --help ``` **Main Functionality**: -The script applies corrections extracted from historical observed and simulated data between `1980-12-01` and `1999-11-30`. -Corrections are applied to future scenario data between `2020` and `2080` (however there is no available scenario data between `2040` to `2060`, so this time -period is skipped.
+By default (if no control and scenario dates are passed), the script applies corrections extracted from historical observed and simulated data between `1980-12-01` and `1999-11-30`. +Corrections are applied to future scenario data between `2020` and `2040`. The script performs the following steps: diff --git a/python/debiasing/run_cmethods.py b/python/debiasing/run_cmethods.py index 4d51bb70..fe862fef 100644 --- a/python/debiasing/run_cmethods.py +++ b/python/debiasing/run_cmethods.py @@ -39,16 +39,16 @@ parser.add_argument('--contr', '--control', dest='contr_fpath', type=str, help='Path to control datasets') parser.add_argument('--scen', '--scenario', dest='scen_fpath', type=str, help='Path to scenario datasets (data to adjust)') -parser.add_argument('--calib_dates', '--calibration_date_range', dest='calib_date_range', type=str, - help='Start and end dates for calibration data (historic data used to ' +parser.add_argument('--contr_dates', '--control_date_range', dest='control_date_range', type=str, + help='Start and end dates for control and observation data (historic data used to ' 'calibrate the debiasing model) - in YYYYMMDD-YYYYMMDD format', - default='19801201-19811130') -parser.add_argument('--proj_dates', '--projection_date_range', dest='proj_date_range', type=str, - help='Start and end dates for future data (data to be projected using the ' + default='19801201-19991130') +parser.add_argument('--scen_dates', '--scenario_date_range', dest='scenario_date_range', type=str, + help='Start and end dates for scenario data (data to be debiased using the ' 'calibrated debiasing model) - multiple date ranges can be passed, ' 'separated by "_", each in YYYYMMDD-YYYYMMDD format e.g., ' '"20100101-20191231_20200101-20291231"', - default='20201201-20221130_20221201-20231130') + default='20201201-20291130_20301201-20391130') parser.add_argument('--shp', '--shapefile', dest='shapefile_fpath', type=str, help='Path to shapefile', default=None) parser.add_argument('--out', 
'--output', dest='output_fpath', type=str, help='Path to save output files', default='.') parser.add_argument('-m', '--method', dest='method', type=str, help='Correction method', @@ -66,8 +66,8 @@ obs_fpath = params['obs_fpath'] contr_fpath = params['contr_fpath'] scen_fpath = params['scen_fpath'] -calibration_date_range = params['calib_date_range'] -projection_date_range = params['proj_date_range'] +calibration_date_range = params['control_date_range'] +projection_date_range = params['scenario_date_range'] shape_fpath = params['shapefile_fpath'] out_fpath = params['output_fpath'] From cf4296f7b9acf4b0cec3a29f9936975ee772776c Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 24 Aug 2023 18:32:38 +0100 Subject: [PATCH 017/146] Add script to do all runs for three cities --- python/debiasing/three_cities_debiasing.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100755 python/debiasing/three_cities_debiasing.sh diff --git a/python/debiasing/three_cities_debiasing.sh b/python/debiasing/three_cities_debiasing.sh new file mode 100755 index 00000000..748e3928 --- /dev/null +++ b/python/debiasing/three_cities_debiasing.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +declare -a vars=("tasmax" "rainfall" "tasmin") +declare -a runs=("05" "07" "08" "06") +declare -a methods=("quantile_delta_mapping" "quantile_mapping" "variance_scaling" "delta_method") +declare -a cities=("Glasgow" "Manchester" "London") + +for var in "${vars[@]}"; do + for run in "${runs[@]}"; do + for method in "${methods[@]}"; do + for city in "${cities[@]}"; do + python run_cmethods.py --scen /Volumes/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --contr /Volumes/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /Volumes/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method -p 5 --out ./debiasing_test/output/$city/$run/ 
--contr_dates 19800101-19801230 --scen_dates 20100101-20100330 + done + done + done +done From 7625a1615800c52f95dfa67e7b7c20376d5f4b62 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 24 Aug 2023 18:44:36 +0100 Subject: [PATCH 018/146] Update debiasing bash script --- python/debiasing/three_cities_debiasing.sh | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/python/debiasing/three_cities_debiasing.sh b/python/debiasing/three_cities_debiasing.sh index 748e3928..80b2a64f 100755 --- a/python/debiasing/three_cities_debiasing.sh +++ b/python/debiasing/three_cities_debiasing.sh @@ -2,14 +2,19 @@ declare -a vars=("tasmax" "rainfall" "tasmin") declare -a runs=("05" "07" "08" "06") -declare -a methods=("quantile_delta_mapping" "quantile_mapping" "variance_scaling" "delta_method") +declare -a methods=("quantile_delta_mapping" "quantile_mapping") +declare -a methods_2=("variance_scaling" "delta_method") declare -a cities=("Glasgow" "Manchester" "London") for var in "${vars[@]}"; do for run in "${runs[@]}"; do - for method in "${methods[@]}"; do - for city in "${cities[@]}"; do - python run_cmethods.py --scen /Volumes/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --contr /Volumes/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /Volumes/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method -p 5 --out ./debiasing_test/output/$city/$run/ --contr_dates 19800101-19801230 --scen_dates 20100101-20100330 + for city in "${cities[@]}"; do + for method in "${methods[@]}"; do + python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape 
/mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 + done + + for method in "${methods_2[@]}"; do + python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 done done done From 7c76fe63531df494da3f3a01f1d10355a4145529 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 24 Aug 2023 18:46:36 +0100 Subject: [PATCH 019/146] Minor changes in bash script --- python/debiasing/three_cities_debiasing.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/debiasing/three_cities_debiasing.sh b/python/debiasing/three_cities_debiasing.sh index 80b2a64f..964e6585 100755 --- a/python/debiasing/three_cities_debiasing.sh +++ b/python/debiasing/three_cities_debiasing.sh @@ -1,10 +1,10 @@ #!/bin/sh -declare -a vars=("tasmax" "rainfall" "tasmin") +declare -a vars=("rainfall" "tasmax" "tasmin") declare -a runs=("05" "07" "08" "06") +declare -a cities=("Glasgow" "Manchester" "London") declare -a methods=("quantile_delta_mapping" "quantile_mapping") declare -a methods_2=("variance_scaling" "delta_method") -declare -a cities=("Glasgow" "Manchester" "London") for var in "${vars[@]}"; do for run in "${runs[@]}"; do From 671cc91b645a0342c932c65ff35038cdefab9a86 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 24 Aug 2023 19:13:22 +0100 Subject: [PATCH 020/146] Modify bash script to handle rainfall/pr naming --- 
python/debiasing/three_cities_debiasing.sh | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/python/debiasing/three_cities_debiasing.sh b/python/debiasing/three_cities_debiasing.sh index 964e6585..8822fa73 100755 --- a/python/debiasing/three_cities_debiasing.sh +++ b/python/debiasing/three_cities_debiasing.sh @@ -1,21 +1,31 @@ #!/bin/sh -declare -a vars=("rainfall" "tasmax" "tasmin") +declare -a vars=("tasmax") declare -a runs=("05" "07" "08" "06") declare -a cities=("Glasgow" "Manchester" "London") declare -a methods=("quantile_delta_mapping" "quantile_mapping") declare -a methods_2=("variance_scaling" "delta_method") -for var in "${vars[@]}"; do - for run in "${runs[@]}"; do - for city in "${cities[@]}"; do - for method in "${methods[@]}"; do + +for run in "${runs[@]}"; do + for city in "${cities[@]}"; do + for method in "${methods[@]}"; do + + python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 + + for var in "${vars[@]}"; do python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 done + done - for method in "${methods_2[@]}"; 
do + for method in "${methods_2[@]}"; do + + python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 + + for var in "${vars[@]}"; do python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 done done + done done From d603197c95922fd2f870976adea305b7885b2ec4 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Fri, 25 Aug 2023 09:20:56 +0100 Subject: [PATCH 021/146] add function that takes care of duplicate dates after calendar resampling - Fixes #32 --- check_calendar_log.txt | 1943 ++++++++++++++++++++++++++ python/resampling/check_calendar.py | 14 +- python/resampling/resampling_hads.py | 7 +- 3 files changed, 1955 insertions(+), 9 deletions(-) create mode 100644 check_calendar_log.txt diff --git a/check_calendar_log.txt b/check_calendar_log.txt new file mode 100644 index 00000000..203d1b21 --- /dev/null +++ b/check_calendar_log.txt @@ -0,0 +1,1943 @@ +******************** Comparing raw data: /Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day ******************** +******************** to resampled data: 
/Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day ******************** +File: tasmax_hadukgrid_uk_1km_day_19800101-19800131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1980-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19800301-19800331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-03-31'} +Dates in resampled not in raw: {'1980-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19800401-19800430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1980-04-30'} +Dates in resampled not in raw: {'1980-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19800501-19800531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-05-31'} +Dates in resampled not in raw: {'1980-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19800601-19800630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1980-06-30'} +Dates in resampled not in raw: {'1980-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19800701-19800731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-07-31'} +Dates in resampled not in raw: {'1980-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19800801-19800831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-08-31'} +Dates in resampled not in raw: {'1980-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19801001-19801031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-10-31'} +Dates in resampled not in raw: {'1980-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19801201-19801231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1980-12-31'} +Dates in resampled not in raw: {'1980-11-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19810101-19810131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-01-31'} +Dates in resampled not in raw: {'1981-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19810201-19810228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1981-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19810301-19810331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-03-31', '1981-03-30'} +Dates in resampled not in raw: {'1981-02-29', '1981-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19810401-19810430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1981-04-30', '1981-04-29'} +Dates in resampled not in raw: {'1981-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19810501-19810531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-05-31', '1981-05-30'} +Dates in resampled not in raw: {'1981-04-30', '1981-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19810601-19810630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1981-06-30'} +Dates in resampled not in raw: {'1981-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19810701-19810731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1981-07-31', '1981-07-30'} +Dates in resampled not in raw: {'1981-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19810801-19810831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-08-31'} +Dates in resampled not in raw: {'1981-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19810901-19810930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1981-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19811001-19811031.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-10-31'} +Dates in resampled not in raw: {'1981-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19811101-19811130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1981-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19811201-19811231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1981-12-31'} +Dates in resampled not in raw: {'1981-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19820101-19820131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-01-31'} +Dates in resampled not in raw: {'1982-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19820201-19820228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1982-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19820301-19820331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-03-31', '1982-03-30'} +Dates in resampled not in raw: {'1982-02-29', '1982-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19820401-19820430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1982-04-29', '1982-04-30'} +Dates in resampled not in raw: {'1982-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19820501-19820531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-05-31', '1982-05-30'} +Dates in resampled not in raw: {'1982-04-29', '1982-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19820601-19820630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1982-06-30'} +Dates in resampled not in raw: {'1982-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19820701-19820731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1982-07-30', '1982-07-31'} +Dates 
in resampled not in raw: {'1982-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19820801-19820831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-08-31'} +Dates in resampled not in raw: {'1982-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19820901-19820930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1982-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19821001-19821031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-10-31'} +Dates in resampled not in raw: {'1982-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19821101-19821130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1982-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19821201-19821231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1982-12-31'} +Dates in resampled not in raw: {'1982-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19830101-19830131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-01-31'} +Dates in resampled not in raw: {'1983-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19830201-19830228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1983-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19830301-19830331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-03-30', '1983-03-31'} +Dates in resampled not in raw: {'1983-02-30', '1983-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19830401-19830430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1983-04-29', '1983-04-30'} +Dates in resampled not in raw: {'1983-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19830501-19830531.nc produced errors: +raw # days: 31 - 
resampled # days: 31 +Dates in raw not in resampled: {'1983-05-31', '1983-05-30'} +Dates in resampled not in raw: {'1983-04-29', '1983-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19830601-19830630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1983-06-30'} +Dates in resampled not in raw: {'1983-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19830701-19830731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1983-07-31', '1983-07-30'} +Dates in resampled not in raw: {'1983-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19830801-19830831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-08-31'} +Dates in resampled not in raw: {'1983-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19830901-19830930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1983-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19831001-19831031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-10-31'} +Dates in resampled not in raw: {'1983-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19831101-19831130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1983-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19831201-19831231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1983-12-31'} +Dates in resampled not in raw: {'1983-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19840101-19840131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1984-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19840301-19840331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-03-31'} +Dates in resampled not in raw: {'1984-02-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19840401-19840430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1984-04-30'} +Dates in resampled not in raw: {'1984-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19840501-19840531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-05-31'} +Dates in resampled not in raw: {'1984-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19840601-19840630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1984-06-30'} +Dates in resampled not in raw: {'1984-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19840701-19840731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-07-31'} +Dates in resampled not in raw: {'1984-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19840801-19840831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-08-31'} +Dates in resampled not in raw: {'1984-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19841001-19841031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-10-31'} +Dates in resampled not in raw: {'1984-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19841201-19841231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1984-12-31'} +Dates in resampled not in raw: {'1984-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19850101-19850131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-01-31'} +Dates in resampled not in raw: {'1985-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19850201-19850228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1985-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19850301-19850331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-03-31', 
'1985-03-30'} +Dates in resampled not in raw: {'1985-02-29', '1985-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19850401-19850430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1985-04-30', '1985-04-29'} +Dates in resampled not in raw: {'1985-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19850501-19850531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-05-31', '1985-05-30'} +Dates in resampled not in raw: {'1985-04-30', '1985-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19850601-19850630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1985-06-30'} +Dates in resampled not in raw: {'1985-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19850701-19850731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1985-07-31', '1985-07-30'} +Dates in resampled not in raw: {'1985-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19850801-19850831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-08-31'} +Dates in resampled not in raw: {'1985-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19850901-19850930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1985-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19851001-19851031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-10-31'} +Dates in resampled not in raw: {'1985-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19851101-19851130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1985-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19851201-19851231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1985-12-31'} +Dates in resampled not in raw: {'1985-11-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19860101-19860131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-01-31'} +Dates in resampled not in raw: {'1986-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19860201-19860228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1986-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19860301-19860331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-03-31', '1986-03-30'} +Dates in resampled not in raw: {'1986-02-30', '1986-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19860401-19860430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1986-04-30', '1986-04-29'} +Dates in resampled not in raw: {'1986-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19860501-19860531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-05-30', '1986-05-31'} +Dates in resampled not in raw: {'1986-04-30', '1986-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19860601-19860630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1986-06-30'} +Dates in resampled not in raw: {'1986-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19860701-19860731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1986-07-30', '1986-07-31'} +Dates in resampled not in raw: {'1986-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19860801-19860831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-08-31'} +Dates in resampled not in raw: {'1986-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19860901-19860930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1986-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19861001-19861031.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-10-31'} +Dates in resampled not in raw: {'1986-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19861101-19861130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1986-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19861201-19861231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1986-12-31'} +Dates in resampled not in raw: {'1986-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19870101-19870131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-01-31'} +Dates in resampled not in raw: {'1987-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19870201-19870228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1987-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19870301-19870331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-03-30', '1987-03-31'} +Dates in resampled not in raw: {'1987-02-30', '1987-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19870401-19870430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1987-04-29', '1987-04-30'} +Dates in resampled not in raw: {'1987-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19870501-19870531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-05-31', '1987-05-30'} +Dates in resampled not in raw: {'1987-04-29', '1987-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19870601-19870630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1987-06-30'} +Dates in resampled not in raw: {'1987-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19870701-19870731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1987-07-31', '1987-07-30'} +Dates 
in resampled not in raw: {'1987-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19870801-19870831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-08-31'} +Dates in resampled not in raw: {'1987-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19870901-19870930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1987-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19871001-19871031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-10-31'} +Dates in resampled not in raw: {'1987-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19871101-19871130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1987-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19871201-19871231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1987-12-31'} +Dates in resampled not in raw: {'1987-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19880101-19880131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1988-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19880301-19880331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-03-31'} +Dates in resampled not in raw: {'1988-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19880401-19880430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1988-04-30'} +Dates in resampled not in raw: {'1988-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19880501-19880531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-05-31'} +Dates in resampled not in raw: {'1988-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19880601-19880630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in 
resampled: {'1988-06-30'} +Dates in resampled not in raw: {'1988-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19880701-19880731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-07-31'} +Dates in resampled not in raw: {'1988-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19880801-19880831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-08-31'} +Dates in resampled not in raw: {'1988-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19881001-19881031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-10-31'} +Dates in resampled not in raw: {'1988-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19881201-19881231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1988-12-31'} +Dates in resampled not in raw: {'1988-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19890101-19890131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-01-31'} +Dates in resampled not in raw: {'1989-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19890201-19890228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1989-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19890301-19890331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-03-31', '1989-03-30'} +Dates in resampled not in raw: {'1989-02-29', '1989-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19890401-19890430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1989-04-29', '1989-04-30'} +Dates in resampled not in raw: {'1989-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19890501-19890531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-05-31', '1989-05-30'} +Dates in resampled not in raw: {'1989-04-29', '1989-04-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19890601-19890630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1989-06-30'} +Dates in resampled not in raw: {'1989-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19890701-19890731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1989-07-30', '1989-07-31'} +Dates in resampled not in raw: {'1989-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19890801-19890831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-08-31'} +Dates in resampled not in raw: {'1989-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19890901-19890930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1989-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19891001-19891031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-10-31'} +Dates in resampled not in raw: {'1989-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19891101-19891130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1989-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19891201-19891231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1989-12-31'} +Dates in resampled not in raw: {'1989-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19900101-19900131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-01-31'} +Dates in resampled not in raw: {'1990-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19900201-19900228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1990-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19900301-19900331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-03-30', 
'1990-03-31'} +Dates in resampled not in raw: {'1990-02-29', '1990-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19900401-19900430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1990-04-30', '1990-04-29'} +Dates in resampled not in raw: {'1990-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19900501-19900531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-05-30', '1990-05-31'} +Dates in resampled not in raw: {'1990-04-30', '1990-04-29'} +File: tasmax_hadukgrid_uk_1km_day_19900601-19900630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1990-06-30'} +Dates in resampled not in raw: {'1990-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19900701-19900731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1990-07-31', '1990-07-30'} +Dates in resampled not in raw: {'1990-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19900801-19900831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-08-31'} +Dates in resampled not in raw: {'1990-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19900901-19900930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1990-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19901001-19901031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-10-31'} +Dates in resampled not in raw: {'1990-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19901101-19901130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1990-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19901201-19901231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1990-12-31'} +Dates in resampled not in raw: {'1990-11-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19910101-19910131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-01-31'} +Dates in resampled not in raw: {'1991-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19910201-19910228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1991-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19910301-19910331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-03-30', '1991-03-31'} +Dates in resampled not in raw: {'1991-02-30', '1991-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19910401-19910430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1991-04-30', '1991-04-29'} +Dates in resampled not in raw: {'1991-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19910501-19910531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-05-30', '1991-05-31'} +Dates in resampled not in raw: {'1991-04-29', '1991-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19910601-19910630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1991-06-30'} +Dates in resampled not in raw: {'1991-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19910701-19910731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1991-07-31', '1991-07-30'} +Dates in resampled not in raw: {'1991-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19910801-19910831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-08-31'} +Dates in resampled not in raw: {'1991-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19910901-19910930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1991-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19911001-19911031.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-10-31'} +Dates in resampled not in raw: {'1991-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19911101-19911130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1991-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19911201-19911231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1991-12-31'} +Dates in resampled not in raw: {'1991-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19920101-19920131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1992-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19920301-19920331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-03-31'} +Dates in resampled not in raw: {'1992-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19920401-19920430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1992-04-30'} +Dates in resampled not in raw: {'1992-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19920501-19920531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-05-31'} +Dates in resampled not in raw: {'1992-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19920601-19920630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1992-06-30'} +Dates in resampled not in raw: {'1992-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19920701-19920731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-07-31'} +Dates in resampled not in raw: {'1992-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19920801-19920831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-08-31'} +Dates in resampled not in raw: {'1992-07-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19921001-19921031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-10-31'} +Dates in resampled not in raw: {'1992-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19921201-19921231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1992-12-31'} +Dates in resampled not in raw: {'1992-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19930101-19930131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-01-31'} +Dates in resampled not in raw: {'1993-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19930201-19930228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1993-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19930301-19930331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-03-30', '1993-03-31'} +Dates in resampled not in raw: {'1993-02-30', '1993-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19930401-19930430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1993-04-29', '1993-04-30'} +Dates in resampled not in raw: {'1993-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19930501-19930531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-05-30', '1993-05-31'} +Dates in resampled not in raw: {'1993-04-29', '1993-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19930601-19930630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1993-06-30'} +Dates in resampled not in raw: {'1993-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19930701-19930731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1993-07-30', '1993-07-31'} +Dates in resampled not in raw: {'1993-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19930801-19930831.nc produced errors: +raw 
# days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-08-31'} +Dates in resampled not in raw: {'1993-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19930901-19930930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1993-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19931001-19931031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-10-31'} +Dates in resampled not in raw: {'1993-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19931101-19931130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1993-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19931201-19931231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1993-12-31'} +Dates in resampled not in raw: {'1993-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19940101-19940131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-01-31'} +Dates in resampled not in raw: {'1994-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19940201-19940228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1994-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19940301-19940331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-03-31', '1994-03-30'} +Dates in resampled not in raw: {'1994-02-30', '1994-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19940401-19940430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1994-04-29', '1994-04-30'} +Dates in resampled not in raw: {'1994-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19940501-19940531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-05-30', '1994-05-31'} +Dates in resampled not in raw: 
{'1994-04-29', '1994-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19940601-19940630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1994-06-30'} +Dates in resampled not in raw: {'1994-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19940701-19940731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1994-07-31', '1994-07-30'} +Dates in resampled not in raw: {'1994-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19940801-19940831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-08-31'} +Dates in resampled not in raw: {'1994-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19940901-19940930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1994-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19941001-19941031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-10-31'} +Dates in resampled not in raw: {'1994-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19941101-19941130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1994-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19941201-19941231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1994-12-31'} +Dates in resampled not in raw: {'1994-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19950101-19950131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-01-31'} +Dates in resampled not in raw: {'1995-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19950201-19950228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1995-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19950301-19950331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not 
in resampled: {'1995-03-31', '1995-03-30'} +Dates in resampled not in raw: {'1995-02-30', '1995-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19950401-19950430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1995-04-29', '1995-04-30'} +Dates in resampled not in raw: {'1995-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19950501-19950531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-05-31', '1995-05-30'} +Dates in resampled not in raw: {'1995-04-29', '1995-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19950601-19950630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1995-06-30'} +Dates in resampled not in raw: {'1995-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19950701-19950731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1995-07-31', '1995-07-30'} +Dates in resampled not in raw: {'1995-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19950801-19950831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-08-31'} +Dates in resampled not in raw: {'1995-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19950901-19950930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1995-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19951001-19951031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-10-31'} +Dates in resampled not in raw: {'1995-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19951101-19951130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1995-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19951201-19951231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1995-12-31'} +Dates in resampled not in raw: {'1995-11-30'} 
+File: tasmax_hadukgrid_uk_1km_day_19960101-19960131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1996-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19960301-19960331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-03-31'} +Dates in resampled not in raw: {'1996-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19960401-19960430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1996-04-30'} +Dates in resampled not in raw: {'1996-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19960501-19960531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-05-31'} +Dates in resampled not in raw: {'1996-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19960601-19960630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1996-06-30'} +Dates in resampled not in raw: {'1996-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19960701-19960731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-07-31'} +Dates in resampled not in raw: {'1996-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19960801-19960831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-08-31'} +Dates in resampled not in raw: {'1996-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19961001-19961031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-10-31'} +Dates in resampled not in raw: {'1996-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19961201-19961231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1996-12-31'} +Dates in resampled not in raw: {'1996-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19970101-19970131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: 
{'1997-01-31'} +Dates in resampled not in raw: {'1997-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19970201-19970228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1997-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19970301-19970331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-03-31', '1997-03-30'} +Dates in resampled not in raw: {'1997-02-30', '1997-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19970401-19970430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1997-04-29', '1997-04-30'} +Dates in resampled not in raw: {'1997-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19970501-19970531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-05-30', '1997-05-31'} +Dates in resampled not in raw: {'1997-04-29', '1997-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19970601-19970630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1997-06-30'} +Dates in resampled not in raw: {'1997-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19970701-19970731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1997-07-31', '1997-07-30'} +Dates in resampled not in raw: {'1997-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19970801-19970831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-08-31'} +Dates in resampled not in raw: {'1997-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19970901-19970930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1997-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19971001-19971031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-10-31'} +Dates in resampled not in raw: {'1997-09-30'} +File: 
tasmax_hadukgrid_uk_1km_day_19971101-19971130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1997-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19971201-19971231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1997-12-31'} +Dates in resampled not in raw: {'1997-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19980101-19980131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-01-31'} +Dates in resampled not in raw: {'1998-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19980201-19980228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1998-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19980301-19980331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-03-31', '1998-03-30'} +Dates in resampled not in raw: {'1998-02-30', '1998-02-29'} +File: tasmax_hadukgrid_uk_1km_day_19980401-19980430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1998-04-29', '1998-04-30'} +Dates in resampled not in raw: {'1998-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19980501-19980531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-05-31', '1998-05-30'} +Dates in resampled not in raw: {'1998-04-29', '1998-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19980601-19980630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1998-06-30'} +Dates in resampled not in raw: {'1998-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19980701-19980731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1998-07-31', '1998-07-30'} +Dates in resampled not in raw: {'1998-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19980801-19980831.nc produced errors: +raw # days: 
31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-08-31'} +Dates in resampled not in raw: {'1998-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19980901-19980930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1998-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19981001-19981031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-10-31'} +Dates in resampled not in raw: {'1998-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19981101-19981130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1998-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19981201-19981231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1998-12-31'} +Dates in resampled not in raw: {'1998-11-30'} +File: tasmax_hadukgrid_uk_1km_day_19990101-19990131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-01-31'} +Dates in resampled not in raw: {'1999-02-01'} +File: tasmax_hadukgrid_uk_1km_day_19990201-19990228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'1999-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19990301-19990331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-03-31', '1999-03-30'} +Dates in resampled not in raw: {'1999-02-29', '1999-02-30'} +File: tasmax_hadukgrid_uk_1km_day_19990401-19990430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1999-04-29', '1999-04-30'} +Dates in resampled not in raw: {'1999-03-30'} +File: tasmax_hadukgrid_uk_1km_day_19990501-19990531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-05-31', '1999-05-30'} +Dates in resampled not in raw: 
{'1999-04-29', '1999-04-30'} +File: tasmax_hadukgrid_uk_1km_day_19990601-19990630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'1999-06-30'} +Dates in resampled not in raw: {'1999-05-30'} +File: tasmax_hadukgrid_uk_1km_day_19990701-19990731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'1999-07-30', '1999-07-31'} +Dates in resampled not in raw: {'1999-06-30'} +File: tasmax_hadukgrid_uk_1km_day_19990801-19990831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-08-31'} +Dates in resampled not in raw: {'1999-07-30'} +File: tasmax_hadukgrid_uk_1km_day_19990901-19990930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1999-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19991001-19991031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-10-31'} +Dates in resampled not in raw: {'1999-09-30'} +File: tasmax_hadukgrid_uk_1km_day_19991101-19991130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'1999-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_19991201-19991231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'1999-12-31'} +Dates in resampled not in raw: {'1999-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20000101-20000131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2000-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20000301-20000331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-03-31'} +Dates in resampled not in raw: {'2000-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20000401-20000430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not 
in resampled: {'2000-04-30'} +Dates in resampled not in raw: {'2000-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20000501-20000531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-05-31'} +Dates in resampled not in raw: {'2000-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20000601-20000630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2000-06-30'} +Dates in resampled not in raw: {'2000-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20000701-20000731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-07-31'} +Dates in resampled not in raw: {'2000-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20000801-20000831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-08-31'} +Dates in resampled not in raw: {'2000-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20001001-20001031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-10-31'} +Dates in resampled not in raw: {'2000-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20001201-20001231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2000-12-31'} +Dates in resampled not in raw: {'2000-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20010101-20010131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-01-31'} +Dates in resampled not in raw: {'2001-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20010201-20010228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2001-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20010301-20010331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-03-31', '2001-03-30'} +Dates in resampled not in raw: {'2001-02-30', '2001-02-29'} +File: 
tasmax_hadukgrid_uk_1km_day_20010401-20010430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2001-04-29', '2001-04-30'} +Dates in resampled not in raw: {'2001-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20010501-20010531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-05-30', '2001-05-31'} +Dates in resampled not in raw: {'2001-04-29', '2001-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20010601-20010630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2001-06-30'} +Dates in resampled not in raw: {'2001-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20010701-20010731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2001-07-30', '2001-07-31'} +Dates in resampled not in raw: {'2001-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20010801-20010831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-08-31'} +Dates in resampled not in raw: {'2001-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20010901-20010930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2001-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20011001-20011031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-10-31'} +Dates in resampled not in raw: {'2001-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20011101-20011130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2001-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20011201-20011231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2001-12-31'} +Dates in resampled not in raw: {'2001-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20020101-20020131.nc produced errors: +raw # days: 31 - resampled # days: 31 
+Dates in raw not in resampled: {'2002-01-31'} +Dates in resampled not in raw: {'2002-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20020201-20020228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2002-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20020301-20020331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-03-30', '2002-03-31'} +Dates in resampled not in raw: {'2002-02-29', '2002-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20020401-20020430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2002-04-30', '2002-04-29'} +Dates in resampled not in raw: {'2002-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20020501-20020531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-05-31', '2002-05-30'} +Dates in resampled not in raw: {'2002-04-30', '2002-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20020601-20020630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2002-06-30'} +Dates in resampled not in raw: {'2002-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20020701-20020731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2002-07-30', '2002-07-31'} +Dates in resampled not in raw: {'2002-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20020801-20020831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-08-31'} +Dates in resampled not in raw: {'2002-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20020901-20020930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2002-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20021001-20021031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-10-31'} +Dates in resampled not in raw: 
{'2002-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20021101-20021130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2002-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20021201-20021231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2002-12-31'} +Dates in resampled not in raw: {'2002-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20030101-20030131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-01-31'} +Dates in resampled not in raw: {'2003-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20030201-20030228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2003-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20030301-20030331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-03-30', '2003-03-31'} +Dates in resampled not in raw: {'2003-02-29', '2003-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20030401-20030430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2003-04-30', '2003-04-29'} +Dates in resampled not in raw: {'2003-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20030501-20030531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-05-31', '2003-05-30'} +Dates in resampled not in raw: {'2003-04-30', '2003-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20030601-20030630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2003-06-30'} +Dates in resampled not in raw: {'2003-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20030701-20030731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2003-07-30', '2003-07-31'} +Dates in resampled not in raw: {'2003-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20030801-20030831.nc produced 
errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-08-31'} +Dates in resampled not in raw: {'2003-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20030901-20030930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2003-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20031001-20031031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-10-31'} +Dates in resampled not in raw: {'2003-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20031101-20031130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2003-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20031201-20031231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2003-12-31'} +Dates in resampled not in raw: {'2003-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20040101-20040131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2004-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20040301-20040331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-03-31'} +Dates in resampled not in raw: {'2004-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20040401-20040430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2004-04-30'} +Dates in resampled not in raw: {'2004-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20040501-20040531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-05-31'} +Dates in resampled not in raw: {'2004-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20040601-20040630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2004-06-30'} +Dates in resampled not in raw: {'2004-05-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20040701-20040731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-07-31'} +Dates in resampled not in raw: {'2004-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20040801-20040831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-08-31'} +Dates in resampled not in raw: {'2004-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20041001-20041031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-10-31'} +Dates in resampled not in raw: {'2004-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20041201-20041231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2004-12-31'} +Dates in resampled not in raw: {'2004-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20050101-20050131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-01-31'} +Dates in resampled not in raw: {'2005-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20050201-20050228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2005-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20050301-20050331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-03-31', '2005-03-30'} +Dates in resampled not in raw: {'2005-02-29', '2005-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20050401-20050430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2005-04-30', '2005-04-29'} +Dates in resampled not in raw: {'2005-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20050501-20050531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-05-31', '2005-05-30'} +Dates in resampled not in raw: {'2005-04-30', '2005-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20050601-20050630.nc produced errors: +raw # days: 30 - 
resampled # days: 30 +Dates in raw not in resampled: {'2005-06-30'} +Dates in resampled not in raw: {'2005-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20050701-20050731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2005-07-31', '2005-07-30'} +Dates in resampled not in raw: {'2005-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20050801-20050831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-08-31'} +Dates in resampled not in raw: {'2005-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20050901-20050930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2005-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20051001-20051031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-10-31'} +Dates in resampled not in raw: {'2005-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20051101-20051130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2005-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20051201-20051231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2005-12-31'} +Dates in resampled not in raw: {'2005-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20060101-20060131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-01-31'} +Dates in resampled not in raw: {'2006-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20060201-20060228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2006-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20060301-20060331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-03-30', '2006-03-31'} +Dates in resampled not in raw: {'2006-02-29', '2006-02-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20060401-20060430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2006-04-29', '2006-04-30'} +Dates in resampled not in raw: {'2006-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20060501-20060531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-05-30', '2006-05-31'} +Dates in resampled not in raw: {'2006-04-29', '2006-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20060601-20060630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2006-06-30'} +Dates in resampled not in raw: {'2006-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20060701-20060731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2006-07-30', '2006-07-31'} +Dates in resampled not in raw: {'2006-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20060801-20060831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-08-31'} +Dates in resampled not in raw: {'2006-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20060901-20060930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2006-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20061001-20061031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-10-31'} +Dates in resampled not in raw: {'2006-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20061101-20061130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2006-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20061201-20061231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2006-12-31'} +Dates in resampled not in raw: {'2006-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20070101-20070131.nc produced errors: +raw # days: 31 - resampled # days: 31 
+Dates in raw not in resampled: {'2007-01-31'} +Dates in resampled not in raw: {'2007-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20070201-20070228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2007-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20070301-20070331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-03-30', '2007-03-31'} +Dates in resampled not in raw: {'2007-02-29', '2007-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20070401-20070430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2007-04-30', '2007-04-29'} +Dates in resampled not in raw: {'2007-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20070501-20070531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-05-31', '2007-05-30'} +Dates in resampled not in raw: {'2007-04-30', '2007-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20070601-20070630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2007-06-30'} +Dates in resampled not in raw: {'2007-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20070701-20070731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2007-07-30', '2007-07-31'} +Dates in resampled not in raw: {'2007-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20070801-20070831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-08-31'} +Dates in resampled not in raw: {'2007-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20070901-20070930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2007-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20071001-20071031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-10-31'} +Dates in resampled not in raw: 
{'2007-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20071101-20071130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2007-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20071201-20071231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2007-12-31'} +Dates in resampled not in raw: {'2007-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20080101-20080131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2008-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20080301-20080331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-03-31'} +Dates in resampled not in raw: {'2008-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20080401-20080430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2008-04-30'} +Dates in resampled not in raw: {'2008-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20080501-20080531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-05-31'} +Dates in resampled not in raw: {'2008-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20080601-20080630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2008-06-30'} +Dates in resampled not in raw: {'2008-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20080701-20080731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-07-31'} +Dates in resampled not in raw: {'2008-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20080801-20080831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-08-31'} +Dates in resampled not in raw: {'2008-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20081001-20081031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: 
{'2008-10-31'} +Dates in resampled not in raw: {'2008-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20081201-20081231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2008-12-31'} +Dates in resampled not in raw: {'2008-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20090101-20090131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-01-31'} +Dates in resampled not in raw: {'2009-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20090201-20090228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2009-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20090301-20090331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-03-30', '2009-03-31'} +Dates in resampled not in raw: {'2009-02-30', '2009-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20090401-20090430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2009-04-29', '2009-04-30'} +Dates in resampled not in raw: {'2009-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20090501-20090531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-05-30', '2009-05-31'} +Dates in resampled not in raw: {'2009-04-29', '2009-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20090601-20090630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2009-06-30'} +Dates in resampled not in raw: {'2009-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20090701-20090731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2009-07-30', '2009-07-31'} +Dates in resampled not in raw: {'2009-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20090801-20090831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-08-31'} +Dates in resampled not in raw: {'2009-07-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20090901-20090930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2009-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20091001-20091031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-10-31'} +Dates in resampled not in raw: {'2009-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20091101-20091130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2009-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20091201-20091231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2009-12-31'} +Dates in resampled not in raw: {'2009-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20100101-20100131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-01-31'} +Dates in resampled not in raw: {'2010-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20100201-20100228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2010-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20100301-20100331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-03-30', '2010-03-31'} +Dates in resampled not in raw: {'2010-02-29', '2010-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20100401-20100430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2010-04-29', '2010-04-30'} +Dates in resampled not in raw: {'2010-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20100501-20100531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-05-30', '2010-05-31'} +Dates in resampled not in raw: {'2010-04-29', '2010-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20100601-20100630.nc produced errors: +raw # days: 30 - resampled # days: 
30 +Dates in raw not in resampled: {'2010-06-30'} +Dates in resampled not in raw: {'2010-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20100701-20100731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2010-07-30', '2010-07-31'} +Dates in resampled not in raw: {'2010-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20100801-20100831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-08-31'} +Dates in resampled not in raw: {'2010-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20100901-20100930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2010-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20101001-20101031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-10-31'} +Dates in resampled not in raw: {'2010-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20101101-20101130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2010-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20101201-20101231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2010-12-31'} +Dates in resampled not in raw: {'2010-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20110101-20110131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-01-31'} +Dates in resampled not in raw: {'2011-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20110201-20110228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2011-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20110301-20110331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-03-31', '2011-03-30'} +Dates in resampled not in raw: {'2011-02-29', '2011-02-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20110401-20110430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2011-04-29', '2011-04-30'} +Dates in resampled not in raw: {'2011-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20110501-20110531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-05-30', '2011-05-31'} +Dates in resampled not in raw: {'2011-04-29', '2011-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20110601-20110630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2011-06-30'} +Dates in resampled not in raw: {'2011-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20110701-20110731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2011-07-30', '2011-07-31'} +Dates in resampled not in raw: {'2011-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20110801-20110831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-08-31'} +Dates in resampled not in raw: {'2011-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20110901-20110930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2011-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20111001-20111031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-10-31'} +Dates in resampled not in raw: {'2011-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20111101-20111130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2011-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20111201-20111231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2011-12-31'} +Dates in resampled not in raw: {'2011-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20120101-20120131.nc produced errors: +raw # days: 31 - resampled # days: 30 
+Dates in raw not in resampled: {'2012-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20120301-20120331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-03-31'} +Dates in resampled not in raw: {'2012-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20120401-20120430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2012-04-30'} +Dates in resampled not in raw: {'2012-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20120501-20120531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-05-31'} +Dates in resampled not in raw: {'2012-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20120601-20120630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2012-06-30'} +Dates in resampled not in raw: {'2012-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20120701-20120731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-07-31'} +Dates in resampled not in raw: {'2012-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20120801-20120831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-08-31'} +Dates in resampled not in raw: {'2012-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20121001-20121031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-10-31'} +Dates in resampled not in raw: {'2012-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20121201-20121231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2012-12-31'} +Dates in resampled not in raw: {'2012-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20130101-20130131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-01-31'} +Dates in resampled not in raw: {'2013-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20130201-20130228.nc 
produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2013-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20130301-20130331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-03-30', '2013-03-31'} +Dates in resampled not in raw: {'2013-02-29', '2013-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20130401-20130430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2013-04-29', '2013-04-30'} +Dates in resampled not in raw: {'2013-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20130501-20130531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-05-30', '2013-05-31'} +Dates in resampled not in raw: {'2013-04-29', '2013-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20130601-20130630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2013-06-30'} +Dates in resampled not in raw: {'2013-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20130701-20130731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2013-07-30', '2013-07-31'} +Dates in resampled not in raw: {'2013-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20130801-20130831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-08-31'} +Dates in resampled not in raw: {'2013-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20130901-20130930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2013-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20131001-20131031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-10-31'} +Dates in resampled not in raw: {'2013-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20131101-20131130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in 
resampled: {'2013-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20131201-20131231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2013-12-31'} +Dates in resampled not in raw: {'2013-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20140101-20140131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-01-31'} +Dates in resampled not in raw: {'2014-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20140201-20140228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2014-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20140301-20140331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-03-31', '2014-03-30'} +Dates in resampled not in raw: {'2014-02-30', '2014-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20140401-20140430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2014-04-29', '2014-04-30'} +Dates in resampled not in raw: {'2014-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20140501-20140531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-05-31', '2014-05-30'} +Dates in resampled not in raw: {'2014-04-29', '2014-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20140601-20140630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2014-06-30'} +Dates in resampled not in raw: {'2014-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20140701-20140731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2014-07-30', '2014-07-31'} +Dates in resampled not in raw: {'2014-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20140801-20140831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-08-31'} +Dates in resampled not in raw: {'2014-07-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20140901-20140930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2014-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20141001-20141031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-10-31'} +Dates in resampled not in raw: {'2014-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20141101-20141130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2014-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20141201-20141231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2014-12-31'} +Dates in resampled not in raw: {'2014-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20150101-20150131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-01-31'} +Dates in resampled not in raw: {'2015-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20150201-20150228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2015-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20150301-20150331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-03-30', '2015-03-31'} +Dates in resampled not in raw: {'2015-02-30', '2015-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20150401-20150430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2015-04-30', '2015-04-29'} +Dates in resampled not in raw: {'2015-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20150501-20150531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-05-30', '2015-05-31'} +Dates in resampled not in raw: {'2015-04-30', '2015-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20150601-20150630.nc produced errors: +raw # days: 30 - resampled # days: 
30 +Dates in raw not in resampled: {'2015-06-30'} +Dates in resampled not in raw: {'2015-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20150701-20150731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2015-07-30', '2015-07-31'} +Dates in resampled not in raw: {'2015-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20150801-20150831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-08-31'} +Dates in resampled not in raw: {'2015-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20150901-20150930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2015-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20151001-20151031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-10-31'} +Dates in resampled not in raw: {'2015-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20151101-20151130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2015-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20151201-20151231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2015-12-31'} +Dates in resampled not in raw: {'2015-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20160101-20160131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2016-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20160301-20160331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-03-31'} +Dates in resampled not in raw: {'2016-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20160401-20160430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2016-04-30'} +Dates in resampled not in raw: {'2016-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20160501-20160531.nc 
produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-05-31'} +Dates in resampled not in raw: {'2016-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20160601-20160630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2016-06-30'} +Dates in resampled not in raw: {'2016-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20160701-20160731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-07-31'} +Dates in resampled not in raw: {'2016-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20160801-20160831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-08-31'} +Dates in resampled not in raw: {'2016-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20161001-20161031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-10-31'} +Dates in resampled not in raw: {'2016-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20161201-20161231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2016-12-31'} +Dates in resampled not in raw: {'2016-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20170101-20170131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-01-31'} +Dates in resampled not in raw: {'2017-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20170201-20170228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2017-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20170301-20170331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-03-31', '2017-03-30'} +Dates in resampled not in raw: {'2017-02-29', '2017-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20170401-20170430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2017-04-30', '2017-04-29'} +Dates in 
resampled not in raw: {'2017-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20170501-20170531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-05-30', '2017-05-31'} +Dates in resampled not in raw: {'2017-04-30', '2017-04-29'} +File: tasmax_hadukgrid_uk_1km_day_20170601-20170630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2017-06-30'} +Dates in resampled not in raw: {'2017-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20170701-20170731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2017-07-30', '2017-07-31'} +Dates in resampled not in raw: {'2017-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20170801-20170831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-08-31'} +Dates in resampled not in raw: {'2017-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20170901-20170930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2017-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20171001-20171031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-10-31'} +Dates in resampled not in raw: {'2017-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20171101-20171130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2017-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20171201-20171231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2017-12-31'} +Dates in resampled not in raw: {'2017-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20180101-20180131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-01-31'} +Dates in resampled not in raw: {'2018-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20180201-20180228.nc produced errors: +raw # 
days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2018-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20180301-20180331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-03-30', '2018-03-31'} +Dates in resampled not in raw: {'2018-02-29', '2018-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20180401-20180430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2018-04-29', '2018-04-30'} +Dates in resampled not in raw: {'2018-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20180501-20180531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-05-30', '2018-05-31'} +Dates in resampled not in raw: {'2018-04-29', '2018-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20180601-20180630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2018-06-30'} +Dates in resampled not in raw: {'2018-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20180701-20180731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2018-07-31', '2018-07-30'} +Dates in resampled not in raw: {'2018-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20180801-20180831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-08-31'} +Dates in resampled not in raw: {'2018-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20180901-20180930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2018-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20181001-20181031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-10-31'} +Dates in resampled not in raw: {'2018-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20181101-20181130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2018-11-30'} 
+Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20181201-20181231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2018-12-31'} +Dates in resampled not in raw: {'2018-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20190101-20190131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-01-31'} +Dates in resampled not in raw: {'2019-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20190201-20190228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2019-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20190301-20190331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-03-30', '2019-03-31'} +Dates in resampled not in raw: {'2019-02-30', '2019-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20190401-20190430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2019-04-29', '2019-04-30'} +Dates in resampled not in raw: {'2019-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20190501-20190531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-05-30', '2019-05-31'} +Dates in resampled not in raw: {'2019-04-29', '2019-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20190601-20190630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2019-06-30'} +Dates in resampled not in raw: {'2019-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20190701-20190731.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2019-07-31', '2019-07-30'} +Dates in resampled not in raw: {'2019-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20190801-20190831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-08-31'} +Dates in resampled not in raw: {'2019-07-30'} +File: 
tasmax_hadukgrid_uk_1km_day_20190901-20190930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2019-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20191001-20191031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-10-31'} +Dates in resampled not in raw: {'2019-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20191101-20191130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2019-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20191201-20191231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2019-12-31'} +Dates in resampled not in raw: {'2019-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20200101-20200131.nc produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2020-01-31'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20200301-20200331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-03-31'} +Dates in resampled not in raw: {'2020-02-30'} +File: tasmax_hadukgrid_uk_1km_day_20200401-20200430.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2020-04-30'} +Dates in resampled not in raw: {'2020-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20200501-20200531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-05-31'} +Dates in resampled not in raw: {'2020-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20200601-20200630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2020-06-30'} +Dates in resampled not in raw: {'2020-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20200701-20200731.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-07-31'} +Dates in resampled 
not in raw: {'2020-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20200801-20200831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-08-31'} +Dates in resampled not in raw: {'2020-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20201001-20201031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-10-31'} +Dates in resampled not in raw: {'2020-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20201201-20201231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2020-12-31'} +Dates in resampled not in raw: {'2020-11-30'} +File: tasmax_hadukgrid_uk_1km_day_20210101-20210131.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-01-31'} +Dates in resampled not in raw: {'2021-02-01'} +File: tasmax_hadukgrid_uk_1km_day_20210201-20210228.nc produced errors: +raw # days: 28 - resampled # days: 27 +Dates in raw not in resampled: {'2021-02-01'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20210301-20210331.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-03-31', '2021-03-30'} +Dates in resampled not in raw: {'2021-02-30', '2021-02-29'} +File: tasmax_hadukgrid_uk_1km_day_20210401-20210430.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2021-04-29', '2021-04-30'} +Dates in resampled not in raw: {'2021-03-30'} +File: tasmax_hadukgrid_uk_1km_day_20210501-20210531.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-05-31', '2021-05-30'} +Dates in resampled not in raw: {'2021-04-29', '2021-04-30'} +File: tasmax_hadukgrid_uk_1km_day_20210601-20210630.nc produced errors: +raw # days: 30 - resampled # days: 30 +Dates in raw not in resampled: {'2021-06-30'} +Dates in resampled not in raw: {'2021-05-30'} +File: tasmax_hadukgrid_uk_1km_day_20210701-20210731.nc 
produced errors: +raw # days: 31 - resampled # days: 30 +Dates in raw not in resampled: {'2021-07-30', '2021-07-31'} +Dates in resampled not in raw: {'2021-06-30'} +File: tasmax_hadukgrid_uk_1km_day_20210801-20210831.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-08-31'} +Dates in resampled not in raw: {'2021-07-30'} +File: tasmax_hadukgrid_uk_1km_day_20210901-20210930.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2021-09-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20211001-20211031.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-10-31'} +Dates in resampled not in raw: {'2021-09-30'} +File: tasmax_hadukgrid_uk_1km_day_20211101-20211130.nc produced errors: +raw # days: 30 - resampled # days: 29 +Dates in raw not in resampled: {'2021-11-30'} +Dates in resampled not in raw: set() +File: tasmax_hadukgrid_uk_1km_day_20211201-20211231.nc produced errors: +raw # days: 31 - resampled # days: 31 +Dates in raw not in resampled: {'2021-12-31'} +Dates in resampled not in raw: {'2021-11-30'} +______________________________ +missing dates: 0 +date '1980-03-30' appears 2 times. +date '1980-05-30' appears 2 times. +date '1980-07-30' appears 2 times. +date '1980-09-30' appears 2 times. +date '1980-11-30' appears 2 times. +date '1984-03-30' appears 2 times. +date '1984-05-30' appears 2 times. +date '1984-07-30' appears 2 times. +date '1984-09-30' appears 2 times. +date '1984-11-30' appears 2 times. +date '1988-03-30' appears 2 times. +date '1988-05-30' appears 2 times. +date '1988-07-30' appears 2 times. +date '1988-09-30' appears 2 times. +date '1988-11-30' appears 2 times. +date '1992-03-30' appears 2 times. +date '1992-05-30' appears 2 times. +date '1992-07-30' appears 2 times. +date '1992-09-30' appears 2 times. +date '1992-11-30' appears 2 times. +date '1996-03-30' appears 2 times. 
+date '1996-05-30' appears 2 times. +date '1996-07-30' appears 2 times. +date '1996-09-30' appears 2 times. +date '1996-11-30' appears 2 times. +date '2000-03-30' appears 2 times. +date '2000-05-30' appears 2 times. +date '2000-07-30' appears 2 times. +date '2000-09-30' appears 2 times. +date '2000-11-30' appears 2 times. +date '2004-03-30' appears 2 times. +date '2004-05-30' appears 2 times. +date '2004-07-30' appears 2 times. +date '2004-09-30' appears 2 times. +date '2004-11-30' appears 2 times. +date '2008-03-30' appears 2 times. +date '2008-05-30' appears 2 times. +date '2008-07-30' appears 2 times. +date '2008-09-30' appears 2 times. +date '2008-11-30' appears 2 times. +date '2012-03-30' appears 2 times. +date '2012-05-30' appears 2 times. +date '2012-07-30' appears 2 times. +date '2012-09-30' appears 2 times. +date '2012-11-30' appears 2 times. +date '2016-03-30' appears 2 times. +date '2016-05-30' appears 2 times. +date '2016-07-30' appears 2 times. +date '2016-09-30' appears 2 times. +date '2016-11-30' appears 2 times. +date '2020-03-30' appears 2 times. +date '2020-05-30' appears 2 times. +date '2020-07-30' appears 2 times. +date '2020-09-30' appears 2 times. +date '2020-11-30' appears 2 times. 
diff --git a/python/resampling/check_calendar.py b/python/resampling/check_calendar.py index a072f04d..945ce89a 100644 --- a/python/resampling/check_calendar.py +++ b/python/resampling/check_calendar.py @@ -4,9 +4,6 @@ import numpy as np from collections import Counter -path_ukcp = '/Volumes/vmfileshare/ClimateData/Raw/UKCP2.2/tasmax/01/latest/tasmax_rcp85_land-cpm_uk_2.2km_01_day_19801201-19811130.nc' -data_raw = xr.open_dataset(path_ukcp, decode_coords="all") - path_raw = '/Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day' path_preproc = '/Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day' #example files to be compared : @@ -29,8 +26,14 @@ raw_f = os.path.join(path_raw, file) preproc_f = os.path.join(path_preproc, output_name) #load before and after resampling files - data_raw = xr.open_dataset(raw_f, decode_coords="all") - data_preproc = xr.open_dataset(preproc_f, decode_coords="all") + try: + data_raw = xr.open_dataset(raw_f, decode_coords="all") + data_preproc = xr.open_dataset(preproc_f, decode_coords="all") + # catch OSError and KeyError + except (OSError, KeyError) as e: + with open('check_calendar_log.txt', 'a') as f: + f.write(f"File: {file} produced errors: {e}\n") + continue #convert to string time_raw = [str(t).split('T')[0] for t in data_raw.coords['time'].values] @@ -44,7 +47,6 @@ if dates_in_raw_not_in_pre | dates_in_pre_not_in_raw: # write to log file with open('check_calendar_log.txt', 'a') as f: - f.write(f"File: {file} produced errors:\n") f.write(f"raw # days: {len(set(time_raw))} - resampled # days: {len(set(time_pre))}\n") f.write(f"Dates in raw not in resampled: {dates_in_raw_not_in_pre}\n") f.write(f"Dates in resampled not in raw: {dates_in_pre_not_in_raw}\n") diff --git a/python/resampling/resampling_hads.py b/python/resampling/resampling_hads.py index b0111190..0394128e 100644 --- a/python/resampling/resampling_hads.py +++ b/python/resampling/resampling_hads.py @@ -29,7 +29,7 @@ def 
enforce_date_dropping(raw_data: xr.Dataset, converted_data: xr.Dataset) -> x Returns: xr.Dataset: The converted data with specific dates dropped. """ - month_day_drop = {(1, 31), (4, 1), (6, 1), (8, 1), (9, 31), (12, 1)} + month_day_drop = {(1, 31), (4, 1), (6, 1), (8, 1), (10, 1), (12, 1)} time_values = pd.DatetimeIndex(raw_data.coords['time'].values) # Get the indices of the dates to be dropped @@ -74,7 +74,9 @@ def resample_hadukgrid(x): # convert to 360 day calendar. data_360 = data.convert_calendar(dim='time', calendar='360_day', align_on='year') - data_360 = enforce_date_dropping(data,data_360) + # apply correction if leap year + if data.time.dt.is_leap_year.any(): + data_360 = enforce_date_dropping(data,data_360) # the dataset to be resample must have dimensions named projection_x_coordinate and projection_y_coordinate . resampled = data_360[[variable]].interp(projection_x_coordinate=x_grid, projection_y_coordinate=y_grid, method="linear") @@ -93,7 +95,6 @@ def resample_hadukgrid(x): if __name__ == "__main__": """ Script to resample UKHADs data from the command line - """ # Initialize parser parser = argparse.ArgumentParser() From 1ba738a7d0e468ba262fcfe1cf1652e8a68b0b57 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Wed, 30 Aug 2023 15:29:06 +0100 Subject: [PATCH 022/146] Separate run_cmethods.py to two scripts, one for preprocessing and one for runnning cmethods --- python/debiasing/preprocess_data.py | 203 +++++++++++++++++++++ python/debiasing/run_cmethods.py | 271 +++++++++++----------------- python/load_data/data_loader.py | 72 +++++--- 3 files changed, 358 insertions(+), 188 deletions(-) create mode 100644 python/debiasing/preprocess_data.py diff --git a/python/debiasing/preprocess_data.py b/python/debiasing/preprocess_data.py new file mode 100644 index 00000000..5adbeb68 --- /dev/null +++ b/python/debiasing/preprocess_data.py @@ -0,0 +1,203 @@ +#!/bin/python3 + +# Script to pre-process control, scenario and observation data (including 
combining files to cover a range of dates), +# before running debiasing methods. + +import argparse +import glob +import logging +import os +import sys +import time +import numpy as np +from datetime import datetime +from pathlib import Path + +sys.path.insert(1, '../load_data') +from data_loader import load_data + +# * ----- L O G G I N G ----- +formatter = logging.Formatter( + fmt='%(asctime)s %(module)s,line: %(lineno)d %(levelname)8s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' +) + +log = logging.getLogger() +log.setLevel(logging.INFO) +screen_handler = logging.StreamHandler(stream=sys.stdout) +screen_handler.setFormatter(formatter) +logging.getLogger().addHandler(screen_handler) + +# * ----- I N P U T - H A N D L I N G ----- +parser = argparse.ArgumentParser(description='Pre-process data before bias correction.') +parser.add_argument('--obs', '--observation', dest='obs_fpath', type=str, help='Path to observation datasets') +parser.add_argument('--contr', '--control', dest='contr_fpath', type=str, help='Path to control datasets') +parser.add_argument('--scen', '--scenario', dest='scen_fpath', type=str, + help='Path to scenario datasets (data to adjust)') +parser.add_argument('--contr_dates', '--control_date_range', dest='control_date_range', type=str, + help='Start and end dates for control and observation data (historic CPM/HADs data used to ' + 'calibrate the debiasing model) - in YYYYMMDD-YYYYMMDD format', + default='19801201-19991130') +parser.add_argument('--scen_dates', '--scenario_date_range', dest='scenario_date_range', type=str, + help='Start and end dates for scenario data (CPM data to be debiased using the ' + 'calibrated debiasing model) - multiple date ranges can be passed, ' + 'separated by "_", each in YYYYMMDD-YYYYMMDD format e.g., ' + '"20100101-20191231_20200101-20291231"', + default='20201201-20291130_20301201-20391130') +parser.add_argument('--shp', '--shapefile', dest='shapefile_fpath', type=str, help='Path to shapefile', default=None) 
+parser.add_argument('--out', '--output', dest='output_fpath', type=str, help='Path to save output files', default='.') +parser.add_argument('-v', '--variable', dest='var', type=str, default='tasmax', help='Variable to adjust') +parser.add_argument('-u', '--unit', dest='unit', type=str, default='°C', help='Unit of the varible') +parser.add_argument('-r', '--run_number', dest='run_number', type=str, default=None, + help='Run number to process (out of 13 runs in the CPM data)') + +params = vars(parser.parse_args()) + +obs_fpath = params['obs_fpath'] +contr_fpath = params['contr_fpath'] +scen_fpath = params['scen_fpath'] +calibration_date_range = params['control_date_range'] +projection_date_range = params['scenario_date_range'] +shape_fpath = params['shapefile_fpath'] +out_fpath = params['output_fpath'] +var = params['var'] +unit = params['unit'] +run_number = params['run_number'] + +calib_list = calibration_date_range.split('-') +h_date_period = (datetime.strptime(calib_list[0], '%Y%m%d').strftime('%Y-%m-%d'), + datetime.strptime(calib_list[1], '%Y%m%d').strftime('%Y-%m-%d')) +proj_list = projection_date_range.split('_') +future_time_periods = [(p.split('-')[0], p.split('-')[1]) for p in proj_list] +future_time_periods = [(datetime.strptime(p[0], '%Y%m%d').strftime('%Y-%m-%d'), + datetime.strptime(p[1], '%Y%m%d').strftime('%Y-%m-%d')) + for p in future_time_periods] + + +# * ----- ----- -----M A I N ----- ----- ----- +def preprocess_data() -> None: + start = time.time() + + # load every file found with extension in the path and selects only the input time period.from + # coordinates are renamed for compatibility with the cmethods-library + use_pr = False + if var == "rainfall": + use_pr = True + if run_number is not None: + ds_simh = \ + load_data(contr_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, + run_number=run_number, filter_filenames_on_run_number=True, use_pr=use_pr, shapefile_path=shape_fpath, + 
extension='tif')[var].rename({"projection_x_coordinate": "lon", + "projection_y_coordinate": "lat"}) + else: + ds_simh = \ + load_data(contr_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, + use_pr=use_pr, shapefile_path=shape_fpath, + extension='tif')[var].rename({"projection_x_coordinate": "lon", + "projection_y_coordinate": "lat"}) + + # find file extensions for observation data + files_obs_nc = glob.glob(f"{obs_fpath}/*.nc", recursive=True) + files_obs_tif = glob.glob(f"{obs_fpath}/*.tif", recursive=True) + + if len(files_obs_nc) > 0 and len(files_obs_tif) == 0: + ext = 'nc' + elif len(files_obs_nc) == 0 and len(files_obs_tif) > 0: + ext = 'tif' + elif len(files_obs_nc) == 0 and len(files_obs_tif) == 0: + raise Exception(f"No observation files found in {obs_fpath} with extensions .nc or .tif") + else: + raise Exception(f"A mix of .nc and .tif observation files found in {obs_fpath}, file extension should be the " + f"same for all files in the directory.") + ds_obs = load_data(obs_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, + shapefile_path=shape_fpath, extension=ext)[var].rename({"projection_x_coordinate": "lon", + "projection_y_coordinate": "lat"}) + log.info('Historical data Loaded.') + + # aligning calendars, e.g there might be a few extra days on the scenario data that has to be droped. 
+ ds_simh = ds_simh.sel(time=ds_obs.time, method='nearest') + + if ds_obs.shape != ds_simh.shape: + raise RuntimeError('Error, observed and simulated historical data must have same dimensions.') + + log.info('Resulting datasets with shape') + log.info(ds_obs.shape) + + # masking coordinates where the observed data has no values + ds_simh = ds_simh.where(~np.isnan(ds_obs.isel(time=0))) + ds_simh = ds_simh.where(ds_simh.values < 1000) + log.info('Historical data Masked') + + ds_obs.attrs['unit'] = unit + ds_simh.attrs['unit'] = unit + + # write simh to .nc file in output directory + simh_filename = f'simh_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}' + simh_path = os.path.join(out_fpath, f'{simh_filename}.nc') + if not os.path.exists(os.path.dirname(simh_path)): + folder_path = Path(os.path.dirname(simh_path)) + folder_path.mkdir(parents=True) + print(f"Saving historical control data to {simh_path}") + ds_simh.to_netcdf(simh_path) + log.info(f'Saved CPM data for calibration (historic) period to {simh_path}') + + # write ds_obs to .nc file in output directory + obsh_filename = f'obsh_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}' + obsh_path = os.path.join(out_fpath, f'{obsh_filename}.nc') + if not os.path.exists(os.path.dirname(obsh_path)): + folder_path = Path(os.path.dirname(obsh_path)) + folder_path.mkdir(parents=True) + print(f"Saving historical observation data to {obsh_path}") + ds_obs.to_netcdf(obsh_path) + log.info(f'Saved HADs data for calibration (historic) period to {obsh_path}') + + # looping over time periods + # this is done because the full time period for the scenario dataset is too large for memory. 
+ for f_date_period in future_time_periods: + + log.info(f'Running for {f_date_period} time period') + + try: + use_pr = False + if var == "rainfall": + use_pr = True + if run_number is not None: + ds_simp = \ + load_data(scen_fpath, date_range=f_date_period, variable=var, run_number=run_number, + filter_filenames_on_run_number=True, use_pr=use_pr, shapefile_path=shape_fpath, + extension='tif')[ + var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) + else: + ds_simp = \ + load_data(scen_fpath, date_range=f_date_period, variable=var, + use_pr=use_pr, shapefile_path=shape_fpath, extension='tif')[ + var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) + except Exception as e: + log.info(f'No data available for {f_date_period} time period') + continue + + # masking coordinates where the observed data has no values + ds_simp = ds_simp.where(~np.isnan(ds_obs.isel(time=0))) + ds_simp = ds_simp.where(ds_simp.values < 1000) + + ds_simp.attrs['unit'] = unit + + # write ds_simp to .nc file in output directory + simp_filename = f'simp_var-{var}_run-{run_number}_{f_date_period[0]}_{f_date_period[1]}' + simp_path = os.path.join(out_fpath, f'{simp_filename}.nc') + if not os.path.exists(os.path.dirname(simp_path)): + folder_path = Path(os.path.dirname(simp_path)) + folder_path.mkdir(parents=True) + print(f"Saving future scenario data to {simp_path}") + ds_simp.to_netcdf(simp_path) + log.info(f'Saved CPM data for projection (future) period {f_date_period} to {simp_path}') + + end = time.time() + log.info(f'total time in seconds: {end - start}') + log.info('Done') + + +if __name__ == '__main__': + preprocess_data() + +# * ----- ----- E O F ----- ----- diff --git a/python/debiasing/run_cmethods.py b/python/debiasing/run_cmethods.py index fe862fef..305f168b 100644 --- a/python/debiasing/run_cmethods.py +++ b/python/debiasing/run_cmethods.py @@ -1,9 +1,9 @@ #!/bin/python3 -# Script to adjust climate biases in climate data using 
the python-cmethods library This script is inspired in the -# script by Benjamin Thomas Schwertfeger (https://github.com/btschwertfeger/python-cmethods/blob/master/examples/do_bias_correction.py) -# and adapted to function with UKCP and HADs data. - +# Script to adjust climate biases in climate data using the python-cmethods library. This script is inspired by the +# script by Benjamin Thomas Schwertfeger +# (https://github.com/btschwertfeger/python-cmethods/blob/master/examples/do_bias_correction.py) +# and adapted to function with UKCP/CPM and HADs data. import argparse import glob @@ -13,14 +13,11 @@ import numpy as np import matplotlib.pyplot as plt import os -from datetime import datetime +import xarray as xr sys.path.insert(1, 'python-cmethods') from cmethods.CMethods import CMethods -sys.path.insert(1, '../load_data') -from data_loader import load_data - # * ----- L O G G I N G ----- formatter = logging.Formatter( fmt='%(asctime)s %(module)s,line: %(lineno)d %(levelname)8s | %(message)s', @@ -35,26 +32,15 @@ # * ----- I N P U T - H A N D L I N G ----- parser = argparse.ArgumentParser(description='Adjust climate data based on bias correction algorithms.') -parser.add_argument('--obs', '--observation', dest='obs_fpath', type=str, help='Path to observation datasets') -parser.add_argument('--contr', '--control', dest='contr_fpath', type=str, help='Path to control datasets') -parser.add_argument('--scen', '--scenario', dest='scen_fpath', type=str, - help='Path to scenario datasets (data to adjust)') -parser.add_argument('--contr_dates', '--control_date_range', dest='control_date_range', type=str, - help='Start and end dates for control and observation data (historic data used to ' - 'calibrate the debiasing model) - in YYYYMMDD-YYYYMMDD format', - default='19801201-19991130') -parser.add_argument('--scen_dates', '--scenario_date_range', dest='scenario_date_range', type=str, - help='Start and end dates for scenario data (data to be debiased using the ' - 
'calibrated debiasing model) - multiple date ranges can be passed, ' - 'separated by "_", each in YYYYMMDD-YYYYMMDD format e.g., ' - '"20100101-20191231_20200101-20291231"', - default='20201201-20291130_20301201-20391130') -parser.add_argument('--shp', '--shapefile', dest='shapefile_fpath', type=str, help='Path to shapefile', default=None) +parser.add_argument('--input_data_folder', '--input_data_folder', dest='input_dir', type=str, + help='Directory that contains all data files. NetCDF (.nc) files with names starting with ' + '`simh` and `obsh` should be found in the directory (containing historic CPM ' + 'and HADs data respectively), as well as at least one file with name ' + 'starting with `simp` (containing future CPM data)') parser.add_argument('--out', '--output', dest='output_fpath', type=str, help='Path to save output files', default='.') parser.add_argument('-m', '--method', dest='method', type=str, help='Correction method', default='quantile_delta_mapping') parser.add_argument('-v', '--variable', dest='var', type=str, default='tas', help='Variable to adjust') -parser.add_argument('-u', '--unit', dest='unit', type=str, default='°C', help='Unit of the varible') parser.add_argument('-g', '--group', dest='group', type=str, default=None, help='Value grouping, default: time, (options: time.month, time.dayofyear, time.year') parser.add_argument('-k', '--kind', dest='kind', type=str, default='+', help='+ or *, default: +') @@ -63,42 +49,17 @@ help='Multiprocessing with n processes, default: 1') params = vars(parser.parse_args()) -obs_fpath = params['obs_fpath'] -contr_fpath = params['contr_fpath'] -scen_fpath = params['scen_fpath'] -calibration_date_range = params['control_date_range'] -projection_date_range = params['scenario_date_range'] -shape_fpath = params['shapefile_fpath'] +input_dir = params['input_dir'] out_fpath = params['output_fpath'] method = params['method'] var = params['var'] -unit = params['unit'] group = params['group'] kind = params['kind'] 
n_quantiles = params['n_quantiles'] n_jobs = params['p'] -calib_list = calibration_date_range.split('-') -h_date_period = (datetime.strptime(calib_list[0], '%Y%m%d').strftime('%Y-%m-%d'), - datetime.strptime(calib_list[1], '%Y%m%d').strftime('%Y-%m-%d')) -proj_list = projection_date_range.split('_') -future_time_periods = [(p.split('-')[0], p.split('-')[1]) for p in proj_list] -future_time_periods = [(datetime.strptime(p[0], '%Y%m%d').strftime('%Y-%m-%d'), - datetime.strptime(p[1], '%Y%m%d').strftime('%Y-%m-%d')) - for p in future_time_periods] - - -# h_date_period = ('1980-12-01', '1999-11-30') -# future_time_periods = [('2020-12-01', '2030-11-30'), ('2030-12-01', '2040-11-30'), ('2060-12-01', '2070-11-30'), -# ('2070-12-01', '2080-11-30')] - - -# for testing -# future_time_periods = [('2020-12-01', '2022-11-30'),('2022-12-01', '2023-11-30')] -# h_date_period = ('1980-12-01', '1981-11-30') - # * ----- ----- -----M A I N ----- ----- ----- def run_debiasing() -> None: start = time.time() @@ -107,124 +68,104 @@ def run_debiasing() -> None: if method not in cm.get_available_methods(): raise ValueError(f'Unknown method {method}. 
Available methods: {cm.get_available_methods()}') - # load every file found with extension in the path and selects only the input time period.from - # coordinates are renamed for compatibility with the cmethods-library - ds_simh = \ - load_data(contr_fpath, date_range=h_date_period, variable=var, shapefile_path=shape_fpath, extension='tif')[ - var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) - - # find file extensions for observation data - files_obs_nc = glob.glob(f"{obs_fpath}/*.nc", recursive=True) - files_obs_tif = glob.glob(f"{obs_fpath}/*.tif", recursive=True) - - if len(files_obs_nc) > 0 and len(files_obs_tif) == 0: - ext = 'nc' - elif len(files_obs_nc) == 0 and len(files_obs_tif) > 0: - ext = 'tif' - elif len(files_obs_nc) == 0 and len(files_obs_tif) == 0: - raise Exception(f"No observation files found in {obs_fpath} with extensions .nc or .tif") + simh_files = glob.glob(f"{input_dir}/simh*.nc") + if len(simh_files) == 0: + raise Exception(f"No .nc files with filename starting with simh were " + f"found in the input directory {input_dir}") + elif len(simh_files) > 1: + raise Exception(f"More than one .nc file with filenames starting with simh were " + f"found in the input directory {input_dir}") else: - raise Exception(f"A mix of .nc and .tif observation files found in {obs_fpath}, file extension should be the " - f"same for all files in the directory.") - ds_obs = load_data(obs_fpath, date_range=h_date_period, variable=var, shapefile_path=shape_fpath, - extension=ext)[var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) - log.info('Historical data Loaded.') - - # aligning calendars, e.g there might be a few extra days on the scenario data that has to be droped. 
- ds_simh = ds_simh.sel(time=ds_obs.time, method='nearest') + print('Loading historic control data from ', simh_files[0], "...") + with xr.open_dataset(simh_files[0], engine='netcdf4') as ds: + ds_simh = ds.load()[var] + log.info(f'Historic control data loaded with shape {ds_simh.shape}.') + + obsh_files = glob.glob(f"{input_dir}/obsh*.nc") + if len(obsh_files) == 0: + raise Exception(f"No .nc files with filename starting with obsh were " + f"found in the input directory {input_dir}") + elif len(obsh_files) > 1: + raise Exception(f"More than one .nc file with filenames starting with obsh were " + f"found in the input directory {input_dir}") + else: + print('Loading historic observation data from ', obsh_files[0], "...") + with xr.open_dataset(obsh_files[0], engine='netcdf4') as ds: + ds_obs = ds.load()[var] + log.info(f'Historic observation data loaded with shape {ds_obs.shape}.') if ds_obs.shape != ds_simh.shape: - raise RuntimeError('Error, observed and simulated historical data must have same dimensions.') - - log.info('Resulting datasets with shape') - log.info(ds_obs.shape) - - # masking coordinates where the observed data has no values - ds_simh = ds_simh.where(~np.isnan(ds_obs.isel(time=0))) - ds_simh = ds_simh.where(ds_simh.values < 1000) - log.info('Historical data Masked') - - ds_obs.attrs['unit'] = unit - ds_simh.attrs['unit'] = unit - - # looping over time periods - # this is done because the full time period for the scenario dataset is too large for memory. 
- for f_date_period in future_time_periods: - - log.info(f'Running for {f_date_period} time period') - - try: - ds_simp = \ - load_data(scen_fpath, date_range=f_date_period, variable=var, shapefile_path=shape_fpath, - extension='tif')[ - var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) - except Exception as e: - log.info(f'No data available for {f_date_period} time period') - continue - - # masking coordinates where the observed data has no values - ds_simp = ds_simp.where(~np.isnan(ds_obs.isel(time=0))) - ds_simp = ds_simp.where(ds_simp.values < 1000) - - ds_simp.attrs['unit'] = unit - - start_date: str = ds_simp['time'][0].dt.strftime('%Y%m%d').values.ravel()[0] - end_date: str = ds_simp['time'][-1].dt.strftime('%Y%m%d').values.ravel()[0] - - descr1, descr2 = '', '' - if method in cm.DISTRIBUTION_METHODS: - descr1 = f'_quantiles-{n_quantiles}' - - # If output file do not exist create it - result_path = os.path.join(out_fpath, var) - if not os.path.exists(result_path): - os.makedirs(result_path) - - # ----- Adjustment ----- - log.info(f'Starting {method} adjustment') - result = cm.adjust_3d( - method=method, - obs=ds_obs, - simh=ds_simh, - simp=ds_simp, - n_quantiles=n_quantiles, - kind=kind, - group=group, - n_jobs=n_jobs - ) - log.info('Saving now') - result.name = var - result['time'] = ds_simp['time'] - result = result.rename({"lon": "projection_x_coordinate", "lat": "projection_y_coordinate"}) - - # define output name - output_name = f'{method}_result_var-{var}{descr1}_kind-{kind}_group-{group}{descr2}_{start_date}_{end_date}' - file_name = os.path.join(result_path, f'debiased_{output_name}.nc') - - log.info('Results') - log.info(result.head()) - - plt.figure(figsize=(10, 5), dpi=216) - ds_simh.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,h}$') - ds_obs.groupby('time.dayofyear').mean(...).plot(label='$T_{obs,h}$') - ds_simp.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,p}$') - 
result.groupby('time.dayofyear').mean(...).plot(label='$T^{*Debiased}_{sim,p}$') - plt.title( - f'Debiased {var} projected to {start_date} and {end_date}') - plt.gca().grid(alpha=.3) - plt.legend() - fig_name = os.path.join(result_path, f'time-series-{output_name}.png') - plt.savefig(fig_name) - - index = list(np.linspace(0, len(result.time.values) - 1, 6, dtype=int)) - plt.figure(figsize=(10, 5), dpi=216) - g_simple = result.isel(time=index).plot(x='projection_x_coordinate', y='projection_y_coordinate', col='time', - col_wrap=3) - fig_name = os.path.join(result_path, f'maps-{output_name}.png') - plt.savefig(fig_name) - - print('Saving to', file_name) - result.to_netcdf(file_name) + raise RuntimeError('Error, observed and control historical data must have same dimensions.') + + # looping over future time periods for which debiased data need to be generated + simp_files = glob.glob(f"{input_dir}/simp*.nc") + if len(simp_files) == 0: + raise Exception(f"No .nc files with filename starting with simp were " + f"found in the input directory {input_dir}") + else: + for simp_file in simp_files: + print('Loading future scenario (CPM) data from ', simp_file, "...") + with xr.open_dataset(simp_file, engine='netcdf4') as ds: + ds_simp = ds.load()[var] + log.info(f'Future scenario data loaded with shape {ds_simp.shape}.') + + start_date: str = ds_simp['time'][0].dt.strftime('%Y%m%d').values.ravel()[0] + end_date: str = ds_simp['time'][-1].dt.strftime('%Y%m%d').values.ravel()[0] + + descr1, descr2 = '', '' + if method in cm.DISTRIBUTION_METHODS: + descr1 = f'_quantiles-{n_quantiles}' + + # If output file do not exist create it + result_path = os.path.join(out_fpath, var) + if not os.path.exists(result_path): + os.makedirs(result_path) + + # ----- Adjustment ----- + log.info(f'Starting {method} adjustment') + result = cm.adjust_3d( + method=method, + obs=ds_obs, + simh=ds_simh, + simp=ds_simp, + n_quantiles=n_quantiles, + kind=kind, + group=group, + n_jobs=n_jobs + ) + 
log.info('Saving now') + result.name = var + result['time'] = ds_simp['time'] + result = result.rename({"lon": "projection_x_coordinate", "lat": "projection_y_coordinate"}) + + # define output name + output_name = f'{method}_result_var-{var}{descr1}_kind-{kind}_group-{group}{descr2}_{start_date}_{end_date}' + file_name = os.path.join(result_path, f'debiased_{output_name}.nc') + + log.info('Results') + log.info(result.head()) + + plt.figure(figsize=(10, 5), dpi=216) + ds_simh.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,h}$') + ds_obs.groupby('time.dayofyear').mean(...).plot(label='$T_{obs,h}$') + ds_simp.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,p}$') + result.groupby('time.dayofyear').mean(...).plot(label='$T^{*Debiased}_{sim,p}$') + plt.title( + f'Debiased {var} projected to {start_date} and {end_date}') + plt.gca().grid(alpha=.3) + plt.legend() + fig_name = os.path.join(result_path, f'time-series-{output_name}.png') + plt.savefig(fig_name) + + index = list(np.linspace(0, len(result.time.values) - 1, 6, dtype=int)) + plt.figure(figsize=(10, 5), dpi=216) + g_simple = result.isel(time=index).plot(x='projection_x_coordinate', y='projection_y_coordinate', col='time', + col_wrap=3) + fig_name = os.path.join(result_path, f'maps-{output_name}.png') + plt.savefig(fig_name) + + print('Saving to', file_name) + result.to_netcdf(file_name) end = time.time() log.info(f'total time in seconds: {end - start}') diff --git a/python/load_data/data_loader.py b/python/load_data/data_loader.py index eaa9d161..1c5f55b8 100644 --- a/python/load_data/data_loader.py +++ b/python/load_data/data_loader.py @@ -5,8 +5,10 @@ from datetime import datetime -def load_data(input_path, date_range, variable, shapefile_path=None, extension='nc'): - ''' +def load_data(input_path, date_range, variable, filter_filenames_on_variable=False, + run_number=None, filter_filenames_on_run_number=False, use_pr=False, + shapefile_path=None, extension='nc'): + """ This function takes a 
date range and a variable and loads and merges xarrays based on those parameters.
     If shapefile is provided it crops the data to that region.
 
@@ -18,6 +20,19 @@ def load_data(input_path, date_range, variable, shapefile_path=None, extension='
         A tuple of datetime objects representing the start and end date
     variable : string
         A string representing the variable to be loaded
+    filter_filenames_on_variable : bool, default = False
+        When True, files in the input_path will be filtered based on whether their file name
+        contains "variable" as a substring. When False, filtering does not happen.
+    run_number : string, default None
+        A string representing the CPM run number to use (out of 13 CPM runs available in the database). Only files
+        whose file name contains the substring run_number will be used. If None, all files in input_path are parsed,
+        regardless of run number in filename.
+    filter_filenames_on_run_number : bool, default = False
+        When True, files in the input_path will be filtered based on whether their file name
+        contains "2.2km_" followed by "run_number". When False, filtering does not happen.
+        This should only be used for CPM files. For HADs files this should always be set to False.
+    use_pr : bool, default = False
+        If True, replace variable with "pr" string when filtering the file names.
     shapefile_path: str
         Path to a shape file used to clip resulting dataset. 
extension: str
@@ -27,30 +42,42 @@ def load_data(input_path, date_range, variable, shapefile_path=None, extension='
     -------
     merged_xarray : xarray
         An xarray containing all loaded and merged and clipped data
-    '''
+    """
 
     if extension not in ('nc', 'tif'):
         raise Exception("We only accept .nc or .tif extension for the input data")
 
-    files = glob.glob(f"{input_path}/*.{extension}", recursive=True)
-
-    if len(files)==0:
+    if filter_filenames_on_variable:
+        if filter_filenames_on_run_number:
+            if use_pr:
+                # when run_number is used, use it to select files from CPM file list
+                files = glob.glob(f"{input_path}/pr*2.2km_{run_number}_*.{extension}", recursive=True)
+            else:
+                # when run_number is used, use it to select files from CPM file list
+                files = glob.glob(f"{input_path}/{variable}*2.2km_{run_number}_*.{extension}", recursive=True)
+        else:
+            if use_pr:
+                # when run_number is not used, select files only based on variable (either CPM or HADs)
+                files = glob.glob(f"{input_path}/pr*.{extension}", recursive=True)
+            else:
+                # when run_number is not used, select files only based on variable (either CPM or HADs)
+                files = glob.glob(f"{input_path}/{variable}*.{extension}", recursive=True)
+    else:
+        if filter_filenames_on_run_number:
+            # when run_number is used, use it to select files from CPM file list
+            files = glob.glob(f"{input_path}/*2.2km_{run_number}_*.{extension}", recursive=True)
+        else:
+            # when neither variable nor run_number filtering is used, select all files in input_path
+            files = glob.glob(f"{input_path}/*.{extension}", recursive=True)
+
+    if len(files) == 0:
         raise Exception(f"No files found in {input_path} with {extension}")
-
-    #TODO: Load using mfdataset avoiding errors from HDF5
-    #try:
-    # loading files with dedicated function
-    # xa = xr.open_mfdataset(files).sel(time=slice(*date_range)).sortby('time')
-    #except Exception as e:
-    # print(f"Not able to load using open_mfdataset, with errors: {e}. 
" - # f"Looping and loading individual files.") - # # files with wrong format wont load with open_mfdataset, need to be reformated. - xa = load_and_merge(date_range, files, variable) # clipping if shapefile_path: + print(f"Clipping data using shapefile {shapefile_path}...") xa = clip_dataset(xa, variable, shapefile_path) return xa @@ -91,15 +118,15 @@ def clip_dataset(xa, variable, shapefile): except: pass - return xa + def reformat_file(file, variable): """ Load tif file and reformat xarray into expected format. """ - print(f"File: {file} is needs rasterio library, trying...") + print(f"File: {file} needs rasterio library, trying...") filename = os.path.basename(file).split('_') start = filename[-1].split('-')[0] @@ -122,8 +149,6 @@ def reformat_file(file, variable): 'projection_x_coordinate').to_dataset( name=variable) - - return xa @@ -159,13 +184,13 @@ def load_and_merge(date_range, files, variable): start_range = datetime.strptime(date_range[0], '%Y-%m-%d') stop_range = datetime.strptime(date_range[1], '%Y-%m-%d') - if (stop_file < start_range) | (start_file> stop_range): + if (stop_file < start_range) | (start_file > stop_range): continue # Load the xarray try: try: - print ('Loading and selecting ', file) + print('Loading and selecting ', file) with xr.open_dataset(file, engine='netcdf4') as ds: x = ds.load() x = x.sel(time=slice(*date_range)) @@ -184,6 +209,7 @@ def load_and_merge(date_range, files, variable): if len(xarray_list) == 0: raise RuntimeError('No files passed the time selection. 
No merged output produced.') else: - merged_xarray = xr.concat(xarray_list, dim="time",coords='minimal').sortby('time') + print("Merging arrays from different files...") + merged_xarray = xr.concat(xarray_list, dim="time", coords='minimal').sortby('time') return merged_xarray From 0045a0f096aee6578849324cd558ac0969e1db7c Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Wed, 30 Aug 2023 18:07:00 +0100 Subject: [PATCH 023/146] Modify data loader to restore 360 days calendar when reading the cropped Hads files --- python/debiasing/preprocess_data.py | 2 +- python/load_data/data_loader.py | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/python/debiasing/preprocess_data.py b/python/debiasing/preprocess_data.py index 5adbeb68..d279898d 100644 --- a/python/debiasing/preprocess_data.py +++ b/python/debiasing/preprocess_data.py @@ -114,7 +114,7 @@ def preprocess_data() -> None: "projection_y_coordinate": "lat"}) log.info('Historical data Loaded.') - # aligning calendars, e.g there might be a few extra days on the scenario data that has to be droped. + # aligning calendars, e.g there might be a few extra days on the scenario data that has to be dropped. ds_simh = ds_simh.sel(time=ds_obs.time, method='nearest') if ds_obs.shape != ds_simh.shape: diff --git a/python/load_data/data_loader.py b/python/load_data/data_loader.py index 1c5f55b8..b99a3d50 100644 --- a/python/load_data/data_loader.py +++ b/python/load_data/data_loader.py @@ -124,7 +124,6 @@ def clip_dataset(xa, variable, shapefile): def reformat_file(file, variable): """ Load tif file and reformat xarray into expected format. 
- """ print(f"File: {file} needs rasterio library, trying...") filename = os.path.basename(file).split('_') @@ -193,6 +192,13 @@ def load_and_merge(date_range, files, variable): print('Loading and selecting ', file) with xr.open_dataset(file, engine='netcdf4') as ds: x = ds.load() + dv = list(x.data_vars) + if len(dv) > 1 and dv[0] == os.path.basename(file)[:-3] and dv[1] == "crs": + x = x.rename({"northing": "projection_y_coordinate", + "easting": "projection_x_coordinate", + os.path.basename(file)[:-3]: variable}) \ + .rio.write_crs('epsg:27700') + x = x.convert_calendar(dim='time', calendar='360_day', align_on='year') x = x.sel(time=slice(*date_range)) except Exception as e: x = reformat_file(file, variable).sel(time=slice(*date_range)) From 52e55e6250417cf3fe469df32c1dcdfbacad8d75 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Wed, 30 Aug 2023 23:41:21 +0100 Subject: [PATCH 024/146] Add bash script to run all cropped runs for three cities --- python/debiasing/preprocess_data.py | 3 ++- .../three_cities_debiasing_cropped.sh | 26 +++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) create mode 100755 python/debiasing/three_cities_debiasing_cropped.sh diff --git a/python/debiasing/preprocess_data.py b/python/debiasing/preprocess_data.py index d279898d..4e5d3992 100644 --- a/python/debiasing/preprocess_data.py +++ b/python/debiasing/preprocess_data.py @@ -86,7 +86,8 @@ def preprocess_data() -> None: if run_number is not None: ds_simh = \ load_data(contr_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, - run_number=run_number, filter_filenames_on_run_number=True, use_pr=use_pr, shapefile_path=shape_fpath, + run_number=run_number, filter_filenames_on_run_number=True, use_pr=use_pr, + shapefile_path=shape_fpath, extension='tif')[var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) else: diff --git a/python/debiasing/three_cities_debiasing_cropped.sh 
b/python/debiasing/three_cities_debiasing_cropped.sh new file mode 100755 index 00000000..d82f6a01 --- /dev/null +++ b/python/debiasing/three_cities_debiasing_cropped.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +declare -a vars=("tasmax" "rainfall" "tasmin") +declare -a runs=("05" "07" "08" "06") +declare -a cities=("Glasgow" "Manchester" "London") +declare -a methods=("quantile_delta_mapping" "quantile_mapping") +declare -a methods_2=("variance_scaling" "delta_method") + + +for run in "${runs[@]}"; do + for city in "${cities[@]}"; do + for var in "${vars[@]}"; do + + python preprocess_data.py --scen /Volumes/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --contr /Volumes/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/$city -v $var -r $run --out /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --contr_dates 19810101-19811230 --scen_dates 20100101-20100330 + + for method in "${methods[@]}"; do + python run_cmethods.py --input_data_folder /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /Volumes/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run/$var --method $method --v $var -p 32 + done + + for method in "${methods_2[@]}"; do + python run_cmethods.py --input_data_folder /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /Volumes/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run --method $method --group time.month --v $var -p 32 + done + + done + done +done From 08d9f75232b4f200ec39b5a32d0bd826f4e58966 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Wed, 30 Aug 2023 23:44:55 +0100 Subject: [PATCH 025/146] Change script directories to work with Azure VM --- python/debiasing/three_cities_debiasing_cropped.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/debiasing/three_cities_debiasing_cropped.sh 
b/python/debiasing/three_cities_debiasing_cropped.sh index d82f6a01..6feb32aa 100755 --- a/python/debiasing/three_cities_debiasing_cropped.sh +++ b/python/debiasing/three_cities_debiasing_cropped.sh @@ -11,14 +11,14 @@ for run in "${runs[@]}"; do for city in "${cities[@]}"; do for var in "${vars[@]}"; do - python preprocess_data.py --scen /Volumes/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --contr /Volumes/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/$city -v $var -r $run --out /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --contr_dates 19810101-19811230 --scen_dates 20100101-20100330 + python preprocess_data.py --scen /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --contr /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --contr_dates 19810101-19811230 --scen_dates 20100101-20100330 for method in "${methods[@]}"; do - python run_cmethods.py --input_data_folder /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /Volumes/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run/$var --method $method --v $var -p 32 + python run_cmethods.py --input_data_folder /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /mnt/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run/$var --method $method --v $var -p 32 done for method in "${methods_2[@]}"; do - python run_cmethods.py --input_data_folder /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /Volumes/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run --method $method --group time.month --v $var -p 32 + python run_cmethods.py --input_data_folder 
/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /mnt/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run --method $method --group time.month --v $var -p 32 done done From 3654b8d3b873be7be932be87d6f6f182d07cadca Mon Sep 17 00:00:00 2001 From: RuthBowyer Date: Thu, 31 Aug 2023 16:05:50 +0000 Subject: [PATCH 026/146] read_crop updated --- R/misc/read_crop.fn.R | 166 +++++++++++++++++++++++------------------- 1 file changed, 90 insertions(+), 76 deletions(-) diff --git a/R/misc/read_crop.fn.R b/R/misc/read_crop.fn.R index fd0335a9..f66f61cf 100644 --- a/R/misc/read_crop.fn.R +++ b/R/misc/read_crop.fn.R @@ -4,111 +4,125 @@ # A function to read in specific runs, vars and years, crop them to an area (optionally) and write vals to a georef'd df cpm_read_crop <- function(runs, #Character vector of selected runs as number only eg Run08 is "08" - var, #Character vector of selected variables - this might need changing - fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") - year1, #Numeric, first year of segment - year2, #Numeric, lastyear of segment - crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work - cropname){ #Character - name of crop to be assigned to the returned vect - + var, #Character vector of selected variables - this might need changing + fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") + rd, #path to results directory eg paste0(dd, "Cropped/three.cities/CPM/") + crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work + cropname){ #Character - name of crop to be assigned to the returned vect + runs <- runs var <- var fp <- fp - years <- paste0(year1:year2, "1201", collapse="|") - - bbox <- crop.area - - for(i in runs){ - for(v in var){ - p <- paste0(fp, v, "/", i, "/latest/") - files <- list.files(p) - files <- files[!grepl("aux.xml", files)] - - 
files.y <- files[grepl(years, files)]# Historical timeslice 2 for calibration - files.y.p <- paste0(p, files.y) - - # Load and convert remaining to single col dfs - dfL <- lapply(1:length(files.y.p), function(n){ - f <- files.y.p[[n]] - r <- rast(f) - r_c <- crop(r, bbox, snap="out") - }) - - R <- dfL %>% reduce(c) + rd <- rd + + bbox <- crop.area + cropname <- cropname + + for(i in runs){ + for(v in var){ + p <- paste0(fp, v, "/", i, "/latest/") + files <- list.files(p) + files <- files[!grepl("aux.xml", files)] + + files.p <- paste0(p, files) + + # Load and convert remaining to single col dfs + dfL <- lapply(1:length(files.p), function(n){ + f <- files.p[[n]] + r <- rast(f) + r_c <- crop(r, bbox, snap="out") - #Write directory - rp <- paste0(dd, "Interim/CPM/three.cities/", cropname, "/" , cropname,"_") #adding in cropname to write, I think will make easier to track + #Write + f <- files[[n]]#filename as it was read in + fn <- paste0(rd, cropname, "/" , f) - fn <- paste0(rp, v, "_rcp85_land-cpm_uk_2.2km_", i, "_day_", year1, "_", year2, ".tif") - writeRaster(R, fn, overwrite=TRUE) + writeRaster(r_c, fn, overwrite=TRUE) - gc() - } + }) + + gc() } + } } # HADs function hads_read_crop <- function(var, #Character vector of selected variables - this might need changing - fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") - i1, ## First file n index, eg for 1980-2010 this is files [1:360] i1=1 (I appreciate this is a lazy code) - i2, ## First file n index, eg for 1980-2010 this is files [1:360] i2=360 (I appreciate this is a lazy code) - crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work - cropname){ #Character - name of crop to be assigned to the returned df - usually the crop area - + fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") + rd, #path to results directory eg paste0(dd, "Cropped/three.cities/CPM/") + file.date, #Character, 
Date of HADs file to crop from in YYYYMMDD + crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work + cropname){ #Character - name of crop to be assigned to the returned vect + var <- var fp <- fp bbox <- crop.area cropname <- cropname - + file.date <- file.date + for(v in var){ HADs.files <- list.files(paste0(fp, v,"/day/")) files <- HADs.files[grepl(v, HADs.files)] - Runpaths <- paste0(fp,v,"/day/",files[i1:i2]) + file.i <- grep(file.date,files) + files <- files[file.i:length(files)] + files.p <- paste0(fp, v,"/day/",files) - # Load and convert remaining to single col dfs - i <- 1:length(Runpaths) - dfL <-lapply(i, function(i){ - p <- Runpaths[[i]] - r <- rast(p) - r_c <- crop(r, bbox, snap="out")}) + # Load and convert remaining to single col dfs + dfL <- lapply(1:length(files.p), function(n){ + f <- files.p[[n]] + r <- rast(f) + r_c <- crop(r, bbox, snap="out") - R <- dfL %>% reduce(c) + #Write + f <- files[[n]]#filename as it was read in + fn <- paste0(rd, cropname, "/" , f) + writeCDF(r_c, fn, overwrite=TRUE) + }) + gc() + } +} - #To ensure each layer has a useful naming convention - lyr.n <-unlist(lapply(i, function(i){ - p <- Runpaths[[i]] - rast.names <- names(rast(p)) - - n <- substr(p, nchar(p)-20, nchar(p)) - n <- gsub(".nc","", n) - nn <- paste0("hadukgrid_2.2km_resampled", n, "_", rast.names)})) - - names(R) <- lyr.n - - #Write directory - rp <- paste0(dd, "Interim/HadsUK/three.cities/", cropname, "/" , cropname,"_") #adding in cropname to write, I think will make easier to track - - fn1 <- Runpaths[[1]] - fn1 <- gsub(".*resampled_", "",fn1) - fn1 <- gsub("-.*", "", fn1) - - ii <- length(Runpaths) - fn2 <- Runpaths[[ii]] - fn2 <- gsub(".*resampled_", "",fn2) - fn2 <- gsub(".*-", "", fn2) - fn2 <- gsub(".nc", "", fn2) +## This function for the different file structure of the updated 360 calendar - to be updated when have confirmation about the files +hads_read_crop2 <- function(var, #Character vector of selected 
variables - this might need changing + fp, #filepath of parent d of folders where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") + rd, #path to results directory eg paste0(dd, "Cropped/three.cities/CPM/") + file.date, #Character, Date of HADs file to crop from in YYYYMMDD + crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work + cropname){ #Character - name of crop to be assigned to the returned vect + + var <- var + fp <- fp + bbox <- crop.area + cropname <- cropname + file.date <- file.date + + for(v in var){ + + HADs.files <- list.files(paste0(fp)) + files <- HADs.files[grepl(v, HADs.files)] + file.i <- grep(file.date,files) + files <- files[file.i:length(files)] + files.p <- paste0(fp, files) + + + # Load and convert remaining to single col dfs + dfL <- lapply(1:length(files.p), function(n){ + f <- files.p[[n]] + r <- rast(f) + r_c <- crop(r, bbox, snap="out") - fn <- paste0(rp, v, "_hadukgrid_2.2km_resampled_",fn1, "_", fn2, ".tif") - writeRaster(R, fn, overwrite=TRUE) + #Write + f <- files[[n]]#filename as it was read in + fn <- paste0(rd, cropname, "/" , f) - gc() - + writeCDF(r_c, fn, overwrite=TRUE) + }) + gc() } } From 9cac46c0c1057243a1638d139b0799850a48e9f8 Mon Sep 17 00:00:00 2001 From: RuthBowyer Date: Thu, 31 Aug 2023 16:07:34 +0000 Subject: [PATCH 027/146] Update to crop with new write paths etc --- R/misc/Cropping_Rasters_to_three_cities.R | 143 +++------------------- 1 file changed, 17 insertions(+), 126 deletions(-) diff --git a/R/misc/Cropping_Rasters_to_three_cities.R b/R/misc/Cropping_Rasters_to_three_cities.R index ad499328..7cf0188e 100644 --- a/R/misc/Cropping_Rasters_to_three_cities.R +++ b/R/misc/Cropping_Rasters_to_three_cities.R @@ -1,8 +1,9 @@ ## Crop CPM and HADs rm(list=ls()) - -source("~/Desktop/clim-recal/clim-recal/R/misc/read_crop.fn.R") +#setwd("~/Desktop/clim-recal/clim-recal/") +#setwd("/home/dyme/Desktop/clim-recal/clim-recal") +source("R/misc/read_crop.fn.R") library(tidyverse) 
library(data.table) @@ -55,151 +56,41 @@ cities <- c("London", "Glasgow", "Manchester") ext.L <- list(London.ext, Glasgow.ext, Manchester.ext) names(ext.L) <- cities -lapply(cities, function(x){ - - cpm_read_crop(runs=runs, var = var, - fp = paste0(dd, "Reprojected/UKCP2.2/"), - year1=1980, - year2=2000, - crop.area=ext.L[[x]], - cropname=x) }) - - -# Splitting up next time slice for calib and val - lapply(cities, function(x){ cpm_read_crop(runs=runs, var = var, - fp = paste0(dd, "Reprojected_infill/UKCP2.2/"), - year1=2000, - year2=2010, + fp = paste0(dd, "Reprojected_infill/UKCP2.2/"), + rd = paste0(dd, "Cropped/three.cities/CPM/"), crop.area=ext.L[[x]], cropname=x) }) -lapply(cities, function(x){ - - cpm_read_crop(runs=runs, var = var, - fp = paste0(dd, "Reprojected_infill/UKCP2.2/"), - year1=2010, - year2=2020, - crop.area=ext.L[[x]], - cropname=x) }) -## Next time slice 2020-2040 -lapply(cities, function(x){ - - cpm_read_crop(runs=runs, var = var, - fp = paste0(dd, "Reprojected/UKCP2.2/"), - year1=2020, - year2=2040, - crop.area=ext.L[[x]], - cropname=x) }) +#### HADS - original 360 -## Next time slice 2040-2060 -lapply(cities, function(x){ - - cpm_read_crop(runs=runs, var = var, - fp = paste0(dd, "Reprojected_infill/UKCP2.2/"), - year1=2040, - year2=2060, - crop.area=ext.L[[x]], - cropname=x) }) +var <- c("tasmax", "tasmin", "rainfall") -## Next time slice 2060-2080 lapply(cities, function(x){ - cpm_read_crop(runs=runs, var = var, - fp = paste0(dd, "Reprojected/UKCP2.2/"), - year1=2060, - year2=2080, - crop.area=ext.L[[x]], - cropname=x) }) - - + hads_read_crop(var = var, + fp= paste0(dd, "Processed/HadsUKgrid/resampled_2.2km/"), + rd= paste0(dd, "Cropped/three.cities/Hads.original360/"), + file.date="19801201", #Start from the same date as the CPM + crop.area=ext.L[[x]], + cropname=x) }) -#### HADS -#Calibration years files 1 - 360 (first 30 years) +#### HADs - updated 360 calendar (to be run pending updated files) var <- c("tasmax", "tasmin", "rainfall") 
-lapply(cities, function(x){ - - hads_read_crop(var = var, - fp= paste0(dd, "Processed/HadsUKgrid/resampled_2.2km/"), - i1 = 1, i2 = 360, - crop.area=ext.L[[x]], - cropname=x) }) - -#Validation years files 361 - 480 -- years 2010 - 2020 - lapply(cities, function(x){ hads_read_crop(var = var, - fp= paste0(dd, "Processed/HadsUKgrid/resampled_2.2km/"), - i1 = 361, i2 = 480, + fp= paste0(dd, "Processed/HadsUKgrid/resampled_calendarfix/"), + rd= paste0(dd, "Cropped/three.cities/Hads.updated360/"), + file.date="19801201", #Start from the same date as the CPM crop.area=ext.L[[x]], cropname=x) }) -### Group the CPM to cal, val and projection -runs <- c("05", "07", "08", "06") -var <- c("tasmax", "tasmin","pr") - -for(x in cities){ - for(r in runs){ - for(v in var){ - p <- paste0(dd, "Interim/CPM/three.cities/", x, "/") - files <- list.files(p) - - files.y.v <- files[grepl("day_1980|day_2000", files)&grepl(v, files)&grepl(paste0(r, "_day"), files)] - - dfL <- lapply(files.y.v, function(n){ - f <- paste0(p, n) - r <- rast(f) - }) - - R <- dfL %>% reduce(c) - - #Write directory - rp <- paste0(dd, "Interim/CPM/three.cities/", x, "/grouped/",x, "_") #adding in cropname to write, I think will make easier to track - - fn <- paste0(rp, v, "_", r,"_calibration_1980-2010.tif") - writeRaster(R, fn, overwrite=TRUE) - - gc() - } -} -} - -#For validation I just copied over and renamed the files as they were already split that way - -## Projection years - -for(x in cities){ - for(r in runs){ - for(v in var){ - p <- paste0(dd, "Interim/CPM/three.cities/", x, "/") - files <- list.files(p) - - files.y.v <- files[grepl("day_2020|day_2040|day_2060", files)&grepl(v, files)&grepl(paste0(r, "_day"), files)] - - dfL <- lapply(files.y.v, function(n){ - f <- paste0(p, n) - r <- rast(f) - }) - - - R <- dfL %>% reduce(c) - - #Write directory - rp <- paste0(dd, "Interim/CPM/three.cities/", x, "/grouped/",x, "_") #adding in cropname to write, I think will make easier to track - - fn <- paste0(rp, v, 
"_", r,"_projection_2020-2080.tif") - writeRaster(R, fn, overwrite=TRUE) - - gc() - } - } -} From 24e5b62495137b36b5a672d00cc71433660103b7 Mon Sep 17 00:00:00 2001 From: Ruth Bowyer <105492883+RuthBowyer@users.noreply.github.com> Date: Thu, 31 Aug 2023 17:09:14 +0100 Subject: [PATCH 028/146] Delete R/bias-correction-methods/DataProcessingMBC.Rmd This is obsolete code now given the updated processing --- .../DataProcessingMBC.Rmd | 396 ------------------ 1 file changed, 396 deletions(-) delete mode 100644 R/bias-correction-methods/DataProcessingMBC.Rmd diff --git a/R/bias-correction-methods/DataProcessingMBC.Rmd b/R/bias-correction-methods/DataProcessingMBC.Rmd deleted file mode 100644 index 4d80b8c9..00000000 --- a/R/bias-correction-methods/DataProcessingMBC.Rmd +++ /dev/null @@ -1,396 +0,0 @@ ---- -title: "WIP MBC in R" -author: "Ruth C E Bowyer" -date: "`r format(Sys.Date())`" -output: - github_document ---- - -```{r setup, include=FALSE} -knitr::opts_chunk$set(echo = TRUE) -``` - - -## 0. About - -Testing Bias Correction methods from the MBC package in R - -MBC uses data as simple vector/dataframe (ie not applied to a spatial object) so creating a crop of the CPM for all time periods for Northern Ireland - -```{r libraries dd} -rm(list=ls()) - -library(MBC) -library(terra) -library(sf) -library(exactextractr) -library(reshape2) #melt -library(foreach) # -library(doSNOW) # -library(doParallel) # -library(tidyverse) # - -#Loaded package versions -x <- c("MBC", "terra", "sf", "exactextractr") -lapply(x,packageVersion) - -#Path is "//vmfileshare/ClimateData -#dd <- "/Volumes/vmfileshare/ClimateData/" -dd <- "/mnt/vmfileshare/ClimateData/" -``` - -## 00. 
Script Functions - -Potentially for future add to a source script but for now here is fine - -*For future update* - rd needs to be called in the global environment prior to running this, which is not ideal and could lead to incorrect write paths - -```{r defining functions} - -write.csv.date <- function(x, y){ - date <- Sys.Date() - date <- gsub("-", ".", date) - fn <- y - rd <- rd - csvFileName <- paste(rd,"/",fn,".",date,".csv",sep="") - write.csv(x, file=csvFileName, row.names = F)} - -# A function to read in specific runs, vars and years - -read_crop_df_write <- function(runs, #Character vector of selected runs - var, #Character vector of selected variables - this might need changing - fp, #filepath of where files are - eg paste0(dd, "Reprojected_infill/UKCP2.2/") - year1, #Numeric, first year of segment - year2, #Numeric, lastyear of segment - name1, #Character - first part of name to be assigned to the returned df- usually the model - crop, #logical - crop.area, #Polygon of area to crop to - any Spat obj accepted by terra::crop will work - cropname, #Character - name of crop to be assigned to the returned df - usually the crop area - rd){ # results directory for storing results - - runs <- runs - var <- var - years <- paste0(year1:year2, "1201", collapse="|") - L <- list() - - if(crop == T){ - - bbox <- crop.area - - for(i in runs){ - for(v in var){ - p <- paste0(fp, v, "/", i, "/latest/") - files <- list.files(p) - files <- files[!grepl("aux.xml", files)] - - files.y <- files[grepl(years, files)]# Historical timeslice 2 for calibration - files.y.p <- paste0(p, files.y) - - # Read in 1st runpath as df with xy coords to ensure overlay - p1 <- files.y.p[[1]] - r <- rast(p1) - r_c <- crop(r, bbox) - rdf1 <- as.data.frame(r_c, xy=T) - - # Load and convert remaining to single col dfs - dfL <- lapply(2:length(files.y.p), function(i){ - p <- files.y.p[[i]] - r <- rast(p) - r_c <- crop(r, bbox) - rdf <- as.data.frame(r_c) - return(rdf) - }) - - df <- dfL %>% 
reduce(cbind) - df <- cbind(rdf1, df) - - fn <- paste0(name1, "_", cropname, year1, "_", year2, v, "_Run", i) - - rd <- rd - write.csv.date(df, fn) - gc() - } - } - } else { #for where no crop to be applied - - for(i in runs){ - for(v in var){ - p <- paste0(fp, v, "/", i, "/latest/") - files <- list.files(p) - files <- files[!grepl("aux.xml", files)] - - files.y <- files[grepl(years, files)]# Historical timeslice 2 for calibration - files.y.p <- paste0(p, files.y) - - # Read in 1st runpath as df with xy coords to ensure overlay - p1 <- files.y.p[[1]] - r <- rast(p1) - rdf1 <- as.data.frame(r_c, xy=T) - - # Load and convert remaining to single col dfs - dfL <- lapply(2:length(files.y.p), function(i){ - p <- files.y.p[[i]] - r <- rast(p) - rdf <- as.data.frame(r_c) - return(rdf) - }) - - df <- dfL %>% reduce(cbind) - df <- cbind(rdf1, df) - - rd <- rd - - fn <- paste0(name1, "_", cropname, year1, "_", year2, v, "_Run", i) - - write.csv.date(df, fn) - - gc() - } - } - } -} - - - - -``` - - -## 1. 
Load Data - -To start with, cropping data to rough Northern Ireland extent - -```{r} - -#load an example hads raster to ensure crs the same -v <- c("tasmax", "tasmin","rainfall") -vd <- paste0(dd,"Processed/HadsUKgrid/resampled_2.2km/",v,"/day/") -HADs.files<- unlist(lapply(vd,list.files)) - -Hads_r_eg <- rast(paste0(vd[[1]], HADs.files[[1]])) -Hads_r_eg <- Hads_r_eg$tasmax_7 - -``` - -### Bounding box - NI - -```{r} - # Rough bounding for NI -i <- rast() -crs(i) <- crs(Hads_r_eg) -ext(i) <- c(-0, 190000, 450000, 600000) -values(i) <- c(100) - -e <- ext(i) -NI.bbox <- as.polygons(e) -``` - -### Observational data - -Using 1980 - 2010 for training, compare the BC applied to 2010 - 2021 to HADs grids - -This chunk: reads HADs obs in for each variable for year 2010-2021 and crops them to NI extent in parallel, then converts the data to a dataframe (input to MBC) -xy coords are kept to check alignment with CPM - -Have not updated the read_crop_df function to have this run on observational (requires adding another if... 
statement just for the run aspect of the CPM) - -```{r} - -for(v in c("tasmax", "tasmin", "rainfall")){ - - files <- HADs.files[grepl(v, HADs.files)] #Subset to run paths - Runpaths <- paste0(dd,"Processed/HadsUKgrid/resampled_2.2km/",v,"/day/",files[1:360]) #Subsetting to years 1980-2010 - - i <- 2:length(Runpaths) - - # Read in 1st runpath as df with xy coords to ensure overlay with CPM data - p <- Runpaths[[1]] - r <- rast(p) - r_c <- crop(r, NI.bbox) - rdf1 <- as.data.frame(r_c, xy=T) - - #To ensure subset fataframe has useful naming convention - this does not pull it through as such - n <- substr(p, nchar(p)-20, nchar(p)) - n <- gsub(".nc","", n) - names(rdf1) <- gsub("_", paste0(n, "_"), names(rdf1)) - - - # Load and convert remaining to single col dfs - dfL <-lapply(i, function(i){ - p <- Runpaths[[i]] - r <- rast(p) - r_c <- crop(r, NI.bbox) - rdf <- as.data.frame(r_c) - #To ensure subset dataframe has useful naming convention - this does not pull it through as such - n <- substr(p, nchar(p)-20, nchar(p)) - n <- gsub(".nc","", n) - names(rdf) <- gsub(paste0("_", n, "_"), names(rdf)) - return(rdf) - }) - - df <- dfL %>% reduce(cbind) - df <- cbind(rdf1, df) - - assign(paste0("HADsNI1980_2010_", v, ".df"), df) - - gc() -} - -``` - - -#### Write dfs - -```{r} - -rd <- paste0(dd, "Interim/NI_cropped_MBCdata") - -wL <- list(HADsNI1980_2010_tasmax.df, HADsNI1980_2010_tasmin.df, HADsNI1980_2010_rainfall.df) -dfnames <- c("HADsNI1980_2010_tasmax", "HADsNI1980_2010_tasmin", "HADsNI1980_2010_rainfall") -names(wL) <- dfnames -wL <- lapply(wL, function(x){ - names(x) <- gsub("__", "_", names(x)) - return(x) -}) - - -lapply(dfnames, function(x){ - df <- wL[[x]] - write.csv.date(df, x) -}) - -``` - -To check at some point -- why is does tasmin have 11562 vars and the others have 11560 ? 
- -```{r} - -# Melt to long -HADsL <- list(HADsNI1980_2010_tasmax.df, HADsNI1980_2010_tasmin.df, HADsNI1980_2010_rainfall.df) - -HADsLlong <- lapply(HADsL, function(l){ - melt <- melt(l, id=c("x","y")) - #Create an ID variable for easy merging - melt$xy <- paste0(melt$x, "-",melt$y) - return(melt) -}) - -``` - -```{r} -HADsLlong.allvars <- HADsLlong %>% reduce(cbind) - -#Remove duplicate x and y - checking below with the xy variable all map ok -names(HADsLlong.allvars) <- c("x","y", "tasmax_iy", "tasmax", "xy1", - "x2", "y2", "tasmin_iy", "tasmin", "xy2", - "x3", "y3", "rainfall_iy", "rainfall", "xy3") - -#Check all xy vars match -#table(HADsLlong.allvars$xy1 == HADsLlong.allvars$xy2) -#table(HADsLlong.allvars$xy1 == HADsLlong.allvars$xy3) - -#All match so remove not needed variables -HADsLlong.allvars[c("x2", "y2", "xy2", "x3", "y3", "xy3")] <- NULL - -``` - - - -### CPM data - -For now, loading - Run05, Run07, Run08, Run06 - -Cropping all to NI for this first iteration - -**Note** - naming convention for precipitation - currently is 'rainfall' in HADs but maybe could change? - -#### Historical - -```{r CPM historical 1} - -runs <- c("05", "07", "08", "06") -var <- c("tasmax", "tasmin","pr") - -read_crop_df_write(runs=runs, var=var, fp=paste0(dd, "Reprojected/UKCP2.2/"), - year1=1980, year2=1999, name1="CPM", crop = T, - crop.area = NI.bbox, cropname = "NI", - rd=paste0(dd, "Interim/NI_cropped_MBCdata")) -``` - -#### Historical - infill 1 - -Using 2010-2020 as test period so just loading in 2000 - 2009 here -```{r CPM historical 2} - -read_crop_df_write(runs=runs, var=var, fp=paste0(dd, "Reprojected_infill/UKCP2.2/"), - year1=2000, year2=2009, name1="CPM", crop = T, - crop.area = NI.bbox, cropname = "NI", - rd=paste0(dd, "Interim/NI_cropped_MBCdata")) -``` - -#### Historical - infill 2 - -... 
and for ease creating separately for the test - -```{r} -read_crop_df_write(runs=runs, var=var, fp=paste0(dd, "Reprojected_infill/UKCP2.2/"), - year1=2010, year2=2019, name1="CPM", crop = T, - crop.area = NI.bbox, cropname = "NI", - rd=paste0(dd, "Interim/NI_cropped_MBCdata")) - -``` - - - -#### Projections - -##### 2020 - 2039 - -```{r CPM historical 1} - -runs <- c("05", "07", "08", "06") -var <- c("tasmax", "tasmin","pr") - -read_crop_df_write(runs=runs, var=var, fp=paste0(dd, "Reprojected/UKCP2.2/"), - year1=2020, year2=2039, name1="CPM", crop = T, - crop.area = NI.bbox, cropname = "NI", - rd=paste0(dd, "Interim/NI_cropped_MBCdata")) -``` - - - -##### 2040 - 2059 (infill) - -```{r CPM historical 1} - -runs <- c("05", "07", "08", "06") -var <- c("tasmax", "tasmin","pr") - -read_crop_df_write(runs=runs, var=var, fp=paste0(dd, "Reprojected_infill/UKCP2.2/"), - year1=2040, year2=2059, name1="CPM", crop = T, - crop.area = NI.bbox, cropname = "NI", - rd=paste0(dd, "Interim/NI_cropped_MBCdata")) -``` - - - - -##### 2060 - 2079 - --Error thrown on tasmin -- check files and run again! 
- -```{r CPM historical 1} - -runs <- c("05", "07", "08", "06") -var <- c("tasmax", "tasmin","pr") -rd <- paste0(dd, "Interim/NI_cropped_MBCdata") - -read_crop_df_write(runs=runs, var=var, fp=paste0(dd, "Reprojected/UKCP2.2/"), - year1=2060, year2=2079, name1="CPM", crop = T, - crop.area = NI.bbox, cropname = "NI", - rd=rd) -``` - -There are many more cells in the CPM compared with the Hats data because the CPM includes Sea cells - merging by the x and y coordinates results in a merge of just the cells in both (no loss from the Hads dataset) - - From 1e3ac3f131d27db5c63f8b76b4e2276ccdf8cfe2 Mon Sep 17 00:00:00 2001 From: Ruth Bowyer <105492883+RuthBowyer@users.noreply.github.com> Date: Thu, 31 Aug 2023 17:10:39 +0100 Subject: [PATCH 029/146] Delete R/bias-correction-methods/apply_bias_correction_to_crpd_df_fn.R This script updated in a different branch, deleting here so as to avoid conflicts --- .../apply_bias_correction_to_crpd_df_fn.R | 344 ------------------ 1 file changed, 344 deletions(-) delete mode 100644 R/bias-correction-methods/apply_bias_correction_to_crpd_df_fn.R diff --git a/R/bias-correction-methods/apply_bias_correction_to_crpd_df_fn.R b/R/bias-correction-methods/apply_bias_correction_to_crpd_df_fn.R deleted file mode 100644 index d4b4d54f..00000000 --- a/R/bias-correction-methods/apply_bias_correction_to_crpd_df_fn.R +++ /dev/null @@ -1,344 +0,0 @@ -#Re-writing WIP_EQM as a loop, to loop over each segment and hopefully create bias corrected for all UK - -##Loading data as created in 'Data_Processing_todf.R' - -#Requires -library(tidyverse) -library(data.table) -library(qmap) - - -apply_bias_correction_to_cropped_df <- function(region, #Region code - needs to relate to the file name in a unique way to subset - var, #Meterological variables - Runs){ - - i <- region - -for(r in Runs){ - for(v in var){ - if(v!="pr"){ - dd <- "/mnt/vmfileshare/ClimateData/" - - #Subset to Area - #HADs grid observational data - fp <- paste0(dd, 
"Interim/HadsUK/Data_as_df/") - files <- list.files(fp) - obs <- files[grepl(i, files)] - - #subset file list to var - obs.var <- obs[grepl(v,obs)] - - obs.df <- fread(paste0(fp, obs.var)) - obs.df <- as.data.frame(obs.df) - - row.names(obs.df) <- paste0(obs.df$x, "_", obs.df$y ) - obs.df$x <- NULL - obs.df$y <- NULL - - #Using 1980 - 2010 as calibration period - fp <- paste0(dd, "Interim/CPM/Data_as_df/") - cpm.files <- list.files(fp) - - #Calibration years 1980 - 2010 - load in full one for 1980 - 2000 - cpm.cal <- cpm.files[grepl("1980|2000", cpm.files)] - - #Subset file list to area - cpm.cal <- cpm.cal[grepl(i, cpm.cal)] - - #subset to var and run - cpm.cal.var <- cpm.cal[grepl(v, cpm.cal)&grepl(r, cpm.cal)] - - #Load in - cal.dfs1 <- lapply(cpm.cal.var, function(x){ - df <- fread(paste0(fp, x)) - df <- as.data.frame(df) - }) - - names(cal.dfs1) <- cpm.cal.var - - - #Sub out beyond cal period (2010 - 2020) - ie just keep the calibration here - years <- 2000:2009 - lyrs <- paste0("_day_", years, collapse = "|") - - cal.df2 <- cal.dfs1[[2]][,grepl(lyrs, names(cal.dfs1[[2]]))] - - #Create final cal.df for this run - cal.df <- list(cal.dfs1[[1]], cal.df2) %>% reduce(cbind) - row.names(cal.df)<- paste0(cal.df$x, "_", cal.df$y) - cal.df$x <- NULL - cal.df$y <- NULL - - #Clean up - remove("cal.df2") - - #Subset out the test years (2010-2020) - proj.df1 <- cal.dfs1[[2]][,!grepl(lyrs, names(cal.dfs1[[2]]))] - - #Clean up - remove("cal.dfs1") - gc() - - yi <- paste0(i,c(2020,2040,2060), collapse="|") - cpm.proj <- cpm.files[grepl(yi, cpm.files)] - - #Subset to Area, var and run - cpm.proj <- cpm.proj[grepl(i, cpm.proj)&grepl(v, cpm.proj)&grepl(r, cpm.proj)] - - #Load in - proj.df2 <- lapply(cpm.proj, function(x){ - df <- as.data.frame(fread(paste0(fp, x))) - #Remove x and y cols - df[c(3:ncol(df))] - }) - - names(proj.df2) <- cpm.proj - - proj.df <- c(list(proj.df1), proj.df2) %>% reduce(cbind) - - row.names(proj.df) <- paste0(proj.df$x, "_", proj.df$y) - proj.df$x <- 
NULL - proj.df$y <- NULL - - remove("proj.df1") - remove("proj.df2") - -## **2. Wrangle the data** - - missing.in.hads.cpm.cal <- cal.df[-which(row.names(cal.df)%in%row.names(obs.df)),] - missing.in.hads.cpm.proj <- proj.df[-which(row.names(proj.df)%in%row.names(obs.df)),] - - #mnt/vmfileshare/ClimateData/Debiased/R/QuantileMapping - - cal.df <- cal.df[which(row.names(cal.df)%in%row.names(obs.df)),] - proj.df <- proj.df[which(row.names(proj.df)%in%row.names(obs.df)),] - - #save the missing outputs - p <- paste0("checkpoint1", v, "_", i, "_", r, "_") - print(p) - write.csv(missing.in.hads.cpm.cal, paste0(dd, "Debiased/R/QuantileMapping/missing.in.hads/",r,"_",i,"_",v, ".csv")) - - ### Update obs data to 360 days - - #The below is a work around with the HADS dataset having 365 days on leap years - this is to be updateed and corrected when the 360 day sampling is better sorted - - #Convert obs to 360 day year - has 40 more vars so remove the ones not in cal - remove <- c("0229_29", "0430_30", "0731_31", "0930_30", "1130_30") - remove <- paste0(remove, collapse = "|") - - obs.df <- obs.df[,!grepl(remove, names(obs.df))] - -### Transpose the data sets - - #Obs grid should be cols, observations (time) should be rows for linear scaling - - cal.df <- t(cal.df) - proj.df <- t(proj.df) - obs.df <- t(obs.df) - - -## **3. Empirical Quantile Mapping** - -#(from qmap vignette) - fitQmapQUANT estimates values of the empirical cumulative distribution function of observed and -#modelled time series for regularly spaced quantiles. doQmapQUANT uses these estimates to perform -#quantile mapping - p <- paste0("checkpoint2", v, "_", i, "_", r, "_") - print(p) - - library(qmap) - qm1.fit <- fitQmapQUANT(obs.df, cal.df, - wet.day = FALSE, - qstep = 0.01, - nboot = 1) #nboot number of bootstrap samples used for estimation of the observed quantiles. 
- - - qm1.hist.a <- doQmapQUANT(cal.df, qm1.fit, type="linear") - qm1.hist.b <- doQmapQUANT(cal.df, qm1.fit, type="tricub") - - qm1.proj.a <- doQmapQUANT(proj.df, qm1.fit, type="linear") - qm1.proj.b <- doQmapQUANT(proj.df, qm1.fit, type="tricub") - -## **4. Save the data** - p <- paste0("checkpoint3", v, "_", i, "_", r, "_") - print(p) - # Save data - lists of dfs for now (will be easier for assessment) - results.L <- list(obs.df, cal.df, proj.df, qm1.hist.a, qm1.hist.b, qm1.proj.a, qm1.proj.b) - - names(results.L) <- c("t.obs", "t.cal", "t.proj", "qm1.hist.a", "qm1.hist.b", "qm1.proj.a", "qm1.proj.b") - p <- paste0("checkpoint4", v, "_", i, "_", r, "_") - print(p) - base::saveRDS(results.L, file = paste0(dd, "Debiased/R/QuantileMapping/resultsL", r,"_",i,"_",v, ".RDS")) - - p <- paste0("checkpoint5", v, "_", i, "_", r, "_") - print(p) - rm(list=setdiff(ls(), c("v", "i", "r", "var", "Runs"))) - - gc(reset=TRUE) - - - } else { - -#### Precipitation - the HADs variable has is called 'rainfall' - - dd <- "/mnt/vmfileshare/ClimateData/" - - #Subset to Area - #HADs grid observational data - fp <- paste0(dd, "Interim/HadsUK/Data_as_df/") - files <- list.files(fp) - obs <- files[grepl(i, files)] - - #subset file list to var - obs.var <- obs[grepl("rainfall",obs)] - - obs.df <- fread(paste0(fp, obs.var)) - obs.df <- as.data.frame(obs.df) - - row.names(obs.df) <- paste0(obs.df$x, "_", obs.df$y ) - obs.df$x <- NULL - obs.df$y <- NULL - - #Using 1980 - 2010 as calibration period - fp <- paste0(dd, "Interim/CPM/Data_as_df/") - cpm.files <- list.files(fp) - - #Calibration years 1980 - 2010 - load in full one for 1980 - 2000 - cpm.cal <- cpm.files[grepl("1980|2000", cpm.files)] - - #Subset file list to area - cpm.cal <- cpm.cal[grepl(i, cpm.cal)] - - #subset to var and run - cpm.cal.var <- cpm.cal[grepl(v, cpm.cal)&grepl(r, cpm.cal)] - - #Load in - cal.dfs1 <- lapply(cpm.cal.var, function(x){ - df <- fread(paste0(fp, x)) - df <- as.data.frame(df) - }) - - names(cal.dfs1) <- 
cpm.cal.var - - - #Sub out beyond cal period (2010 - 2020) - ie just keep the calibration here - years <- 2000:2009 - lyrs <- paste0("_day_", years, collapse = "|") - - cal.df2 <- cal.dfs1[[2]][,grepl(lyrs, names(cal.dfs1[[2]]))] - - #Create final cal.df for this run - cal.df <- list(cal.dfs1[[1]], cal.df2) %>% reduce(cbind) - row.names(cal.df)<- paste0(cal.df$x, "_", cal.df$y) - cal.df$x <- NULL - cal.df$y <- NULL - - #Clean up - remove("cal.df2") - - #Subset out the test years (2010-2020) - proj.df1 <- cal.dfs1[[2]][,!grepl(lyrs, names(cal.dfs1[[2]]))] - - #Clean up - remove("cal.dfs1") - gc() - - yi <- paste0(i,c(2020,2040,2060), collapse="|") - cpm.proj <- cpm.files[grepl(yi, cpm.files)] - - #Subset to Area, var and run - cpm.proj <- cpm.proj[grepl(i, cpm.proj)&grepl(v, cpm.proj)&grepl(r, cpm.proj)] - - #Load in - proj.df2 <- lapply(cpm.proj, function(x){ - df <- as.data.frame(fread(paste0(fp, x))) - #Remove x and y cols - df[c(3:ncol(df))] - }) - - names(proj.df2) <- cpm.proj - - proj.df <- c(list(proj.df1), proj.df2) %>% reduce(cbind) - - row.names(proj.df) <- paste0(proj.df$x, "_", proj.df$y) - proj.df$x <- NULL - proj.df$y <- NULL - - remove("proj.df1") - remove("proj.df2") - - ## **2. 
Wrangle the data** - - missing.in.hads.cpm.cal <- cal.df[-which(row.names(cal.df)%in%row.names(obs.df)),] - missing.in.hads.cpm.proj <- proj.df[-which(row.names(proj.df)%in%row.names(obs.df)),] - - #mnt/vmfileshare/ClimateData/Debiased/R/QuantileMapping - - cal.df <- cal.df[which(row.names(cal.df)%in%row.names(obs.df)),] - proj.df <- proj.df[which(row.names(proj.df)%in%row.names(obs.df)),] - - #save the missing outputs - p <- paste0("checkpoint1", v, "_", i, "_", r, "_") - print(p) - write.csv(missing.in.hads.cpm.cal, paste0(dd, "Debiased/R/QuantileMapping/missing.in.hads/",r,"_",i,"_",v, ".csv")) - - ### Update obs data to 360 days - - #The below is a work around with the HADS dataset having 365 days on leap years - this is to be updateed and corrected when the 360 day sampling is better sorted - - #Convert obs to 360 day year - has 40 more vars so remove the ones not in cal - remove <- c("0229_29", "0430_30", "0731_31", "0930_30", "1130_30") - remove <- paste0(remove, collapse = "|") - - obs.df <- obs.df[,!grepl(remove, names(obs.df))] - - ### Transpose the data sets - - #Obs grid should be cols, observations (time) should be rows for linear scaling - - cal.df <- t(cal.df) - proj.df <- t(proj.df) - obs.df <- t(obs.df) - - - ## **3. Empirical Quantile Mapping** - - #(from qmap vignette) - fitQmapQUANT estimates values of the empirical cumulative distribution function of observed and - #modelled time series for regularly spaced quantiles. doQmapQUANT uses these estimates to perform - #quantile mapping - p <- paste0("checkpoint2", v, "_", i, "_", r, "_") - print(p) - - - qm1.fit <- fitQmapQUANT(obs.df, cal.df, - wet.day = TRUE, #If wet.day=TRUE the empirical probability of nonzero observations is found (obs>=0) and the corresponding modelled value is selected as a threshold. All modelled values below this threshold are set to zero. If wet.day is numeric the same procedure is performed after setting all obs to zero. 
- qstep = 0.01, - nboot = 1) #nboot number of bootstrap samples used for estimation of the observed quantiles. - - - qm1.hist.a <- doQmapQUANT(cal.df, qm1.fit, type="linear") - qm1.hist.b <- doQmapQUANT(cal.df, qm1.fit, type="tricub") - - qm1.proj.a <- doQmapQUANT(proj.df, qm1.fit, type="linear") - qm1.proj.b <- doQmapQUANT(proj.df, qm1.fit, type="tricub") - - ## **4. Save the data** - p <- paste0("checkpoint3", v, "_", i, "_", r, "_") - print(p) - # Save data - lists of dfs for now (will be easier for assessment) - results.L <- list(obs.df, cal.df, proj.df, qm1.hist.a, qm1.hist.b, qm1.proj.a, qm1.proj.b) - - names(results.L) <- c("t.obs", "t.cal", "t.proj", "qm1.hist.a", "qm1.hist.b", "qm1.proj.a", "qm1.proj.b") - p <- paste0("checkpoint4", v, "_", i, "_", r, "_") - print(p) - base::saveRDS(results.L, file = paste0(dd, "Debiased/R/QuantileMapping/resultsL", r,"_",i,"_",v, ".RDS")) - - p <- paste0("checkpoint5", v, "_", i, "_", r, "_") - print(p) - rm(list=setdiff(ls(), c("v", "i", "r", "var", "Runs"))) - - gc(reset=TRUE) - - - } - } - } From b7666a5e21f28a558e408f4fd82400d82d296570 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 13 Sep 2023 21:25:25 -0400 Subject: [PATCH 030/146] feat: add basic Dockerfile with default R config for Jupyter --- Dockerfile | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 Dockerfile diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..6afb87f9 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,55 @@ +FROM jupyter/r-notebook + +ENV LC_ALL en_GB.UTF-8 +ENV LANG en_GB.UTF-8 +ENV LANGUAGE en_GB.UTF-8 +ENV SHELL /bin/bash +# ENV CONDA_DIR /usr/lib +ARG env_name=clim-recal +ARG py_ver=3.11 + +USER root + +# Generate the locales +RUN echo "en_GB.UTF-8 UTF-8" > /etc/locale.gen && locale-gen + + +RUN apt-get update && apt-get -y install gdal-bin python3-gdal libgdal-dev build-essential +RUN conda update -n base -c conda-forge conda + +# Ensure correct GDAL 
paths +RUN export CPLUS_INCLUDE_PATH=/usr/include/gdal && export C_INCLUDE_PATH=/usr/include/gdal + +# Create custom environment from environment.yml +# Add ipykernel for environment build as necessary +COPY --chown=${NB_UID}:${NB_GID} environment.yml /tmp/ +RUN mamba env create -p "${CONDA_DIR}/envs/${env_name}" -f /tmp/environment.yml && \ + # mamba install --yes 'jupyterlab' 'notebook' 'jupyterhub' 'nbclassic' 'ipykernel' && \ + mamba clean --all -f -y + +# Any additional `pip` installs can be added by using the following line +# Using `mamba` is highly recommended though +RUN "${CONDA_DIR}/envs/${env_name}/bin/pip" install --no-cache-dir \ + 'ipykernel' + +# Create kernel from custome environment.yml +RUN "${CONDA_DIR}/envs/${env_name}/bin/python" -m ipykernel install --user --name="${env_name}" && \ + fix-permissions "${CONDA_DIR}" && \ + fix-permissions "/home/${NB_USER}" + +# Copy the rest of the clim-recal code to volume +COPY --chown=${NB_UID}:${NB_GID} . . + +# Add custom activate script to reflect environment +USER root +RUN activate_custom_env_script=/usr/local/bin/before-notebook.d/activate_custom_env.sh && \ + echo "#!/bin/bash" > ${activate_custom_env_script} && \ + echo "eval \"$(conda shell.bash activate "${env_name}")\"" >> ${activate_custom_env_script} && \ + chmod +x ${activate_custom_env_script} + + +USER ${NB_UID} + +# Set this for default conda activate config +# You can comment this line to keep the default environment in Terminal +RUN echo "conda activate ${env_name}" >> "${HOME}/.bashrc" From b4f866561440041744d976a434a025a2f62084ba Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 14 Sep 2023 02:13:51 -0400 Subject: [PATCH 031/146] feat(docker): remove unneeded installation build process and add documentation comments --- Dockerfile | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6afb87f9..8354eacc 100644 --- a/Dockerfile +++ b/Dockerfile 
@@ -1,12 +1,39 @@ FROM jupyter/r-notebook +# This is derived from documentation available at +# https://jupyter-docker-stacks.readthedocs.io/en/latest/ + +# Example run command: + + +# This will require a mount of `vmfileshare` from `dymestorage1` +# On macOS this can be solved via: +# open smb://dymestorage1.file.core.windows.net/vmfileshare +# Using user: dymestorage1 +# And password specified via: +# https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys + +# Example run: +# cd clim-recal +# docker build --tag 'clim-recal' . +# docker run -it -p 8888:8888 -v /Volumes/vmfileshare:/home/jovyan/work/vmfileshare clim-recal + ENV LC_ALL en_GB.UTF-8 ENV LANG en_GB.UTF-8 ENV LANGUAGE en_GB.UTF-8 ENV SHELL /bin/bash -# ENV CONDA_DIR /usr/lib ARG env_name=clim-recal -ARG py_ver=3.11 + +# `py_ver` is not currently used below and is specified in `environment.yaml` +# here as reminder and clarity if future change needed. +ARG py_ver=3.11 + +# The local_data_path is an absolute local path to ClimateData on the machine hosting running `docker` +ARG local_data_path=/Volumes/vmfileshare/ClimateData + +# The local_data_path is an absolute path to mount ClimateData within `docker` +ARG docker_data_path=/Volumes/vmfileshare/ClimateData + USER root @@ -24,7 +51,6 @@ RUN export CPLUS_INCLUDE_PATH=/usr/include/gdal && export C_INCLUDE_PATH=/usr/in # Add ipykernel for environment build as necessary COPY --chown=${NB_UID}:${NB_GID} environment.yml /tmp/ RUN mamba env create -p "${CONDA_DIR}/envs/${env_name}" -f /tmp/environment.yml && \ - # mamba install --yes 'jupyterlab' 'notebook' 'jupyterhub' 'nbclassic' 'ipykernel' && \ mamba clean --all -f -y # Any additional `pip` installs can be added by using the following line @@ -32,7 +58,7 @@ RUN mamba env create -p "${CONDA_DIR}/envs/${env_name}" -f /tmp/environment.yml RUN "${CONDA_DIR}/envs/${env_name}/bin/pip" install --no-cache-dir \ 'ipykernel' -# Create kernel 
from custome environment.yml +# Create kernel from custome `environment.yml` RUN "${CONDA_DIR}/envs/${env_name}/bin/python" -m ipykernel install --user --name="${env_name}" && \ fix-permissions "${CONDA_DIR}" && \ fix-permissions "/home/${NB_USER}" @@ -40,6 +66,7 @@ RUN "${CONDA_DIR}/envs/${env_name}/bin/python" -m ipykernel install --user --nam # Copy the rest of the clim-recal code to volume COPY --chown=${NB_UID}:${NB_GID} . . + # Add custom activate script to reflect environment USER root RUN activate_custom_env_script=/usr/local/bin/before-notebook.d/activate_custom_env.sh && \ @@ -47,9 +74,12 @@ RUN activate_custom_env_script=/usr/local/bin/before-notebook.d/activate_custom_ echo "eval \"$(conda shell.bash activate "${env_name}")\"" >> ${activate_custom_env_script} && \ chmod +x ${activate_custom_env_script} - +# Switch to default jupyter user USER ${NB_UID} -# Set this for default conda activate config +# Set this for default `conda activate` configuration # You can comment this line to keep the default environment in Terminal RUN echo "conda activate ${env_name}" >> "${HOME}/.bashrc" + +# This will use the default launch as discussed in +# https://jupyter-docker-stacks.readthedocs.io/en/latest/using/running.html From 4ab4f07e1e036e2ac0781523ca3d8e66215e4d9d Mon Sep 17 00:00:00 2001 From: Ruth Bowyer <105492883+RuthBowyer@users.noreply.github.com> Date: Thu, 21 Sep 2023 09:40:46 +0100 Subject: [PATCH 032/146] Update resampling_hads.py note about rioxarray --- python/resampling/resampling_hads.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/resampling/resampling_hads.py b/python/resampling/resampling_hads.py index 0394128e..53eaf789 100644 --- a/python/resampling/resampling_hads.py +++ b/python/resampling/resampling_hads.py @@ -6,7 +6,7 @@ import argparse import pandas as pd -import xarray as xr +import xarray as xr #requires rioxarray extension import os import glob import multiprocessing From 
336720c2839f57806ef9fcbd2f0eae1b1a39124a Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 21 Sep 2023 13:34:32 +0100 Subject: [PATCH 033/146] Rename variables in two main python scripts to make them uniform --- python/debiasing/preprocess_data.py | 141 ++++++++++++++-------------- python/debiasing/run_cmethods.py | 84 +++++++++-------- 2 files changed, 115 insertions(+), 110 deletions(-) diff --git a/python/debiasing/preprocess_data.py b/python/debiasing/preprocess_data.py index 4e5d3992..035acf4c 100644 --- a/python/debiasing/preprocess_data.py +++ b/python/debiasing/preprocess_data.py @@ -30,16 +30,16 @@ # * ----- I N P U T - H A N D L I N G ----- parser = argparse.ArgumentParser(description='Pre-process data before bias correction.') -parser.add_argument('--obs', '--observation', dest='obs_fpath', type=str, help='Path to observation datasets') -parser.add_argument('--contr', '--control', dest='contr_fpath', type=str, help='Path to control datasets') -parser.add_argument('--scen', '--scenario', dest='scen_fpath', type=str, - help='Path to scenario datasets (data to adjust)') -parser.add_argument('--contr_dates', '--control_date_range', dest='control_date_range', type=str, - help='Start and end dates for control and observation data (historic CPM/HADs data used to ' +parser.add_argument('--mod', '--modelled', dest='mod_fpath', type=str, + help='Path to modelled (CPM) datasets') +parser.add_argument('--obs', '--observed', dest='obs_fpath', type=str, + help='Path to observation (HADs) datasets') +parser.add_argument('--calib_dates', '--calibration_date_range', dest='calibration_date_range', type=str, + help='Start and end dates for calibration (historic CPM/HADs data used to ' 'calibrate the debiasing model) - in YYYYMMDD-YYYYMMDD format', default='19801201-19991130') -parser.add_argument('--scen_dates', '--scenario_date_range', dest='scenario_date_range', type=str, - help='Start and end dates for scenario data (CPM data to be debiased using the ' 
+parser.add_argument('--valid_dates', '--validation_date_range', dest='validation_date_range', type=str, + help='Start and end dates for validation data (CPM data to be debiased using the ' 'calibrated debiasing model) - multiple date ranges can be passed, ' 'separated by "_", each in YYYYMMDD-YYYYMMDD format e.g., ' '"20100101-20191231_20200101-20291231"', @@ -54,10 +54,9 @@ params = vars(parser.parse_args()) obs_fpath = params['obs_fpath'] -contr_fpath = params['contr_fpath'] -scen_fpath = params['scen_fpath'] -calibration_date_range = params['control_date_range'] -projection_date_range = params['scenario_date_range'] +mod_fpath = params['mod_fpath'] +calibration_date_range = params['calibration_date_range'] +validation_date_range = params['validation_date_range'] shape_fpath = params['shapefile_fpath'] out_fpath = params['output_fpath'] var = params['var'] @@ -67,8 +66,8 @@ calib_list = calibration_date_range.split('-') h_date_period = (datetime.strptime(calib_list[0], '%Y%m%d').strftime('%Y-%m-%d'), datetime.strptime(calib_list[1], '%Y%m%d').strftime('%Y-%m-%d')) -proj_list = projection_date_range.split('_') -future_time_periods = [(p.split('-')[0], p.split('-')[1]) for p in proj_list] +val_list = validation_date_range.split('_') +future_time_periods = [(p.split('-')[0], p.split('-')[1]) for p in val_list] future_time_periods = [(datetime.strptime(p[0], '%Y%m%d').strftime('%Y-%m-%d'), datetime.strptime(p[1], '%Y%m%d').strftime('%Y-%m-%d')) for p in future_time_periods] @@ -83,16 +82,18 @@ def preprocess_data() -> None: use_pr = False if var == "rainfall": use_pr = True + + # load modelled data (CPM) for calibration period and place into ds_modc if run_number is not None: - ds_simh = \ - load_data(contr_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, + ds_modc = \ + load_data(mod_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, run_number=run_number, filter_filenames_on_run_number=True, 
use_pr=use_pr, shapefile_path=shape_fpath, extension='tif')[var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) else: - ds_simh = \ - load_data(contr_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, + ds_modc = \ + load_data(mod_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, use_pr=use_pr, shapefile_path=shape_fpath, extension='tif')[var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) @@ -110,67 +111,69 @@ def preprocess_data() -> None: else: raise Exception(f"A mix of .nc and .tif observation files found in {obs_fpath}, file extension should be the " f"same for all files in the directory.") - ds_obs = load_data(obs_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, - shapefile_path=shape_fpath, extension=ext)[var].rename({"projection_x_coordinate": "lon", - "projection_y_coordinate": "lat"}) - log.info('Historical data Loaded.') - # aligning calendars, e.g there might be a few extra days on the scenario data that has to be dropped. 
- ds_simh = ds_simh.sel(time=ds_obs.time, method='nearest') + # load observation data (HADs) for calibration period and place into ds_obsc + ds_obsc = load_data(obs_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, + shapefile_path=shape_fpath, extension=ext)[var].rename({"projection_x_coordinate": "lon", + "projection_y_coordinate": "lat"}) + log.info('Calibration data (modelled and observed) loaded.') + + # aligning calendars, there might be extra days in the modelled data that need to be dropped + ds_modc = ds_modc.sel(time=ds_obsc.time, method='nearest') - if ds_obs.shape != ds_simh.shape: - raise RuntimeError('Error, observed and simulated historical data must have same dimensions.') + if ds_obsc.shape != ds_modc.shape: + raise RuntimeError('Error, observed and modelled calibration data must have same dimensions.') log.info('Resulting datasets with shape') - log.info(ds_obs.shape) + log.info(ds_obsc.shape) # masking coordinates where the observed data has no values - ds_simh = ds_simh.where(~np.isnan(ds_obs.isel(time=0))) - ds_simh = ds_simh.where(ds_simh.values < 1000) - log.info('Historical data Masked') - - ds_obs.attrs['unit'] = unit - ds_simh.attrs['unit'] = unit - - # write simh to .nc file in output directory - simh_filename = f'simh_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}' - simh_path = os.path.join(out_fpath, f'{simh_filename}.nc') - if not os.path.exists(os.path.dirname(simh_path)): - folder_path = Path(os.path.dirname(simh_path)) + ds_modc = ds_modc.where(~np.isnan(ds_obsc.isel(time=0))) + ds_modc = ds_modc.where(ds_modc.values < 1000) + log.info('Calibration data masked') + + ds_obsc.attrs['unit'] = unit + ds_modc.attrs['unit'] = unit + + # write modc to .nc file in output directory + modc_filename = f'modc_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}' + modc_path = os.path.join(out_fpath, f'{modc_filename}.nc') + if not os.path.exists(os.path.dirname(modc_path)): + folder_path = 
Path(os.path.dirname(modc_path)) folder_path.mkdir(parents=True) - print(f"Saving historical control data to {simh_path}") - ds_simh.to_netcdf(simh_path) - log.info(f'Saved CPM data for calibration (historic) period to {simh_path}') - - # write ds_obs to .nc file in output directory - obsh_filename = f'obsh_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}' - obsh_path = os.path.join(out_fpath, f'{obsh_filename}.nc') - if not os.path.exists(os.path.dirname(obsh_path)): - folder_path = Path(os.path.dirname(obsh_path)) + print(f"Saving modelled (CPM) data for calibration to {modc_path}") + ds_modc.to_netcdf(modc_path) + log.info(f'Saved modelled (CPM) data for calibration to {modc_path}') + + # write ds_obsc to .nc file in output directory + obsc_filename = f'obsc_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}' + obsc_path = os.path.join(out_fpath, f'{obsc_filename}.nc') + if not os.path.exists(os.path.dirname(obsc_path)): + folder_path = Path(os.path.dirname(obsc_path)) folder_path.mkdir(parents=True) - print(f"Saving historical observation data to {obsh_path}") - ds_obs.to_netcdf(obsh_path) - log.info(f'Saved HADs data for calibration (historic) period to {obsh_path}') + print(f"Saving observation data (HADs) for calibration to {obsc_path}") + ds_obsc.to_netcdf(obsc_path) + log.info(f'Saved observation data (HADs) for calibration period to {obsc_path}') - # looping over time periods - # this is done because the full time period for the scenario dataset is too large for memory. 
+ # looping over validation time periods for f_date_period in future_time_periods: log.info(f'Running for {f_date_period} time period') - + # load modelled (CPM) data for validation period and store in ds_modv try: use_pr = False if var == "rainfall": use_pr = True + # load if run_number is not None: - ds_simp = \ - load_data(scen_fpath, date_range=f_date_period, variable=var, run_number=run_number, + ds_modv = \ + load_data(mod_fpath, date_range=f_date_period, variable=var, run_number=run_number, filter_filenames_on_run_number=True, use_pr=use_pr, shapefile_path=shape_fpath, extension='tif')[ var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) else: - ds_simp = \ - load_data(scen_fpath, date_range=f_date_period, variable=var, + ds_modv = \ + load_data(mod_fpath, date_range=f_date_period, variable=var, use_pr=use_pr, shapefile_path=shape_fpath, extension='tif')[ var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) except Exception as e: @@ -178,20 +181,20 @@ def preprocess_data() -> None: continue # masking coordinates where the observed data has no values - ds_simp = ds_simp.where(~np.isnan(ds_obs.isel(time=0))) - ds_simp = ds_simp.where(ds_simp.values < 1000) + ds_modv = ds_modv.where(~np.isnan(ds_obsc.isel(time=0))) + ds_modv = ds_modv.where(ds_modv.values < 1000) - ds_simp.attrs['unit'] = unit + ds_modv.attrs['unit'] = unit - # write ds_simp to .nc file in output directory - simp_filename = f'simp_var-{var}_run-{run_number}_{f_date_period[0]}_{f_date_period[1]}' - simp_path = os.path.join(out_fpath, f'{simp_filename}.nc') - if not os.path.exists(os.path.dirname(simp_path)): - folder_path = Path(os.path.dirname(simp_path)) + # write ds_modv to .nc file in output directory + ds_modv_filename = f'modv_var-{var}_run-{run_number}_{f_date_period[0]}_{f_date_period[1]}' + ds_modv_path = os.path.join(out_fpath, f'{ds_modv_filename}.nc') + if not os.path.exists(os.path.dirname(ds_modv_path)): + folder_path = 
Path(os.path.dirname(ds_modv_path)) folder_path.mkdir(parents=True) - print(f"Saving future scenario data to {simp_path}") - ds_simp.to_netcdf(simp_path) - log.info(f'Saved CPM data for projection (future) period {f_date_period} to {simp_path}') + print(f"Saving modelled (CPM) data for validation to {ds_modv_path}") + ds_modv.to_netcdf(ds_modv_path) + log.info(f'Saved modelled (CPM) data for validation, period {f_date_period} to {ds_modv_path}') end = time.time() log.info(f'total time in seconds: {end - start}') diff --git a/python/debiasing/run_cmethods.py b/python/debiasing/run_cmethods.py index 305f168b..028a833a 100644 --- a/python/debiasing/run_cmethods.py +++ b/python/debiasing/run_cmethods.py @@ -34,9 +34,11 @@ parser = argparse.ArgumentParser(description='Adjust climate data based on bias correction algorithms.') parser.add_argument('--input_data_folder', '--input_data_folder', dest='input_dir', type=str, help='Directory that contains all data files. NetCDF (.nc) files with names starting with ' - '`simh` and `obsh` should be found in the directory (containing historic CPM ' - 'and HADs data respectively), as well as at least one file with name ' - 'starting with `simp` (containing future CPM data)') + '`modc` and `obsc` should be found in the directory (containing ' + 'modelled calibration data (CPM) and observed calibration data (HADs) respectively), ' + 'as well as at least one file with name ' + 'starting with `modv` (containing modelled validation data (CPM). Calibration data ' + 'are used to calibrate the debiasing methods and validation data are debiased.') parser.add_argument('--out', '--output', dest='output_fpath', type=str, help='Path to save output files', default='.') parser.add_argument('-m', '--method', dest='method', type=str, help='Correction method', default='quantile_delta_mapping') @@ -68,49 +70,49 @@ def run_debiasing() -> None: if method not in cm.get_available_methods(): raise ValueError(f'Unknown method {method}. 
Available methods: {cm.get_available_methods()}') - simh_files = glob.glob(f"{input_dir}/simh*.nc") - if len(simh_files) == 0: - raise Exception(f"No .nc files with filename starting with simh were " + modc_files = glob.glob(f"{input_dir}/modc*.nc") + if len(modc_files) == 0: + raise Exception(f"No .nc files with filename starting with modc were " f"found in the input directory {input_dir}") - elif len(simh_files) > 1: - raise Exception(f"More than one .nc file with filenames starting with simh were " + elif len(modc_files) > 1: + raise Exception(f"More than one .nc file with filenames starting with modc were " f"found in the input directory {input_dir}") else: - print('Loading historic control data from ', simh_files[0], "...") - with xr.open_dataset(simh_files[0], engine='netcdf4') as ds: - ds_simh = ds.load()[var] - log.info(f'Historic control data loaded with shape {ds_simh.shape}.') - - obsh_files = glob.glob(f"{input_dir}/obsh*.nc") - if len(obsh_files) == 0: - raise Exception(f"No .nc files with filename starting with obsh were " + print('Loading modelled calibration data (CPM) from ', modc_files[0], "...") + with xr.open_dataset(modc_files[0], engine='netcdf4') as ds: + ds_modc = ds.load()[var] + log.info(f'Modelled calibration data (CPM) loaded with shape {ds_modc.shape}.') + + obsc_files = glob.glob(f"{input_dir}/obsc*.nc") + if len(obsc_files) == 0: + raise Exception(f"No .nc files with filename starting with obsc were " f"found in the input directory {input_dir}") - elif len(obsh_files) > 1: - raise Exception(f"More than one .nc file with filenames starting with obsh were " + elif len(obsc_files) > 1: + raise Exception(f"More than one .nc file with filenames starting with obsc were " f"found in the input directory {input_dir}") else: - print('Loading historic observation data from ', obsh_files[0], "...") - with xr.open_dataset(obsh_files[0], engine='netcdf4') as ds: - ds_obs = ds.load()[var] - log.info(f'Historic observation data loaded with shape 
{ds_obs.shape}.') + print('Loading observation data for calibration from ', obsc_files[0], "...") + with xr.open_dataset(obsc_files[0], engine='netcdf4') as ds: + ds_obsc = ds.load()[var] + log.info(f'Observation data for calibration loaded with shape {ds_obsc.shape}.') - if ds_obs.shape != ds_simh.shape: - raise RuntimeError('Error, observed and control historical data must have same dimensions.') + if ds_obsc.shape != ds_modc.shape: + raise RuntimeError('Error, observed and modelled calibration data must have same dimensions.') # looping over future time periods for which debiased data need to be generated - simp_files = glob.glob(f"{input_dir}/simp*.nc") - if len(simp_files) == 0: - raise Exception(f"No .nc files with filename starting with simp were " + modv_files = glob.glob(f"{input_dir}/modv*.nc") + if len(modv_files) == 0: + raise Exception(f"No .nc files with filename starting with modv were " f"found in the input directory {input_dir}") else: - for simp_file in simp_files: - print('Loading future scenario (CPM) data from ', simp_file, "...") - with xr.open_dataset(simp_file, engine='netcdf4') as ds: - ds_simp = ds.load()[var] - log.info(f'Future scenario data loaded with shape {ds_simp.shape}.') + for modv_file in modv_files: + print('Loading modelled data (CPM) for validation from ', modv_file, "...") + with xr.open_dataset(modv_file, engine='netcdf4') as ds: + ds_modv = ds.load()[var] + log.info(f'Modelled data (CPM) for validation loaded with shape {ds_modv.shape}.') - start_date: str = ds_simp['time'][0].dt.strftime('%Y%m%d').values.ravel()[0] - end_date: str = ds_simp['time'][-1].dt.strftime('%Y%m%d').values.ravel()[0] + start_date: str = ds_modv['time'][0].dt.strftime('%Y%m%d').values.ravel()[0] + end_date: str = ds_modv['time'][-1].dt.strftime('%Y%m%d').values.ravel()[0] descr1, descr2 = '', '' if method in cm.DISTRIBUTION_METHODS: @@ -125,9 +127,9 @@ def run_debiasing() -> None: log.info(f'Starting {method} adjustment') result = cm.adjust_3d( 
method=method, - obs=ds_obs, - simh=ds_simh, - simp=ds_simp, + obs=ds_obsc, + simh=ds_modc, + simp=ds_modv, n_quantiles=n_quantiles, kind=kind, group=group, @@ -135,7 +137,7 @@ def run_debiasing() -> None: ) log.info('Saving now') result.name = var - result['time'] = ds_simp['time'] + result['time'] = ds_modv['time'] result = result.rename({"lon": "projection_x_coordinate", "lat": "projection_y_coordinate"}) # define output name @@ -146,9 +148,9 @@ def run_debiasing() -> None: log.info(result.head()) plt.figure(figsize=(10, 5), dpi=216) - ds_simh.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,h}$') - ds_obs.groupby('time.dayofyear').mean(...).plot(label='$T_{obs,h}$') - ds_simp.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,p}$') + ds_modc.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,h}$') + ds_obsc.groupby('time.dayofyear').mean(...).plot(label='$T_{obs,h}$') + ds_modv.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,p}$') result.groupby('time.dayofyear').mean(...).plot(label='$T^{*Debiased}_{sim,p}$') plt.title( f'Debiased {var} projected to {start_date} and {end_date}') From e870ce78c575fd5c7440c0ea33087b0f02b2d6e1 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 21 Sep 2023 14:12:16 +0100 Subject: [PATCH 034/146] Fix bug with selection of validation CPM files --- python/debiasing/preprocess_data.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/python/debiasing/preprocess_data.py b/python/debiasing/preprocess_data.py index 035acf4c..47b6847f 100644 --- a/python/debiasing/preprocess_data.py +++ b/python/debiasing/preprocess_data.py @@ -127,7 +127,7 @@ def preprocess_data() -> None: log.info('Resulting datasets with shape') log.info(ds_obsc.shape) - # masking coordinates where the observed data has no values + # masking coordinates where the observed data has no x, y values ds_modc = ds_modc.where(~np.isnan(ds_obsc.isel(time=0))) ds_modc = ds_modc.where(ds_modc.values < 1000) 
log.info('Calibration data masked') @@ -164,23 +164,24 @@ def preprocess_data() -> None: use_pr = False if var == "rainfall": use_pr = True + # load if run_number is not None: ds_modv = \ load_data(mod_fpath, date_range=f_date_period, variable=var, run_number=run_number, filter_filenames_on_run_number=True, use_pr=use_pr, shapefile_path=shape_fpath, - extension='tif')[ + filter_filenames_on_variable=True, extension='tif')[ var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) else: ds_modv = \ - load_data(mod_fpath, date_range=f_date_period, variable=var, + load_data(mod_fpath, date_range=f_date_period, variable=var, filter_filenames_on_variable=True, use_pr=use_pr, shapefile_path=shape_fpath, extension='tif')[ var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) except Exception as e: log.info(f'No data available for {f_date_period} time period') continue - # masking coordinates where the observed data has no values + # masking coordinates where the observed data has no x, y values ds_modv = ds_modv.where(~np.isnan(ds_obsc.isel(time=0))) ds_modv = ds_modv.where(ds_modv.values < 1000) From aedf796336aac691f363ea046fe0bd7a2bc5c30f Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 21 Sep 2023 15:07:01 +0100 Subject: [PATCH 035/146] Add code that loads and writes to disk the observation data for the validation periods, so that it is easier to assess accuracy of debiased data --- python/debiasing/preprocess_data.py | 39 +++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/python/debiasing/preprocess_data.py b/python/debiasing/preprocess_data.py index 47b6847f..e20e136e 100644 --- a/python/debiasing/preprocess_data.py +++ b/python/debiasing/preprocess_data.py @@ -40,7 +40,7 @@ default='19801201-19991130') parser.add_argument('--valid_dates', '--validation_date_range', dest='validation_date_range', type=str, help='Start and end dates for validation data (CPM data to 
be debiased using the ' - 'calibrated debiasing model) - multiple date ranges can be passed, ' + 'calibrated debiasing model, and HADs data as referece) - multiple date ranges can be passed, ' 'separated by "_", each in YYYYMMDD-YYYYMMDD format e.g., ' '"20100101-20191231_20200101-20291231"', default='20201201-20291130_20301201-20391130') @@ -124,7 +124,7 @@ def preprocess_data() -> None: if ds_obsc.shape != ds_modc.shape: raise RuntimeError('Error, observed and modelled calibration data must have same dimensions.') - log.info('Resulting datasets with shape') + log.info('Resulting calibration datasets with shape') log.info(ds_obsc.shape) # masking coordinates where the observed data has no x, y values @@ -159,6 +159,7 @@ def preprocess_data() -> None: for f_date_period in future_time_periods: log.info(f'Running for {f_date_period} time period') + # load modelled (CPM) data for validation period and store in ds_modv try: use_pr = False @@ -178,24 +179,52 @@ def preprocess_data() -> None: use_pr=use_pr, shapefile_path=shape_fpath, extension='tif')[ var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) except Exception as e: - log.info(f'No data available for {f_date_period} time period') + log.info(f'No modelled data available for {f_date_period} time period') + continue + + # load observed (HADs) data for validation period and store in ds_obsv + try: + ds_obsv = load_data(obs_fpath, date_range=f_date_period, variable=var, filter_filenames_on_variable=True, + shapefile_path=shape_fpath, extension=ext)[var].rename( + {"projection_x_coordinate": "lon", + "projection_y_coordinate": "lat"}) + except Exception as e: + log.info(f'No observed data available for {f_date_period} time period') continue + # aligning calendars, there might be extra days in the modelled data that need to be dropped + ds_modv = ds_modv.sel(time=ds_obsv.time, method='nearest') + + if ds_obsv.shape != ds_modv.shape: + raise RuntimeError('Error, observed and modelled 
validation data must have same dimensions.') + + log.info('Resulting validation datasets with shape') + log.info(ds_obsv.shape) + # masking coordinates where the observed data has no x, y values - ds_modv = ds_modv.where(~np.isnan(ds_obsc.isel(time=0))) + ds_modv = ds_modv.where(~np.isnan(ds_obsv.isel(time=0))) ds_modv = ds_modv.where(ds_modv.values < 1000) + ds_obsv.attrs['unit'] = unit ds_modv.attrs['unit'] = unit - # write ds_modv to .nc file in output directory + # write ds_modv and ds_obsv to .nc files in output directory ds_modv_filename = f'modv_var-{var}_run-{run_number}_{f_date_period[0]}_{f_date_period[1]}' + ds_obsv_filename = f'obsv_var-{var}_run-{run_number}_{f_date_period[0]}_{f_date_period[1]}' ds_modv_path = os.path.join(out_fpath, f'{ds_modv_filename}.nc') + ds_obsv_path = os.path.join(out_fpath, f'{ds_obsv_filename}.nc') if not os.path.exists(os.path.dirname(ds_modv_path)): folder_path = Path(os.path.dirname(ds_modv_path)) folder_path.mkdir(parents=True) + if not os.path.exists(os.path.dirname(ds_obsv_path)): + folder_path = Path(os.path.dirname(ds_obsv_path)) + folder_path.mkdir(parents=True) print(f"Saving modelled (CPM) data for validation to {ds_modv_path}") ds_modv.to_netcdf(ds_modv_path) log.info(f'Saved modelled (CPM) data for validation, period {f_date_period} to {ds_modv_path}') + print(f"Saving observed (HADs) data for validation to {ds_obsv_path}") + ds_obsv.to_netcdf(ds_obsv_path) + log.info(f'Saved observed (HADs) data for validation, period {f_date_period} to {ds_modv_path}') end = time.time() log.info(f'total time in seconds: {end - start}') From db04d30c23f455bd4d92bd4235bd7ed9d6929a31 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 21 Sep 2023 15:10:21 +0100 Subject: [PATCH 036/146] Modify bash script to work with new argument names --- python/debiasing/three_cities_debiasing_cropped.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/debiasing/three_cities_debiasing_cropped.sh 
b/python/debiasing/three_cities_debiasing_cropped.sh index 6feb32aa..842c8b6b 100755 --- a/python/debiasing/three_cities_debiasing_cropped.sh +++ b/python/debiasing/three_cities_debiasing_cropped.sh @@ -11,7 +11,7 @@ for run in "${runs[@]}"; do for city in "${cities[@]}"; do for var in "${vars[@]}"; do - python preprocess_data.py --scen /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --contr /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --contr_dates 19810101-19811230 --scen_dates 20100101-20100330 + python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 for method in "${methods[@]}"; do python run_cmethods.py --input_data_folder /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /mnt/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run/$var --method $method --v $var -p 32 From eb1593a48db2c385352b3bf3e63194c4555c1d06 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 21 Sep 2023 15:15:38 +0100 Subject: [PATCH 037/146] Modify second bash script to use new attribute names --- python/debiasing/three_cities_debiasing.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/python/debiasing/three_cities_debiasing.sh b/python/debiasing/three_cities_debiasing.sh index 8822fa73..a370ea97 100755 --- a/python/debiasing/three_cities_debiasing.sh +++ b/python/debiasing/three_cities_debiasing.sh @@ -1,6 +1,6 @@ #!/bin/sh -declare -a vars=("tasmax") +declare -a vars=("tasmax" "tasmin") declare -a runs=("05" "07" "08" "06") 
declare -a cities=("Glasgow" "Manchester" "London") declare -a methods=("quantile_delta_mapping" "quantile_mapping") @@ -11,19 +11,19 @@ for run in "${runs[@]}"; do for city in "${cities[@]}"; do for method in "${methods[@]}"; do - python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 + python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 for var in "${vars[@]}"; do - python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 + python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method 
$method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 done done for method in "${methods_2[@]}"; do - python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 + python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 for var in "${vars[@]}"; do - python run_cmethods.py --scen /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --contr /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --contr_dates 19800101-20091230 --scen_dates 20100101-20191230 + python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp 
-v $var --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 done done From 0e44659248a52d98b567a938fa48e3779128ea8e Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 21 Sep 2023 15:44:34 +0100 Subject: [PATCH 038/146] Update python folder readme --- python/README.md | 75 +++++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 43 deletions(-) diff --git a/python/README.md b/python/README.md index 298b532f..ffaba42c 100644 --- a/python/README.md +++ b/python/README.md @@ -57,7 +57,9 @@ Note: By March 2023 we have only implemented the [python-cmethods](https://githu ### The cmethods library -This repository contains a python script used to run debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) module written by Benjamin Thomas Schwertfeger's , which has +This repository contains two python scripts one for preprocessing/grouping data and one to run +debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) +module written by Benjamin Thomas Schwertfeger's , which has been modified to function with the dataset used in the clim-recal project. This library has been included as a submodule to this project, so you must run the following command to pull the submodules required. @@ -66,71 +68,58 @@ cd debiasing git submodule update --init --recursive ``` -The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjust climate biases in climate data using the python-cmethods library. -It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), -and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. 
-The script will also produce a time-series and a map plot of the debiased data. +- The [preprocess_data.py](debiasing/preprocess_data.py) script allows the user to specify directories from which the modelled (CPM/UKCP) data and observation (HADs) data should be loaded, as well as time periods to use for calibration and validation. The script parses the necessary files and combines them into two files for calibration (modelled and observed), and two files for validation (modelled and observed) - with the option to specify multiple validation periods. These can then be used as inputs to `run_cmethods.py`. +- The [run_cmethods.py](debiasing/run_cmethods.py) script allows us to adjust climate biases in climate data using the python-cmethods library. +It takes as input observation data (HADs data) and modelled data (historical CPM/UKCP data) for calibration, as well as observation and modelled data for validation (generated by `preprocess_data.py`). It calibrates the debiasing method using the calibration period data and debiases the modelled data for the validation period. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. 
**Usage**: -The script can be run from the command line using the following arguments: +The scripts can be run from the command line using the following arguments: ``` -python3 run_cmethods.py.py --obs --contr --scen --shp ---out -m -v -u -g -k -n -p -``` - -where: - -where: +python3 preprocess_data.py --mod --obs --shp --out -v -u -r --calib_dates --valid_dates -- `--obs` specifies the path to the observation datasets -- `--contr` specifies the path to the control datasets -- `--scen` specifies the path to the scenario datasets (data to adjust) -- `--contr_dates` specifies start and end dates for control and observation data (historic UKCP and HADs data used to calibrate the debiasing model) -- `--scen_dates` specifies start and end dates for scenario data (data to be debiased using the calibrated debiasing model - multiple date ranges can be passed -- `--shp` specifies the path to a shapefile, in case we want to select a smaller region (default: None) -- `--out` specifies the path to save the output files (default: current directory) -- `--method` specifies the correction method to use (default: quantile_delta_mapping) -- `-v` specifies the variable to adjust (default: tas) -- `-u` specifies the unit of the variable (default: °C) -- `-g` specifies the value grouping (default: time) -- `-k` specifies the method kind (+ or *, default: +) -- `-n` specifies the number of quantiles to use (default: 1000) -- `-p` specifies the number of processes to use for multiprocessing (default: 1) - -For more details on the script and options you can run: +python3 run_cmethods.py --input_data_folder --out -m -v -g -k -n -p +``` +For more details on the scripts and options you can run: +``` +python preprocess_data.py --help +``` +and ``` python run_cmethods.py --help ``` **Main Functionality**: -By default (if no control and scenario dates are passed), the script applies corrections extracted from historical observed and simulated data between `1980-12-01` and `1999-11-30`. 
-Corrections are applied to future scenario data between `2020` and `2040`. - - -The script performs the following steps: +The `preprocess_data.py` script performs the following steps: - Parses the input arguments. -- Loads, merges and clips (if shapefile is provided) the all input datasets and merges them into two distinct datasets: the observation and control datasets. -- Aligns the calendars of the historical simulation data and observed data, ensuring that they have the same time dimension -and checks that the observed and simulated historical data have the same dimensions. -- Loops over the future time periods specified in the `future_time_periods` variable and performs the following steps: - - Loads the scenario data for the current time period. - - Applies the specified correction method to the scenario data. +- Loads, merges and clips (if shapefile is provided) all calibration datasets and merges them into two distinct datasets: the modelled and observed datasets. +- Aligns the calendars of the two datasets, ensuring that they have the same time dimension. +- Saves the calibration datasets to the output directory. +- Loops over the validation time periods specified in the `valid_dates` variable and performs the following steps: + - Loads the modelled data for the current time period. + - Loads the observed data for the current time period. + - Aligns and saves the datasets to the output directory. + +The `run_cmethods.py` script performs the following steps: + - Reads the input calibration and validation datasets from the input directory. + - Applies the specified debiasing method, combining the calibration and validation data. + - Saves the resulting output to the specified directory. + - Creates diagnostic figures of the output dataset (time series and time dependent maps) and saves it into the specified directory. 
In this script -datasets are debiased in periods of 10 years, in a consecutive loop, for each time period it will produce an `.nc` output file +datasets are debiased in periods of 10 years, in a consecutive loop. For each time period it will produce an `.nc` output file with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. **Working example**. -Example of code working on the **clim-recal** dataset: +Example of how to run the two scripts using data stored in the Azure fileshare, running the scripts locally (uses input data that have been cropped to contain only the city of Glasgow. The two scripts will debias only the `tasmax` variable, run 05 of the CPM, for calibration years 1980-2009 and validation years 2010-2019. It uses the `quantile_delta_mapping` debiasing method: ``` -python run_cmethods.py --scen /Volumes/vmfileshare/ClimateData/Reprojected/UKCP2.2/tasmax/01/latest --contr /Volumes/vmfileshare/ClimateData/Reprojected/UKCP2.2/tasmax/01/latest/ --obs /Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day/ --shape ../../data/Scotland/Scotland.bbox.shp -v tasmax --method delta_method --group time.month -p 5 +python3 preprocess_data.py --mod /Volumes/vmfileshare/ClimateData/Cropped/three.cities/CPM/Glasgow/ --obs /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Glasgow/ -v tasmax --out ./preprocessed_data/ --calib_dates 19800101-20091230 --valid_dates 20100101-20191230 --run_number 05 + +python run_cmethods.py --input_data_folder ./preprocessed_data/ --out ./debiased_data/ --method quantile_delta_mapping --v tasmax -p 4 ``` From 1dca155ad587ef2fb3378d6837c2ceb6b4a51a28 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 21 Sep 2023 15:46:40 +0100 Subject: [PATCH 039/146] Remove outdated files --- python/debiasing/edit_cpm_filenames.py | 34 ---------- .../debiasing/remove_dates_from_leap_years.py | 67 ------------------- 2 files changed, 101 deletions(-) delete 
mode 100644 python/debiasing/edit_cpm_filenames.py delete mode 100644 python/debiasing/remove_dates_from_leap_years.py diff --git a/python/debiasing/edit_cpm_filenames.py b/python/debiasing/edit_cpm_filenames.py deleted file mode 100644 index a4c2b489..00000000 --- a/python/debiasing/edit_cpm_filenames.py +++ /dev/null @@ -1,34 +0,0 @@ -# Temporary script to rename the CPM data created by Ruth's code to fit with the format -# expected by the debiasing python code in clim-recal. - -import glob -import shutil -import os -from pathlib import Path - -# input Hads data folder -path = '/Volumes/vmfileshare/ClimateData/Interim/CPM/three.cities' - -# output Hads data folder - NOTE: this is a local path, please change to local or Azure path -path_output = './debiasing_test/scenario' -# path_output = '/Volumes/vmfileshare/ClimateData/Interim/CPM/three.cities.greg/' - -# create a list of input and output files -files_in = [] -files_in.extend([f for f in glob.glob(path + "**/*/*.tif", recursive=True)]) -files_out = [f for f in files_in] -files_out = [f.replace("1980_2000", "19800101-19991230") for f in files_out] -files_out = [f.replace("2000_2010", "20000101-20091230") for f in files_out] -files_out = [f.replace("2010_2020", "20100101-20191230") for f in files_out] -files_out = [f.replace("2020_2040", "20200101-20391230") for f in files_out] -files_out = [f.replace("2040_2060", "20400101-20591230") for f in files_out] -files_out = [f.replace("2060_2080", "20600101-20791230") for f in files_out] -files_out = [f.replace(path, path_output) for f in files_out] - -# copy - including recursive directory creation -for i, file_in in enumerate(files_in): - if not os.path.exists(os.path.dirname(files_out[i])): - path = Path(os.path.dirname(files_out[i])) - path.mkdir(parents=True) - - shutil.copy(file_in, files_out[i]) diff --git a/python/debiasing/remove_dates_from_leap_years.py b/python/debiasing/remove_dates_from_leap_years.py deleted file mode 100644 index 5dd24b9a..00000000 --- 
a/python/debiasing/remove_dates_from_leap_years.py +++ /dev/null @@ -1,67 +0,0 @@ -# Temporary script to convert Hads observation data (.tif) generated by Ruth's R -# script to 360-day-per-year format. The script also renames the files to match the -# names in the file names in the "Processed/HadsUKgrid/resampled_2.2km" folder and saves them -# in .nc format (same as the original Hads format in "Processed/HadsUKgrid/resampled_2.2km") - -import os -import xarray as xr -import glob -import numpy as np -from pathlib import Path - -# input Hads data folder -path = '/Volumes/vmfileshare/ClimateData/Interim/HadsUK/three.cities/' - -# output Hads data folder - NOTE: this is a local path, please change to local or Azure path -path_output = '/debiasing_test/observation/' -# path_output = '/Volumes/vmfileshare/ClimateData/Interim/HadsUK/three.cities.greg/' - -# do this for two variables - tasmin is omitted because dates in files are different -for variable in ["tasmax", "rainfall"]: - - # create a list of input and output files - files_in = [] - files_in.extend([f for f in glob.glob(path + "**/*.tif", recursive=True)]) - files_in = [f for f in files_in if variable in f] - files_out = [f"{'_'.join(f.split('_')[:-1])}-{f.split('_')[-1]}" for f in files_in] - files_out = [f.replace("tif", "nc") for f in files_out] - files_out = [f.replace("20091231", "20091230") for f in files_out] - files_out = [f.replace("20191231", "20191230") for f in files_out] - files_out = [f.replace("20100131", "20100130") for f in files_out] - files_out = [f.replace("20200131", "20200130") for f in files_out] - files_out = ["." 
+ f.replace(path, path_output) for f in files_out] - - for i, file_in in enumerate(files_in): - - # these are the lower and upper indexes used to slice the arrays, which change depending on - # whether we process the 1980-2010 or 2010-2020 Hads input - lower_index, upper_index = (0, 10840) if "1980" in file_in else (720, 2890) - - # read the raster data (.tif) - data = xr.open_dataset(file_in) - - # drop the five redundant dates for each leap year (approximate indexes are used) - data = data.drop_sel(band=np.arange(lower_index + 59, upper_index, 1445).tolist()) - data = data.drop_sel(band=np.arange(lower_index + 120, upper_index, 1445).tolist()) - data = data.drop_sel(band=np.arange(lower_index + 211, upper_index, 1445).tolist()) - data = data.drop_sel(band=np.arange(lower_index + 271, upper_index, 1445).tolist()) - data = data.drop_sel(band=np.arange(lower_index + 334, upper_index, 1445).tolist()) - - # create a 360-day time index based on the date range of the file - file_out = os.path.basename(files_out[i]).split('_') - start = file_out[-1].split('-')[0] - stop = file_out[-1].split('-')[1].split('.')[0] - time_index = xr.cftime_range(start, stop, freq='D', calendar='360_day', inclusive='both') - - # rename attributes and data and assign time index - data = data.rename( - {"x": "projection_x_coordinate", "y": "projection_y_coordinate", "band": "time", 'band_data': variable}) \ - .rio.write_crs('epsg:27700') - data.coords['time'] = time_index - - if not os.path.exists(os.path.dirname(files_out[i])): - folder_path = Path(os.path.dirname(files_out[i])) - folder_path.mkdir(parents=True) - - # write to an .nc file - data.to_netcdf(files_out[i]) From 3ccfb38d7d899a0e234eb4d090e7b699de5ccc1d Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 21 Sep 2023 15:48:51 +0100 Subject: [PATCH 040/146] Minor corrections to readme --- python/README.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/python/README.md b/python/README.md index 
ffaba42c..0ad0f261 100644 --- a/python/README.md +++ b/python/README.md @@ -84,11 +84,11 @@ python3 run_cmethods.py --input_data_folder --out Date: Mon, 25 Sep 2023 15:41:12 +0100 Subject: [PATCH 041/146] Fix bug in bash script --- python/debiasing/three_cities_debiasing.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/debiasing/three_cities_debiasing.sh b/python/debiasing/three_cities_debiasing.sh index a370ea97..7d045cb8 100755 --- a/python/debiasing/three_cities_debiasing.sh +++ b/python/debiasing/three_cities_debiasing.sh @@ -11,7 +11,7 @@ for run in "${runs[@]}"; do for city in "${cities[@]}"; do for method in "${methods[@]}"; do - python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 + python prepreprocess_data.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 for var in "${vars[@]}"; do python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 
20100101-20100330 @@ -20,7 +20,7 @@ for run in "${runs[@]}"; do for method in "${methods_2[@]}"; do - python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 + python prepreprocess_data.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 for var in "${vars[@]}"; do python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 From 629b57b43ce8a2cc89fa1a38233d734026ae319d Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 27 Sep 2023 10:41:01 +0100 Subject: [PATCH 042/146] feat(ci): add debiasing git submodule checkout to Dockerfile --- Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile b/Dockerfile index 8354eacc..09610747 100644 --- a/Dockerfile +++ b/Dockerfile @@ -81,5 +81,8 @@ USER ${NB_UID} # You can comment this line to keep the default environment in Terminal RUN echo "conda activate 
${env_name}" >> "${HOME}/.bashrc" +RUN cd python/debiasing && git submodule update --init --recursive + + # This will use the default launch as discussed in # https://jupyter-docker-stacks.readthedocs.io/en/latest/using/running.html From a9a8d3476a8f6fc3792dd1d33ffafe691155712d Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 27 Sep 2023 13:10:45 +0000 Subject: [PATCH 043/146] feat(ci): add for automating deploy --- bash/ubuntu_install.sh | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 bash/ubuntu_install.sh diff --git a/bash/ubuntu_install.sh b/bash/ubuntu_install.sh new file mode 100644 index 00000000..4c87ad95 --- /dev/null +++ b/bash/ubuntu_install.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Basic if statement + +CHECKOUT_PATH=$HOME/code/clim-recal +ANACONDA_INSTALL_FOLDER=$HOME/code/anaconda-install +ANACONDA_INSTALL_SCRIPT_FILE_NAME=Anaconda3-2023.07-2-Linux-x86_64.sh +ANACONDA_INSTALL_URL=https://repo.anaconda.com/archive/$ANACONDA_INSTALL_SCRIPT_FILE_NAME + +if [ "$EUID" -ne 0 ] + then echo "Please run as root" + exit +fi + +while true; do + read -p "Would you like to set the region to GB? " yn + case $yn in + [Yy]* ) echo "en_GB.UTF-8 UTF-8" > /etc/locale.gen && locale-genmake install; break;; + [Nn]* ) exit;; + * ) echo "Please answer yes or no.";; + esac +done + +apt-get update && apt-get -y install gdal-bin python3-gdal libgdal-dev build-essential wget && apt-get upgrade + +cd python/debiasing && git submodule update --init --recursive + +while true; do + read -p "Would you like to dowload Anaconda? 
" yn + case $yn in + [Yy]* ) mkdir -p $ANACONDA_INSTALL_PATH; cd $ANACONDA_INSTALL_PATH; wget $ANACONDA_INSTALL_URL; bash $ANACONDA_INSTALL_SCRIPT_FILE_NAME ; break;; + [Nn]* ) exit;; + * ) echo "Please answer yes or no.";; + esac +done From 44a0bd0a52a0b318521729b47f4fb8fbe50dffa8 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 27 Sep 2023 13:33:12 +0000 Subject: [PATCH 044/146] fix(ci): ease permission structure in --- bash/ubuntu_install.sh | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) mode change 100644 => 100755 bash/ubuntu_install.sh diff --git a/bash/ubuntu_install.sh b/bash/ubuntu_install.sh old mode 100644 new mode 100755 index 4c87ad95..3357c9a1 --- a/bash/ubuntu_install.sh +++ b/bash/ubuntu_install.sh @@ -5,24 +5,18 @@ CHECKOUT_PATH=$HOME/code/clim-recal ANACONDA_INSTALL_FOLDER=$HOME/code/anaconda-install ANACONDA_INSTALL_SCRIPT_FILE_NAME=Anaconda3-2023.07-2-Linux-x86_64.sh ANACONDA_INSTALL_URL=https://repo.anaconda.com/archive/$ANACONDA_INSTALL_SCRIPT_FILE_NAME - -if [ "$EUID" -ne 0 ] - then echo "Please run as root" - exit -fi +sudo apt-get update && sudo apt-get -y install locales gdal-bin python3-gdal libgdal-dev build-essential wget && sudo apt-get upgrade while true; do read -p "Would you like to set the region to GB? " yn case $yn in - [Yy]* ) echo "en_GB.UTF-8 UTF-8" > /etc/locale.gen && locale-genmake install; break;; + [Yy]* ) sudo echo "en_GB.UTF-8 UTF-8" > /etc/locale.gen && locale-genmake install; break;; [Nn]* ) exit;; * ) echo "Please answer yes or no.";; esac done -apt-get update && apt-get -y install gdal-bin python3-gdal libgdal-dev build-essential wget && apt-get upgrade - -cd python/debiasing && git submodule update --init --recursive +cd $CHECKOUT_PATH/python/debiasing && git submodule update --init --recursive while true; do read -p "Would you like to dowload Anaconda? 
" yn From 0ab14bbbef850fa157ed572eda629e7e3fb3b912 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 10 Aug 2023 13:38:18 +0100 Subject: [PATCH 045/146] add new structure to readme --- README.md | 70 +++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 55 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 44866c27..630976b7 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,59 @@ # Welcome to the `clim-recal` repository! -## Background -Regional climate models (RCMs) contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. +Welcome to clim-recal, a specialized resource designed to tackle systematic errors or biases in Regional Climate Models (RCMs). As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. Clim-recal provides both a **broad review** of available bias correction methods as well as **practical tutorials** and **guidance** on how to easily apply those methods to various datasets. -Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. 
+Clim-recal is an **Extensive guide to application of BC methods**: -The aim of **clim-recal** is therefore to: +- Accessible information for non quantitative researchers and lay-audience stakeholders +- Technical resource for application BC methods +- Framework for open additions +- In partnership with the MetOffice to ensure the propriety, quality, and usability of our work +- Full pipeline for bias-corrected data of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf). -* To provide non-climate scientists with an extensive guide to the application, disadvantages/advantages and use of BC methods -* To provide researchers with a collated set of resources for how to technically apply the BC methods, with a framework for open additions -* To create accessible information on bias adjustment methods for non quantitative researchers and lay-audience stakeholders -We are working in partnership with the MetOffice to ensure the propriety, quality, and usability of our work. We're focusing on the UKCP18 suite of products, with the first dataset of focus their ground-breaking local-scale (2.2km) [Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf). +## Table of Contents -### Let's collaborate! +1. [Introduction](#) +2. [Quick Start Guide](#quick-start-guide) +4. [Guidance for Non-Climate Scientists](#guidance-non-expert) +5. [Guidance for Climate Scientists](#guidance-expert) +6. [Documentation](#documentation) + - [The data](#data-download) + - [Python Pipeline](#python-pipeline) + - [R Pipeline](#r-pipeline) + - [FAQs](#faqs) +6. [Research](#research) + - [Literature Review](#review) + - [Full BC Taxonomy](#taxonomy) + - [References](#references) +7. [License](#contributors) +8. 
[Contributors](#license) -We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! -### Methods taxonomy +## Quick Start Guide -Our work-in-progress taxonomy can be viewed [here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. +- should we include a toy dataset or simulated data? +- this should also be available in form of notebook -Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the **clim-recal** repository. +## Guidance for Non-Climate Scientists + +Regional climate models (RCMs) contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. + +Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. + +## Guidance for Climate Scientists + +### How to link this with your data? + +### Let's collaborate! 
+ +We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! + +## Documentation -## Code +### Code In this repo we aim to provide examples of how to run the debiasing pipeline starting from the raw data available from the [MET office via CEDA](https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539) to the creation of debiased (bias corrected) datasets for different time periods. The pipeline has the following steps: @@ -57,7 +85,6 @@ In the `R` subdirectory you can find code for replicating the different data pro - **comparing-r-and-python** for replication of resampling and reviewing the bias correction methods applied in `python`. - **Resampling** for resampling the HADsUK datasets from 1km to 2.2km grid in `R`. - ## Data access ### How to download the data @@ -122,6 +149,15 @@ All the data used in this project can be found in the `/Volumes/vmfileshare/Clim └── infuse_ctry_2011_clipped ``` +## Research +### Literature Review + +### Methods taxonomy + +Our work-in-progress taxonomy can be viewed [here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. + +Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the **clim-recal** repository. + ## Future directions In future, we're hoping to include: @@ -134,3 +170,7 @@ In future, we're hoping to include: 1. Senatore et al., 2022, https://doi.org/10.1016/j.ejrh.2022.101120 2. 
Ayar et al., 2021, https://doi.org/10.1038/s41598-021-82715-1 + +## License + +## Contributors From 93eaf3f96e9e0c343387c7f6903e4c9966366041 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 31 Aug 2023 15:28:57 +0100 Subject: [PATCH 046/146] adding draft of current internal analysis pipeline --- docs/pipeline.md | 76 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 docs/pipeline.md diff --git a/docs/pipeline.md b/docs/pipeline.md new file mode 100644 index 00000000..47ee14c9 --- /dev/null +++ b/docs/pipeline.md @@ -0,0 +1,76 @@ +--- +title: Analysis pipeline +--- +```mermaid + +graph TD + +data_hads[(HADS)] +data_cpm[(UKCP2.2)] +data_shape_uk[(shapefile UK)] +data_shape_cities[(shapefile cities)] + +script_load([ceda_ftp_download.py]) + +data_hads_raw[RAW/HadsUKgrid/../*.nc] +data_cpm_raw[RAW/UKCP2.2/../*.nc] +data_hads --> script_load +data_cpm --> script_load +script_load --> data_hads_raw +script_load --> data_cpm_raw +data_hads_raw --> script_resampling +data_cpm_raw --> script_reproject + +data_shape_uk --> script_crop_uk +data_shape_cities --> script_crop_city + +subgraph Preprocessing + style Preprocessing fill:#f9f9f9,stroke:#333,stroke-width:4px + + script_resampling([resampling_hads.py]) + script_reproject([reproject_all.sh]) + script_preproc([preprocess_data.py]) + script_crop_uk([cropping-CPM-to-Scotland.R]) + script_crop_city([Cropping_rasters_to_three_cities.R]) + + data_hads_res[Processed/HadsUKgrid/../*.nc] + data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] + data_out1[out] + data_out2[out] + data_out3[out] + + script_resampling --> data_hads_res + script_reproject --> data_cpm_rep + + data_hads_res --> script_crop_uk + data_hads_res --> script_crop_city + + + script_crop_city --> data_out1 + data_out1 --> script_preproc + script_preproc --> data_out2 + script_crop_uk --> data_out3 + + +end + + +subgraph Debiasing + + script_bc_py[run_cmethods.py] + script_bc_r[apply_qmapQuant_to_crpd_df_fn.R] + + 
data_out2 --> script_bc_py + data_out1 --> script_bc_r + data_out3 --> script_bc_r + +end + + +classDef python fill:#4CAF50; +classDef r fill:#FF5722; + +class script_crop_city,script_crop_uk r; +class script_load,script_resampling,script_preproc python; + +``` From 793f053e24c3cd8eaf2f811168a10d56405a2bde Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 31 Aug 2023 15:45:35 +0100 Subject: [PATCH 047/146] update pipeline viz: connect cpm data --- docs/pipeline.md | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/docs/pipeline.md b/docs/pipeline.md index 47ee14c9..0c7f79ec 100644 --- a/docs/pipeline.md +++ b/docs/pipeline.md @@ -35,7 +35,9 @@ subgraph Preprocessing data_hads_res[Processed/HadsUKgrid/../*.nc] data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] - data_out1[out] + data_gl[glasgow] + data_ma[manchester] + data_lon[london] data_out2[out] data_out3[out] @@ -46,31 +48,44 @@ subgraph Preprocessing data_hads_res --> script_crop_city - script_crop_city --> data_out1 - data_out1 --> script_preproc + script_crop_city --> data_gl + script_crop_city --> data_ma + script_crop_city --> data_lon + + + data_gl --> script_preproc + data_ma --> script_preproc + data_lon --> script_preproc script_preproc --> data_out2 script_crop_uk --> data_out3 + data_cpm_rep --> script_crop_city + data_cpm_rep --> script_crop_uk end subgraph Debiasing - script_bc_py[run_cmethods.py] - script_bc_r[apply_qmapQuant_to_crpd_df_fn.R] + script_bc_py([run_cmethods.py]) + script_bc_r([apply_qmapQuant_to_crpd_df_fn.R]) data_out2 --> script_bc_py - data_out1 --> script_bc_r + data_gl --> script_bc_r + data_ma --> script_bc_r + data_lon --> script_bc_r data_out3 --> script_bc_r + + end classDef python fill:#4CAF50; classDef r fill:#FF5722; +classDef bash fill:#f9f -class script_crop_city,script_crop_uk r; -class script_load,script_resampling,script_preproc python; - +class script_crop_city,script_crop_uk,script_bc_r r; +class 
script_load,script_resampling,script_preproc,script_bc_py python; +class script_reproject bash; ``` From 4aa4481d5b843e0f86f944aaeafcccbbd4996fcf Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Mon, 4 Sep 2023 22:09:09 +0100 Subject: [PATCH 048/146] ENH: pipeline viz; add wrapper script with parameters, add output path from python preprocess --- docs/pipeline.md | 94 ++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 75 insertions(+), 19 deletions(-) diff --git a/docs/pipeline.md b/docs/pipeline.md index 0c7f79ec..e23ae5e6 100644 --- a/docs/pipeline.md +++ b/docs/pipeline.md @@ -3,7 +3,16 @@ title: Analysis pipeline --- ```mermaid -graph TD +graph TB + +subgraph Legend + data_external[(external data)] + data_fileshare[path to fileshare] + script_r([R script]) + script_py([Python script]) + script_bash([Bash script]) + var[parameter]:::var +end data_hads[(HADS)] data_cpm[(UKCP2.2)] @@ -25,20 +34,26 @@ data_shape_uk --> script_crop_uk data_shape_cities --> script_crop_city subgraph Preprocessing - style Preprocessing fill:#f9f9f9,stroke:#333,stroke-width:4px script_resampling([resampling_hads.py]) script_reproject([reproject_all.sh]) script_preproc([preprocess_data.py]) script_crop_uk([cropping-CPM-to-Scotland.R]) - script_crop_city([Cropping_rasters_to_three_cities.R]) + script_crop_city([Cropping_Rasters_to_three_cities.R]) data_hads_res[Processed/HadsUKgrid/../*.nc] data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] - data_gl[glasgow] - data_ma[manchester] - data_lon[london] - data_out2[out] + + data_cropped[Cropped/three.cities/..] + data_gl[../glasgow] + data_ma[../manchester] + data_lon[../london] + data_outdir[Cropped/three.cities/preprocessed/..] + data_out_train[../simh..] + data_out_validate[../simp..] + data_out_groundtruth_h[../obsh..] + data_out_groundtruth_p[../obsp..] 
+ data_out3[out] data_out3[out] script_resampling --> data_hads_res @@ -47,45 +62,86 @@ subgraph Preprocessing data_hads_res --> script_crop_uk data_hads_res --> script_crop_city - - script_crop_city --> data_gl - script_crop_city --> data_ma - script_crop_city --> data_lon + script_crop_city --> data_cropped + script_crop_city --> data_shapefile_cities[shapefiles/three.cities] + data_cropped --> data_gl + data_cropped --> data_ma + data_cropped --> data_lon data_gl --> script_preproc data_ma --> script_preproc data_lon --> script_preproc - script_preproc --> data_out2 + script_preproc --> data_outdir + data_outdir --> data_out_train + data_outdir --> data_out_validate + data_outdir --> data_out_groundtruth script_crop_uk --> data_out3 data_cpm_rep --> script_crop_city data_cpm_rep --> script_crop_uk + + subgraph innerSubgraph[Execute Python Debiasing] + script_BC_wrapper[three_cities_debiasing.sh] + param1["metric (eg tasmax)"]:::var + param2["runs (eg 05)"]:::var + param3["BC method (eg quantile_mapping)"]:::var + param4[city]:::var + + script_BC_wrapper --> param1 + param1 --> param2 + param2 --> param3 + param3 --> param4 + param4 -- for loop --> script_preproc + + %% Looping connections + param4 -.-> param3 + param3 -.-> param2 + param2 -.-> param1 + end + +end + +subgraph assessment + script_asses[tbc] + data_out_groundtruth_p --> script_asses end + subgraph Debiasing + param4 -- for loop --> script_bc_py + script_bc_py([run_cmethods.py]) + data_out[Debiased/three.cities.cropped] + data_out --> script_asses script_bc_r([apply_qmapQuant_to_crpd_df_fn.R]) - data_out2 --> script_bc_py + data_out_train --> script_bc_py + data_out_train --> script_bc_py + data_out_groundtruth --> script_bc_py + + script_bc_py-->data_out + + data_gl --> script_bc_r data_ma --> script_bc_r data_lon --> script_bc_r data_out3 --> script_bc_r - - - + end classDef python fill:#4CAF50; classDef r fill:#FF5722; classDef bash fill:#f9f +classDef var fill:none,stroke:#0f0; +classDef dashed 
stroke-dasharray: 5 5; -class script_crop_city,script_crop_uk,script_bc_r r; -class script_load,script_resampling,script_preproc,script_bc_py python; -class script_reproject bash; +class script_crop_city,script_crop_uk,script_bc_r,script_r r; +class script_load,script_resampling,script_preproc,script_bc_py,script_py python; +class script_reproject,script_BC_wrapper,script_bash bash; +class innerSubgraph dashed; ``` From 87704a18fecbb423b77856529c239d6b3d7ec9fb Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 7 Sep 2023 16:00:22 +0100 Subject: [PATCH 049/146] Update pipeline flowchart with full LCAT pipeline --- docs/pipeline.md | 150 +++++++++++++++++++++++++++++++---------------- 1 file changed, 98 insertions(+), 52 deletions(-) diff --git a/docs/pipeline.md b/docs/pipeline.md index e23ae5e6..4d58503c 100644 --- a/docs/pipeline.md +++ b/docs/pipeline.md @@ -16,8 +16,9 @@ end data_hads[(HADS)] data_cpm[(UKCP2.2)] -data_shape_uk[(shapefile UK)] -data_shape_cities[(shapefile cities)] +data_shape_uk[("shapefile UK regions (incl London)")] +data_shape_gl[(shapefile Glasgow)] +data_shape_ma[(shapefile Manchester)] script_load([ceda_ftp_download.py]) @@ -27,61 +28,58 @@ data_hads --> script_load data_cpm --> script_load script_load --> data_hads_raw script_load --> data_cpm_raw -data_hads_raw --> script_resampling -data_cpm_raw --> script_reproject -data_shape_uk --> script_crop_uk -data_shape_cities --> script_crop_city -subgraph Preprocessing +subgraph Preprocessing 1 script_resampling([resampling_hads.py]) script_reproject([reproject_all.sh]) - script_preproc([preprocess_data.py]) - script_crop_uk([cropping-CPM-to-Scotland.R]) - script_crop_city([Cropping_Rasters_to_three_cities.R]) data_hads_res[Processed/HadsUKgrid/../*.nc] data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] + + script_resampling --> data_hads_res + script_reproject --> data_cpm_rep +end + +subgraph Cropping + + script_crop_city([Cropping_Rasters_to_three_cities.R]) + 
data_cropped[Cropped/three.cities/..] data_gl[../glasgow] data_ma[../manchester] data_lon[../london] - data_outdir[Cropped/three.cities/preprocessed/..] - data_out_train[../simh..] - data_out_validate[../simp..] - data_out_groundtruth_h[../obsh..] - data_out_groundtruth_p[../obsp..] - data_out3[out] - data_out3[out] - - script_resampling --> data_hads_res - script_reproject --> data_cpm_rep + data_shapefile_cities[shapefiles/three.cities] - data_hads_res --> script_crop_uk - data_hads_res --> script_crop_city - script_crop_city --> data_cropped - script_crop_city --> data_shapefile_cities[shapefiles/three.cities] + script_crop_city --> data_shapefile_cities data_cropped --> data_gl data_cropped --> data_ma data_cropped --> data_lon + +end +subgraph Preprocessing 2 + data_outdir[Cropped/three.cities/preprocessed/..] - data_gl --> script_preproc - data_ma --> script_preproc - data_lon --> script_preproc + script_preproc([preprocess_data.py]) + + data_out_train[../simh..] + data_out_validate[../simp..] + data_out_groundtruth_h[../obsh..] + data_out_groundtruth_p[../obsp..] + script_preproc --> data_outdir + data_outdir --> data_out_train data_outdir --> data_out_validate data_outdir --> data_out_groundtruth - script_crop_uk --> data_out3 - - data_cpm_rep --> script_crop_city - data_cpm_rep --> script_crop_uk - - subgraph innerSubgraph[Execute Python Debiasing] + + + subgraph inner_py[Execute Python pipeline] + script_BC_wrapper[three_cities_debiasing.sh] param1["metric (eg tasmax)"]:::var param2["runs (eg 05)"]:::var @@ -100,48 +98,96 @@ subgraph Preprocessing param2 -.-> param1 end + subgraph inner_r[Execute R pipeline] + + script_crop_uk([Data_processing_todf.R]) + script_df_uk([Processing.data.for.LCAT.R]) + param1_r["metric (eg tasmax)"]:::var + param2_r["runs (eg 05)"]:::var + param3_r["segment"]:::var + + fn_bc([apply_qmapQuant_to_crpd_df_fn.R]) + data_interim_hads[Interim/HadsUK/Data_as_df/...] + data_interim_cpm[Interim/CPM/Data_as_df/...] 
+ + script_crop_uk -- cpm_read_crop_df_write --> data_interim_cpm + script_crop_uk -- hads19802010_read_crop_df_write --> data_interim_hads + data_interim_cpm --> script_df_uk + data_interim_hads --> script_df_uk + + script_df_uk-->fn_bc + + + script_df_uk--> param1_r + param1_r --> param2_r + param2_r --> param3_r + param2_r -- apply_bias_correction_to_cropped_df --> fn_bc + param3_r -- cropdf_further_apply_bc_to_cropped_df --> fn_bc + + end end - subgraph assessment script_asses[tbc] data_out_groundtruth_p --> script_asses end - - subgraph Debiasing - param4 -- for loop --> script_bc_py + script_bc_py([run_cmethods.py]) + script_bc_r[[fitQmapQUANT.R]] - script_bc_py([run_cmethods.py]) - data_out[Debiased/three.cities.cropped] - data_out --> script_asses - script_bc_r([apply_qmapQuant_to_crpd_df_fn.R]) + data_out_py[Debiased/three.cities.cropped] + data_out_r[Debiased/R/QuantileMapping/resultsL*] + data_out_py --> script_asses + data_out_r --> script_asses data_out_train --> script_bc_py data_out_train --> script_bc_py data_out_groundtruth --> script_bc_py - script_bc_py-->data_out - - - data_gl --> script_bc_r - data_ma --> script_bc_r - data_lon --> script_bc_r - data_out3 --> script_bc_r - + script_bc_py-->data_out_py + script_bc_r-->data_out_r end - +%% between block connections +%% input preproc 1 +data_hads_raw --> script_resampling +data_cpm_raw --> script_reproject +%% input cropping +data_cpm_rep --> script_crop_city + +data_hads_res --> script_crop_uk +data_hads_res --> script_crop_city +data_shape_uk --> script_crop_city +data_shape_ma --> script_crop_city +data_shape_gl --> script_crop_city + +%% input preproc2 +data_cpm_rep --> script_crop_uk +data_shape_uk --> script_crop_uk +data_gl --> script_preproc +data_ma --> script_preproc +data_lon --> script_preproc +%% input debiasing +fn_bc --> script_bc_r +data_gl --> script_bc_r +data_ma --> script_bc_r +data_lon --> script_bc_r +param4 -- for loop --> script_bc_py + + +%% class styles classDef python 
fill:#4CAF50; classDef r fill:#FF5722; classDef bash fill:#f9f classDef var fill:none,stroke:#0f0; classDef dashed stroke-dasharray: 5 5; -class script_crop_city,script_crop_uk,script_bc_r,script_r r; +class script_crop_city,script_crop_uk,script_bc_r,script_r,script_df_uk,function_bc,function_crop_bc,fn_crop_cpm,fn_crop_hads,fn_bc r; class script_load,script_resampling,script_preproc,script_bc_py,script_py python; class script_reproject,script_BC_wrapper,script_bash bash; -class innerSubgraph dashed; +class inner_py dashed; +class inner_r dashed; ``` + From 4bf2fcdc0e55826e76b92c4b4acbd806e53ba8b4 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 13 Sep 2023 16:37:58 +0100 Subject: [PATCH 050/146] update flow diagram the new flow diagram specifies a vision for the code base that involves a core (with each function documented) as well as adjacent code deliverables (eg operationalisation of core for MO data, jupyter notebook for guidance) --- docs/pipeline.md | 235 ++++++++++++++++++++++------------------------- 1 file changed, 111 insertions(+), 124 deletions(-) diff --git a/docs/pipeline.md b/docs/pipeline.md index 4d58503c..285ac4c5 100644 --- a/docs/pipeline.md +++ b/docs/pipeline.md @@ -6,6 +6,7 @@ title: Analysis pipeline graph TB subgraph Legend + direction RL data_external[(external data)] data_fileshare[path to fileshare] script_r([R script]) @@ -14,140 +15,134 @@ subgraph Legend var[parameter]:::var end -data_hads[(HADS)] -data_cpm[(UKCP2.2)] -data_shape_uk[("shapefile UK regions (incl London)")] -data_shape_gl[(shapefile Glasgow)] -data_shape_ma[(shapefile Manchester)] - -script_load([ceda_ftp_download.py]) +%%% INPUT DATA +subgraph CEDA + data_hads[(HADS)] + data_cpm[(UKCP2.2)] + data_hads --> script_load + data_cpm --> script_load + data_hads --> script_load +end -data_hads_raw[RAW/HadsUKgrid/../*.nc] -data_cpm_raw[RAW/UKCP2.2/../*.nc] -data_hads --> script_load -data_cpm --> script_load -script_load --> data_hads_raw -script_load --> data_cpm_raw 
+subgraph Core pipeline + subgraph Data Ingress + %%% Loading data to disk + script_load([ceda_ftp_download.py]) + data_hads_raw[RAW/HadsUKgrid/../*.nc] + data_cpm_raw[RAW/UKCP2.2/../*.nc] + script_load --> data_hads_raw + script_load --> data_cpm_raw + end + subgraph Preprocessing + %% resampling & reprojecting + script_resampling([resampling_hads.py]) + script_reproject([reproject_all.sh]) + data_hads_res[Processed/HadsUKgrid/../*.nc] + data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] -subgraph Preprocessing 1 - - script_resampling([resampling_hads.py]) - script_reproject([reproject_all.sh]) - - data_hads_res[Processed/HadsUKgrid/../*.nc] - data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] + script_resampling --> data_hads_res + script_reproject --> data_cpm_rep + + %% cropping + script_crop_city([Cropping_Rasters_to_three_cities.R]) - - script_resampling --> data_hads_res - script_reproject --> data_cpm_rep -end + data_cropped_cpm[Cropped/cpm/..] + data_cropped_hads[Cropped/hads/..] + script_crop_city --> data_cropped_cpm + script_crop_city --> data_cropped_hads -subgraph Cropping - script_crop_city([Cropping_Rasters_to_three_cities.R]) - - data_cropped[Cropped/three.cities/..] - data_gl[../glasgow] - data_ma[../manchester] - data_lon[../london] - data_shapefile_cities[shapefiles/three.cities] - - script_crop_city --> data_cropped - script_crop_city --> data_shapefile_cities - data_cropped --> data_gl - data_cropped --> data_ma - data_cropped --> data_lon - -end + end -subgraph Preprocessing 2 - data_outdir[Cropped/three.cities/preprocessed/..] + subgraph Data Splitting + data_outdir[Cropped/preprocessed/..] - script_preproc([preprocess_data.py]) - - data_out_train[../simh..] - data_out_validate[../simp..] - data_out_groundtruth_h[../obsh..] - data_out_groundtruth_p[../obsp..] 
- - script_preproc --> data_outdir + script_preproc([preprocess_data.py]) - data_outdir --> data_out_train - data_outdir --> data_out_validate - data_outdir --> data_out_groundtruth + data_out_train[../simh..] + data_out_calibrate[../simp..] + data_out_groundtruth_h[../obsh..] + data_out_groundtruth_p[../obsp..] + script_preproc --> data_outdir - subgraph inner_py[Execute Python pipeline] - - script_BC_wrapper[three_cities_debiasing.sh] - param1["metric (eg tasmax)"]:::var - param2["runs (eg 05)"]:::var - param3["BC method (eg quantile_mapping)"]:::var - param4[city]:::var - - script_BC_wrapper --> param1 - param1 --> param2 - param2 --> param3 - param3 --> param4 - param4 -- for loop --> script_preproc - - %% Looping connections - param4 -.-> param3 - param3 -.-> param2 - param2 -.-> param1 + data_outdir --> data_out_train + data_outdir --> data_out_calibrate + data_outdir --> data_out_groundtruth_h + data_outdir --> data_out_groundtruth_p end - subgraph inner_r[Execute R pipeline] - - script_crop_uk([Data_processing_todf.R]) - script_df_uk([Processing.data.for.LCAT.R]) - param1_r["metric (eg tasmax)"]:::var - param2_r["runs (eg 05)"]:::var - param3_r["segment"]:::var - - fn_bc([apply_qmapQuant_to_crpd_df_fn.R]) - data_interim_hads[Interim/HadsUK/Data_as_df/...] - data_interim_cpm[Interim/CPM/Data_as_df/...] 
- - script_crop_uk -- cpm_read_crop_df_write --> data_interim_cpm - script_crop_uk -- hads19802010_read_crop_df_write --> data_interim_hads - data_interim_cpm --> script_df_uk - data_interim_hads --> script_df_uk + subgraph bc[Bias Correction] + script_bc_py([run_cmethods.py]) + script_bc_r([run_cmethods.R]) + function_bc_r[[fitQmapQUANT.R]] - script_df_uk-->fn_bc - - - script_df_uk--> param1_r - param1_r --> param2_r - param2_r --> param3_r - param2_r -- apply_bias_correction_to_cropped_df --> fn_bc - param3_r -- cropdf_further_apply_bc_to_cropped_df --> fn_bc - - end -end -subgraph assessment - script_asses[tbc] - data_out_groundtruth_p --> script_asses -end + data_out_py[Debiased/...] + data_out_r[Debiased/R/QuantileMapping/resultsL*] -subgraph Debiasing - script_bc_py([run_cmethods.py]) - script_bc_r[[fitQmapQUANT.R]] + data_out_train --> script_bc_py + data_out_calibrate --> script_bc_py + data_out_groundtruth_h --> script_bc_py + data_out_train --> script_bc_r + data_out_calibrate --> script_bc_r + data_out_groundtruth_h --> script_bc_r + script_bc_r --> function_bc_r + script_bc_py-->data_out_py + function_bc_r-->data_out_r + end - data_out_py[Debiased/three.cities.cropped] - data_out_r[Debiased/R/QuantileMapping/resultsL*] + subgraph Assessment + script_asses[tbc] + data_out_groundtruth_p --> script_asses + end data_out_py --> script_asses data_out_r --> script_asses +end + + +subgraph nner_py[Execute Python pipeline for MO dataset] + data_shape_uk[(shape London)] + data_shape_gl[(shape Glasgow)] + data_shape_ma[(shape Manchester)] - data_out_train --> script_bc_py - data_out_train --> script_bc_py - data_out_groundtruth --> script_bc_py - script_bc_py-->data_out_py - script_bc_r-->data_out_r + script_BC_wrapper[three_cities_debiasing.sh] + param1["metric (eg tasmax)"]:::var + param2["runs (eg 05)"]:::var + param3["BC method (eg quantile_mapping)"]:::var + param4[city]:::var + + script_BC_wrapper --> param1 + param1 --> param2 + param2 --> param3 + param3 
--> param4 + param4 -- for loop --> script_preproc + + %% Looping connections + param4 -.-> param3 + param3 -.-> param2 + param2 -.-> param1 +end + +subgraph nner_jupyter[Jupyter Notebook for Guidance] + direction BT + data_shape_gl2[(shape Glasgow)] + data_cpm2[(UKCP2.2_Monthly)] + + param5["tasmax"]:::var + param6["quantile_mapping"]:::var + param7[Glasgow]:::var + + script_BC_wrapper --> param1 + param5 --> script_preproc + param6 --> script_preproc + param7 --> script_preproc + + data_cpm2 --> script_load + data_shape_gl2 --> script_crop_city end %% between block connections @@ -157,23 +152,15 @@ data_cpm_raw --> script_reproject %% input cropping data_cpm_rep --> script_crop_city -data_hads_res --> script_crop_uk data_hads_res --> script_crop_city data_shape_uk --> script_crop_city data_shape_ma --> script_crop_city data_shape_gl --> script_crop_city %% input preproc2 -data_cpm_rep --> script_crop_uk -data_shape_uk --> script_crop_uk -data_gl --> script_preproc -data_ma --> script_preproc -data_lon --> script_preproc -%% input debiasing -fn_bc --> script_bc_r -data_gl --> script_bc_r -data_ma --> script_bc_r -data_lon --> script_bc_r +data_cropped_cpm --> script_preproc +data_cropped_hads --> script_preproc + param4 -- for loop --> script_bc_py @@ -184,7 +171,7 @@ classDef bash fill:#f9f classDef var fill:none,stroke:#0f0; classDef dashed stroke-dasharray: 5 5; -class script_crop_city,script_crop_uk,script_bc_r,script_r,script_df_uk,function_bc,function_crop_bc,fn_crop_cpm,fn_crop_hads,fn_bc r; +class script_crop_city,script_crop_uk,function_bc_r,script_r,script_df_uk,function_bc,function_crop_bc,fn_crop_cpm,fn_crop_hads,fn_bc,script_bc_r r; class script_load,script_resampling,script_preproc,script_bc_py,script_py python; class script_reproject,script_BC_wrapper,script_bash bash; class inner_py dashed; From 1eec2e8fd480bc0918d4b088068dec1ee3bb87eb Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 21 Sep 2023 15:44:18 +0100 Subject: [PATCH 051/146] setting 
up sphinx code documentation --- docs/doc_make/Makefile | 20 ++++++++++ docs/doc_make/ceda_ftp_download.rst | 5 +++ docs/doc_make/conf.py | 61 +++++++++++++++++++++++++++++ docs/doc_make/index.rst | 24 ++++++++++++ docs/doc_make/make.bat | 35 +++++++++++++++++ docs/doc_make/resampling_hads.rst | 5 +++ python/data_download/__init__.py | 0 python/debiasing/__init__.py | 0 python/load_data/__init__.py | 0 python/resampling/__init__.py | 0 10 files changed, 150 insertions(+) create mode 100644 docs/doc_make/Makefile create mode 100644 docs/doc_make/ceda_ftp_download.rst create mode 100644 docs/doc_make/conf.py create mode 100644 docs/doc_make/index.rst create mode 100644 docs/doc_make/make.bat create mode 100644 docs/doc_make/resampling_hads.rst create mode 100644 python/data_download/__init__.py create mode 100644 python/debiasing/__init__.py create mode 100644 python/load_data/__init__.py create mode 100644 python/resampling/__init__.py diff --git a/docs/doc_make/Makefile b/docs/doc_make/Makefile new file mode 100644 index 00000000..d4bb2cbb --- /dev/null +++ b/docs/doc_make/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/doc_make/ceda_ftp_download.rst b/docs/doc_make/ceda_ftp_download.rst new file mode 100644 index 00000000..229b22a7 --- /dev/null +++ b/docs/doc_make/ceda_ftp_download.rst @@ -0,0 +1,5 @@ +ceda_ftp_download +================= + +.. automodule:: data_download.ceda_ftp_download + :members: diff --git a/docs/doc_make/conf.py b/docs/doc_make/conf.py new file mode 100644 index 00000000..5f5721f0 --- /dev/null +++ b/docs/doc_make/conf.py @@ -0,0 +1,61 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +import sphinx_rtd_theme + +sys.path.insert(0, os.path.abspath('../../python/')) +sys.path.insert(0, os.path.abspath('../../R/')) + +# -- Project information ----------------------------------------------------- + +project = 'clim-recal' +copyright = '2023, FIX:ADD AUTHORS' +author = 'FIX:ADD AUTHORS' + +# The full version, including alpha/beta/rc tags +release = '0.1.0' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ +] + +# Add any paths that contain templates here, relative to this directory. 
+templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +extensions = ['sphinx.ext.autodoc'] \ No newline at end of file diff --git a/docs/doc_make/index.rst b/docs/doc_make/index.rst new file mode 100644 index 00000000..e5203e36 --- /dev/null +++ b/docs/doc_make/index.rst @@ -0,0 +1,24 @@ +.. clim-recal documentation master file, created by + sphinx-quickstart on Thu Sep 21 12:44:53 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to clim-recal's documentation! +====================================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + ceda_ftp_download + + resampling_hads + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/doc_make/make.bat b/docs/doc_make/make.bat new file mode 100644 index 00000000..32bb2452 --- /dev/null +++ b/docs/doc_make/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. 
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/doc_make/resampling_hads.rst b/docs/doc_make/resampling_hads.rst new file mode 100644 index 00000000..4a104c68 --- /dev/null +++ b/docs/doc_make/resampling_hads.rst @@ -0,0 +1,5 @@ +resampling_hads +=============== + +.. automodule:: resampling.resampling_hads + :members: diff --git a/python/data_download/__init__.py b/python/data_download/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/debiasing/__init__.py b/python/debiasing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/load_data/__init__.py b/python/load_data/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/resampling/__init__.py b/python/resampling/__init__.py new file mode 100644 index 00000000..e69de29b From b02bcc6c211654b367928f9c2b17d321e815d5e5 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 27 Sep 2023 15:55:30 +0100 Subject: [PATCH 052/146] restructure guidance with new pipeline walkthrough Pull all info from sections Code and separate python README into main readme. 
--- README.md | 218 ++++++++++++++++++++++---------------- internal_docs/INTERNAL.md | 49 +++++++++ 2 files changed, 178 insertions(+), 89 deletions(-) create mode 100644 internal_docs/INTERNAL.md diff --git a/README.md b/README.md index 630976b7..573659fa 100644 --- a/README.md +++ b/README.md @@ -15,156 +15,196 @@ Clim-recal is an **Extensive guide to application of BC methods**: ## Table of Contents 1. [Introduction](#) -2. [Quick Start Guide](#quick-start-guide) -4. [Guidance for Non-Climate Scientists](#guidance-non-expert) -5. [Guidance for Climate Scientists](#guidance-expert) +2. [Bias Correction Pipeline](#bias-correction-pipeline) +4. [Guidance for Non-Climate Scientists](#guidance-for-non-climate-scientists) +5. [Guidance for Climate Scientists](#guidance-for-non-climate-scientists) 6. [Documentation](#documentation) - - [The data](#data-download) - - [Python Pipeline](#python-pipeline) - - [R Pipeline](#r-pipeline) - - [FAQs](#faqs) -6. [Research](#research) +7. [Research](#research) - [Literature Review](#review) - [Full BC Taxonomy](#taxonomy) - [References](#references) 7. [License](#contributors) 8. [Contributors](#license) +## Bias Correction Pipeline +### Overview +Here we provide an example of how to run a debiasing pipeline starting. The pipeline has the following steps: -## Quick Start Guide +1. Reproject the [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control and scenario data to the same coordinate system as the [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data (British National Grid). +2. Resample the HADs data from 1km to 2.2km grid to match the UKCP reprojected grid. +3. Run debiasing method on the control and observational data and project it into the scenario dataset. -- should we include a toy dataset or simulated data? 
-- this should also be available in form of notebook +After each of these steps the reprojected, resampled and debiased scenario datasets are produced and saved in an Azure fileshare storage (more details about this bellow). +> 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). -## Guidance for Non-Climate Scientists +### Prerequisites -Regional climate models (RCMs) contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. +#### Setting up your environment +the environment used in this [environment setup file](setup-instructions.md). -Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. -## Guidance for Climate Scientists +#### Downloading the data +Our pipeline is optimized to work with the raw data from the [MET office via CEDA](https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539). -### How to link this with your data? +You can download the raw UKCP2.2 climate data from the CEDA archive. Go [here](https://archive.ceda.ac.uk/), create an account and set up your FTP credentials in "My Account". You can then use our custom script [ceda_ftp_download.py](python/data_download/) to download the data: -### Let's collaborate! 
+``` +# cpm data +python3 ceda_ftp_download.py --input /badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/ --output 'output_dir' --username 'uuu' --psw 'ppp' --change_hierarchy -We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! +# hads data +python3 ceda_ftp_download.py --input /badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km --output output_dir --username 'uuu' --psw 'ppp' +``` +You need to replace `uuu` and `ppp` with your CEDA username and FTP password respectively and replace 'output_dir' with the directory you want to write the data to. -## Documentation +The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. This flag only applies to the UKCP data and should not be used with HADs data. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes to the hierarchy. -### Code -In this repo we aim to provide examples of how to run the debiasing pipeline starting from the raw data available from the [MET office via CEDA](https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539) to the creation of debiased (bias corrected) datasets for different time periods. The pipeline has the following steps: +### Preparing the data +In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. -1. 
Reproject the [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control and scenario data to the same coordinate system as the [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data (British National Grid). -2. Resample the HADs data from 1km to 2.2km grid to match the UKCP reprojected grid. -3. Run debiasing method on the control and observational data and project it into the scenario dataset. +reproject the UKCP datasets to the British National Grid coordinate system. +**Resampling** for the HADsUK datasets from 1km to a 2.2 km grid to match the UKCP re-projected grid. +**Data loaders** functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. -After each of these steps the reprojected, resampled and debiased scenario datasets are produced and saved in an Azure fileshare storage (more details about this bellow). +### Preparing the bias correction and assessment +### Applying the bias correction + - **Debiasing scripts** that interface with implementations of the debiasing (bias correction) methods implemented by different libraries (by March 2023 we have only implemented the python-cmethods library). -### Bash + The code in the [debiasing](debiasing) directory contains scripts that interface with implementations of the debiasing methods +implemented by different libraries. -Here you find scripts to reproject the UKCP datasets to the British National Grid coordinate system. +Note: By March 2023 we have only implemented the [python-cmethods](https://github.com/alan-turing-institute/python-cmethods) library. -### Python -In the `python` subdirectory you can find code for the different data download, processing and debiasing steps: - - **Data download** for a script to download data from the CEDA archive. - - **Resampling** for the HADsUK datasets from 1km to a 2.2 km grid to match the UKCP re-projected grid. 
- - **Data loaders** functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. - - **Debiasing scripts** that interface with implementations of the debiasing (bias correction) methods implemented by different libraries (by March 2023 we have only implemented the python-cmethods library). - -More details in how to use this code can be found in [the python README file](python/README.md) and the environment used in this [environment setup file](setup-instructions.md). +### The cmethods library + +This repository contains a python script used to run debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) module written by Benjamin Thomas Schwertfeger's , which has +been modified to function with the dataset used in the clim-recal project. This library has been included as a +submodule to this project, so you must run the following command to pull the submodules required. -### R +``` +cd debiasing +git submodule update --init --recursive +``` -In the `R` subdirectory you can find code for replicating the different data processing and debiasing steps as above, along with comparisons of methods between the two languages. -- **bias-correction-methods** for bias correction (debiasing) methods available specifically in `R` libraries -- **comparing-r-and-python** for replication of resampling and reviewing the bias correction methods applied in `python`. -- **Resampling** for resampling the HADsUK datasets from 1km to 2.2km grid in `R`. +The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. +It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), +and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. 
+The script will also produce a time-series and a map plot of the debiased data.
 
-## Data access
+**Usage**:
 
-### How to download the data
+The script can be run from the command line using the following arguments:
 
-You can download the raw UKCP2.2 climate data from the CEDA archive. Go [here](https://archive.ceda.ac.uk/), create an account and set up your FTP credentials in "My Account". You can then use the python script under `python/data_download/` to download the data:
 ```
-python3 ceda_ftp_download.py --input /badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/ --output 'output_dir' --username 'uuu' --psw 'ppp' --change_hierarchy
+python3 run_cmethods.py --obs --contr --scen --shp
+--out -m -v -u -g -k -n -p
 ```
-You need to replace `uuu` and `ppp` with your CEDA username and FTP password respectively and replace 'output_dir' with the directory you want to write the data to.
-Note that the `--change_hierarchy` flag is used, which modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes in the hierarchy. 
+where:
+
+- `--obs` specifies the path to the observation datasets
+- `--contr` specifies the path to the control datasets
+- `--scen` specifies the path to the scenario datasets (data to adjust)
+- `--shp` specifies the path to a shapefile, in case we want to select a smaller region (default: None)
+- `--out` specifies the path to save the output files (default: current directory)
+- `--method` specifies the correction method to use (default: quantile_delta_mapping)
+- `-v` specifies the variable to adjust (default: tas)
+- `-u` specifies the unit of the variable (default: °C)
+- `-g` specifies the value grouping (default: time)
+- `-k` specifies the method kind (+ or *, default: +)
+- `-n` specifies the number of quantiles to use (default: 1000)
+- `-p` specifies the number of processes to use for multiprocessing (default: 1)
+
+For more details on the script and options you can run:
 
-You can download the HADs observational data from the CEDA archive using the same python script, with a different input (note the `change_hierarchy` flag should not be used with HADs data - only applies to UKCP data):
 ```
-python3 ceda_ftp_download.py --input /badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km --output output_dir --username 'uuu' --psw 'ppp'
+python run_cmethods.py --help
 ```
+**Main Functionality**:
 
-### Accessing the pre-downloaded/pre-processed data
+The script applies corrections extracted from historical observed and simulated data between `1980-12-01` and `1999-11-30`.
+Corrections are applied to future scenario data between `2020` and `2080` (however there is no available scenario data between `2040` and `2060`, so this time
+period is skipped).
 
-Datasets used in this project (raw, processed and debiased) have been pre-downloaded/pre-processed and stored in an Azure fileshare set-up for the clim-recal project (https://dymestorage1.file.core.windows.net/vmfileshare). 
You need to be given access, and register your IP address to the approved list in the following way from the azure portal: -- Go to dymestorage1 page `Home > Storage accounts > dymestorage1` -- Navigate to *Networking* tab under Security + networking -- Add your IP under the Firewall section +The script performs the following steps: -Once you have access you can mount the fileshare. On a Mac you can do it from a terminal: +- Parses the input arguments. +- Loads, merges and clips (if shapefile is provided) the all input datasets and merges them into two distinct datasets: the observation and control datasets. +- Aligns the calendars of the historical simulation data and observed data, ensuring that they have the same time dimension +and checks that the observed and simulated historical data have the same dimensions. +- Loops over the future time periods specified in the `future_time_periods` variable and performs the following steps: + - Loads the scenario data for the current time period. + - Applies the specified correction method to the scenario data. + - Saves the resulting output to the specified directory. + - Creates diagnotic figues of the output dataset (time series and time dependent maps) and saves it into the specified directory. -`open smb://dymestorage1.file.core.windows.net/vmfileshare` +In this script +datasets are debiased in periods of 10 years, in a consecutive loop, for each time period it will produce an `.nc` output file +with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. -username is `dymestorage1` and the password can be found in the access keys as described in [here](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys). +**Working example**. 
-The fileshare will be mounted under +Example of code working on the **clim-recal** dataset: +``` +python run_cmethods.py --scen /Volumes/vmfileshare/ClimateData/Reprojected/UKCP2.2/tasmax/01/latest --contr /Volumes/vmfileshare/ClimateData/Reprojected/UKCP2.2/tasmax/01/latest/ --obs /Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day/ --shape ../../data/Scotland/Scotland.bbox.shp -v tasmax --method delta_method --group time.month -p 5 +``` + +### Assessing the corrected data -`/Volumes/vmfileshare/` +## Guidance for Non-Climate Scientists -Instructions on how the mount in other operating systems can be found in [the azure how-tos](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-linux?tabs=smb311). +Regional climate models (RCMs) contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. -Alternatively, you can access the Azure Portal, go to the dymestorage1 fileshare and click the "Connect" button to get an automatically generated script. This script can be used from within an Azure VM to mount the drive. +Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. -### Pre-downloaded/pre-processed data description +## Guidance for Climate Scientists -All the data used in this project can be found in the `/Volumes/vmfileshare/ClimateData/` directory. +### Let's collaborate! -``` -. -├── Debiased # Directory where debiased datasets are stored. 
-│   └── tasmax -├── Processed # Directory where processed climate datasets are stored. -│   ├── CHESS-SCAPE -│   ├── HadsUKgrid # Resampled HADs grid. -│   └── UKCP2.2_Reproj # Old reprojections (to delete). -├── Raw # Raw climate data -│   ├── CHESS-SCAPE -│   ├── HadsUKgrid -│   ├── UKCP2.2 -│   └── ceda_fpt_download.py # script to download data from CEDA database. -├── Reprojected # Directory where reprojected UKCP datasets are stored. -│   └── UKCP2.2 -├── Reprojected_infill # Directory where reprojected UKCP datasets are stored, including the newest infill UKCP2.2 data published in May 2023. -└── shapefiles - ├── Middle_Layer_Super_Output_Areas_(December_2011)_Boundaries - └── infuse_ctry_2011_clipped -``` +We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! ## Research -### Literature Review - ### Methods taxonomy Our work-in-progress taxonomy can be viewed [here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the **clim-recal** repository. -## Future directions +## 🚧 Future plans + +- **More BC Methods**: Further bias correction of UKCP18 products. 
*This is planned for a future release and is not available yet.* +- **Pipeline for adding new methods**: *This is planned for a future release and is not available yet.* +- **Code Documentation**: We are in the process of developing comprehensive documentation for our codebase to supplement the guidance provided in this document. In the interim, for Python scripts, you can leverage the inline documentation (docstrings) available within the code. To access a summary of the available options and usage information for any Python script, you can use the `--help` flag in the command line as follows: + + ```sh + python .py --help + ``` + For example: + ```sh + python resampling_hads.py --help + + usage: resampling_hads.py [-h] --input INPUT [--output OUTPUT] [--grid_data GRID_DATA] + + options: + -h, --help show this help message and exit + --input INPUT Path where the .nc files to resample is located + --output OUTPUT Path to save the resampled data data + --grid_data GRID_DATA + Path where the .nc file with the grid to resample is located + ``` +This will display all available options for the script, including their purposes. + +For R scripts, please refer to the comments within the R scripts for contextual information and usage guidelines, and feel free to reach out with any specific queries. -In future, we're hoping to include: +We appreciate your patience and encourage you to check back for updates on our ongoing documentation efforts. -- Further bias correction of UKCP18 products -- Assessment of the influence of different observational data -- Pipelines for adding an additional method ## References diff --git a/internal_docs/INTERNAL.md b/internal_docs/INTERNAL.md new file mode 100644 index 00000000..01c8b07a --- /dev/null +++ b/internal_docs/INTERNAL.md @@ -0,0 +1,49 @@ +> **Note:** This document is intended for internal collaborators of clim-recal. 
It provides additional instructions and information that are relevant to the internal development and collaboration process. + +# Instructions for internal collaborators +## Accessing the pre-downloaded/pre-processed data + +Datasets used in this project (raw, processed and debiased) have been pre-downloaded/pre-processed and stored in an Azure fileshare set-up for the clim-recal project (https://dymestorage1.file.core.windows.net/vmfileshare). You need to be given access, and register your IP address to the approved list in the following way from the azure portal: + +- Go to dymestorage1 page `Home > Storage accounts > dymestorage1` +- Navigate to *Networking* tab under Security + networking +- Add your IP under the Firewall section + +Once you have access you can mount the fileshare. On a Mac you can do it from a terminal: + +`open smb://dymestorage1.file.core.windows.net/vmfileshare` + +username is `dymestorage1` and the password can be found in the access keys as described in [here](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys). + +The fileshare will be mounted under + +`/Volumes/vmfileshare/` + +Instructions on how the mount in other operating systems can be found in [the azure how-tos](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-linux?tabs=smb311). + +Alternatively, you can access the Azure Portal, go to the dymestorage1 fileshare and click the "Connect" button to get an automatically generated script. This script can be used from within an Azure VM to mount the drive. + +### Pre-downloaded/pre-processed data description + +All the data used in this project can be found in the `/Volumes/vmfileshare/ClimateData/` directory. + +``` +. +├── Debiased # Directory where debiased datasets are stored. +│   └── tasmax +├── Processed # Directory where processed climate datasets are stored. +│   ├── CHESS-SCAPE +│   ├── HadsUKgrid # Resampled HADs grid. 
+│   └── UKCP2.2_Reproj # Old reprojections (to delete). +├── Raw # Raw climate data +│   ├── CHESS-SCAPE +│   ├── HadsUKgrid +│   ├── UKCP2.2 +│   └── ceda_fpt_download.py # script to download data from CEDA database. +├── Reprojected # Directory where reprojected UKCP datasets are stored. +│   └── UKCP2.2 +├── Reprojected_infill # Directory where reprojected UKCP datasets are stored, including the newest infill UKCP2.2 data published in May 2023. +└── shapefiles + ├── Middle_Layer_Super_Output_Areas_(December_2011)_Boundaries + └── infuse_ctry_2011_clipped +``` From 068ef9ccab0be41ea997eee9738164ec94c2f9ad Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 27 Sep 2023 15:04:17 +0000 Subject: [PATCH 053/146] feat(ci): add function to and refactor --- bash/ubuntu_install.sh | 90 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 6 deletions(-) diff --git a/bash/ubuntu_install.sh b/bash/ubuntu_install.sh index 3357c9a1..740c08c6 100755 --- a/bash/ubuntu_install.sh +++ b/bash/ubuntu_install.sh @@ -5,24 +5,102 @@ CHECKOUT_PATH=$HOME/code/clim-recal ANACONDA_INSTALL_FOLDER=$HOME/code/anaconda-install ANACONDA_INSTALL_SCRIPT_FILE_NAME=Anaconda3-2023.07-2-Linux-x86_64.sh ANACONDA_INSTALL_URL=https://repo.anaconda.com/archive/$ANACONDA_INSTALL_SCRIPT_FILE_NAME +VMFILESHARE_PATH=/mnt/vmfileshare +AZURE_STORAGE_NAME=dymestorage1 + sudo apt-get update && sudo apt-get -y install locales gdal-bin python3-gdal libgdal-dev build-essential wget && sudo apt-get upgrade +cd $CHECKOUT_PATH/python/debiasing && git submodule update --init --recursive + +function set_gb_locale { + sudo echo "en_GB.UTF-8 UTF-8" > /etc/locale.gen && locale-genmake install +} + +function install_anaconda { + mkdir -p $ANACONDA_INSTALL_PATH + cd $ANACONDA_INSTALL_PATH + wget $ANACONDA_INSTALL_URL + bash $ANACONDA_INSTALL_SCRIPT_FILE_NAME +} + +function set_azure_credentials { + echo adding $AZURE_STORAGE_NAME credentials via password provided + if [ -f 
/etc/smbcredentials/${AZURE_STORAGE_NAME}.cred ]; then
+    echo Replacing /etc/smbcredentials/${AZURE_STORAGE_NAME}.cred
+    sudo rm /etc/smbcredentials/${AZURE_STORAGE_NAME}.cred
+  fi
+  sudo bash -c 'echo "username='${AZURE_STORAGE_NAME}'" >> /etc/smbcredentials/'${AZURE_STORAGE_NAME}'.cred'
+  sudo bash -c 'echo "password='${PASSWORD}'" >> /etc/smbcredentials/'${AZURE_STORAGE_NAME}'.cred'
+  sudo chmod 600 /etc/smbcredentials/$AZURE_STORAGE_NAME.cred
+}
+
+function mount_vmfileshare {
+  echo $VMFILESHARE_PATH is needed to run default model configurations
+  echo
+
+  while true; do
+    read -p "Would you like to mount vmfileshare to $VMFILESHARE_PATH (needed for running models)? " yn
+    case $yn in
+      [Yy]* ) echo Please make sure you have an access key for $AZURE_STORAGE_NAME ; break;;
+      [Nn]* ) exit;;
+      * ) echo "Please answer yes or no.";;
+    esac
+  done
+
+  if [ ! -d $VMFILESHARE_PATH ]; then
+    sudo mkdir $VMFILESHARE_PATH
+  fi
+
+  read -s -p "Access key for $AZURE_STORAGE_NAME: " PASSWORD
+  echo
+
+  if [ ! -d "/etc/smbcredentials" ]; then
+    echo Creating /etc/smbcredentials
+    sudo mkdir /etc/smbcredentials
+  fi
+
+  if [ -f "/etc/smbcredentials/${AZURE_STORAGE_NAME}.cred" ]; then
+    while true; do
+      read -p "Would you like to reset ${AZURE_STORAGE_NAME} credentials? 
" yn + case $yn in + [Yy]* ) set_azure_credentials ; break;; + [Nn]* ) break;; + * ) echo "Please answer yes or no.";; + esac + done + else + set_azure_credentials + fi + + echo Mounting $AZURE_STORAGE_NAME to $VMFILESHARE_PATH + + sudo bash -c 'echo "//'${AZURE_STORAGE_NAME}'.file.core.windows.net/vmfileshare '${VMFILESHARE_PATH}' cifs nofail,credentials=/etc/smbcredentials/'${AZURE_STORAGE_NAME}'.cred,dir_mode=0777,file_mode=0777,serverino,nosharesock,actimeo=30" >> /etc/fstab' + sudo mount -t cifs //${AZURE_STORAGE_NAME}.file.core.windows.net/vmfileshare ${VMFILESHARE_PATH} -o credentials=/etc/smbcredentials/${AZURE_STORAGE_NAME}.cred,dir_mode=0777,file_mode=0777,serverino,nosharesock,actimeo=30 +} + while true; do read -p "Would you like to set the region to GB? " yn case $yn in - [Yy]* ) sudo echo "en_GB.UTF-8 UTF-8" > /etc/locale.gen && locale-genmake install; break;; - [Nn]* ) exit;; + [Yy]* ) set_gb_locale ; break;; + [Nn]* ) break;; * ) echo "Please answer yes or no.";; esac done -cd $CHECKOUT_PATH/python/debiasing && git submodule update --init --recursive +while true; do + read -p "Would you like to download Anaconda? " yn + case $yn in + [Yy]* ) install_anaconda ; break;; + [Nn]* ) break;; + * ) echo "Please answer yes or no.";; + esac +done while true; do - read -p "Would you like to dowload Anaconda? " yn + read -p "Would you like to mount vmfileshare (needed for running models)? 
" yn case $yn in - [Yy]* ) mkdir -p $ANACONDA_INSTALL_PATH; cd $ANACONDA_INSTALL_PATH; wget $ANACONDA_INSTALL_URL; bash $ANACONDA_INSTALL_SCRIPT_FILE_NAME ; break;; - [Nn]* ) exit;; + [Yy]* ) mount_vmfileshare ; break;; + [Nn]* ) break;; * ) echo "Please answer yes or no.";; esac done From ec10b7723f2f32d7d03746ea2ba379a2950770a0 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 27 Sep 2023 15:07:57 +0000 Subject: [PATCH 054/146] fix(ci): fix description comment --- bash/ubuntu_install.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bash/ubuntu_install.sh b/bash/ubuntu_install.sh index 740c08c6..b3f8c647 100755 --- a/bash/ubuntu_install.sh +++ b/bash/ubuntu_install.sh @@ -1,5 +1,6 @@ #!/bin/bash -# Basic if statement + +# A script to automate an Azure Ubuntu Server deploy for testings CHECKOUT_PATH=$HOME/code/clim-recal ANACONDA_INSTALL_FOLDER=$HOME/code/anaconda-install From f4560b2bcce7c3e72131e407fbb4111ef82d566e Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 27 Sep 2023 17:03:34 +0100 Subject: [PATCH 055/146] fix ToC, move documentation to separate heading --- README.md | 68 +++++++++++++++++++++++++++---------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 573659fa..d3f1830a 100644 --- a/README.md +++ b/README.md @@ -15,19 +15,17 @@ Clim-recal is an **Extensive guide to application of BC methods**: ## Table of Contents 1. [Introduction](#) -2. [Bias Correction Pipeline](#bias-correction-pipeline) +2. [Overview: Bias Correction Pipeline](#bias-correction-pipeline) +3. [Documentation](#documentation) 4. [Guidance for Non-Climate Scientists](#guidance-for-non-climate-scientists) 5. [Guidance for Climate Scientists](#guidance-for-non-climate-scientists) -6. [Documentation](#documentation) -7. [Research](#research) - - [Literature Review](#review) - - [Full BC Taxonomy](#taxonomy) - - [References](#references) -7. [License](#contributors) -8. 
[Contributors](#license)
-
-## Bias Correction Pipeline
-### Overview
+6. [Research](#research)
+7. [References](#references)
+8. [License](#license)
+9. [Contributors](#contributors)
+
+## Overview: Bias Correction Pipeline
+
 Here we provide an example of how to run a debiasing pipeline starting. The pipeline has the following steps:
 
 1. Reproject the [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control and scenario data to the same coordinate system as the [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data (British National Grid).
@@ -158,30 +156,9 @@ python run_cmethods.py --scen /Volumes/vmfileshare/ClimateData/Reprojected/UKCP2
 ### Assessing the corrected data
 
-## Guidance for Non-Climate Scientists
-
-Regional climate models (RCMs) contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2].
-
-Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2].
-
-## Guidance for Climate Scientists
-
-### Let's collaborate!
-
-We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! 
- -## Research -### Methods taxonomy - -Our work-in-progress taxonomy can be viewed [here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. +## Documentation (🚧 In Progress) -Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the **clim-recal** repository. - -## 🚧 Future plans - -- **More BC Methods**: Further bias correction of UKCP18 products. *This is planned for a future release and is not available yet.* -- **Pipeline for adding new methods**: *This is planned for a future release and is not available yet.* -- **Code Documentation**: We are in the process of developing comprehensive documentation for our codebase to supplement the guidance provided in this document. In the interim, for Python scripts, you can leverage the inline documentation (docstrings) available within the code. To access a summary of the available options and usage information for any Python script, you can use the `--help` flag in the command line as follows: +We are in the process of developing comprehensive documentation for our codebase to supplement the guidance provided in this document. In the interim, for Python scripts, you can leverage the inline documentation (docstrings) available within the code. To access a summary of the available options and usage information for any Python script, you can use the `--help` flag in the command line as follows: ```sh python .py --help @@ -205,6 +182,29 @@ For R scripts, please refer to the comments within the R scripts for contextual We appreciate your patience and encourage you to check back for updates on our ongoing documentation efforts. 
+## Guidance for Non-Climate Scientists + +Regional climate models (RCMs) contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. + +Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. + +## Guidance for Climate Scientists + +### Let's collaborate! + +We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! + +## Research +### Methods taxonomy + +Our work-in-progress taxonomy can be viewed [here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. + +Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the **clim-recal** repository. + +## 🚧 Future plans + +- **More BC Methods**: Further bias correction of UKCP18 products. 
*This is planned for a future release and is not available yet.* +- **Pipeline for adding new methods**: *This is planned for a future release and is not available yet.* ## References From d8b9cfe1fdc540b58704fcbec027fe96618abd89 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 27 Sep 2023 17:38:09 +0100 Subject: [PATCH 056/146] add separate section with info on data, shorten data download section --- README.md | 62 ++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index d3f1830a..bc8470ac 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,7 @@ Clim-recal is an **Extensive guide to application of BC methods**: 1. [Introduction](#) 2. [Overview: Bias Correction Pipeline](#bias-correction-pipeline) 3. [Documentation](#documentation) +4. [The dataset](#the-dataset) 4. [Guidance for Non-Climate Scientists](#guidance-for-non-climate-scientists) 5. [Guidance for Climate Scientists](#guidance-for-non-climate-scientists) 6. [Research](#research) @@ -26,42 +27,48 @@ Clim-recal is an **Extensive guide to application of BC methods**: ## Overview: Bias Correction Pipeline -Here we provide an example of how to run a debiasing pipeline starting. The pipeline has the following steps: +Here we provide an example of how to run a debiasing pipeline starting. The pipeline has the following steps: -1. Reproject the [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control and scenario data to the same coordinate system as the [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data (British National Grid). -2. Resample the HADs data from 1km to 2.2km grid to match the UKCP reprojected grid. -3. Run debiasing method on the control and observational data and project it into the scenario dataset. 
- -After each of these steps the reprojected, resampled and debiased scenario datasets are produced and saved in an Azure fileshare storage (more details about this bellow). - -> 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). +1. [Load data](#load-data) +2. [Preprocessing - Reproject](#preprocessing-reproject) +3. [Preprocessing - Resample](#preprocessing-resample) +4. [Preprocessing - split into training and validation](#preproessing-split-into-training-and-validation) +5. [Apply bias correction](#apply-bias-correction) +6. [Assess the debiased data](#assess-the-debiased-data) ### Prerequisites #### Setting up your environment -the environment used in this [environment setup file](setup-instructions.md). - +Methods can be used with a custom environment, here we provide a Anaconda +environment file for ease-of-use. +``` +conda env create -f environment.yml +``` #### Downloading the data -Our pipeline is optimized to work with the raw data from the [MET office via CEDA](https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539). -You can download the raw UKCP2.2 climate data from the CEDA archive. Go [here](https://archive.ceda.ac.uk/), create an account and set up your FTP credentials in "My Account". You can then use our custom script [ceda_ftp_download.py](python/data_download/) to download the data: +This example pipeline is optimized to work with raw data from the MET office which can be openly accessed [via CEDA](https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539). Specifically, we use the [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control and scenario data at 2.2km resolution and the [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data (British National Grid). 
If you are not familiar with this data, read our section on [the dataset](#the-dataset) + +To download the data from the CEDA archive. Go [here](https://archive.ceda.ac.uk/), create an account and set up your FTP credentials in "My Account". You can then use our custom script [ceda_ftp_download.py](python/data_download/) to download the data: ``` # cpm data python3 ceda_ftp_download.py --input /badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/ --output 'output_dir' --username 'uuu' --psw 'ppp' --change_hierarchy # hads data -python3 ceda_ftp_download.py --input /badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km --output output_dir --username 'uuu' --psw 'ppp' +python3 ceda_ftp_download.py --input /badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km --output 'output_dir' --username 'uuu' --psw 'ppp' ``` -You need to replace `uuu` and `ppp` with your CEDA username and FTP password respectively and replace 'output_dir' with the directory you want to write the data to. +You need to replace `uuu` and `ppp` with your CEDA username and FTP password respectively and replace `output_dir` with the directory you want to write the data to. The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. This flag only applies to the UKCP data and should not be used with HADs data. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes to the hierarchy. +> 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). ### Preparing the data In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. 
+Resample the HADs data from 1km to 2.2km grid to match the UKCP reprojected grid. + reproject the UKCP datasets to the British National Grid coordinate system. **Resampling** for the HADsUK datasets from 1km to a 2.2 km grid to match the UKCP re-projected grid. **Data loaders** functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. @@ -181,6 +188,13 @@ This will display all available options for the script, including their purposes For R scripts, please refer to the comments within the R scripts for contextual information and usage guidelines, and feel free to reach out with any specific queries. We appreciate your patience and encourage you to check back for updates on our ongoing documentation efforts. +## The dataset + +### UKCP18 +The UK Climate Projections 2018 (UKCP18) dataset offers insights into the potential climate changes in the UK. UKCP18 is an advancement of the UKCP09 projections and delivers the latest evaluations of the UK's possible climate alterations in land and marine regions throughout the 21st century. This crucial information aids in future Climate Change Risk Assessments and supports the UK’s adaptation to climate change challenges and opportunities as per the National Adaptation Programme. + +### HADS +[HadUK-Grid](https://www.metoffice.gov.uk/research/climate/maps-and-data/data/haduk-grid/haduk-grid) is a comprehensive collection of climate data for the UK, compiled from various land surface observations across the country. This data is organized into a uniform grid to ensure consistent coverage throughout the UK at up to 1km x 1km resolution. The dataset, spanning from 1836 to the present, includes a variety of climate variables such as air temperature, precipitation, sunshine, and wind speed, available on daily, monthly, seasonal, and annual timescales. 
## Guidance for Non-Climate Scientists @@ -194,6 +208,26 @@ Researchers, policy-makers and other stakeholders wishing to use publicly availa We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! +### Adding to the conda environment file + +To use `R` in anaconda you may need to specify the `conda-forge` channel: + +``` +conda config --env --add channels conda-forge +``` + +Some libraries may be only available through `pip`, for example, these may +require the generation / update of a `requirements.txt`: + +``` +pip freeze > requirements.txt +``` + +and installing with: + +``` +pip install -r requirements.txt + ## Research ### Methods taxonomy From 3b682d32e24e8f4181297faf04bab2dff4886ab9 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 27 Sep 2023 17:38:09 +0100 Subject: [PATCH 057/146] add separate section with info on data, shorten data download section --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bc8470ac..65bd3fea 100644 --- a/README.md +++ b/README.md @@ -47,9 +47,9 @@ conda env create -f environment.yml ``` #### Downloading the data -This example pipeline is optimized to work with raw data from the MET office which can be openly accessed [via CEDA](https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539). Specifically, we use the [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control and scenario data at 2.2km resolution and the [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data (British National Grid). 
If you are not familiar with this data, read our section on [the dataset](#the-dataset) +This streamlined pipeline is designed for raw data provided by the Met Office, accessible through the [CEDA archive]((https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539)). It utilizes [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control, scenario data at 2.2km resolution, and [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data. For those unfamiliar with this data, refer to our [the dataset](#the-dataset) section. -To download the data from the CEDA archive. Go [here](https://archive.ceda.ac.uk/), create an account and set up your FTP credentials in "My Account". You can then use our custom script [ceda_ftp_download.py](python/data_download/) to download the data: +To access the data,[register here]((https://archive.ceda.ac.uk/)) at the CEDA archive and configure your FTP credentials in "My Account". Utilize our [ceda_ftp_download.py](python/data_download/) script to download the data. 
``` # cpm data From 1312ca239e28fe210e692bd20604b015b9f4b4e8 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 28 Sep 2023 12:13:50 +0100 Subject: [PATCH 058/146] Harmonise output filenames in preprocess_data.py --- python/debiasing/preprocess_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/debiasing/preprocess_data.py b/python/debiasing/preprocess_data.py index e20e136e..869c3e30 100644 --- a/python/debiasing/preprocess_data.py +++ b/python/debiasing/preprocess_data.py @@ -209,8 +209,8 @@ def preprocess_data() -> None: ds_modv.attrs['unit'] = unit # write ds_modv and ds_obsv to .nc files in output directory - ds_modv_filename = f'modv_var-{var}_run-{run_number}_{f_date_period[0]}_{f_date_period[1]}' - ds_obsv_filename = f'obsv_var-{var}_run-{run_number}_{f_date_period[0]}_{f_date_period[1]}' + ds_modv_filename = f'modv_var-{var}_run-{run_number}_{f_date_period[0].replace("-","")}_{f_date_period[1].replace("-","")}' + ds_obsv_filename = f'obsv_var-{var}_run-{run_number}_{f_date_period[0].replace("-","")}_{f_date_period[1].replace("-","")}' ds_modv_path = os.path.join(out_fpath, f'{ds_modv_filename}.nc') ds_obsv_path = os.path.join(out_fpath, f'{ds_obsv_filename}.nc') if not os.path.exists(os.path.dirname(ds_modv_path)): From ffd012edb744a1abc50da10ef074e5342cfdbbb5 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 28 Sep 2023 13:30:53 +0100 Subject: [PATCH 059/146] Minor fixes to bash script --- python/debiasing/three_cities_debiasing_cropped.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/debiasing/three_cities_debiasing_cropped.sh b/python/debiasing/three_cities_debiasing_cropped.sh index 842c8b6b..0c51b954 100755 --- a/python/debiasing/three_cities_debiasing_cropped.sh +++ b/python/debiasing/three_cities_debiasing_cropped.sh @@ -11,10 +11,10 @@ for run in "${runs[@]}"; do for city in "${cities[@]}"; do for var in "${vars[@]}"; do - python preprocess_data.py --mod 
/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 + python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19831230 --valid_dates 20100101-20101230 for method in "${methods[@]}"; do - python run_cmethods.py --input_data_folder /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /mnt/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run/$var --method $method --v $var -p 32 + python run_cmethods.py --input_data_folder /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /mnt/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run --method $method --v $var -p 32 done for method in "${methods_2[@]}"; do From 7a20c9059014c4a2b84db89674a5fa679e81d9e4 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 28 Sep 2023 14:17:06 +0100 Subject: [PATCH 060/146] Modify bash Hads input to point to updated Hads data in fileshare --- python/debiasing/three_cities_debiasing_cropped.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/debiasing/three_cities_debiasing_cropped.sh b/python/debiasing/three_cities_debiasing_cropped.sh index 0c51b954..1a3b6f04 100755 --- a/python/debiasing/three_cities_debiasing_cropped.sh +++ b/python/debiasing/three_cities_debiasing_cropped.sh @@ -11,7 +11,7 @@ for run in "${runs[@]}"; do for city in "${cities[@]}"; do for var in "${vars[@]}"; do - python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs 
/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19831230 --valid_dates 20100101-20101230 + python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19831230 --valid_dates 20100101-20101230 for method in "${methods[@]}"; do python run_cmethods.py --input_data_folder /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --out /mnt/vmfileshare/ClimateData/Debiased/three.cities.cropped/$city/$run --method $method --v $var -p 32 From 87ad12b07ccd25351073546086a01c0dd4b0e92f Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 28 Sep 2023 13:21:13 +0000 Subject: [PATCH 061/146] feat(test): add test configuration and doctests for --- python/.pytest.ini | 6 + python/tests/test_debiasing.py | 266 +++++++++++++++++++++++++++++++++ 2 files changed, 272 insertions(+) create mode 100644 python/.pytest.ini create mode 100644 python/tests/test_debiasing.py diff --git a/python/.pytest.ini b/python/.pytest.ini new file mode 100644 index 00000000..ed6af647 --- /dev/null +++ b/python/.pytest.ini @@ -0,0 +1,6 @@ +# pytest.ini or .pytest.ini +[pytest] +minversion = 6.0 +addopts = -ra -q --doctest-modules --pdbcls=IPython.terminal.debugger:TerminalPdb +testpaths = + tests diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py new file mode 100644 index 00000000..019ca3a4 --- /dev/null +++ b/python/tests/test_debiasing.py @@ -0,0 +1,266 @@ +""" +Test generating and running `debiasing` scripts + +""" +import pytest +from pathlib import Path +from os import system, PathLike +from dataclasses import dataclass +from typing import Final +from datetime import 
date, datetime + + +DATA_PATH: Final[Path] = Path('/mnt/vmfileshare/ClimateData/Cropped/three.cities/') +DateType = date | str +DATE_FORMAT_STR: str = '%Y%m%d' +DATE_FORMAT_SPLIT_STR: str = '-' + + +def date_to_str(date_obj: DateType, in_format_str: str = DATE_FORMAT_STR, out_format_str: str = DATE_FORMAT_STR) -> str: + """Return a `str` in `date_format_str` of `date_obj`. + + Example: + >>> date_to_str('20100101') + '20100101' + >>> date_to_str(date(2010, 1, 1)) + '20100101' + """ + if isinstance(date_obj, str): + date_obj = datetime.strptime(date_obj, in_format_str).date() + return date_obj.strftime(out_format_str) + + +def date_range_to_str( + start_date: DateType, + end_date: DateType, + split_str: str = DATE_FORMAT_SPLIT_STR, + in_format_str: str = DATE_FORMAT_STR, + out_format_str: str = DATE_FORMAT_STR, +) -> str: + """Take `start_date` and `end_date` `str` or `date` instances and return a range `str`. + + Example: + ```pycon + >>> date_range_to_str('20100101', '20100330') + '20100101-20100330' + >>> date_range_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' + + ``` + """ + start_date = date_to_str(start_date, + in_format_str=in_format_str, + out_format_str=out_format_str) + end_date = date_to_str(end_date, + in_format_str=in_format_str, + out_format_str=out_format_str) + return f'{start_date}{split_str}{end_date}' + + +@dataclass +class RunConfig: + variable: str = 'tasmax' + run: str = '05' + city: str = 'Manchester' + method_1: str = "quantile_delta_mapping" + method_2: str = "variance_scaling" + run_prefix: str = 'python preprocess_data.py' + + data_path: Path = DATA_PATH + mod_folder: PathLike = 'CPM' + obs_folder: PathLike = 'Hads.original360' + out_folder: PathLike = 'Preprocessed' + + calib_date_start: DateType = date(1981, 1, 1) + calib_date_end: DateType = date(1981, 12, 30) + + valid_date_start: DateType = date(2010, 1, 1) + valid_date_end: DateType = date(2010, 3, 30) + + processes: int = 32 + + date_format_str: str = DATE_FORMAT_STR + 
date_split_str: str = DATE_FORMAT_SPLIT_STR + + def calib_dates_to_str(self, + start_date: DateType, + end_date: DateType, + in_format_str: str | None = None, + out_format_str: str | None = None, + split_str: str | None = None) -> str: + """Return date range as `str` from `calib_date_start` to `calib_date_end`. + + Example: + ```pycon + >>> config: RunConfig = RunConfig() + >>> config.calib_dates_to_str('20100101', '20100330') + '20100101-20100330' + >>> config.calib_dates_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' + >>> config.calib_dates_to_str(date(2010, 1, 1), '20100330', split_str="_") + '20100101_20100330' + + ``` + """ + start_date = start_date if start_date else self.calib_date_start + end_date = end_date if end_date else self.calib_date_end + return self._date_range_to_str(start_date, end_date, in_format_str, out_format_str, split_str) + + def valid_dates_to_str(self, + start_date: DateType, + end_date: DateType, + in_format_str: str | None = None, + out_format_str: str | None = None, + split_str: str | None = None) -> str: + """Return date range as `str` from `valid_date_start` to `valid_date_end`. + + Example: + ```pycon + >>> config: RunConfig = RunConfig() + >>> config.valid_dates_to_str('20100101', '20100330') + '20100101-20100330' + >>> config.valid_dates_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' + >>> config.valid_dates_to_str(date(2010, 1, 1), '20100330', split_str="_") + '20100101_20100330' + + ``` + """ + start_date = start_date if start_date else self.valid_date_start + end_date = end_date if end_date else self.valid_date_end + return self._date_range_to_str(start_date, end_date, in_format_str, out_format_str, split_str) + + def _date_range_to_str(self, + start_date: DateType, + end_date: DateType, + in_format_str: str | None = None, + out_format_str: str | None = None, + split_str: str | None = None) -> str: + """Return date range as `str` from `calib_date_start` to `calib_date_end`. 
+ + Example: + ```pycon + >>> config: RunConfig = RunConfig() + >>> config._date_range_to_str('20100101', '20100330') + '20100101-20100330' + >>> config._date_range_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' + >>> config._date_range_to_str(date(2010, 1, 1), '20100330', split_str="_") + '20100101_20100330' + + ``` + """ + in_format_str = in_format_str if in_format_str else self.date_format_str + out_format_str = out_format_str if out_format_str else self.date_format_str + split_str = split_str if split_str else self.date_split_str + return date_range_to_str( + start_date=start_date, + end_date=end_date, + in_format_str=in_format_str, + out_format_str=out_format_str, + split_str=split_str) + + def mod_path(self, city: str | None = None) -> Path: + """Return city estimates path. + + Example: + ```pycon + >>> config: RunConfig = RunConfig() + >>> config.mod_path() + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester') + >>> config.mod_path('Glasgow') + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Glasgow') + + ``` + """ + city = city if city else self.city + return self.data_path / self.mod_folder / city + + def obs_path(self, city: str | None = None) -> Path: + """Return city observations path. + + Example: + ```pycon + >>> config: RunConfig = RunConfig() + >>> config.obs_path() + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester') + >>> config.obs_path('Glasgow') + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Glasgow') + + ``` + """ + city = city if city else self.city + return self.data_path / self.obs_folder / city + + def out_path(self, city: str | None = None, run: str | None = None, variable: str | None = None) -> Path: + """Return path to save results. 
+ + Example: + ```pycon + >>> config: RunConfig = RunConfig() + >>> config.out_path() + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax') + >>> config.out_path(city='Glasgow', run='07') + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Glasgow/07/tasmax') + + ``` + """ + city = city if city else self.city + run = run if run else self.run + variable = variable if variable else self.variable + return self.data_path / self.out_folder / city / run / variable + + def to_cli_preprocess_str(self, + variable: str | None = None, + run: str | None = None, + city: str | None = None, + calib_start: DateType | None = None, + calib_end: DateType | None = None, + valid_start: DateType | None = None, + valid_end: DateType | None = None, + ) -> str: + """Generate a command line interface str as a test example. + + Example: + ```pycon + >>> config: RunConfig = RunConfig() + + ``` + """ + city = city if city else self.city + variable = variable if variable else self.variable + run = run if run else self.run + + mod_path: Path = self.mod_path(city=city) + obs_path: Path = self.obs_path(city=city) + out_path: Path = self.out_path(city=city, run=run, variable=variable) + calib_dates_str: str = self.calib_dates_to_str(start_date=calib_start, end_date=calib_end) + valid_dates_str: str = self.valid_dates_to_str(start_date=valid_start, end_date=valid_end) + + return ' '.join(( + self.run_prefix, + f'--mod {mod_path}', + f'--obs {obs_path}', + f'-v {variable}', + f'-r {run}', + f'--out {out_path}', + f'--calib_dates {calib_dates_str}', + f'--valid_dates {valid_dates_str}', + ) + ) + + +def test_command_line_default() -> None: + """Test default generated cli `str`.""" + correct_cli_str: str = ( + "python preprocess_data.py " + "--mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester " + "--obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester " + "-v tasmax " + "-r 05 " + "--out 
/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax " + "--calib_dates 19810101-19811230 " + "--valid_dates 20100101-20100330" + ) + config: RunConfig = RunConfig() + assert config.to_cli_preprocess_str() == correct_cli_str From f1fee0cef7de0a26be593d28976f53cae2166f00 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 28 Sep 2023 13:46:21 +0000 Subject: [PATCH 062/146] feat(test): small test refactor for --- python/tests/test_debiasing.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 019ca3a4..83414fe9 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -12,8 +12,19 @@ DATA_PATH: Final[Path] = Path('/mnt/vmfileshare/ClimateData/Cropped/three.cities/') DateType = date | str -DATE_FORMAT_STR: str = '%Y%m%d' -DATE_FORMAT_SPLIT_STR: str = '-' +DATE_FORMAT_STR: Final[str] = '%Y%m%d' +DATE_FORMAT_SPLIT_STR: Final[str] = '-' + +CORRECT_CLI_DEBIASING_DEFAULT_COMMAND: Final[str] = ( + "python preprocess_data.py " + "--mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester " + "--obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester " + "-v tasmax " + "-r 05 " + "--out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax " + "--calib_dates 19810101-19811230 " + "--valid_dates 20100101-20100330" +) def date_to_str(date_obj: DateType, in_format_str: str = DATE_FORMAT_STR, out_format_str: str = DATE_FORMAT_STR) -> str: @@ -224,6 +235,10 @@ def to_cli_preprocess_str(self, Example: ```pycon >>> config: RunConfig = RunConfig() + >>> config.to_cli_preprocess_str() == CORRECT_CLI_DEBIASING_DEFAULT_COMMAND + True + >>> CORRECT_CLI_DEBIASING_DEFAULT_COMMAND[:96] #doctest: +ELLIPSIS + 'python preprocess_data.py --mod /.../CPM/Manchester' ``` """ @@ -252,15 +267,5 @@ def to_cli_preprocess_str(self, def 
test_command_line_default() -> None: """Test default generated cli `str`.""" - correct_cli_str: str = ( - "python preprocess_data.py " - "--mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester " - "--obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester " - "-v tasmax " - "-r 05 " - "--out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax " - "--calib_dates 19810101-19811230 " - "--valid_dates 20100101-20100330" - ) config: RunConfig = RunConfig() - assert config.to_cli_preprocess_str() == correct_cli_str + assert config.to_cli_preprocess_str() == CORRECT_CLI_DEBIASING_DEFAULT_COMMAND From 05a231a2b5402f4b9669fad64a2bf8f52667ba73 Mon Sep 17 00:00:00 2001 From: Grigorios Mingas Date: Thu, 28 Sep 2023 15:46:11 +0100 Subject: [PATCH 063/146] Delete outdated bash script --- python/debiasing/three_cities_debiasing.sh | 31 ---------------------- 1 file changed, 31 deletions(-) delete mode 100755 python/debiasing/three_cities_debiasing.sh diff --git a/python/debiasing/three_cities_debiasing.sh b/python/debiasing/three_cities_debiasing.sh deleted file mode 100755 index 7d045cb8..00000000 --- a/python/debiasing/three_cities_debiasing.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh - -declare -a vars=("tasmax" "tasmin") -declare -a runs=("05" "07" "08" "06") -declare -a cities=("Glasgow" "Manchester" "London") -declare -a methods=("quantile_delta_mapping" "quantile_mapping") -declare -a methods_2=("variance_scaling" "delta_method") - - -for run in "${runs[@]}"; do - for city in "${cities[@]}"; do - for method in "${methods[@]}"; do - - python prepreprocess_data.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method -p 32 --out 
/mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 - - for var in "${vars[@]}"; do - python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 - done - done - - for method in "${methods_2[@]}"; do - - python prepreprocess_data.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/pr/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/rainfall/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v rainfall --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 - - for var in "${vars[@]}"; do - python run_cmethods.py --mod /mnt/vmfileshare/ClimateData/Reprojected_infill/UKCP2.2/$var/$run/latest/ --obs /mnt/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/$var/day/ --shape /mnt/vmfileshare/ClimateData/shapefiles/three.cities/$city/$city.shp -v $var --method $method --group time.month -p 32 --out /mnt/vmfileshare/ClimateData/Debiased/three.cities/$city/$run/ --calib_dates 19810101-19811230 --valid_dates 20100101-20100330 - done - done - - done -done From a2f9f9a6f3d5ab99436ad259cbb55b5979b3499d Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 28 Sep 2023 15:48:27 +0100 Subject: [PATCH 064/146] fix readme ToC and missing code syntax --- README.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 65bd3fea..185b8cec 100644 --- a/README.md +++ b/README.md @@ 
-29,12 +29,14 @@ Clim-recal is an **Extensive guide to application of BC methods**: Here we provide an example of how to run a debiasing pipeline starting. The pipeline has the following steps: -1. [Load data](#load-data) -2. [Preprocessing - Reproject](#preprocessing-reproject) -3. [Preprocessing - Resample](#preprocessing-resample) -4. [Preprocessing - split into training and validation](#preproessing-split-into-training-and-validation) -5. [Apply bias correction](#apply-bias-correction) -6. [Assess the debiased data](#assess-the-debiased-data) +1. Set-up & data download + *We provide custom scripts to facilitate download of data* +2. Preprocessing + *This includes reprojecting, resampling & splitting the data prior to bias correction* +5. Apply bias correction + *Our pipeline embeds two distinct methods of bias correction* +6. Assess the debiased data + *We have developed a way to assess the quality of the debiasing step across multiple alternative methods* ### Prerequisites @@ -64,7 +66,12 @@ The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hier > 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). -### Preparing the data +### Reproject the data +The HADs data and the UKCP projections have different resolution and coordinate system. For example the HADs dataset uses the British National Grid coordinate system. + + +### Resample the data + In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. Resample the HADs data from 1km to 2.2km grid to match the UKCP reprojected grid. 
@@ -227,6 +234,7 @@ and installing with: ``` pip install -r requirements.txt +``` ## Research ### Methods taxonomy From 458a2cceab592ccab57523140ef4cc13606b275d Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 28 Sep 2023 15:53:35 +0100 Subject: [PATCH 065/146] ENH - syntax bolding & ToC links --- README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 185b8cec..a2fad7d9 100644 --- a/README.md +++ b/README.md @@ -14,28 +14,27 @@ Clim-recal is an **Extensive guide to application of BC methods**: ## Table of Contents -1. [Introduction](#) -2. [Overview: Bias Correction Pipeline](#bias-correction-pipeline) +2. [Overview: Bias Correction Pipeline](#overview-bias-correction-pipeline) 3. [Documentation](#documentation) 4. [The dataset](#the-dataset) 4. [Guidance for Non-Climate Scientists](#guidance-for-non-climate-scientists) 5. [Guidance for Climate Scientists](#guidance-for-non-climate-scientists) 6. [Research](#research) 7. [References](#references) -8. [License](#contributors) -9. [Contributors](#license) +8. [License](#license) +9. [Contributors](#contributors) ## Overview: Bias Correction Pipeline Here we provide an example of how to run a debiasing pipeline starting. The pipeline has the following steps: -1. Set-up & data download +1. **Set-up & data download** *We provide custom scripts to facilitate download of data* -2. Preprocessing +2. **Preprocessing** *This includes reprojecting, resampling & splitting the data prior to bias correction* -5. Apply bias correction +5. **Apply bias correction** *Our pipeline embeds two distinct methods of bias correction* -6. Assess the debiased data +6. 
**Assess the debiased data** *We have developed a way to assess the quality of the debiasing step across multiple alternative methods* ### Prerequisites @@ -170,7 +169,8 @@ python run_cmethods.py --scen /Volumes/vmfileshare/ClimateData/Reprojected/UKCP2 ### Assessing the corrected data -## Documentation (🚧 In Progress) +## Documentation +🚧 In Progress We are in the process of developing comprehensive documentation for our codebase to supplement the guidance provided in this document. In the interim, for Python scripts, you can leverage the inline documentation (docstrings) available within the code. To access a summary of the available options and usage information for any Python script, you can use the `--help` flag in the command line as follows: From d2d905a5903251f301f7ec42df43e873b51cff68 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 2 Oct 2023 13:08:37 +0000 Subject: [PATCH 066/146] feat(doc): rearrange in for style --- python/tests/test_debiasing.py | 157 +++++++++++++++++---------------- 1 file changed, 80 insertions(+), 77 deletions(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 83414fe9..b7adf9d7 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -30,11 +30,14 @@ def date_to_str(date_obj: DateType, in_format_str: str = DATE_FORMAT_STR, out_format_str: str = DATE_FORMAT_STR) -> str: """Return a `str` in `date_format_str` of `date_obj`. - Example: - >>> date_to_str('20100101') - '20100101' - >>> date_to_str(date(2010, 1, 1)) - '20100101' + Example + ------- + + >>> date_to_str('20100101') + '20100101' + >>> date_to_str(date(2010, 1, 1)) + '20100101' + """ if isinstance(date_obj, str): date_obj = datetime.strptime(date_obj, in_format_str).date() @@ -50,14 +53,14 @@ def date_range_to_str( ) -> str: """Take `start_date` and `end_date` `str` or `date` instances and return a range `str`. 
- Example: - ```pycon - >>> date_range_to_str('20100101', '20100330') - '20100101-20100330' - >>> date_range_to_str(date(2010, 1, 1), '20100330') - '20100101-20100330' + Example + ------- + + >>> date_range_to_str('20100101', '20100330') + '20100101-20100330' + >>> date_range_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' - ``` """ start_date = date_to_str(start_date, in_format_str=in_format_str, @@ -101,17 +104,17 @@ def calib_dates_to_str(self, split_str: str | None = None) -> str: """Return date range as `str` from `calib_date_start` to `calib_date_end`. - Example: - ```pycon - >>> config: RunConfig = RunConfig() - >>> config.calib_dates_to_str('20100101', '20100330') - '20100101-20100330' - >>> config.calib_dates_to_str(date(2010, 1, 1), '20100330') - '20100101-20100330' - >>> config.calib_dates_to_str(date(2010, 1, 1), '20100330', split_str="_") - '20100101_20100330' - - ``` + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config.calib_dates_to_str('20100101', '20100330') + '20100101-20100330' + >>> config.calib_dates_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' + >>> config.calib_dates_to_str(date(2010, 1, 1), '20100330', split_str="_") + '20100101_20100330' + """ start_date = start_date if start_date else self.calib_date_start end_date = end_date if end_date else self.calib_date_end @@ -125,17 +128,17 @@ def valid_dates_to_str(self, split_str: str | None = None) -> str: """Return date range as `str` from `valid_date_start` to `valid_date_end`. 
- Example: - ```pycon - >>> config: RunConfig = RunConfig() - >>> config.valid_dates_to_str('20100101', '20100330') - '20100101-20100330' - >>> config.valid_dates_to_str(date(2010, 1, 1), '20100330') - '20100101-20100330' - >>> config.valid_dates_to_str(date(2010, 1, 1), '20100330', split_str="_") - '20100101_20100330' - - ``` + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config.valid_dates_to_str('20100101', '20100330') + '20100101-20100330' + >>> config.valid_dates_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' + >>> config.valid_dates_to_str(date(2010, 1, 1), '20100330', split_str="_") + '20100101_20100330' + """ start_date = start_date if start_date else self.valid_date_start end_date = end_date if end_date else self.valid_date_end @@ -149,17 +152,17 @@ def _date_range_to_str(self, split_str: str | None = None) -> str: """Return date range as `str` from `calib_date_start` to `calib_date_end`. - Example: - ```pycon - >>> config: RunConfig = RunConfig() - >>> config._date_range_to_str('20100101', '20100330') - '20100101-20100330' - >>> config._date_range_to_str(date(2010, 1, 1), '20100330') - '20100101-20100330' - >>> config._date_range_to_str(date(2010, 1, 1), '20100330', split_str="_") - '20100101_20100330' - - ``` + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config._date_range_to_str('20100101', '20100330') + '20100101-20100330' + >>> config._date_range_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' + >>> config._date_range_to_str(date(2010, 1, 1), '20100330', split_str="_") + '20100101_20100330' + """ in_format_str = in_format_str if in_format_str else self.date_format_str out_format_str = out_format_str if out_format_str else self.date_format_str @@ -174,15 +177,15 @@ def _date_range_to_str(self, def mod_path(self, city: str | None = None) -> Path: """Return city estimates path. 
- Example: - ```pycon - >>> config: RunConfig = RunConfig() - >>> config.mod_path() - PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester') - >>> config.mod_path('Glasgow') - PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Glasgow') + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config.mod_path() + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester') + >>> config.mod_path('Glasgow') + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Glasgow') - ``` """ city = city if city else self.city return self.data_path / self.mod_folder / city @@ -190,15 +193,15 @@ def mod_path(self, city: str | None = None) -> Path: def obs_path(self, city: str | None = None) -> Path: """Return city observations path. - Example: - ```pycon - >>> config: RunConfig = RunConfig() - >>> config.obs_path() - PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester') - >>> config.obs_path('Glasgow') - PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Glasgow') + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config.obs_path() + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester') + >>> config.obs_path('Glasgow') + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Glasgow') - ``` """ city = city if city else self.city return self.data_path / self.obs_folder / city @@ -206,15 +209,15 @@ def obs_path(self, city: str | None = None) -> Path: def out_path(self, city: str | None = None, run: str | None = None, variable: str | None = None) -> Path: """Return path to save results. 
- Example: - ```pycon - >>> config: RunConfig = RunConfig() - >>> config.out_path() - PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax') - >>> config.out_path(city='Glasgow', run='07') - PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Glasgow/07/tasmax') + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config.out_path() + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax') + >>> config.out_path(city='Glasgow', run='07') + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Glasgow/07/tasmax') - ``` """ city = city if city else self.city run = run if run else self.run @@ -232,15 +235,15 @@ def to_cli_preprocess_str(self, ) -> str: """Generate a command line interface str as a test example. - Example: - ```pycon - >>> config: RunConfig = RunConfig() - >>> config.to_cli_preprocess_str() == CORRECT_CLI_DEBIASING_DEFAULT_COMMAND - True - >>> CORRECT_CLI_DEBIASING_DEFAULT_COMMAND[:96] #doctest: +ELLIPSIS - 'python preprocess_data.py --mod /.../CPM/Manchester' + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config.to_cli_preprocess_str() == CORRECT_CLI_DEBIASING_DEFAULT_COMMAND + True + >>> CORRECT_CLI_DEBIASING_DEFAULT_COMMAND[:96] #doctest: +ELLIPSIS + 'python preprocess_data.py --mod /.../CPM/Manchester' - ``` """ city = city if city else self.city variable = variable if variable else self.variable From 3ba55c9e78e575a27f88d03a193df4163d1c28ea Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 2 Oct 2023 18:01:40 +0000 Subject: [PATCH 067/146] feat(test): refactor test_debiasing.py to utils.py and run command via subprocess --- python/.pytest.ini | 1 + python/tests/test_debiasing.py | 254 +++++++++++++++++++++++---------- python/utils.py | 96 +++++++++++++ 3 files changed, 274 insertions(+), 77 deletions(-) create mode 100644 python/utils.py diff --git a/python/.pytest.ini 
b/python/.pytest.ini index ed6af647..f03e25bd 100644 --- a/python/.pytest.ini +++ b/python/.pytest.ini @@ -2,5 +2,6 @@ [pytest] minversion = 6.0 addopts = -ra -q --doctest-modules --pdbcls=IPython.terminal.debugger:TerminalPdb +pythonpath = . testpaths = tests diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index b7adf9d7..939388ba 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -6,69 +6,33 @@ from pathlib import Path from os import system, PathLike from dataclasses import dataclass -from typing import Final +from typing import Final, Generator from datetime import date, datetime +import subprocess + +from utils import ( + DATE_FORMAT_SPLIT_STR, DATE_FORMAT_STR, DateType, date_to_str, + iter_to_tuple_strs, date_range_to_str, path_iterdir +) DATA_PATH: Final[Path] = Path('/mnt/vmfileshare/ClimateData/Cropped/three.cities/') -DateType = date | str -DATE_FORMAT_STR: Final[str] = '%Y%m%d' -DATE_FORMAT_SPLIT_STR: Final[str] = '-' - -CORRECT_CLI_DEBIASING_DEFAULT_COMMAND: Final[str] = ( - "python preprocess_data.py " - "--mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester " - "--obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester " - "-v tasmax " - "-r 05 " - "--out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax " - "--calib_dates 19810101-19811230 " - "--valid_dates 20100101-20100330" + +CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( + "python", "preprocess_data.py", + "--mod", Path("/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester"), + "--obs", Path("/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester"), + "-v", "tasmax", + "-r", "05", + "--out", Path("/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax"), + "--calib_dates", "19810101-19811230", + "--valid_dates", "20100101-20100330", ) +CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT: 
Final[str] = ' '.join(iter_to_tuple_strs(CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT)) - -def date_to_str(date_obj: DateType, in_format_str: str = DATE_FORMAT_STR, out_format_str: str = DATE_FORMAT_STR) -> str: - """Return a `str` in `date_format_str` of `date_obj`. - - Example - ------- - - >>> date_to_str('20100101') - '20100101' - >>> date_to_str(date(2010, 1, 1)) - '20100101' - - """ - if isinstance(date_obj, str): - date_obj = datetime.strptime(date_obj, in_format_str).date() - return date_obj.strftime(out_format_str) - - -def date_range_to_str( - start_date: DateType, - end_date: DateType, - split_str: str = DATE_FORMAT_SPLIT_STR, - in_format_str: str = DATE_FORMAT_STR, - out_format_str: str = DATE_FORMAT_STR, -) -> str: - """Take `start_date` and `end_date` `str` or `date` instances and return a range `str`. - - Example - ------- - - >>> date_range_to_str('20100101', '20100330') - '20100101-20100330' - >>> date_range_to_str(date(2010, 1, 1), '20100330') - '20100101-20100330' - - """ - start_date = date_to_str(start_date, - in_format_str=in_format_str, - out_format_str=out_format_str) - end_date = date_to_str(end_date, - in_format_str=in_format_str, - out_format_str=out_format_str) - return f'{start_date}{split_str}{end_date}' +MOD_FOLDER_FILES_COUNT_CORRECT: Final[int] = 1478 +OBS_FOLDER_FILES_COUNT_CORRECT: Final[int] = MOD_FOLDER_FILES_COUNT_CORRECT +OUT_FOLDER_FILES_COUNT_CORRECT: Final[int] = 4 @dataclass @@ -224,7 +188,21 @@ def out_path(self, city: str | None = None, run: str | None = None, variable: st variable = variable if variable else self.variable return self.data_path / self.out_folder / city / run / variable - def to_cli_preprocess_str(self, + @property + def run_prefix_tuple(self) -> tuple[str, ...]: + """Split `self.run_prefix` by ' ' to a `tuple`. 
+ + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config.run_prefix_tuple + ('python', 'preprocess_data.py') + + """ + return tuple(self.run_prefix.split(' ')) + + def to_cli_preprocess_tuple(self, variable: str | None = None, run: str | None = None, city: str | None = None, @@ -232,17 +210,21 @@ def to_cli_preprocess_str(self, calib_end: DateType | None = None, valid_start: DateType | None = None, valid_end: DateType | None = None, - ) -> str: - """Generate a command line interface str as a test example. + ) -> tuple[str | PathLike, ...]: + """Generate a `tuple` of `str` for a command line command. + + Note + ---- + + This will leave `Path` objects uncoverted. See + `self.to_cli_preprocess_tuple_strs` for passing to a terminal. Example ------- >>> config: RunConfig = RunConfig() - >>> config.to_cli_preprocess_str() == CORRECT_CLI_DEBIASING_DEFAULT_COMMAND - True - >>> CORRECT_CLI_DEBIASING_DEFAULT_COMMAND[:96] #doctest: +ELLIPSIS - 'python preprocess_data.py --mod /.../CPM/Manchester' + >>> command_str_tuple: tuple[str, ...] 
= config.to_cli_preprocess_tuple() + >>> assert command_str_tuple == CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT """ city = city if city else self.city @@ -255,20 +237,138 @@ def to_cli_preprocess_str(self, calib_dates_str: str = self.calib_dates_to_str(start_date=calib_start, end_date=calib_end) valid_dates_str: str = self.valid_dates_to_str(start_date=valid_start, end_date=valid_end) - return ' '.join(( - self.run_prefix, - f'--mod {mod_path}', - f'--obs {obs_path}', - f'-v {variable}', - f'-r {run}', - f'--out {out_path}', - f'--calib_dates {calib_dates_str}', - f'--valid_dates {valid_dates_str}', - ) - ) + return ( + *self.run_prefix_tuple, + '--mod', mod_path, + '--obs', obs_path, + '-v', variable, + '-r', run, + '--out', out_path, + '--calib_dates', calib_dates_str, + '--valid_dates', valid_dates_str, + ) + + def to_cli_preprocess_tuple_strs(self, + variable: str | None = None, + run: str | None = None, + city: str | None = None, + calib_start: DateType | None = None, + calib_end: DateType | None = None, + valid_start: DateType | None = None, + valid_end: DateType | None = None, + ) -> tuple[str, ...]: + """Generate a command line interface `str` `tuple` a test example. + + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> command_str_tuple: tuple[str, ...] = config.to_cli_preprocess_tuple() + >>> assert command_str_tuple == CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT + + """ + return iter_to_tuple_strs(self.to_cli_preprocess_tuple( + variable=variable, + run=run, + city=city, + calib_start=calib_start, + calib_end=calib_end, + valid_start=valid_start, + valid_end=valid_end, + )) + + + def to_cli_preprocess_str(self, + variable: str | None = None, + run: str | None = None, + city: str | None = None, + calib_start: DateType | None = None, + calib_end: DateType | None = None, + valid_start: DateType | None = None, + valid_end: DateType | None = None, + ) -> str: + """Generate a command line interface str as a test example. 
+ + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> config.to_cli_preprocess_str() == CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT + True + >>> CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT[:96] #doctest: +ELLIPSIS + 'python preprocess_data.py --mod /.../CPM/Manchester' + + """ + return ' '.join(self.to_cli_preprocess_tuple_strs( + variable=variable, + run=run, + city=city, + calib_start=calib_start, + calib_end=calib_end, + valid_start=valid_start, + valid_end=valid_end, + )) + + def list_mod_folder(self, city: str | None = None) -> Generator[Path, None, None]: + """`Iterable` of all `Path`s in `self.mod_folder`. + + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> len(tuple(config.list_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT + True + """ + return path_iterdir(self.obs_path(city=city)) + + def list_obs_folder(self, city: str | None = None) -> Generator[Path, None, None]: + """`Iterable` of all `Path`s in `self.obs_folder`. + + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> len(tuple(config.list_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT + True + """ + return path_iterdir(self.obs_path(city=city)) + + def list_out_folder(self, city: str | None = None, run: str | None = None, variable: str | None = None) -> Generator[Path, None, None]: + """`Iterable` of all `Path`s in `self.out_folder`. 
+ + Example + ------- + + >>> config: RunConfig = RunConfig() + >>> len(tuple(config.list_out_folder())) == OUT_FOLDER_FILES_COUNT_CORRECT + True + """ + return path_iterdir(self.out_path(city=city, run=run, variable=variable)) + + +@pytest.fixture +def run_config(tmp_path: Path) -> RunConfig: + """Generate a `RunConfig` instance to ease paramaterizing tests.""" + return RunConfig(out_folder=tmp_path) def test_command_line_default() -> None: """Test default generated cli `str`.""" - config: RunConfig = RunConfig() - assert config.to_cli_preprocess_str() == CORRECT_CLI_DEBIASING_DEFAULT_COMMAND + run_config: RunConfig = RunConfig() + assert run_config.to_cli_preprocess_str() == CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT + + +@pytest.mark.parametrize( + 'run_kwargs, out_count', ( + ({}, 0), ({'city': 'Glasgow'}, 0), + ) +) +def test_run(run_config, run_kwargs, out_count, capsys) -> None: + """Test running generated command script via a subprocess.""" + process_complete: subprocess.CompletedProcess = ( + subprocess.run(run_config.to_cli_preprocess_tuple_strs(**run_kwargs), shell=True, check=True) + ) + assert process_complete.returncode == 0 + assert len(tuple(run_config.list_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT + assert len(tuple(run_config.list_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT + assert len(tuple(run_config.list_out_folder())) == out_count diff --git a/python/utils.py b/python/utils.py new file mode 100644 index 00000000..e400220b --- /dev/null +++ b/python/utils.py @@ -0,0 +1,96 @@ +""" +Utility functions. + +""" +from typing import Final, Any, Iterable, Generator +from datetime import date, datetime +from pathlib import Path + + +DateType = date | str +DATE_FORMAT_STR: Final[str] = '%Y%m%d' +DATE_FORMAT_SPLIT_STR: Final[str] = '-' + + +def date_to_str(date_obj: DateType, in_format_str: str = DATE_FORMAT_STR, out_format_str: str = DATE_FORMAT_STR) -> str: + """Return a `str` in `date_format_str` of `date_obj`. 
+ + Example + ------- + + >>> date_to_str('20100101') + '20100101' + >>> date_to_str(date(2010, 1, 1)) + '20100101' + + """ + if isinstance(date_obj, str): + date_obj = datetime.strptime(date_obj, in_format_str).date() + return date_obj.strftime(out_format_str) + + +def date_range_to_str( + start_date: DateType, + end_date: DateType, + split_str: str = DATE_FORMAT_SPLIT_STR, + in_format_str: str = DATE_FORMAT_STR, + out_format_str: str = DATE_FORMAT_STR, +) -> str: + """Take `start_date` and `end_date` `str` or `date` instances and return a range `str`. + + Example + ------- + + >>> date_range_to_str('20100101', '20100330') + '20100101-20100330' + >>> date_range_to_str(date(2010, 1, 1), '20100330') + '20100101-20100330' + + """ + start_date = date_to_str(start_date, + in_format_str=in_format_str, + out_format_str=out_format_str) + end_date = date_to_str(end_date, + in_format_str=in_format_str, + out_format_str=out_format_str) + return f'{start_date}{split_str}{end_date}' + + +def iter_to_tuple_strs(iter_var: Iterable[Any]) -> tuple[str, ...]: + """Return a `tuple` with all components converted to `strs`. + + Examples + -------- + + >>> iter_to_tuple_strs(['cat', 1, Path('a/path')]) + ('cat', '1', 'a/path') + + """ + return tuple(str(obj) for obj in iter_var) + + +def path_iterdir(path: Path, strict: bool = False) -> Generator[Path | None, None, None]: + """Return an `Generator` after ensuring `path` exists. + + Examples + -------- + + >>> example_path: Path = Path('a/test/path') + >>> example_path.exists() + False + >>> tuple(path_iterdir(example_path.parent)) + FileNotFoundError... 
+ >>> example_path.touch() + >>> tuple(path_iterdir(example_path.parent)) + (PosixPath('path'),) + >>> example_path.unlink() + >>> tuple(path_iterdir(example_path.parent)) + (,) + """ + try: + yield from path.iterdir() + except FileNotFoundError as error: + if strict: + raise error + else: + return From c944e1cb3e8c064d4abe70906b28ed80e5e07f05 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 2 Oct 2023 18:24:33 +0000 Subject: [PATCH 068/146] fix(test): fix path_iterdir and add utils.py to .pytest.ini --- python/.pytest.ini | 1 + python/tests/test_debiasing.py | 3 ++- python/utils.py | 15 ++++++++++++--- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/python/.pytest.ini b/python/.pytest.ini index f03e25bd..512c9b08 100644 --- a/python/.pytest.ini +++ b/python/.pytest.ini @@ -5,3 +5,4 @@ addopts = -ra -q --doctest-modules --pdbcls=IPython.terminal.debugger:TerminalPd pythonpath = . testpaths = tests + utils.py diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 939388ba..365ea5ea 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -360,7 +360,8 @@ def test_command_line_default() -> None: @pytest.mark.parametrize( 'run_kwargs, out_count', ( - ({}, 0), ({'city': 'Glasgow'}, 0), + ({}, 0), + ({'city': 'Glasgow'}, 0), ) ) def test_run(run_config, run_kwargs, out_count, capsys) -> None: diff --git a/python/utils.py b/python/utils.py index e400220b..8c6123f4 100644 --- a/python/utils.py +++ b/python/utils.py @@ -75,17 +75,26 @@ def path_iterdir(path: Path, strict: bool = False) -> Generator[Path | None, Non Examples -------- + >>> tmp_path = getfixture('tmp_path') + >>> from os import chdir + >>> chdir(tmp_path) >>> example_path: Path = Path('a/test/path') >>> example_path.exists() False >>> tuple(path_iterdir(example_path.parent)) - FileNotFoundError... + () + >>> tuple(path_iterdir(example_path.parent, strict=True)) + Traceback (most recent call last): + ... 
+ FileNotFoundError: [Errno 2] No such file or directory: 'a/test' + >>> example_path.parent.mkdir(parents=True) >>> example_path.touch() >>> tuple(path_iterdir(example_path.parent)) - (PosixPath('path'),) + (PosixPath('a/test/path'),) >>> example_path.unlink() >>> tuple(path_iterdir(example_path.parent)) - (,) + () + """ try: yield from path.iterdir() From 52c4d1259dcf4bd8a6bb929a3eb2853bbc8a79b0 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 2 Oct 2023 18:57:00 +0000 Subject: [PATCH 069/146] feat(test): refactor default vars to fit three_cities_debiased_cropped.sh --- python/tests/test_debiasing.py | 66 +++++++++++++++++++++++----------- 1 file changed, 45 insertions(+), 21 deletions(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 365ea5ea..5f5f5b53 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -16,17 +16,41 @@ ) -DATA_PATH: Final[Path] = Path('/mnt/vmfileshare/ClimateData/Cropped/three.cities/') +DATA_PATH_DEFAULT: Final[Path] = Path('/mnt/vmfileshare/ClimateData/Cropped/three.cities/') + +RUN_NAME_DEFAULT: Final[str] = '05' +VARIABLE_NAME_DEFAULT: Final[str] = "tasmax" + +CITY_NAME_DEFAULT: Final[str] = "Manchester" + +MOD_FOLDER_DEFUALT: Final[Path] = Path('CPM') +OBS_FOLDER_DEFUALT: Final[Path] = Path('Hads.updated360') +OUT_FOLDER_DEFUALT: Final[Path] = Path('Preprocessed') + +CALIB_DATE_START_DEFAULT: DateType = date(1981, 1, 1) +CALIB_DATE_END_DEFAULT: DateType = date(1981, 12, 30) + +VALID_DATE_START_DEFAULT: DateType = date(2010, 1, 1) +VALID_DATE_END_DEFAULT: DateType = date(2010, 3, 30) + +CALIB_DATES_STR_DEFAULT: Final[str] = date_range_to_str( + CALIB_DATE_START_DEFAULT, CALIB_DATE_END_DEFAULT +) +VALID_DATES_STR_DEFAULT: Final[str] = date_range_to_str( + VALID_DATE_START_DEFAULT, VALID_DATE_END_DEFAULT +) + CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( "python", "preprocess_data.py", - "--mod", 
Path("/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester"), - "--obs", Path("/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester"), - "-v", "tasmax", - "-r", "05", - "--out", Path("/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax"), - "--calib_dates", "19810101-19811230", - "--valid_dates", "20100101-20100330", + "--mod", DATA_PATH_DEFAULT / MOD_FOLDER_DEFUALT / CITY_NAME_DEFAULT, + "--obs", DATA_PATH_DEFAULT / OBS_FOLDER_DEFUALT / CITY_NAME_DEFAULT, + "-v", VARIABLE_NAME_DEFAULT, + "-r", RUN_NAME_DEFAULT, + "--out", (DATA_PATH_DEFAULT / OUT_FOLDER_DEFUALT / CITY_NAME_DEFAULT / + RUN_NAME_DEFAULT / VARIABLE_NAME_DEFAULT), + "--calib_dates", CALIB_DATES_STR_DEFAULT, + "--valid_dates", VALID_DATES_STR_DEFAULT, ) CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join(iter_to_tuple_strs(CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT)) @@ -37,23 +61,23 @@ @dataclass class RunConfig: - variable: str = 'tasmax' - run: str = '05' - city: str = 'Manchester' + variable: str = VARIABLE_NAME_DEFAULT + run: str = RUN_NAME_DEFAULT + city: str = CITY_NAME_DEFAULT method_1: str = "quantile_delta_mapping" method_2: str = "variance_scaling" run_prefix: str = 'python preprocess_data.py' - data_path: Path = DATA_PATH - mod_folder: PathLike = 'CPM' - obs_folder: PathLike = 'Hads.original360' - out_folder: PathLike = 'Preprocessed' + data_path: Path = DATA_PATH_DEFAULT + mod_folder: PathLike = MOD_FOLDER_DEFUALT + obs_folder: PathLike = OBS_FOLDER_DEFUALT + out_folder: PathLike = OUT_FOLDER_DEFUALT - calib_date_start: DateType = date(1981, 1, 1) - calib_date_end: DateType = date(1981, 12, 30) + calib_date_start: DateType = CALIB_DATE_START_DEFAULT + calib_date_end: DateType = CALIB_DATE_END_DEFAULT - valid_date_start: DateType = date(2010, 1, 1) - valid_date_end: DateType = date(2010, 3, 30) + valid_date_start: DateType = VALID_DATE_START_DEFAULT + valid_date_end: DateType = VALID_DATE_END_DEFAULT 
processes: int = 32 @@ -162,9 +186,9 @@ def obs_path(self, city: str | None = None) -> Path: >>> config: RunConfig = RunConfig() >>> config.obs_path() - PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Manchester') + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/Manchester') >>> config.obs_path('Glasgow') - PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Glasgow') + PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/Glasgow') """ city = city if city else self.city From 61e48c82d6bac29ab7d061aa1bc54776691233c7 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 3 Oct 2023 04:09:56 +0000 Subject: [PATCH 070/146] fix(test): remove shell=True from subprocess.run in test_debiased --- python/.pytest.ini | 2 ++ python/tests/test_debiasing.py | 44 +++++++++++++++++++++++----------- 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/python/.pytest.ini b/python/.pytest.ini index 512c9b08..351f02b4 100644 --- a/python/.pytest.ini +++ b/python/.pytest.ini @@ -6,3 +6,5 @@ pythonpath = . testpaths = tests utils.py +markers = + slow: mark test as slow. 
diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 5f5f5b53..86501933 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -4,7 +4,7 @@ """ import pytest from pathlib import Path -from os import system, PathLike +from os import system, PathLike, chdir from dataclasses import dataclass from typing import Final, Generator from datetime import date, datetime @@ -17,6 +17,8 @@ DATA_PATH_DEFAULT: Final[Path] = Path('/mnt/vmfileshare/ClimateData/Cropped/three.cities/') +COMMAND_DIR_DEFAULT: Final[Path] = Path('debiasing').resolve() +COMMAND_FILE_NAME: Final[Path] = Path("preprocess_data.py") RUN_NAME_DEFAULT: Final[str] = '05' VARIABLE_NAME_DEFAULT: Final[str] = "tasmax" @@ -31,7 +33,7 @@ CALIB_DATE_END_DEFAULT: DateType = date(1981, 12, 30) VALID_DATE_START_DEFAULT: DateType = date(2010, 1, 1) -VALID_DATE_END_DEFAULT: DateType = date(2010, 3, 30) +VALID_DATE_END_DEFAULT: DateType = date(2010, 12, 30) CALIB_DATES_STR_DEFAULT: Final[str] = date_range_to_str( CALIB_DATE_START_DEFAULT, CALIB_DATE_END_DEFAULT @@ -42,7 +44,7 @@ CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( - "python", "preprocess_data.py", + "python", str(COMMAND_FILE_NAME), "--mod", DATA_PATH_DEFAULT / MOD_FOLDER_DEFUALT / CITY_NAME_DEFAULT, "--obs", DATA_PATH_DEFAULT / OBS_FOLDER_DEFUALT / CITY_NAME_DEFAULT, "-v", VARIABLE_NAME_DEFAULT, @@ -61,6 +63,7 @@ @dataclass class RunConfig: + command_dir: Path = COMMAND_DIR_DEFAULT variable: str = VARIABLE_NAME_DEFAULT run: str = RUN_NAME_DEFAULT city: str = CITY_NAME_DEFAULT @@ -369,6 +372,11 @@ def list_out_folder(self, city: str | None = None, run: str | None = None, varia """ return path_iterdir(self.out_path(city=city, run=run, variable=variable)) + @property + def command_path(self) -> Path: + """Return command path relative to running tests.""" + return (Path() / self.command_dir).absolute() + @pytest.fixture def run_config(tmp_path: Path) -> RunConfig: @@ -382,18 +390,26 @@ 
def test_command_line_default() -> None: assert run_config.to_cli_preprocess_str() == CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT +@pytest.mark.slow @pytest.mark.parametrize( - 'run_kwargs, out_count', ( - ({}, 0), - ({'city': 'Glasgow'}, 0), - ) + 'city', (None, 'Glasgow',) ) -def test_run(run_config, run_kwargs, out_count, capsys) -> None: +def test_run(run_config, city) -> None: """Test running generated command script via a subprocess.""" - process_complete: subprocess.CompletedProcess = ( - subprocess.run(run_config.to_cli_preprocess_tuple_strs(**run_kwargs), shell=True, check=True) + chdir(run_config.command_path) + assert COMMAND_FILE_NAME in tuple(Path().iterdir()) + process: subprocess.CompletedProcess = ( + subprocess.run( + run_config.to_cli_preprocess_tuple_strs(city=city), + capture_output=True, text=True + ) ) - assert process_complete.returncode == 0 - assert len(tuple(run_config.list_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT - assert len(tuple(run_config.list_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT - assert len(tuple(run_config.list_out_folder())) == out_count + assert process.returncode == 0 + assert len(tuple(run_config.list_mod_folder(city=city))) == MOD_FOLDER_FILES_COUNT_CORRECT + assert len(tuple(run_config.list_obs_folder(city=city))) == OBS_FOLDER_FILES_COUNT_CORRECT + assert len(tuple(run_config.list_out_folder(city=city))) == OUT_FOLDER_FILES_COUNT_CORRECT + city = CITY_NAME_DEFAULT if city is None else city + for log_txt in ( + "Saved observed (HADs) data for validation, period ('2010-01-01', '2010-12-30')", + f"{city}/05/tasmax/modv_var-tasmax_run-05_20100101_20101230.nc"): + assert log_txt in process.stdout From c6ecaa68219b088b62aa976c18fe8859abe50b6e Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 3 Oct 2023 13:18:55 +0000 Subject: [PATCH 071/146] feat(test): refactor to add cmethods test cases --- python/debiasing/run_cmethods.py | 0 python/tests/test_debiasing.py | 334 ++++++++++++++++++++++++------- 2 files 
changed, 262 insertions(+), 72 deletions(-) mode change 100644 => 100755 python/debiasing/run_cmethods.py diff --git a/python/debiasing/run_cmethods.py b/python/debiasing/run_cmethods.py old mode 100644 new mode 100755 diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 86501933..e626c6a2 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -7,6 +7,7 @@ from os import system, PathLike, chdir from dataclasses import dataclass from typing import Final, Generator +from enum import StrEnum, auto from datetime import date, datetime import subprocess @@ -17,17 +18,73 @@ DATA_PATH_DEFAULT: Final[Path] = Path('/mnt/vmfileshare/ClimateData/Cropped/three.cities/') + COMMAND_DIR_DEFAULT: Final[Path] = Path('debiasing').resolve() -COMMAND_FILE_NAME: Final[Path] = Path("preprocess_data.py") +PREPROCESS_FILE_NAME: Final[Path] = Path("preprocess_data.py") +CMETHODS_FILE_NAME: Final[Path] = Path("run_cmethods.py") + + +class VariableOptions(StrEnum): + """Supported options for variables""" + TASMAX = auto() + RAINFALL = auto() + TASMIN = auto() + + @classmethod + def default(cls) -> str: + """Default option.""" + return cls.TASMAX.value + + +class RunOptions(StrEnum): + """Supported options for variables""" + FIVE = '05' + SEVEN = '07' + EIGHT = '08' + SIX = '06' + + @classmethod + def default(cls) -> str: + """Default option.""" + return cls.FIVE.value + + +class CityOptions(StrEnum): + """Supported options for variables.""" + GLASGOW = "Glasgow" + MANCHESTER = "Manchester" + LONDON = "London" + + @classmethod + def default(cls) -> str: + """Default option.""" + return cls.MANCHESTER.value + -RUN_NAME_DEFAULT: Final[str] = '05' -VARIABLE_NAME_DEFAULT: Final[str] = "tasmax" +class MethodOptions(StrEnum): + """Supported options for methods.""" + QUANTILE_DELTA_MAPPING = auto() + QUANTILE_MAPPING = auto() + VARIANCE_SCALING = auto() + DELTA_METHODS = auto() -CITY_NAME_DEFAULT: Final[str] = "Manchester" + @classmethod + 
def default_method_1(cls) -> str: + """Default method_1 option.""" + return cls.QUANTILE_DELTA_MAPPING.value -MOD_FOLDER_DEFUALT: Final[Path] = Path('CPM') -OBS_FOLDER_DEFUALT: Final[Path] = Path('Hads.updated360') -OUT_FOLDER_DEFUALT: Final[Path] = Path('Preprocessed') + @classmethod + def default_method_2(cls) -> str: + """Default method_2 option.""" + return cls.VARIANCE_SCALING.value + +PROCESSESORS_DEFAULT: Final[int] = 32 +RUN_PREFIX_DEFAULT: Final[str] = "python" + +MOD_FOLDER_DEFAULT: Final[Path] = Path('CPM') +OBS_FOLDER_DEFAULT: Final[Path] = Path('Hads.updated360') +PREPROCESS_OUT_FOLDER_DEFAULT: Final[Path] = Path('Preprocessed') +CMETHODS_OUT_FOLDER_DEFAULT: Final[Path] = Path('../../Debiased/three.cities.cropped') CALIB_DATE_START_DEFAULT: DateType = date(1981, 1, 1) CALIB_DATE_END_DEFAULT: DateType = date(1981, 12, 30) @@ -43,38 +100,60 @@ ) -CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( - "python", str(COMMAND_FILE_NAME), - "--mod", DATA_PATH_DEFAULT / MOD_FOLDER_DEFUALT / CITY_NAME_DEFAULT, - "--obs", DATA_PATH_DEFAULT / OBS_FOLDER_DEFUALT / CITY_NAME_DEFAULT, - "-v", VARIABLE_NAME_DEFAULT, - "-r", RUN_NAME_DEFAULT, - "--out", (DATA_PATH_DEFAULT / OUT_FOLDER_DEFUALT / CITY_NAME_DEFAULT / - RUN_NAME_DEFAULT / VARIABLE_NAME_DEFAULT), +CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( + "python", PREPROCESS_FILE_NAME, + "--mod", DATA_PATH_DEFAULT / MOD_FOLDER_DEFAULT / CityOptions.default(), + "--obs", DATA_PATH_DEFAULT / OBS_FOLDER_DEFAULT / CityOptions.default(), + "-v", VariableOptions.default(), + "-r", RunOptions.default(), + "--out", (DATA_PATH_DEFAULT / PREPROCESS_OUT_FOLDER_DEFAULT / CityOptions.default() / + RunOptions.default() / VariableOptions.default()), "--calib_dates", CALIB_DATES_STR_DEFAULT, "--valid_dates", VALID_DATES_STR_DEFAULT, ) -CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join(iter_to_tuple_strs(CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT)) 
+CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join(iter_to_tuple_strs( + CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT +)) + +CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( + "python", CMETHODS_FILE_NAME, + "--input_data_folder", CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT[11], + "--out", (DATA_PATH_DEFAULT / CMETHODS_OUT_FOLDER_DEFAULT / + CityOptions.default() / RunOptions.default()).resolve(), + "--method", MethodOptions.default_method_1(), + "-v", VariableOptions.default(), + "-p", PROCESSESORS_DEFAULT, +) +CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join(iter_to_tuple_strs( + CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT +)) + MOD_FOLDER_FILES_COUNT_CORRECT: Final[int] = 1478 OBS_FOLDER_FILES_COUNT_CORRECT: Final[int] = MOD_FOLDER_FILES_COUNT_CORRECT -OUT_FOLDER_FILES_COUNT_CORRECT: Final[int] = 4 +PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT: Final[int] = 4 @dataclass class RunConfig: + + """Manage creating command line scripts to run `debiasing` `cli`.""" + command_dir: Path = COMMAND_DIR_DEFAULT - variable: str = VARIABLE_NAME_DEFAULT - run: str = RUN_NAME_DEFAULT - city: str = CITY_NAME_DEFAULT - method_1: str = "quantile_delta_mapping" - method_2: str = "variance_scaling" - run_prefix: str = 'python preprocess_data.py' + variable: str = VariableOptions.default() + run: str = RunOptions.default() + city: str = CityOptions.default() + method_1: str = MethodOptions.default_method_1() + method_2: str = MethodOptions.default_method_2() + run_prefix: str = RUN_PREFIX_DEFAULT + preprocess_data_file: PathLike = PREPROCESS_FILE_NAME + run_cmethods_file: PathLike = CMETHODS_FILE_NAME data_path: Path = DATA_PATH_DEFAULT - mod_folder: PathLike = MOD_FOLDER_DEFUALT - obs_folder: PathLike = OBS_FOLDER_DEFUALT - out_folder: PathLike = OUT_FOLDER_DEFUALT + mod_folder: PathLike = MOD_FOLDER_DEFAULT + obs_folder: PathLike = OBS_FOLDER_DEFAULT + preprocess_out_folder: PathLike = PREPROCESS_OUT_FOLDER_DEFAULT + cmethods_out_folder: 
PathLike = CMETHODS_OUT_FOLDER_DEFAULT calib_date_start: DateType = CALIB_DATE_START_DEFAULT calib_date_end: DateType = CALIB_DATE_END_DEFAULT @@ -82,7 +161,7 @@ class RunConfig: valid_date_start: DateType = VALID_DATE_START_DEFAULT valid_date_end: DateType = VALID_DATE_END_DEFAULT - processes: int = 32 + processors: int = PROCESSESORS_DEFAULT date_format_str: str = DATE_FORMAT_STR date_split_str: str = DATE_FORMAT_SPLIT_STR @@ -97,7 +176,6 @@ def calib_dates_to_str(self, Example ------- - >>> config: RunConfig = RunConfig() >>> config.calib_dates_to_str('20100101', '20100330') '20100101-20100330' @@ -105,7 +183,6 @@ def calib_dates_to_str(self, '20100101-20100330' >>> config.calib_dates_to_str(date(2010, 1, 1), '20100330', split_str="_") '20100101_20100330' - """ start_date = start_date if start_date else self.calib_date_start end_date = end_date if end_date else self.calib_date_end @@ -121,7 +198,6 @@ def valid_dates_to_str(self, Example ------- - >>> config: RunConfig = RunConfig() >>> config.valid_dates_to_str('20100101', '20100330') '20100101-20100330' @@ -129,7 +205,6 @@ def valid_dates_to_str(self, '20100101-20100330' >>> config.valid_dates_to_str(date(2010, 1, 1), '20100330', split_str="_") '20100101_20100330' - """ start_date = start_date if start_date else self.valid_date_start end_date = end_date if end_date else self.valid_date_end @@ -145,7 +220,6 @@ def _date_range_to_str(self, Example ------- - >>> config: RunConfig = RunConfig() >>> config._date_range_to_str('20100101', '20100330') '20100101-20100330' @@ -153,7 +227,6 @@ def _date_range_to_str(self, '20100101-20100330' >>> config._date_range_to_str(date(2010, 1, 1), '20100330', split_str="_") '20100101_20100330' - """ in_format_str = in_format_str if in_format_str else self.date_format_str out_format_str = out_format_str if out_format_str else self.date_format_str @@ -170,13 +243,11 @@ def mod_path(self, city: str | None = None) -> Path: Example ------- - >>> config: RunConfig = RunConfig() >>> 
config.mod_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester') >>> config.mod_path('Glasgow') PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Glasgow') - """ city = city if city else self.city return self.data_path / self.mod_folder / city @@ -186,34 +257,54 @@ def obs_path(self, city: str | None = None) -> Path: Example ------- - >>> config: RunConfig = RunConfig() >>> config.obs_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/Manchester') >>> config.obs_path('Glasgow') PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/Glasgow') - """ city = city if city else self.city return self.data_path / self.obs_folder / city - def out_path(self, city: str | None = None, run: str | None = None, variable: str | None = None) -> Path: + def preprocess_out_path( + self, + city: str | None = None, + run: str | None = None, + variable: str | None = None + ) -> Path: """Return path to save results. Example ------- - >>> config: RunConfig = RunConfig() - >>> config.out_path() + >>> config.preprocess_out_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax') - >>> config.out_path(city='Glasgow', run='07') + >>> config.preprocess_out_path(city='Glasgow', run='07') PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Glasgow/07/tasmax') - """ city = city if city else self.city run = run if run else self.run variable = variable if variable else self.variable - return self.data_path / self.out_folder / city / run / variable + return (self.data_path / self.preprocess_out_folder / city / run / variable).resolve() + + def cmethods_out_path( + self, + city: str | None = None, + run: str | None = None, + ) -> Path: + """Return path to save cmethods results. 
+ + Example + ------- + >>> config: RunConfig = RunConfig() + >>> config.cmethods_out_path() + PosixPath('/mnt/vmfileshare/ClimateData/Debiased/three.cities.cropped/Manchester/05') + >>> config.cmethods_out_path(city='Glasgow', run='07') + PosixPath('/mnt/vmfileshare/ClimateData/Debiased/three.cities.cropped/Glasgow/07') + """ + city = city if city else self.city + run = run if run else self.run + return (self.data_path / self.cmethods_out_folder / city / run).resolve() @property def run_prefix_tuple(self) -> tuple[str, ...]: @@ -221,11 +312,9 @@ def run_prefix_tuple(self) -> tuple[str, ...]: Example ------- - - >>> config: RunConfig = RunConfig() + >>> config: RunConfig = RunConfig(run_prefix='python -m') >>> config.run_prefix_tuple - ('python', 'preprocess_data.py') - + ('python', '-m') """ return tuple(self.run_prefix.split(' ')) @@ -242,17 +331,14 @@ def to_cli_preprocess_tuple(self, Note ---- - This will leave `Path` objects uncoverted. See `self.to_cli_preprocess_tuple_strs` for passing to a terminal. Example ------- - >>> config: RunConfig = RunConfig() >>> command_str_tuple: tuple[str, ...] 
= config.to_cli_preprocess_tuple() - >>> assert command_str_tuple == CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT - + >>> assert command_str_tuple == CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT """ city = city if city else self.city variable = variable if variable else self.variable @@ -260,17 +346,18 @@ def to_cli_preprocess_tuple(self, mod_path: Path = self.mod_path(city=city) obs_path: Path = self.obs_path(city=city) - out_path: Path = self.out_path(city=city, run=run, variable=variable) + preprocess_out_path: Path = self.preprocess_out_path(city=city, run=run, variable=variable) calib_dates_str: str = self.calib_dates_to_str(start_date=calib_start, end_date=calib_end) valid_dates_str: str = self.valid_dates_to_str(start_date=valid_start, end_date=valid_end) return ( *self.run_prefix_tuple, + self.preprocess_data_file, '--mod', mod_path, '--obs', obs_path, '-v', variable, '-r', run, - '--out', out_path, + '--out', preprocess_out_path, '--calib_dates', calib_dates_str, '--valid_dates', valid_dates_str, ) @@ -288,11 +375,9 @@ def to_cli_preprocess_tuple_strs(self, Example ------- - >>> config: RunConfig = RunConfig() >>> command_str_tuple: tuple[str, ...] 
= config.to_cli_preprocess_tuple() - >>> assert command_str_tuple == CLI_DEBIASING_DEFAULT_COMMAND_TUPLE_CORRECT - + >>> assert command_str_tuple == CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT """ return iter_to_tuple_strs(self.to_cli_preprocess_tuple( variable=variable, @@ -318,13 +403,11 @@ def to_cli_preprocess_str(self, Example ------- - >>> config: RunConfig = RunConfig() - >>> config.to_cli_preprocess_str() == CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT + >>> config.to_cli_preprocess_str() == CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT True - >>> CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT[:96] #doctest: +ELLIPSIS + >>> CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT[:96] #doctest: +ELLIPSIS 'python preprocess_data.py --mod /.../CPM/Manchester' - """ return ' '.join(self.to_cli_preprocess_tuple_strs( variable=variable, @@ -341,7 +424,6 @@ def list_mod_folder(self, city: str | None = None) -> Generator[Path, None, None Example ------- - >>> config: RunConfig = RunConfig() >>> len(tuple(config.list_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT True @@ -353,41 +435,149 @@ def list_obs_folder(self, city: str | None = None) -> Generator[Path, None, None Example ------- - >>> config: RunConfig = RunConfig() >>> len(tuple(config.list_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT True """ return path_iterdir(self.obs_path(city=city)) - def list_out_folder(self, city: str | None = None, run: str | None = None, variable: str | None = None) -> Generator[Path, None, None]: - """`Iterable` of all `Path`s in `self.out_folder`. + def list_preprocess_out_folder(self, + city: str | None = None, + run: str | None = None, + variable: str | None = None + ) -> Generator[Path, None, None]: + """`Iterable` of all `Path`s in `self.preprocess_out_folder`. 
Example ------- - >>> config: RunConfig = RunConfig() - >>> len(tuple(config.list_out_folder())) == OUT_FOLDER_FILES_COUNT_CORRECT + >>> len(tuple(config.list_preprocess_out_folder())) == PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT True """ - return path_iterdir(self.out_path(city=city, run=run, variable=variable)) + return path_iterdir(self.preprocess_out_path(city=city, run=run, variable=variable)) @property def command_path(self) -> Path: """Return command path relative to running tests.""" return (Path() / self.command_dir).absolute() + def to_cli_run_cmethods_1_tuple(self, + city: str | None = None, + run: str | None = None, + variable: str | None = None, + method_1: str | None = None, + input_data_path: PathLike | None = None, + cmethods_out_path: PathLike | None = None, + processors: int | None = None, + ) -> tuple[str | PathLike, ...]: + """Generate a `tuple` of `str` for a command line command. + + Note + ---- + This will leave `Path` objects uncoverted. See + `self.to_cli_run_cmethods_tuple_strs` for passing to a terminal. + + Example + ------- + >>> config: RunConfig = RunConfig() + >>> command_str_tuple: tuple[str, ...] 
= config.to_cli_run_cmethods_1_tuple() + >>> assert command_str_tuple == CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT + """ + city = city if city else self.city + variable = variable if variable else self.variable + run = run if run else self.run + method_1 = method_1 if method_1 else self.method_1 + + input_data_path: PathLike = ( + input_data_path if input_data_path + else self.preprocess_out_path(city=city, run=run, variable=variable) + ) + + cmethods_out_path = ( + cmethods_out_path if cmethods_out_path else + self.cmethods_out_path(city=city, run=run) + ) + + processors = processors if processors else self.processors + + return ( + *self.run_prefix_tuple, + self.run_cmethods_file, + '--input_data_folder', input_data_path, + '--out', cmethods_out_path, + '--method', method_1, + '-v', variable, + '-p', processors, + ) + + # def to_cli_run_cmethods_tuple_strs(self, + # variable: str | None = None, + # run: str | None = None, + # city: str | None = None, + # calib_start: DateType | None = None, + # calib_end: DateType | None = None, + # valid_start: DateType | None = None, + # valid_end: DateType | None = None, + # ) -> tuple[str, ...]: + # """Generate a command line interface `str` `tuple` a test example. + + # Example + # ------- + # >>> config: RunConfig = RunConfig() + # >>> command_str_tuple: tuple[str, ...] 
= config.to_cli_preprocess_tuple() + # >>> assert command_str_tuple == CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT + # """ + # return iter_to_tuple_strs(self.to_cli_preprocess_tuple( + # variable=variable, + # run=run, + # city=city, + # calib_start=calib_start, + # calib_end=calib_end, + # valid_start=valid_start, + # valid_end=valid_end, + # )) + + + # def to_cli_run_cmethods_str(self, + # variable: str | None = None, + # run: str | None = None, + # city: str | None = None, + # calib_start: DateType | None = None, + # calib_end: DateType | None = None, + # valid_start: DateType | None = None, + # valid_end: DateType | None = None, + # ) -> str: + # """Generate a command line interface str as a test example. + + # Example + # ------- + # >>> config: RunConfig = RunConfig() + # >>> config.to_cli_preprocess_str() == CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT + # True + # >>> CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT[:96] #doctest: +ELLIPSIS + # 'python preprocess_data.py --mod /.../CPM/Manchester' + # """ + # return ' '.join(self.to_cli_preprocess_tuple_strs( + # variable=variable, + # run=run, + # city=city, + # calib_start=calib_start, + # calib_end=calib_end, + # valid_start=valid_start, + # valid_end=valid_end, + # )) + @pytest.fixture def run_config(tmp_path: Path) -> RunConfig: """Generate a `RunConfig` instance to ease paramaterizing tests.""" - return RunConfig(out_folder=tmp_path) + return RunConfig(preprocess_out_folder=tmp_path) def test_command_line_default() -> None: """Test default generated cli `str`.""" run_config: RunConfig = RunConfig() - assert run_config.to_cli_preprocess_str() == CLI_DEBIASING_DEFAULT_COMMAND_STR_CORRECT + assert run_config.to_cli_preprocess_str() == CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT @pytest.mark.slow @@ -397,7 +587,7 @@ def test_command_line_default() -> None: def test_run(run_config, city) -> None: """Test running generated command script via a subprocess.""" chdir(run_config.command_path) - assert COMMAND_FILE_NAME 
in tuple(Path().iterdir()) + assert PREPROCESS_FILE_NAME in tuple(Path().iterdir()) process: subprocess.CompletedProcess = ( subprocess.run( run_config.to_cli_preprocess_tuple_strs(city=city), @@ -407,8 +597,8 @@ def test_run(run_config, city) -> None: assert process.returncode == 0 assert len(tuple(run_config.list_mod_folder(city=city))) == MOD_FOLDER_FILES_COUNT_CORRECT assert len(tuple(run_config.list_obs_folder(city=city))) == OBS_FOLDER_FILES_COUNT_CORRECT - assert len(tuple(run_config.list_out_folder(city=city))) == OUT_FOLDER_FILES_COUNT_CORRECT - city = CITY_NAME_DEFAULT if city is None else city + assert len(tuple(run_config.list_preprocess_out_folder(city=city))) == PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT + city = CityOptions.default() if city is None else city for log_txt in ( "Saved observed (HADs) data for validation, period ('2010-01-01', '2010-12-30')", f"{city}/05/tasmax/modv_var-tasmax_run-05_20100101_20101230.nc"): From 9c62263af1ec5c4bba490194b21ab40bb106e0eb Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 3 Oct 2023 13:41:26 +0000 Subject: [PATCH 072/146] feat(test): add remaining methods for run_cmethods_1 testing infrastructure --- python/tests/test_debiasing.py | 142 ++++++++++++++++++--------------- 1 file changed, 76 insertions(+), 66 deletions(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index e626c6a2..c5170cdd 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -100,7 +100,7 @@ def default_method_2(cls) -> str: ) -CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( +CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str, ...]] = ( "python", PREPROCESS_FILE_NAME, "--mod", DATA_PATH_DEFAULT / MOD_FOLDER_DEFAULT / CityOptions.default(), "--obs", DATA_PATH_DEFAULT / OBS_FOLDER_DEFAULT / CityOptions.default(), @@ -111,9 +111,14 @@ def default_method_2(cls) -> str: "--calib_dates", CALIB_DATES_STR_DEFAULT, "--valid_dates", 
VALID_DATES_STR_DEFAULT, ) -CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join(iter_to_tuple_strs( - CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT -)) + +CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_STR_CORRECT: Final[tuple[str, ...]] = ( + iter_to_tuple_strs(CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT) +) + +CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join( + CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_STR_CORRECT +) CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( "python", CMETHODS_FILE_NAME, @@ -124,9 +129,13 @@ def default_method_2(cls) -> str: "-v", VariableOptions.default(), "-p", PROCESSESORS_DEFAULT, ) -CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join(iter_to_tuple_strs( - CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT -)) + +CLI_CMEHTODS_DEFAULT_COMMAND_TUPLE_STR_CORRECT: Final[tuple[str, ...]] = ( + iter_to_tuple_strs(CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT) +) +CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join( + CLI_CMEHTODS_DEFAULT_COMMAND_TUPLE_STR_CORRECT +) MOD_FOLDER_FILES_COUNT_CORRECT: Final[int] = 1478 @@ -376,8 +385,8 @@ def to_cli_preprocess_tuple_strs(self, Example ------- >>> config: RunConfig = RunConfig() - >>> command_str_tuple: tuple[str, ...] = config.to_cli_preprocess_tuple() - >>> assert command_str_tuple == CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT + >>> command_str_tuple: tuple[str, ...] = config.to_cli_preprocess_tuple_strs() + >>> assert command_str_tuple == CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_STR_CORRECT """ return iter_to_tuple_strs(self.to_cli_preprocess_tuple( variable=variable, @@ -451,7 +460,8 @@ def list_preprocess_out_folder(self, Example ------- >>> config: RunConfig = RunConfig() - >>> len(tuple(config.list_preprocess_out_folder())) == PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT + >>> (len(tuple(config.list_preprocess_out_folder())) == + ... 
PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT) True """ return path_iterdir(self.preprocess_out_path(city=city, run=run, variable=variable)) @@ -510,62 +520,62 @@ def to_cli_run_cmethods_1_tuple(self, '-p', processors, ) - # def to_cli_run_cmethods_tuple_strs(self, - # variable: str | None = None, - # run: str | None = None, - # city: str | None = None, - # calib_start: DateType | None = None, - # calib_end: DateType | None = None, - # valid_start: DateType | None = None, - # valid_end: DateType | None = None, - # ) -> tuple[str, ...]: - # """Generate a command line interface `str` `tuple` a test example. - - # Example - # ------- - # >>> config: RunConfig = RunConfig() - # >>> command_str_tuple: tuple[str, ...] = config.to_cli_preprocess_tuple() - # >>> assert command_str_tuple == CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT - # """ - # return iter_to_tuple_strs(self.to_cli_preprocess_tuple( - # variable=variable, - # run=run, - # city=city, - # calib_start=calib_start, - # calib_end=calib_end, - # valid_start=valid_start, - # valid_end=valid_end, - # )) - - - # def to_cli_run_cmethods_str(self, - # variable: str | None = None, - # run: str | None = None, - # city: str | None = None, - # calib_start: DateType | None = None, - # calib_end: DateType | None = None, - # valid_start: DateType | None = None, - # valid_end: DateType | None = None, - # ) -> str: - # """Generate a command line interface str as a test example. 
- - # Example - # ------- - # >>> config: RunConfig = RunConfig() - # >>> config.to_cli_preprocess_str() == CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT - # True - # >>> CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT[:96] #doctest: +ELLIPSIS - # 'python preprocess_data.py --mod /.../CPM/Manchester' - # """ - # return ' '.join(self.to_cli_preprocess_tuple_strs( - # variable=variable, - # run=run, - # city=city, - # calib_start=calib_start, - # calib_end=calib_end, - # valid_start=valid_start, - # valid_end=valid_end, - # )) + def to_cli_run_cmethods_1_tuple_strs(self, + city: str | None = None, + run: str | None = None, + variable: str | None = None, + method_1: str | None = None, + input_data_path: PathLike | None = None, + cmethods_out_path: PathLike | None = None, + processors: int | None = None, + ) -> tuple[str, ...]: + """Generate a command line interface `str` `tuple` a test example. + + Example + ------- + >>> config: RunConfig = RunConfig() + >>> command_str_tuple: tuple[str, ...] = config.to_cli_run_cmethods_1_tuple_strs() + >>> assert command_str_tuple == CLI_CMEHTODS_DEFAULT_COMMAND_TUPLE_STR_CORRECT + """ + return iter_to_tuple_strs(self.to_cli_run_cmethods_1_tuple( + city=city, + run=run, + variable=variable, + method_1=method_1, + input_data_path=input_data_path, + cmethods_out_path=cmethods_out_path, + processors=processors, + )) + + + def to_cli_run_cmethods_1_str(self, + city: str | None = None, + run: str | None = None, + variable: str | None = None, + method_1: str | None = None, + input_data_path: PathLike | None = None, + cmethods_out_path: PathLike | None = None, + processors: int | None = None, + ) -> str: + """Generate a command line interface str as a test example. + + Example + ------- + >>> config: RunConfig = RunConfig() + >>> config.to_cli_run_cmethods_1_str() == CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT + True + >>> CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT #doctest: +ELLIPSIS + 'python run_cmethods.py...--method quantile_delta_mapping...' 
+ """ + return ' '.join(self.to_cli_run_cmethods_1_tuple_strs( + city=city, + run=run, + variable=variable, + method_1=method_1, + input_data_path=input_data_path, + cmethods_out_path=cmethods_out_path, + processors=processors, + )) @pytest.fixture From 891274a027e5c5165a0e8e7bdf8d13a3a8ff5bb0 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 3 Oct 2023 14:00:08 +0000 Subject: [PATCH 073/146] feat(test): add cmethod_run test cases for Manchester and Glasgow --- python/tests/test_debiasing.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index c5170cdd..432e88d6 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -598,18 +598,36 @@ def test_run(run_config, city) -> None: """Test running generated command script via a subprocess.""" chdir(run_config.command_path) assert PREPROCESS_FILE_NAME in tuple(Path().iterdir()) - process: subprocess.CompletedProcess = ( + preprocess_run: subprocess.CompletedProcess = ( subprocess.run( run_config.to_cli_preprocess_tuple_strs(city=city), capture_output=True, text=True ) ) - assert process.returncode == 0 + assert preprocess_run.returncode == 0 assert len(tuple(run_config.list_mod_folder(city=city))) == MOD_FOLDER_FILES_COUNT_CORRECT assert len(tuple(run_config.list_obs_folder(city=city))) == OBS_FOLDER_FILES_COUNT_CORRECT - assert len(tuple(run_config.list_preprocess_out_folder(city=city))) == PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT - city = CityOptions.default() if city is None else city + assert (len(tuple(run_config.list_preprocess_out_folder(city=city))) == + PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT) + test_city = CityOptions.default() if city is None else city for log_txt in ( "Saved observed (HADs) data for validation, period ('2010-01-01', '2010-12-30')", - f"{city}/05/tasmax/modv_var-tasmax_run-05_20100101_20101230.nc"): - assert log_txt in process.stdout + 
f"{test_city}/05/tasmax/modv_var-tasmax_run-05_20100101_20101230.nc"): + assert log_txt in preprocess_run.stdout + cmethods_run: subprocess.CompletedProcess = ( + subprocess.run( + run_config.to_cli_run_cmethods_1_tuple_strs(city=city), + capture_output=True, text=True + ) + ) + assert cmethods_run.returncode == 0 + for log_txt in ( + "Loading modelled calibration data (CPM)", + + ( + f"Debiased/three.cities.cropped/{test_city}/05/tasmax/" + "debiased_quantile_delta_mapping_result_var" + "-tasmax_quantiles-1000_kind-+_group-None_20100101_20101229.nc" + ), + ): + assert log_txt in cmethods_run.stdout From a538fbac4c7c47fb18b14811034f65abf680c558 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 3 Oct 2023 16:59:26 +0000 Subject: [PATCH 074/146] fix(test): add pytest with sugar extention to environment.yml --- environment.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/environment.yml b/environment.yml index 2080f352..d2c6b3cb 100644 --- a/environment.yml +++ b/environment.yml @@ -47,6 +47,7 @@ dependencies: - pyproj==3.4.0 - python-dateutil==2.8.2 - pytz==2022.5 + - pytest-sugar=0.9.7 - rasterio==1.3.3 - rioxarray==0.12.3 - six==1.16.0 From a236e96e067db743077aa0cf7baf5beb91d94f1d Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 13:11:41 +0100 Subject: [PATCH 075/146] simplify readme sections --- README.md | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index a2fad7d9..1684bd51 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,20 @@ # Welcome to the `clim-recal` repository! - Welcome to clim-recal, a specialized resource designed to tackle systematic errors or biases in Regional Climate Models (RCMs). As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. 
Clim-recal provides both a **broad review** of available bias correction methods as well as **practical tutorials** and **guidance** on how to easily apply those methods to various datasets. Clim-recal is an **Extensive guide to application of BC methods**: -- Accessible information for non quantitative researchers and lay-audience stakeholders -- Technical resource for application BC methods -- Framework for open additions +- Accessible information about the [why and how of bias correction for climate data]() +- Technical resource for application BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)) - In partnership with the MetOffice to ensure the propriety, quality, and usability of our work -- Full pipeline for bias-corrected data of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf). - +- Framework for open additions (in planning) ## Table of Contents 2. [Overview: Bias Correction Pipeline](#overview-bias-correction-pipeline) 3. [Documentation](#documentation) 4. [The dataset](#the-dataset) -4. [Guidance for Non-Climate Scientists](#guidance-for-non-climate-scientists) -5. [Guidance for Climate Scientists](#guidance-for-non-climate-scientists) -6. [Research](#research) -7. [References](#references) +4. [Why bias correction?](#why-bias-correction) 8. [License](#license) 9. 
[Contributors](#contributors) @@ -203,13 +197,19 @@ The UK Climate Projections 2018 (UKCP18) dataset offers insights into the potent ### HADS [HadUK-Grid](https://www.metoffice.gov.uk/research/climate/maps-and-data/data/haduk-grid/haduk-grid) is a comprehensive collection of climate data for the UK, compiled from various land surface observations across the country. This data is organized into a uniform grid to ensure consistent coverage throughout the UK at up to 1km x 1km resolution. The dataset, spanning from 1836 to the present, includes a variety of climate variables such as air temperature, precipitation, sunshine, and wind speed, available on daily, monthly, seasonal, and annual timescales. -## Guidance for Non-Climate Scientists +## Why bias correction? Regional climate models (RCMs) contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. -## Guidance for Climate Scientists +Part of the clim-recal project is to review several bias correction methods. This work is ongoing and you can find our initial [taxonomy here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. 
+ +Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the **clim-recal** repository. + + 1. Senatore et al., 2022, https://doi.org/10.1016/j.ejrh.2022.101120 + 2. Ayar et al., 2021, https://doi.org/10.1038/s41598-021-82715-1 + ### Let's collaborate! @@ -236,23 +236,11 @@ and installing with: pip install -r requirements.txt ``` -## Research -### Methods taxonomy - -Our work-in-progress taxonomy can be viewed [here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. - -Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the **clim-recal** repository. - ## 🚧 Future plans - **More BC Methods**: Further bias correction of UKCP18 products. *This is planned for a future release and is not available yet.* - **Pipeline for adding new methods**: *This is planned for a future release and is not available yet.* -## References - - 1. Senatore et al., 2022, https://doi.org/10.1016/j.ejrh.2022.101120 - 2. Ayar et al., 2021, https://doi.org/10.1038/s41598-021-82715-1 - ## License ## Contributors From d7fff1dd8fc139d2e831cfc9e658a9708a9e53f7 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 13:21:26 +0100 Subject: [PATCH 076/146] add flowchart viz link --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 1684bd51..9e71ddb2 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,8 @@ Here we provide an example of how to run a debiasing pipeline starting. The pipe 6. 
**Assess the debiased data** *We have developed a way to assess the quality of the debiasing step across multiple alternative methods* +*see also this [flowchart viz](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline.md) of the pipeline* + ### Prerequisites #### Setting up your environment From 6cd5d37796f34e98304d952143a6f04cd452a2fe Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 14:08:00 +0100 Subject: [PATCH 077/146] adding reprojection instructions, fixes #30 --- README.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/README.md b/README.md index 9e71ddb2..905db40f 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,31 @@ The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hier ### Reproject the data The HADs data and the UKCP projections have different resolution and coordinate system. For example the HADs dataset uses the British National Grid coordinate system. +The first step in our analysis pipeline is to reproject the data. For this purpose, we utilize the GDAL package. GDAL, or the Geospatial Data Abstraction Library, is a computer software library designed for reading and writing raster and vector geospatial data formats. It provides a unified abstract data model to the calling application for all supported formats and can be equipped with various command line interface utilities for data translation and processing. Projections and transformations are further supported by the PROJ library. + +> **Warning**: +> To reproduce our exact pipeline, make sure you use GDAL version 3.4. Please be aware that this specific version of GDAL requires a different Python version than the one specified in our environment file. 
Therefore, for this step, you'll need to set up a new environment: +> ``` +> conda create -n gdal_env python=3.10 gdal=3.4 +> conda activate gdal_env +> ``` +> + +To execute the reprojection, run the `reproject_all.sh` script from your shell. First, ensure the scripts have the necessary permissions and that the parallel package is installed: + +```bash +chmod +x ./reproject_one.sh +chmod +x ./reproject_all.sh +sudo apt-get update +sudo apt-get install parallel +``` + +Once the above steps are completed, you can run the script with the following command: + +```bash +./reproject_all.sh +``` + ### Resample the data From 99b78aaa377ffa112df55541ba890b98908da8fa Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 14:54:05 +0100 Subject: [PATCH 078/146] ADD instructions for using parallel library to both readme and internal guidance --- README.md | 29 +++++++++++++---------------- internal_docs/INTERNAL.md | 12 ++++++++++++ 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 905db40f..66129380 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,15 @@ environment file for ease-of-use. ``` conda env create -f environment.yml ``` + +> **Warning**: +> To reproduce our exact outputs, you will require GDAL version 3.4. Please be aware that this specific version of GDAL requires a different Python version than the one specified in our environment file. Therefore, we have not included it in the environment file and instead, for the reprojection step, you'll need to set up a new environment: +> ``` +> conda create -n gdal_env python=3.10 gdal=3.4 +> ``` + +In order to paralellize the reprojection step, we make use of the [GNU parallel shell tool](https://www.gnu.org/software/parallel/). + #### Downloading the data This streamlined pipeline is designed for raw data provided by the Met Office, accessible through the [CEDA archive]((https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539)). 
It utilizes [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control, scenario data at 2.2km resolution, and [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data. For those unfamiliar with this data, refer to our [the dataset](#the-dataset) section. @@ -64,32 +73,20 @@ The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hier ### Reproject the data The HADs data and the UKCP projections have different resolution and coordinate system. For example the HADs dataset uses the British National Grid coordinate system. -The first step in our analysis pipeline is to reproject the data. For this purpose, we utilize the GDAL package. GDAL, or the Geospatial Data Abstraction Library, is a computer software library designed for reading and writing raster and vector geospatial data formats. It provides a unified abstract data model to the calling application for all supported formats and can be equipped with various command line interface utilities for data translation and processing. Projections and transformations are further supported by the PROJ library. +The first step in our analysis pipeline is to reproject the data. For this purpose, we utilize the Geospatial Data Abstraction Library (GDAL), designed for reading and writing raster and vector geospatial data formats. > **Warning**: -> To reproduce our exact pipeline, make sure you use GDAL version 3.4. Please be aware that this specific version of GDAL requires a different Python version than the one specified in our environment file. Therefore, for this step, you'll need to set up a new environment: +> Note that, to reproduce our exact pipeline, we switch environments here as explained in the requirements. > ``` -> conda create -n gdal_env python=3.10 gdal=3.4 > conda activate gdal_env > ``` -> - -To execute the reprojection, run the `reproject_all.sh` script from your shell. 
First, ensure the scripts have the necessary permissions and that the parallel package is installed: - -```bash -chmod +x ./reproject_one.sh -chmod +x ./reproject_all.sh -sudo apt-get update -sudo apt-get install parallel -``` -Once the above steps are completed, you can run the script with the following command: +To execute the reprojection in parallel fashion, run the `reproject_all.sh` script from your shell. First, ensure the scripts have the necessary permissions and that the parallel package is installed: ```bash -./reproject_all.sh +sh bash/reproject_all.sh ``` - ### Resample the data In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. diff --git a/internal_docs/INTERNAL.md b/internal_docs/INTERNAL.md index 01c8b07a..18aeb702 100644 --- a/internal_docs/INTERNAL.md +++ b/internal_docs/INTERNAL.md @@ -47,3 +47,15 @@ All the data used in this project can be found in the `/Volumes/vmfileshare/Clim ├── Middle_Layer_Super_Output_Areas_(December_2011)_Boundaries └── infuse_ctry_2011_clipped ``` +## Running the pipeline + +### Reprojection + +In order to run the [reprojection step](https://github.com/alan-turing-institute/clim-recal/tree/documentation#reproject-the-data) of the pipeline on the Azure VM there are some additional steps that need to be taken: You need to set permissions and install the parallel package. 
+ +``` +chmod +x ./reproject_one.sh +chmod +x ./reproject_all.sh +sudo apt-get update +sudo apt-get install parallel +``` \ No newline at end of file From 4f426962512dabcb4b97d4a4c7e4e9894d9edee9 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 15:10:10 +0100 Subject: [PATCH 079/146] ADD resampling section --- README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 66129380..41b257aa 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hier ### Reproject the data The HADs data and the UKCP projections have different resolution and coordinate system. For example the HADs dataset uses the British National Grid coordinate system. -The first step in our analysis pipeline is to reproject the data. For this purpose, we utilize the Geospatial Data Abstraction Library (GDAL), designed for reading and writing raster and vector geospatial data formats. +The first step in our analysis pipeline is to reproject the UKCP datasets to the British National Grid coordinate system. For this purpose, we utilize the Geospatial Data Abstraction Library (GDAL), designed for reading and writing raster and vector geospatial data formats. > **Warning**: > Note that, to reproduce our exact pipeline, we switch environments here as explained in the requirements. @@ -89,15 +89,16 @@ sh bash/reproject_all.sh ### Resample the data -In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. +Resample the HADsUK dataset from 1km to 2.2km grid to match the UKCP reprojected grid. 
We run the resampling python script specifying the `--input` location of the reprojected files from the previous step, the UKCP `--grid` file an the `--output` location for saving the resampled files. -Resample the HADs data from 1km to 2.2km grid to match the UKCP reprojected grid. +``` +python python/resampling/resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled -reproject the UKCP datasets to the British National Grid coordinate system. -**Resampling** for the HADsUK datasets from 1km to a 2.2 km grid to match the UKCP re-projected grid. -**Data loaders** functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. +``` ### Preparing the bias correction and assessment +**Data loaders** functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. +In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. ### Applying the bias correction - **Debiasing scripts** that interface with implementations of the debiasing (bias correction) methods implemented by different libraries (by March 2023 we have only implemented the python-cmethods library). From 292d9a2793133f64bb6724b52308cd433d710e6f Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 15:14:00 +0100 Subject: [PATCH 080/146] FIX add switch of conda environment for resampling --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 41b257aa..1c37af49 100644 --- a/README.md +++ b/README.md @@ -92,8 +92,11 @@ sh bash/reproject_all.sh Resample the HADsUK dataset from 1km to 2.2km grid to match the UKCP reprojected grid. 
We run the resampling python script specifying the `--input` location of the reprojected files from the previous step, the UKCP `--grid` file an the `--output` location for saving the resampled files. ``` -python python/resampling/resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled +# switch to main environment +conda activate clim-recal +# run resampling +python python/resampling/resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled ``` ### Preparing the bias correction and assessment From ebeda875b202e8e3c05a5fd694c0b46c408cd350 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 15:55:06 +0100 Subject: [PATCH 081/146] adding cropping step --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index 1c37af49..5b1be02e 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,14 @@ You need to replace `uuu` and `ppp` with your CEDA username and FTP password res The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. This flag only applies to the UKCP data and should not be used with HADs data. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes to the hierarchy. +**Shapefiles** +In addition to the climate data we use geospatial data to divide the data into smaller chunks. Specifically we use the following datasets for city boundaries: + +- Scottish localities boundaries for cropping out Glasgow. Downloaded from [nrscotland.gov.uk](https://www.nrscotland.gov.uk/statistics-and-data/geography/our-products/settlements-and-localities-dataset/settlements-and-localities-digital-boundaries) on 1st Aug 2023 + +- Major Towns and Cities boundaries for cropping out Manchester. 
Downloaded from [https://geoportal.statistics.gov.uk/](https://geoportal.statistics.gov.uk/datasets/980da620a0264647bd679642f96b42c1/explore) + + > 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). ### Reproject the data @@ -98,6 +106,15 @@ conda activate clim-recal # run resampling python python/resampling/resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled ``` +> **Warning**: +> Note that the resampling script has to be run separately for different metrics. In our pipeline we bias corrected data for `tasmax`, `tasmin` and `rainfall` + +## Splitting data + +Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. To make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. For the purposes of our example pipeline, we've opted for reducing the data to three specific cities as sub-regions from the broader UK data, London, Glasgow and Manchester. + + + ### Preparing the bias correction and assessment **Data loaders** functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. From 39cb72dd7ee59aa84be904473ab1f291e4a8e6bc Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 17:14:46 +0100 Subject: [PATCH 082/146] clean up debiasing step --- README.md | 95 +++++++++++++++---------------------------------------- 1 file changed, 25 insertions(+), 70 deletions(-) diff --git a/README.md b/README.md index 5b1be02e..122ee379 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,16 @@ conda env create -f environment.yml In order to paralellize the reprojection step, we make use of the [GNU parallel shell tool](https://www.gnu.org/software/parallel/). 
+#### The cmethods library + +This repository contains a python script used to run debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) module written by Benjamin Thomas Schwertfeger's , which has +been modified to function with the dataset used in the clim-recal project. This library has been included as a +submodule to this project, so you must run the following command to pull the submodules required. + +``` +git submodule update --init --recursive +``` + #### Downloading the data This streamlined pipeline is designed for raw data provided by the Met Office, accessible through the [CEDA archive]((https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539)). It utilizes [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control, scenario data at 2.2km resolution, and [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data. For those unfamiliar with this data, refer to our [the dataset](#the-dataset) section. @@ -106,98 +116,43 @@ conda activate clim-recal # run resampling python python/resampling/resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled ``` -> **Warning**: -> Note that the resampling script has to be run separately for different metrics. In our pipeline we bias corrected data for `tasmax`, `tasmin` and `rainfall` - -## Splitting data - -Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. To make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. For the purposes of our example pipeline, we've opted for reducing the data to three specific cities as sub-regions from the broader UK data, London, Glasgow and Manchester. 
- - - - ### Preparing the bias correction and assessment -**Data loaders** functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. -In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. -### Applying the bias correction - - **Debiasing scripts** that interface with implementations of the debiasing (bias correction) methods implemented by different libraries (by March 2023 we have only implemented the python-cmethods library). +**Spatial cropping** +Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. To make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. For the purposes of our example pipeline, we've opted for reducing the data to three specific cities as sub-regions from the broader UK data, London, Glasgow and Manchester using the `Cropping_Rasters_to_three_cities.R script`. - The code in the [debiasing](debiasing) directory contains scripts that interface with implementations of the debiasing methods -implemented by different libraries. - -Note: By March 2023 we have only implemented the [python-cmethods](https://github.com/alan-turing-institute/python-cmethods) library. - - -### The cmethods library - -This repository contains a python script used to run debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) module written by Benjamin Thomas Schwertfeger's , which has -been modified to function with the dataset used in the clim-recal project. This library has been included as a -submodule to this project, so you must run the following command to pull the submodules required. 
+**calibration-validation data split** +For the purpose of assessing our bias correction, we then split our data, both the projection as well as the ground-truth observations by dates, such that we can calibrate the bias correction based on the years 1981 to 1983. We then use data from year 2010 to validate the quality of the bias correction. ``` cd debiasing -git submodule update --init --recursive +python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19831230 --valid_dates 20100101-20101230 ``` -The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. -It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), -and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. -The script will also produce a time-series and a map plot of the debiased data. +The preprocess_data.py script also aligns the calendars of the historical simulation data and observed data, ensuring that they have the same time dimension and checks that the observed and simulated historical data have the same dimensions. -**Usage**: +> **Note**: +> preprocess_data.py makes use of our custom data loader functions. In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. 
-The script can be run from the command line using the following arguments: -``` -python3 run_cmethods.py.py --obs --contr --scen --shp ---out -m -v -u -g -k -n -p -``` - -where: - -where: +### Applying the bias correction +Note: By March 2023 we have only implemented the [python-cmethods](https://github.com/alan-turing-institute/python-cmethods) library. -- `--obs` specifies the path to the observation datasets -- `--contr` specifies the path to the control datasets -- `--scen` specifies the path to the scenario datasets (data to adjust) -- `--shp` specifies the path to a shapefile, in case we want to select a smaller region (default: None) -- `--out` specifies the path to save the output files (default: current directory) -- `--method` specifies the correction method to use (default: quantile_delta_mapping) -- `-v` specifies the variable to adjust (default: tas) -- `-u` specifies the unit of the variable (default: °C) -- `-g` specifies the value grouping (default: time) -- `-k` specifies the method kind (+ or *, default: +) -- `-n` specifies the number of quantiles to use (default: 1000) -- `-p` specifies the number of processes to use for multiprocessing (default: 1) -For more details on the script and options you can run: +The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), +and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. ``` -python run_cmethods.py --help +python3 run_cmethods.py --input_data_folder --out --method --group --v -p ``` -**Main Functionality**: - -The script applies corrections extracted from historical observed and simulated data between `1980-12-01` and `1999-11-30`. 
-Corrections are applied to future scenario data between `2020` and `2080` (however there is no available scenario data between `2040` to `2060`, so this time -period is skipped. - - -The script performs the following steps: -- Parses the input arguments. -- Loads, merges and clips (if shapefile is provided) the all input datasets and merges them into two distinct datasets: the observation and control datasets. -- Aligns the calendars of the historical simulation data and observed data, ensuring that they have the same time dimension -and checks that the observed and simulated historical data have the same dimensions. -- Loops over the future time periods specified in the `future_time_periods` variable and performs the following steps: +The run_cmethods.py script loops over the time periods and applies debiasing in periods of 10 years in the following steps: - Loads the scenario data for the current time period. - Applies the specified correction method to the scenario data. - Saves the resulting output to the specified directory. - Creates diagnotic figues of the output dataset (time series and time dependent maps) and saves it into the specified directory. -In this script -datasets are debiased in periods of 10 years, in a consecutive loop, for each time period it will produce an `.nc` output file -with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. +For each 10 year time period it will produce an `.nc` output file with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. **Working example**. 
From 787f7559523a407c2a1bcf5c82c4a247440e29aa Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 11 Oct 2023 17:26:30 +0100 Subject: [PATCH 083/146] ENH sub-titles and FIX links, DEL surplus info --- README.md | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 122ee379..3e8ee911 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,7 @@ git submodule update --init --recursive #### Downloading the data +**Climate data** This streamlined pipeline is designed for raw data provided by the Met Office, accessible through the [CEDA archive]((https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539)). It utilizes [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control, scenario data at 2.2km resolution, and [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data. For those unfamiliar with this data, refer to our [the dataset](#the-dataset) section. To access the data,[register here]((https://archive.ceda.ac.uk/)) at the CEDA archive and configure your FTP credentials in "My Account". Utilize our [ceda_ftp_download.py](python/data_download/) script to download the data. @@ -78,7 +79,7 @@ You need to replace `uuu` and `ppp` with your CEDA username and FTP password res The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. This flag only applies to the UKCP data and should not be used with HADs data. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes to the hierarchy. -**Shapefiles** +**Geospatial data** In addition to the climate data we use geospatial data to divide the data into smaller chunks. Specifically we use the following datasets for city boundaries: - Scottish localities boundaries for cropping out Glasgow. 
Downloaded from [nrscotland.gov.uk](https://www.nrscotland.gov.uk/statistics-and-data/geography/our-products/settlements-and-localities-dataset/settlements-and-localities-digital-boundaries) on 1st Aug 2023 @@ -86,6 +87,7 @@ In addition to the climate data we use geospatial data to divide the data into s - Major Towns and Cities boundaries for cropping out Manchester. Downloaded from [https://geoportal.statistics.gov.uk/](https://geoportal.statistics.gov.uk/datasets/980da620a0264647bd679642f96b42c1/explore) + > 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). ### Reproject the data @@ -132,7 +134,7 @@ python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.citie The preprocess_data.py script also aligns the calendars of the historical simulation data and observed data, ensuring that they have the same time dimension and checks that the observed and simulated historical data have the same dimensions. > **Note**: -> preprocess_data.py makes use of our custom data loader functions. In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. +> preprocess_data.py makes use of our custom data loader functions. In [`data_loader/`](python/load_data/data_loader.py) we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. 
### Applying the bias correction @@ -153,13 +155,6 @@ The run_cmethods.py script loops over the time periods and applies debiasing in - Creates diagnotic figues of the output dataset (time series and time dependent maps) and saves it into the specified directory. For each 10 year time period it will produce an `.nc` output file with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. - -**Working example**. - -Example of code working on the **clim-recal** dataset: -``` -python run_cmethods.py --scen /Volumes/vmfileshare/ClimateData/Reprojected/UKCP2.2/tasmax/01/latest --contr /Volumes/vmfileshare/ClimateData/Reprojected/UKCP2.2/tasmax/01/latest/ --obs /Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day/ --shape ../../data/Scotland/Scotland.bbox.shp -v tasmax --method delta_method --group time.month -p 5 -``` ### Assessing the corrected data From cea894135d917c758d1f83308957da8c36c84b2a Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 12 Oct 2023 08:13:39 +0000 Subject: [PATCH 084/146] fix: pytest-sugar version typo in environment.yml --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index d2c6b3cb..852fb832 100644 --- a/environment.yml +++ b/environment.yml @@ -47,7 +47,7 @@ dependencies: - pyproj==3.4.0 - python-dateutil==2.8.2 - pytz==2022.5 - - pytest-sugar=0.9.7 + - pytest-sugar==0.9.7 - rasterio==1.3.3 - rioxarray==0.12.3 - six==1.16.0 From 39b488e083e782ca069c1c1c4f0358b62a07407d Mon Sep 17 00:00:00 2001 From: Greg Mingas Date: Thu, 12 Oct 2023 10:21:49 +0100 Subject: [PATCH 085/146] Add firewall instructions for accessing storage account to README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 44866c27..281f54a7 100644 --- a/README.md +++ b/README.md @@ -93,6 +93,8 @@ The fileshare will be mounted under `/Volumes/vmfileshare/` +You might also need to add your IP 
address to the Firewall IP expections list in the Azure portal by going to the `dymestorage1` resource and selecting `Networking`. + Instructions on how the mount in other operating systems can be found in [the azure how-tos](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-linux?tabs=smb311). Alternatively, you can access the Azure Portal, go to the dymestorage1 fileshare and click the "Connect" button to get an automatically generated script. This script can be used from within an Azure VM to mount the drive. From d9fc672f3dcdc27c6cf88db0eb85aaa153781368 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 12 Oct 2023 13:43:07 +0100 Subject: [PATCH 086/146] separate guidance from main readme --- README.md | 127 +---------------------------------- docs/pipeline_guidance.md | 136 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 137 insertions(+), 126 deletions(-) create mode 100644 docs/pipeline_guidance.md diff --git a/README.md b/README.md index 3e8ee911..94cdebc9 100644 --- a/README.md +++ b/README.md @@ -31,132 +31,7 @@ Here we provide an example of how to run a debiasing pipeline starting. The pipe 6. **Assess the debiased data** *We have developed a way to assess the quality of the debiasing step across multiple alternative methods* -*see also this [flowchart viz](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline.md) of the pipeline* - -### Prerequisites - -#### Setting up your environment - -Methods can be used with a custom environment, here we provide a Anaconda -environment file for ease-of-use. -``` -conda env create -f environment.yml -``` - -> **Warning**: -> To reproduce our exact outputs, you will require GDAL version 3.4. Please be aware that this specific version of GDAL requires a different Python version than the one specified in our environment file. 
Therefore, we have not included it in the environment file and instead, for the reprojection step, you'll need to set up a new environment: -> ``` -> conda create -n gdal_env python=3.10 gdal=3.4 -> ``` - -In order to paralellize the reprojection step, we make use of the [GNU parallel shell tool](https://www.gnu.org/software/parallel/). - -#### The cmethods library - -This repository contains a python script used to run debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) module written by Benjamin Thomas Schwertfeger's , which has -been modified to function with the dataset used in the clim-recal project. This library has been included as a -submodule to this project, so you must run the following command to pull the submodules required. - -``` -git submodule update --init --recursive -``` - -#### Downloading the data - -**Climate data** -This streamlined pipeline is designed for raw data provided by the Met Office, accessible through the [CEDA archive]((https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539)). It utilizes [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control, scenario data at 2.2km resolution, and [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data. For those unfamiliar with this data, refer to our [the dataset](#the-dataset) section. - -To access the data,[register here]((https://archive.ceda.ac.uk/)) at the CEDA archive and configure your FTP credentials in "My Account". Utilize our [ceda_ftp_download.py](python/data_download/) script to download the data. 
- -``` -# cpm data -python3 ceda_ftp_download.py --input /badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/ --output 'output_dir' --username 'uuu' --psw 'ppp' --change_hierarchy - -# hads data -python3 ceda_ftp_download.py --input /badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km --output 'output_dir' --username 'uuu' --psw 'ppp' -``` -You need to replace `uuu` and `ppp` with your CEDA username and FTP password respectively and replace `output_dir` with the directory you want to write the data to. - -The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. This flag only applies to the UKCP data and should not be used with HADs data. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes to the hierarchy. - -**Geospatial data** -In addition to the climate data we use geospatial data to divide the data into smaller chunks. Specifically we use the following datasets for city boundaries: - -- Scottish localities boundaries for cropping out Glasgow. Downloaded from [nrscotland.gov.uk](https://www.nrscotland.gov.uk/statistics-and-data/geography/our-products/settlements-and-localities-dataset/settlements-and-localities-digital-boundaries) on 1st Aug 2023 - -- Major Towns and Cities boundaries for cropping out Manchester. Downloaded from [https://geoportal.statistics.gov.uk/](https://geoportal.statistics.gov.uk/datasets/980da620a0264647bd679642f96b42c1/explore) - - - -> 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). - -### Reproject the data -The HADs data and the UKCP projections have different resolution and coordinate system. For example the HADs dataset uses the British National Grid coordinate system. - -The first step in our analysis pipeline is to reproject the UKCP datasets to the British National Grid coordinate system. 
For this purpose, we utilize the Geospatial Data Abstraction Library (GDAL), designed for reading and writing raster and vector geospatial data formats. - -> **Warning**: -> Note that, to reproduce our exact pipeline, we switch environments here as explained in the requirements. -> ``` -> conda activate gdal_env -> ``` - -To execute the reprojection in parallel fashion, run the `reproject_all.sh` script from your shell. First, ensure the scripts have the necessary permissions and that the parallel package is installed: - -```bash -sh bash/reproject_all.sh -``` - -### Resample the data - -Resample the HADsUK dataset from 1km to 2.2km grid to match the UKCP reprojected grid. We run the resampling python script specifying the `--input` location of the reprojected files from the previous step, the UKCP `--grid` file an the `--output` location for saving the resampled files. - -``` -# switch to main environment -conda activate clim-recal - -# run resampling -python python/resampling/resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled -``` -### Preparing the bias correction and assessment - -**Spatial cropping** -Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. To make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. For the purposes of our example pipeline, we've opted for reducing the data to three specific cities as sub-regions from the broader UK data, London, Glasgow and Manchester using the `Cropping_Rasters_to_three_cities.R script`. - -**calibration-validation data split** -For the purpose of assessing our bias correction, we then split our data, both the projection as well as the ground-truth observations by dates, such that we can calibrate the bias correction based on the years 1981 to 1983. We then use data from year 2010 to validate the quality of the bias correction. 
- -``` -cd debiasing -python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19831230 --valid_dates 20100101-20101230 -``` - -The preprocess_data.py script also aligns the calendars of the historical simulation data and observed data, ensuring that they have the same time dimension and checks that the observed and simulated historical data have the same dimensions. - -> **Note**: -> preprocess_data.py makes use of our custom data loader functions. In [`data_loader/`](python/load_data/data_loader.py) we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. - - -### Applying the bias correction -Note: By March 2023 we have only implemented the [python-cmethods](https://github.com/alan-turing-institute/python-cmethods) library. - - -The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), -and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. - -``` -python3 run_cmethods.py --input_data_folder --out --method --group --v -p -``` - -The run_cmethods.py script loops over the time periods and applies debiasing in periods of 10 years in the following steps: - - Loads the scenario data for the current time period. - - Applies the specified correction method to the scenario data. 
- - Saves the resulting output to the specified directory.
-  - Creates diagnotic figues of the output dataset (time series and time dependent maps) and saves it into the specified directory.
-
-For each 10 year time period it will produce an `.nc` output file with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data.
-
-### Assessing the corrected data
+For a quick start on bias correction, refer to our [comprehensive analysis pipeline guide](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline_guidance.md).

 ## Documentation
 🚧 In Progress
diff --git a/docs/pipeline_guidance.md b/docs/pipeline_guidance.md
new file mode 100644
index 00000000..abc94bf8
--- /dev/null
+++ b/docs/pipeline_guidance.md
@@ -0,0 +1,136 @@
+
+# Analysis pipeline guidance
+
+This is a detailed guide to our analysis pipeline.
+*see also this [flowchart viz](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline.md) of the pipeline*
+
+### Prerequisites
+
+#### Setting up your environment
+
+Methods can be used with a custom environment, here we provide a Anaconda
+environment file for ease-of-use.
+```
+conda env create -f environment.yml
+```
+
+> **Warning**:
+> To reproduce our exact outputs, you will require GDAL version 3.4. Please be aware that this specific version of GDAL requires a different Python version than the one specified in our environment file. Therefore, we have not included it in the environment file and instead, for the reprojection step, you'll need to set up a new environment:
+> ```
+> conda create -n gdal_env python=3.10 gdal=3.4
+> ```
+
+In order to parallelize the reprojection step, we make use of the [GNU parallel shell tool](https://www.gnu.org/software/parallel/). 
+ +``` +parallel --version +``` + +#### The cmethods library + +This repository contains a python script used to run debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) module written by Benjamin Thomas Schwertfeger's , which has +been modified to function with the dataset used in the clim-recal project. This library has been included as a +submodule to this project, so you must run the following command to pull the submodules required. + +``` +git submodule update --init --recursive +``` + +#### Downloading the data + +**Climate data** + +This streamlined pipeline is designed for raw data provided by the Met Office, accessible through the [CEDA archive]((https://catalogue.ceda.ac.uk/uuid/ad2ac0ddd3f34210b0d6e19bfc335539)). It utilizes [UKCP](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km) control, scenario data at 2.2km resolution, and [HADs](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) observational data. For those unfamiliar with this data, refer to our [the dataset](#the-dataset) section. + +To access the data,[register here]((https://archive.ceda.ac.uk/)) at the CEDA archive and configure your FTP credentials in "My Account". Utilize our [ceda_ftp_download.py](python/data_download/) script to download the data. + +``` +# cpm data +python3 ceda_ftp_download.py --input /badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/ --output 'output_dir' --username 'uuu' --psw 'ppp' --change_hierarchy + +# hads data +python3 ceda_ftp_download.py --input /badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km --output 'output_dir' --username 'uuu' --psw 'ppp' +``` +You need to replace `uuu` and `ppp` with your CEDA username and FTP password respectively and replace `output_dir` with the directory you want to write the data to. + +The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. 
This flag only applies to the UKCP data and should not be used with HADs data. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes to the hierarchy. + +**Geospatial data** + +In addition to the climate data we use geospatial data to divide the data into smaller chunks. Specifically we use the following datasets for city boundaries: + +- Scottish localities boundaries for cropping out Glasgow. Downloaded from [nrscotland.gov.uk](https://www.nrscotland.gov.uk/statistics-and-data/geography/our-products/settlements-and-localities-dataset/settlements-and-localities-digital-boundaries) on 1st Aug 2023 + +- Major Towns and Cities boundaries for cropping out Manchester. Downloaded from [https://geoportal.statistics.gov.uk/](https://geoportal.statistics.gov.uk/datasets/980da620a0264647bd679642f96b42c1/explore) + + + +> 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). + +### Reproject the data +The HADs data and the UKCP projections have different resolution and coordinate system. For example the HADs dataset uses the British National Grid coordinate system. + +The first step in our analysis pipeline is to reproject the UKCP datasets to the British National Grid coordinate system. For this purpose, we utilize the Geospatial Data Abstraction Library (GDAL), designed for reading and writing raster and vector geospatial data formats. + +> **Warning**: +> Note that, to reproduce our exact pipeline, we switch environments here as explained in the requirements. +> ``` +> conda activate gdal_env +> ``` + +To execute the reprojection in parallel fashion, run the `reproject_all.sh` script from your shell. 
First, ensure the scripts have the necessary permissions and that the parallel package is installed: + +```bash +sh bash/reproject_all.sh +``` + +### Resample the data + +Resample the HADsUK dataset from 1km to 2.2km grid to match the UKCP reprojected grid. We run the resampling python script specifying the `--input` location of the reprojected files from the previous step, the UKCP `--grid` file an the `--output` location for saving the resampled files. + +``` +# switch to main environment +conda activate clim-recal + +# run resampling +python python/resampling/resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled +``` +### Preparing the bias correction and assessment + +**Spatial cropping** +Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. To make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. For the purposes of our example pipeline, we've opted for reducing the data to three specific cities as sub-regions from the broader UK data, London, Glasgow and Manchester using the `Cropping_Rasters_to_three_cities.R script`. + +**calibration-validation data split** +For the purpose of assessing our bias correction, we then split our data, both the projection as well as the ground-truth observations by dates, such that we can calibrate the bias correction based on the years 1981 to 1983. We then use data from year 2010 to validate the quality of the bias correction. 
+ +``` +cd debiasing +python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19831230 --valid_dates 20100101-20101230 +``` + +The preprocess_data.py script also aligns the calendars of the historical simulation data and observed data, ensuring that they have the same time dimension and checks that the observed and simulated historical data have the same dimensions. + +> **Note**: +> preprocess_data.py makes use of our custom data loader functions. In [`data_loader/`](python/load_data/data_loader.py) we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in python/notebooks/load_data_python.ipynb. + + +### Applying the bias correction +Note: By March 2023 we have only implemented the [python-cmethods](https://github.com/alan-turing-institute/python-cmethods) library. + + +The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), +and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. + +``` +python3 run_cmethods.py --input_data_folder --out --method --group --v -p +``` + +The run_cmethods.py script loops over the time periods and applies debiasing in periods of 10 years in the following steps: + - Loads the scenario data for the current time period. + - Applies the specified correction method to the scenario data. 
+ - Saves the resulting output to the specified directory. + - Creates diagnotic figues of the output dataset (time series and time dependent maps) and saves it into the specified directory. + +For each 10 year time period it will produce an `.nc` output file with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. + +### Assessing the corrected data \ No newline at end of file From 8520a5c68eb51947aceb121ed0ade091dd68e0b3 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 12 Oct 2023 13:15:21 +0000 Subject: [PATCH 087/146] feat(test): add a is_darwin fixture for filtering testing on macOS --- environment.yml | 1 + python/.pytest.ini | 1 + python/conftest.py | 19 +++++++++++++++++++ python/tests/test_debiasing.py | 7 +++++++ 4 files changed, 28 insertions(+) create mode 100644 python/conftest.py diff --git a/environment.yml b/environment.yml index 852fb832..78cd05bb 100644 --- a/environment.yml +++ b/environment.yml @@ -39,6 +39,7 @@ dependencies: - click-plugins==1.1.1 - cligj==0.7.2 - geopandas==0.12.2 + - ipython==8.15.0 - netcdf4==1.6.1 - numpy==1.23.4 - packaging==21.3 diff --git a/python/.pytest.ini b/python/.pytest.ini index 351f02b4..e512639e 100644 --- a/python/.pytest.ini +++ b/python/.pytest.ini @@ -8,3 +8,4 @@ testpaths = utils.py markers = slow: mark test as slow. 
+ server: mark for only server support diff --git a/python/conftest.py b/python/conftest.py new file mode 100644 index 00000000..5fc44c17 --- /dev/null +++ b/python/conftest.py @@ -0,0 +1,19 @@ +import sys + +import pytest + + +@pytest.fixture() +def is_platform_darwin() -> bool: + """Check if `sys.platform` is `Darwin` (macOS).""" + return sys.platform.startswith("darwin") + + +@pytest.fixture(autouse=True) +def doctest_auto_fixtures( + doctest_namespace: dict, is_platform_darwin: bool +) -> None: + """Elements to add to default `doctest` namespace.""" + doctest_namespace["is_platform_darwin"] = is_platform_darwin + doctest_namespace["pprint"] = pprint + doctest_namespace["pytest"] = pytest diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 432e88d6..601ee80d 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -252,6 +252,8 @@ def mod_path(self, city: str | None = None) -> Path: Example ------- + >>> if is_platform_darwin: + ... pytest.skip('paths fail if not linux') >>> config: RunConfig = RunConfig() >>> config.mod_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester') @@ -266,6 +268,8 @@ def obs_path(self, city: str | None = None) -> Path: Example ------- + >>> if is_platform_darwin: + ... pytest.skip('paths fail if not linux') >>> config: RunConfig = RunConfig() >>> config.obs_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/Manchester') @@ -285,6 +289,8 @@ def preprocess_out_path( Example ------- + >>> if is_platform_darwin: + ... 
pytest.skip('paths fail if not linux') >>> config: RunConfig = RunConfig() >>> config.preprocess_out_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax') @@ -590,6 +596,7 @@ def test_command_line_default() -> None: assert run_config.to_cli_preprocess_str() == CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT +@pytest.mark.server @pytest.mark.slow @pytest.mark.parametrize( 'city', (None, 'Glasgow',) From 57ef17aeb417227e734eab1a2db7111a4c0940c9 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 12 Oct 2023 15:06:37 +0000 Subject: [PATCH 088/146] fix(test): add import pprint to conftest.py --- python/conftest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/conftest.py b/python/conftest.py index 5fc44c17..02097f8c 100644 --- a/python/conftest.py +++ b/python/conftest.py @@ -1,4 +1,5 @@ import sys +import pprint import pytest From 91aff436fa15f38cde6206cce343586441da8154 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Fri, 13 Oct 2023 17:19:45 +0000 Subject: [PATCH 089/146] feat(ci): initial .pre-commit-config.yaml --- .pre-commit-config.yaml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..10f04570 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,26 @@ +repos: + - repo: https://github.com/psf/black + rev: "23.9.1" + hooks: + - id: black-jupyter + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict + - id: check-symlinks + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + - id: mixed-line-ending + - id: requirements-txt-fixer + - id: trailing-whitespace + + - repo: https://github.com/pre-commit/pygrep-hooks + rev: "v1.10.0" + hooks: + - id: rst-backticks + - id: rst-directive-colons + - id: 
rst-inline-touching-normal From 6255bf7a2f8ed3c0ab14b938bc01e853016177c1 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Sat, 14 Oct 2023 18:21:20 +0100 Subject: [PATCH 090/146] feat(doc): basic config for `quarto` --- .dockerignore | 0 .gitignore | 4 +++- _quarto.yml | 11 +++++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 .dockerignore create mode 100644 _quarto.yml diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..e69de29b diff --git a/.gitignore b/.gitignore index 676f2b3f..b4693c3a 100644 --- a/.gitignore +++ b/.gitignore @@ -260,5 +260,7 @@ rsconnect/ # End of https://www.toptal.com/developers/gitignore/api/macos,python,r -# Some custom adds +# Some custom adds data/* + +/.quarto/ diff --git a/_quarto.yml b/_quarto.yml new file mode 100644 index 00000000..f6ea2afa --- /dev/null +++ b/_quarto.yml @@ -0,0 +1,11 @@ +project: + title: "." + type: website + output-dir: _site + preview: + port: 8888 + browser: false + render: + - "docs/" + - "!clim-recal.Rproj" + - "!R" From 835fa5af579a0b004e3b3cefad99ace31407033a Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Sat, 14 Oct 2023 19:14:11 +0100 Subject: [PATCH 091/146] feat(doc): add `quartodoc` config for `download_ftp` `docstring` --- _quarto.yml | 22 ++++++++++++++++++++++ environment.yml | 1 + 2 files changed, 23 insertions(+) diff --git a/_quarto.yml b/_quarto.yml index f6ea2afa..10c762e5 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -9,3 +9,25 @@ project: - "docs/" - "!clim-recal.Rproj" - "!R" + +# tell quarto to read the generated sidebar +metadata-files: + - _sidebar.yml + + +quartodoc: + # the name used to import the package you want to create reference docs for + package: null + dir: docs/reference + + # write sidebar data to this file + sidebar: _sidebar.yml + source_dir: ./python/data_download/ + + sections: + - title: Data Source Management + desc: How data is downloaded for use + contents: + # the functions being documented in the 
package. + # you can refer to anything: class methods, modules, etc.. + - ceda_ftp_download.download_ftp diff --git a/environment.yml b/environment.yml index 2080f352..35a835ca 100644 --- a/environment.yml +++ b/environment.yml @@ -56,3 +56,4 @@ dependencies: - matplotlib==3.6.1 - scipy==1.10.0 - pillow==9.4.0 + - quartodoc==0.6.3 From 1cb19bccb46526fd47fb4305e199518046aea19d Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 17 Oct 2023 18:56:11 +0100 Subject: [PATCH 092/146] feat(doc): add R files and initial navigation --- _quarto.yml | 36 +++++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/_quarto.yml b/_quarto.yml index 10c762e5..bebd1757 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -1,20 +1,50 @@ project: - title: "." + title: "clim-recal" type: website output-dir: _site preview: port: 8888 browser: false render: - - "docs/" + - "README.md" - "!clim-recal.Rproj" - - "!R" + - "R/README.md" + - "R/misc/Identifying_Runs.md" + # - "R/comparing-r-and-python/HADs-reprojection/WIP-Comparing-HADs-grids.md" + - "python/README.md" + +toc: True +number-sections: True + # tell quarto to read the generated sidebar metadata-files: - _sidebar.yml +website: + back-to-top-navigation: true + sidebar: + style: "docked" + search: true + contents: + - text: "Summary" + href: "README.md" + - section: "R" + contents: + - href: "R/README.md" + text: Overview + - href: "R/misc/Identifying_Runs.md" + text: Identifying Runs + # - section: "Comparing R and Python" + # contents: + # - href: "R/comparing-r-and-python/HADs-reprojection/WIP-Comparing-HADs-grids.md" + # text: "WIP Comparing HADs grids" + + - section: "python" + contents: + - "python/README.md" + quartodoc: # the name used to import the package you want to create reference docs for package: null From 4e99b44931994ad9d36b37dccc35005fcf1f514e Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 17 Oct 2023 23:04:30 +0100 Subject: [PATCH 093/146] feat(doc): add 
`download_ftp` `docstring` to `_quarto.yml` --- _quarto.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/_quarto.yml b/_quarto.yml index bebd1757..2db784ca 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -10,6 +10,7 @@ project: - "!clim-recal.Rproj" - "R/README.md" - "R/misc/Identifying_Runs.md" + - "docs/reference" # - "R/comparing-r-and-python/HADs-reprojection/WIP-Comparing-HADs-grids.md" - "python/README.md" @@ -44,6 +45,11 @@ website: - section: "python" contents: - "python/README.md" + - section: "Reference" + contents: + - href: "docs/reference/ceda_ftp_download.download_ftp.qmd" + text: "download_ftp" + quartodoc: # the name used to import the package you want to create reference docs for From 17ab70230f299635dc9ac0114e55cf5b9244e62e Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 18 Oct 2023 16:48:17 +0100 Subject: [PATCH 094/146] change all guidance to dummy paths --- docs/pipeline_guidance.md | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/docs/pipeline_guidance.md b/docs/pipeline_guidance.md index abc94bf8..c6b08f6c 100644 --- a/docs/pipeline_guidance.md +++ b/docs/pipeline_guidance.md @@ -55,6 +55,8 @@ You need to replace `uuu` and `ppp` with your CEDA username and FTP password res The `--change_hierarchy` flag modifies the folder hierarchy to fit with the hierarchy in the Turing Azure file store. This flag only applies to the UKCP data and should not be used with HADs data. You can use the same script without the `--change_hierarchy` flag in order to download files without any changes to the hierarchy. +> 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). + **Geospatial data** In addition to the climate data we use geospatial data to divide the data into smaller chunks. 
Specifically we use the following datasets for city boundaries: @@ -64,9 +66,6 @@ In addition to the climate data we use geospatial data to divide the data into s - Major Towns and Cities boundaries for cropping out Manchester. Downloaded from [https://geoportal.statistics.gov.uk/](https://geoportal.statistics.gov.uk/datasets/980da620a0264647bd679642f96b42c1/explore) - -> 📢 If you are an internal collaborator you can access the raw data as well as intermediate steps through our Azure server. [See here for a How-to](). - ### Reproject the data The HADs data and the UKCP projections have different resolution and coordinate system. For example the HADs dataset uses the British National Grid coordinate system. @@ -78,10 +77,11 @@ The first step in our analysis pipeline is to reproject the UKCP datasets to the > conda activate gdal_env > ``` -To execute the reprojection in parallel fashion, run the `reproject_all.sh` script from your shell. First, ensure the scripts have the necessary permissions and that the parallel package is installed: +To execute the reprojection in parallel fashion, run the `reproject_all.sh` script from your shell. As an input to the script we provide the path to the raw netCDF files ```bash -sh bash/reproject_all.sh +cd bash +sh reproject_all.sh path_to_netcdf_files ``` ### Resample the data @@ -93,19 +93,26 @@ Resample the HADsUK dataset from 1km to 2.2km grid to match the UKCP reprojected conda activate clim-recal # run resampling -python python/resampling/resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled +cd ../python/resampling +python resampling_hads.py --input path_to_reprojected --grid path_to_grid_file --output path_to_resampled ``` ### Preparing the bias correction and assessment **Spatial cropping** -Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. 
To make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. For the purposes of our example pipeline, we've opted for reducing the data to three specific cities as sub-regions from the broader UK data, London, Glasgow and Manchester using the `Cropping_Rasters_to_three_cities.R script`.
+Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. Therefore, to make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. For the purposes of our example pipeline, we've opted for reducing the data to individual city boundaries. To crop you need to adjust the paths in `Cropping_Rasters_to_three_cities.R` script to fit your own directory structure. The cropping is implemented in the `cpm_read_crop` and `hads_read_crop` functions.
+
+```
+Rscript Cropping_Rasters_to_three_cities.R
+```
+
+>>>>>>> 3718d6b (update guidance to dummy paths)
+
 **calibration-validation data split**
-For the purpose of assessing our bias correction, we then split our data, both the projection as well as the ground-truth observations by dates, such that we can calibrate the bias correction based on the years 1981 to 1983. We then use data from year 2010 to validate the quality of the bias correction.
+For the purpose of assessing our bias correction, we then split our data, both the projection as well as the ground-truth observations by dates. In this example here we calibrate the bias correction based on the years 1981 to 1983. We then use data from year 2010 to validate the quality of the bias correction. You need to replace `path_to_cropped` with the path where the data from the previous cropping step was saved and `path_to_preprocessed` with the output directory you choose. You can leave the `-v` and `-r` flags as specified below or choose another metric and run if you prefer. 
``` -cd debiasing -python preprocess_data.py --mod /mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/$city --obs /mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/$city -v $var -r $run --out /mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/$city/$run/$var --calib_dates 19810101-19831230 --valid_dates 20100101-20101230 +cd ../debiasing +python preprocess_data.py --mod path_to_cropped --obs path_to_cropped -v tasmax -r '05' --out path_to_preprocessed --calib_dates 19810101-19831230 --valid_dates 20100101-20101230 ``` The preprocess_data.py script also aligns the calendars of the historical simulation data and observed data, ensuring that they have the same time dimension and checks that the observed and simulated historical data have the same dimensions. @@ -119,10 +126,10 @@ Note: By March 2023 we have only implemented the [python-cmethods](https://githu The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), -and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. +and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. To run this you need to replace `path_to_validation_data` with the output directories of the previous step and specify `path_to_corrected_data` as your output directory for the bias corrected data. You can also specify your preferred `bias_correction_method` (e.g. quantile_delta_mapping). 
``` -python3 run_cmethods.py --input_data_folder --out --method --group --v -p +python3 run_cmethods.py --input_data_folder path_to_validation_data --out path_to_corrected_data --method bias_correction_method --v 'tas' ``` The run_cmethods.py script loops over the time periods and applies debiasing in periods of 10 years in the following steps: From 0be354f9f3cc9fc1b587246a47cc039a058326d8 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 18 Oct 2023 17:01:54 +0100 Subject: [PATCH 095/146] adding R instructions --- docs/pipeline_guidance.md | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/docs/pipeline_guidance.md b/docs/pipeline_guidance.md index c6b08f6c..a82bb636 100644 --- a/docs/pipeline_guidance.md +++ b/docs/pipeline_guidance.md @@ -6,9 +6,27 @@ This is a detailed guide to our analyiss pipeline. ### Prerequisites -#### Setting up your environment +We use sophisticated bias correction methods, tapping into dedicated packages in both Python and R ecosystems. The integration of these languages allows us to utilize the cutting-edge functionalities specific to each. Given this dual-language nature of our analysis pipeline, we also provide preprocessing scripts written in both Python and R. To facilitate a seamless experience, users are required to set up both Python and R environments as detailed below. -Methods can be used with a custom environment, here we provide a Anaconda +### Setting up your R environment + +- **Download and Install R:** Visit [CRAN (The Comprehensive R Archive Network)](https://cran.r-project.org/) to download the latest version of R compatible with your operating system. + +- **Verify R Installation:** Open your command line interface (CLI) and type the following command and press enter to confirm that R is successfully installed on your system.: +``` +R --version +``` +- **Install Necessary R Packages:** Our analysis utilizes several R packages. 
You can install these packages by starting R (just type `R` in your command line and press enter) and entering the following commands in the R console: + ```R + install.packages("package1") + install.packages("package2") + #... (continue for all necessary packages) + ``` +- Replace `"package1"`, `"package2"`, etc., with the actual names of the required packages. A list of required R packages is provided in the 'R_Package_Requirements.txt' file. + +#### Setting up your python environment + +Methods can be used with a custom environment, here we provide an Anaconda environment file for ease-of-use. ``` conda env create -f environment.yml @@ -77,7 +95,7 @@ The first step in our analysis pipeline is to reproject the UKCP datasets to the > conda activate gdal_env > ``` -To execute the reprojection in parallel fashion, run the `reproject_all.sh` script from your shell. As an input to the script we provide the path to the raw netCDF files +To execute the reprojection in parallel fashion, run the `reproject_all.sh` script from your shell. As an input to the script replace `path_to_netcdf_files` with the path to the raw netCDF files. ```bash cd bash @@ -104,9 +122,6 @@ Because the bias correction process is computationally intensive, handling large ``` Rscript Cropping_Rasters_to_three_cities.R ``` - ->>>>>>> 3718d6b (update guidance to dummy paths) - **calibration-validation data split** For the purpose of assessing our bias correction, we then split our data, both the projection as well as the ground-truth observations by dates. In this example here we calibrate the bias correction based on the years 1981 to 1983. We then use data from year 2010 to validate the quality of the bias correction. You need to replace `path_to_cropped` with the path where the data from the previous cropping step was saved and `path_to_preprocessed` with the output directory you choose. You can leave the `-v` and `-r` flags as specified below or choose another metric and run if you prefer. 
From 3a3c3f84e630e5cef23eab6a2595042d21957f8a Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 18 Oct 2023 19:08:48 +0100 Subject: [PATCH 096/146] replace fixed path with input parameter but keep as default --- bash/reproject_all.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/bash/reproject_all.sh b/bash/reproject_all.sh index f540fd85..e02fb042 100755 --- a/bash/reproject_all.sh +++ b/bash/reproject_all.sh @@ -1,2 +1,10 @@ -files=`find /mnt/vmfileshare/ClimateData/Raw/UKCP2.2/ -type f -name "*.nc"` # Find all netCDF files in the UKCP2.2 directory +#!/bin/bash + +# Default path +default_path="/mnt/vmfileshare/ClimateData/Raw/UKCP2.2/" + +# Use the provided path or the default path if none is given +path_to_search="${1:-$default_path}" + +files=`find $path_to_search -type f -name "*.nc"` # Find all netCDF files in the specified or default directory parallel ./reproject_one.sh {} ::: $files # Run reproject_one.sh on each file in parallel From be44d04a6ebfc8283dd5bbdc00e0162383039d28 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 18 Oct 2023 20:28:33 +0100 Subject: [PATCH 097/146] FIX typos and links --- README.md | 2 +- docs/pipeline_guidance.md | 28 ++++++++++++++++++---------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 94cdebc9..9aa258a1 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Welcome to clim-recal, a specialized resource designed to tackle systematic erro Clim-recal is an **Extensive guide to application of BC methods**: -- Accessible information about the [why and how of bias correction for climate data]() +- Accessible information about the [why and how of bias correction for climate data](#why-bias-correction) - Technical resource for application BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km)[Convection Permitting Model 
(CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)) - In partnership with the MetOffice to ensure the propriety, quality, and usability of our work - Framework for open additions (in planning) diff --git a/docs/pipeline_guidance.md b/docs/pipeline_guidance.md index a82bb636..5b945140 100644 --- a/docs/pipeline_guidance.md +++ b/docs/pipeline_guidance.md @@ -1,18 +1,28 @@ # Analysis pipeline guidance -This is a detailed guide to our analyiss pipeline. +This is a detailed guide to our analysis pipeline. *see also this [flowchart viz](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline.md) of the pipeline* +**Contents:** +* [Prerequisites](#prerequisites) + * [Setting up your R environment](#setting-up-your-r-environment) + * [Setting up your python environment](#setting-up-your-python-environment) +* [Downloading the data](#downloading-the-data) +* [Reproject the data](#reproject-the-data) +* [Resample the data](#resample-the-data) +* [Preparing the bias correction and assessment](#preparing-the-bias-correction-and-assessment) +* [Applying the bias correction](#applying-the-bias-correction) + + ### Prerequisites -We use sophisticated bias correction methods, tapping into dedicated packages in both Python and R ecosystems. The integration of these languages allows us to utilize the cutting-edge functionalities specific to each. Given this dual-language nature of our analysis pipeline, we also provide preprocessing scripts written in both Python and R. To facilitate a seamless experience, users are required to set up both Python and R environments as detailed below. +For our bias correction methods, we tap into dedicated packages in both Python and R ecosystems. The integration of these languages allows us to utilize the cutting-edge functionalities implemented in each. 
Given this dual-language nature of our analysis pipeline, we also provide preprocessing scripts written in both Python and R. To facilitate a seamless experience, users are required to set up both Python and R environments as detailed below. -### Setting up your R environment +#### Setting up your R environment -- **Download and Install R:** Visit [CRAN (The Comprehensive R Archive Network)](https://cran.r-project.org/) to download the latest version of R compatible with your operating system. +- **Download and Install R:** Visit [CRAN (The Comprehensive R Archive Network)](https://cran.r-project.org/) to download the latest version of R compatible with your operating system. Then verify successful installation via command line: -- **Verify R Installation:** Open your command line interface (CLI) and type the following command and press enter to confirm that R is successfully installed on your system.: ``` R --version ``` @@ -26,8 +36,7 @@ R --version #### Setting up your python environment -Methods can be used with a custom environment, here we provide an Anaconda -environment file for ease-of-use. +For your python environment, we provide an Anaconda environment file for ease-of-use. ``` conda env create -f environment.yml ``` @@ -140,7 +149,7 @@ The preprocess_data.py script also aligns the calendars of the historical simula Note: By March 2023 we have only implemented the [python-cmethods](https://github.com/alan-turing-institute/python-cmethods) library. -The [run_cmethods.py](debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), +The [run_cmethods.py](../debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. 
It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. To run this you need to replace `path_to_validation_data` with the output directories of the previous step and specify `path_to_corrected_data` as your output directory for the bias corrected data. You can also specify your preferred `bias_correction_method` (e.g. quantile_delta_mapping). ``` @@ -154,5 +163,4 @@ The run_cmethods.py script loops over the time periods and applies debiasing in - Creates diagnotic figues of the output dataset (time series and time dependent maps) and saves it into the specified directory. For each 10 year time period it will produce an `.nc` output file with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. - -### Assessing the corrected data \ No newline at end of file + \ No newline at end of file From 5c827fa22c76b7eefe27699b7087bd633516f40c Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 18 Oct 2023 20:28:33 +0100 Subject: [PATCH 098/146] FIX typos and links --- docs/pipeline_guidance.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/pipeline_guidance.md b/docs/pipeline_guidance.md index 5b945140..12fc8847 100644 --- a/docs/pipeline_guidance.md +++ b/docs/pipeline_guidance.md @@ -9,8 +9,8 @@ This is a detailed guide to our analysis pipeline. 
* [Setting up your R environment](#setting-up-your-r-environment) * [Setting up your python environment](#setting-up-your-python-environment) * [Downloading the data](#downloading-the-data) -* [Reproject the data](#reproject-the-data) -* [Resample the data](#resample-the-data) +* [Reprojecting the data](#reprojecting-the-data) +* [Resampling the data](#resampling-the-data) * [Preparing the bias correction and assessment](#preparing-the-bias-correction-and-assessment) * [Applying the bias correction](#applying-the-bias-correction) @@ -93,7 +93,7 @@ In addition to the climate data we use geospatial data to divide the data into s - Major Towns and Cities boundaries for cropping out Manchester. Downloaded from [https://geoportal.statistics.gov.uk/](https://geoportal.statistics.gov.uk/datasets/980da620a0264647bd679642f96b42c1/explore) -### Reproject the data +### Reprojecting the data The HADs data and the UKCP projections have different resolution and coordinate system. For example the HADs dataset uses the British National Grid coordinate system. The first step in our analysis pipeline is to reproject the UKCP datasets to the British National Grid coordinate system. For this purpose, we utilize the Geospatial Data Abstraction Library (GDAL), designed for reading and writing raster and vector geospatial data formats. @@ -111,7 +111,7 @@ cd bash sh reproject_all.sh path_to_netcdf_files ``` -### Resample the data +### Resampling the data Resample the HADsUK dataset from 1km to 2.2km grid to match the UKCP reprojected grid. We run the resampling python script specifying the `--input` location of the reprojected files from the previous step, the UKCP `--grid` file an the `--output` location for saving the resampled files. 
From 9c5db01b8af46367d0fb46566a4e6b49fe8fae53 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 18 Oct 2023 21:00:23 +0100 Subject: [PATCH 099/146] adding note about azure virtual machines --- internal_docs/INTERNAL.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal_docs/INTERNAL.md b/internal_docs/INTERNAL.md index 18aeb702..44adfee8 100644 --- a/internal_docs/INTERNAL.md +++ b/internal_docs/INTERNAL.md @@ -49,6 +49,10 @@ All the data used in this project can be found in the `/Volumes/vmfileshare/Clim ``` ## Running the pipeline +> **Placeholder**: +> Creating an azure virtual machine outside the DYME-CHH azure resource group may cause +> permission errors in mounting vmfileshare from dymestorage1. + ### Reprojection In order to run the [reprojection step](https://github.com/alan-turing-institute/clim-recal/tree/documentation#reproject-the-data) of the pipeline on the Azure VM there are some additional steps that need to be taken: You need to set permissions and install the parallel package. 
From 45f0e0da1151fa0179aa03cda149319c01f0b319 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Wed, 18 Oct 2023 21:03:02 +0100 Subject: [PATCH 100/146] removing sphinx files --- docs/doc_make/Makefile | 20 ---------- docs/doc_make/ceda_ftp_download.rst | 5 --- docs/doc_make/conf.py | 61 ----------------------------- docs/doc_make/index.rst | 24 ------------ docs/doc_make/make.bat | 35 ----------------- docs/doc_make/resampling_hads.rst | 5 --- 6 files changed, 150 deletions(-) delete mode 100644 docs/doc_make/Makefile delete mode 100644 docs/doc_make/ceda_ftp_download.rst delete mode 100644 docs/doc_make/conf.py delete mode 100644 docs/doc_make/index.rst delete mode 100644 docs/doc_make/make.bat delete mode 100644 docs/doc_make/resampling_hads.rst diff --git a/docs/doc_make/Makefile b/docs/doc_make/Makefile deleted file mode 100644 index d4bb2cbb..00000000 --- a/docs/doc_make/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/doc_make/ceda_ftp_download.rst b/docs/doc_make/ceda_ftp_download.rst deleted file mode 100644 index 229b22a7..00000000 --- a/docs/doc_make/ceda_ftp_download.rst +++ /dev/null @@ -1,5 +0,0 @@ -ceda_ftp_download -================= - -.. 
automodule:: data_download.ceda_ftp_download - :members: diff --git a/docs/doc_make/conf.py b/docs/doc_make/conf.py deleted file mode 100644 index 5f5721f0..00000000 --- a/docs/doc_make/conf.py +++ /dev/null @@ -1,61 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys -import sphinx_rtd_theme - -sys.path.insert(0, os.path.abspath('../../python/')) -sys.path.insert(0, os.path.abspath('../../R/')) - -# -- Project information ----------------------------------------------------- - -project = 'clim-recal' -copyright = '2023, FIX:ADD AUTHORS' -author = 'FIX:ADD AUTHORS' - -# The full version, including alpha/beta/rc tags -release = '0.1.0' - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. 
-# -html_theme = 'sphinx_rtd_theme' -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -extensions = ['sphinx.ext.autodoc'] \ No newline at end of file diff --git a/docs/doc_make/index.rst b/docs/doc_make/index.rst deleted file mode 100644 index e5203e36..00000000 --- a/docs/doc_make/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. clim-recal documentation master file, created by - sphinx-quickstart on Thu Sep 21 12:44:53 2023. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to clim-recal's documentation! -====================================== - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - ceda_ftp_download - - resampling_hads - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/doc_make/make.bat b/docs/doc_make/make.bat deleted file mode 100644 index 32bb2452..00000000 --- a/docs/doc_make/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=. -set BUILDDIR=_build - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. 
- echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/doc_make/resampling_hads.rst b/docs/doc_make/resampling_hads.rst deleted file mode 100644 index 4a104c68..00000000 --- a/docs/doc_make/resampling_hads.rst +++ /dev/null @@ -1,5 +0,0 @@ -resampling_hads -=============== - -.. automodule:: resampling.resampling_hads - :members: From 35c34469058b1c41144614dd9dc999103555b7b5 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 19 Oct 2023 15:34:56 +0100 Subject: [PATCH 101/146] feat(doc): refactor diagram to `pipeline.mermaid` --- .gitignore | 6 +- docs/assets/pipeline.mermaid | 173 +++++++++++++++++++++++++++++++++++ 2 files changed, 178 insertions(+), 1 deletion(-) create mode 100644 docs/assets/pipeline.mermaid diff --git a/.gitignore b/.gitignore index b4693c3a..58419692 100644 --- a/.gitignore +++ b/.gitignore @@ -246,7 +246,11 @@ vignettes/*.pdf .Renviron # pkgdown site -docs/ +# Commented out by @griff-rees in configuring quarto +# docs/ + +# Add as a replacement for use of quarto +docs/reference # translation temp files po/*~ diff --git a/docs/assets/pipeline.mermaid b/docs/assets/pipeline.mermaid new file mode 100644 index 00000000..1cd65000 --- /dev/null +++ b/docs/assets/pipeline.mermaid @@ -0,0 +1,173 @@ +graph TB + +subgraph Legend + direction RL + data_external[(external data)] + data_fileshare[path to fileshare] + script_r([R script]) + script_py([Python script]) + script_bash([Bash script]) + var[parameter]:::var +end + +%%% INPUT DATA +subgraph CEDA + data_hads[(HADS)] + data_cpm[(UKCP2.2)] + data_hads --> script_load + data_cpm --> script_load + data_hads --> script_load +end + +subgraph Core pipeline + subgraph Data Ingress + %%% Loading data to disk + script_load([ceda_ftp_download.py]) + 
data_hads_raw[RAW/HadsUKgrid/../*.nc] + data_cpm_raw[RAW/UKCP2.2/../*.nc] + script_load --> data_hads_raw + script_load --> data_cpm_raw + end + subgraph Preprocessing + %% resampling & reprojecting + script_resampling([resampling_hads.py]) + script_reproject([reproject_all.sh]) + + data_hads_res[Processed/HadsUKgrid/../*.nc] + data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] + + script_resampling --> data_hads_res + script_reproject --> data_cpm_rep + + %% cropping + script_crop_city([Cropping_Rasters_to_three_cities.R]) + + data_cropped_cpm[Cropped/cpm/..] + data_cropped_hads[Cropped/hads/..] + script_crop_city --> data_cropped_cpm + script_crop_city --> data_cropped_hads + + + end + + subgraph Data Splitting + data_outdir[Cropped/preprocessed/..] + + script_preproc([preprocess_data.py]) + + data_out_train[../simh..] + data_out_calibrate[../simp..] + data_out_groundtruth_h[../obsh..] + data_out_groundtruth_p[../obsp..] + + script_preproc --> data_outdir + + data_outdir --> data_out_train + data_outdir --> data_out_calibrate + data_outdir --> data_out_groundtruth_h + data_outdir --> data_out_groundtruth_p + end + + subgraph bc[Bias Correction] + script_bc_py([run_cmethods.py]) + script_bc_r([run_cmethods.R]) + function_bc_r[[fitQmapQUANT.R]] + + + data_out_py[Debiased/...] 
+ data_out_r[Debiased/R/QuantileMapping/resultsL*] + + data_out_train --> script_bc_py + data_out_calibrate --> script_bc_py + data_out_groundtruth_h --> script_bc_py + data_out_train --> script_bc_r + data_out_calibrate --> script_bc_r + data_out_groundtruth_h --> script_bc_r + script_bc_r --> function_bc_r + + script_bc_py-->data_out_py + function_bc_r-->data_out_r + end + + subgraph Assessment + script_asses[tbc] + data_out_groundtruth_p --> script_asses + end + data_out_py --> script_asses + data_out_r --> script_asses +end + + +subgraph nner_py[Execute Python pipeline for MO dataset] + data_shape_uk[(shape London)] + data_shape_gl[(shape Glasgow)] + data_shape_ma[(shape Manchester)] + + + script_BC_wrapper[three_cities_debiasing.sh] + param1["metric (eg tasmax)"]:::var + param2["runs (eg 05)"]:::var + param3["BC method (eg quantile_mapping)"]:::var + param4[city]:::var + + script_BC_wrapper --> param1 + param1 --> param2 + param2 --> param3 + param3 --> param4 + param4 -- for loop --> script_preproc + + %% Looping connections + param4 -.-> param3 + param3 -.-> param2 + param2 -.-> param1 +end + +subgraph nner_jupyter[Jupyter Notebook for Guidance] + direction BT + data_shape_gl2[(shape Glasgow)] + data_cpm2[(UKCP2.2_Monthly)] + + param5["tasmax"]:::var + param6["quantile_mapping"]:::var + param7[Glasgow]:::var + + script_BC_wrapper --> param1 + param5 --> script_preproc + param6 --> script_preproc + param7 --> script_preproc + + data_cpm2 --> script_load + data_shape_gl2 --> script_crop_city +end + +%% between block connections +%% input preproc 1 +data_hads_raw --> script_resampling +data_cpm_raw --> script_reproject +%% input cropping +data_cpm_rep --> script_crop_city + +data_hads_res --> script_crop_city +data_shape_uk --> script_crop_city +data_shape_ma --> script_crop_city +data_shape_gl --> script_crop_city + +%% input preproc2 +data_cropped_cpm --> script_preproc +data_cropped_hads --> script_preproc + +param4 -- for loop --> script_bc_py + + +%% class 
styles +classDef python fill:#4CAF50; +classDef r fill:#FF5722; +classDef bash fill:#f9f +classDef var fill:none,stroke:#0f0; +classDef dashed stroke-dasharray: 5 5; + +class script_crop_city,script_crop_uk,function_bc_r,script_r,script_df_uk,function_bc,function_crop_bc,fn_crop_cpm,fn_crop_hads,fn_bc,script_bc_r r; +class script_load,script_resampling,script_preproc,script_bc_py,script_py python; +class script_reproject,script_BC_wrapper,script_bash bash; +class inner_py dashed; +class inner_r dashed; From eeeccc8af144420fd6ad18c6077e7cf155d5b508 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 19 Oct 2023 16:14:35 +0100 Subject: [PATCH 102/146] feat(doc): add `workflow.qmd` to `_quarto.yml` config --- _quarto.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/_quarto.yml b/_quarto.yml index 2db784ca..ac720181 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -11,6 +11,7 @@ project: - "R/README.md" - "R/misc/Identifying_Runs.md" - "docs/reference" + - "docs/workflow.qmd" # - "R/comparing-r-and-python/HADs-reprojection/WIP-Comparing-HADs-grids.md" - "python/README.md" @@ -31,6 +32,8 @@ website: contents: - text: "Summary" href: "README.md" + - text: "Pipeline" + href: "docs/pipeline.qmd" - section: "R" contents: - href: "R/README.md" From 96fc2e4a385995fe2f0bbb0fdc554e74925f6257 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 19 Oct 2023 17:30:06 +0100 Subject: [PATCH 103/146] feat(test): exclude `python-cmethods` tests by default and add `server` test `mark` --- python/.pytest.ini | 5 +++-- python/tests/test_debiasing.py | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/python/.pytest.ini b/python/.pytest.ini index 351f02b4..f8c23b23 100644 --- a/python/.pytest.ini +++ b/python/.pytest.ini @@ -1,10 +1,11 @@ # pytest.ini or .pytest.ini [pytest] minversion = 6.0 -addopts = -ra -q --doctest-modules --pdbcls=IPython.terminal.debugger:TerminalPdb +addopts = -ra -q --doctest-modules --pdbcls=IPython.terminal.debugger:TerminalPdb 
--ignore=python/debiasing/python-cmethods pythonpath = . testpaths = tests utils.py markers = - slow: mark test as slow. + slow: runs slowly. + server: only run on server. diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 432e88d6..83c85e47 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -590,6 +590,7 @@ def test_command_line_default() -> None: assert run_config.to_cli_preprocess_str() == CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT +@pytest.mark.server @pytest.mark.slow @pytest.mark.parametrize( 'city', (None, 'Glasgow',) From e6dc1df6eeb3c4686041faec36db6e7004d3d486 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Sat, 21 Oct 2023 06:08:10 +0100 Subject: [PATCH 104/146] fix(test): skip server specifc testing by default --- .pre-commit-config.yaml | 13 + python/.pytest.ini | 7 +- python/conftest.py | 29 +- python/tests/test_debiasing.py | 562 +++++++++++++++++++-------------- 4 files changed, 375 insertions(+), 236 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 10f04570..96e9cee0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,3 +24,16 @@ repos: - id: rst-backticks - id: rst-directive-colons - id: rst-inline-touching-normal + + - repo: https://github.com/hadialqattan/pycln + rev: v2.2.2 + hooks: + - id: pycln + args: ["python/"] + + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + name: isort (python) + args: ["--profile", "black"] diff --git a/python/.pytest.ini b/python/.pytest.ini index e8f916af..2a0de9a6 100644 --- a/python/.pytest.ini +++ b/python/.pytest.ini @@ -1,7 +1,12 @@ # pytest.ini or .pytest.ini [pytest] minversion = 6.0 -addopts = -ra -q --doctest-modules --pdbcls=IPython.terminal.debugger:TerminalPdb --ignore=python/debiasing/python-cmethods +addopts = -ra -q + --doctest-modules + --ignore=python/debiasing/python-cmethods + -m "not server" + --pdbcls=IPython.terminal.debugger:TerminalPdb + 
pythonpath = . testpaths = tests diff --git a/python/conftest.py b/python/conftest.py index 02097f8c..3026cfa4 100644 --- a/python/conftest.py +++ b/python/conftest.py @@ -1,8 +1,19 @@ -import sys import pprint +import sys +from os import PathLike +from pathlib import Path +from typing import Final import pytest +PYTHON_DIR_NAME: Final[Path] = Path("python") +MODULE_NAMES: Final[tuple[PathLike, ...]] = ( + "debiasing", + "resampling", + "data_download", + "load_data", +) + @pytest.fixture() def is_platform_darwin() -> bool: @@ -11,9 +22,19 @@ def is_platform_darwin() -> bool: @pytest.fixture(autouse=True) -def doctest_auto_fixtures( - doctest_namespace: dict, is_platform_darwin: bool -) -> None: +def ensure_python_path() -> None: + """Return path for test running.""" + path: Path = Path() + if not set(Path(p) for p in MODULE_NAMES) <= set(path.iterdir()): + raise ValueError( + f"'clim-recal' python tests must be " + f"run in 'clim-recal/{PYTHON_DIR_NAME}', " + f"not '{path.absolute()}'" + ) + + +@pytest.fixture(autouse=True) +def doctest_auto_fixtures(doctest_namespace: dict, is_platform_darwin: bool) -> None: """Elements to add to default `doctest` namespace.""" doctest_namespace["is_platform_darwin"] = is_platform_darwin doctest_namespace["pprint"] = pprint diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 601ee80d..3bb58356 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -2,30 +2,37 @@ Test generating and running `debiasing` scripts """ -import pytest -from pathlib import Path -from os import system, PathLike, chdir +import subprocess from dataclasses import dataclass -from typing import Final, Generator +from datetime import date from enum import StrEnum, auto -from datetime import date, datetime -import subprocess +from os import PathLike, chdir +from pathlib import Path +from typing import Final, Generator +import pytest from utils import ( - DATE_FORMAT_SPLIT_STR, DATE_FORMAT_STR, DateType, 
date_to_str, - iter_to_tuple_strs, date_range_to_str, path_iterdir + DATE_FORMAT_SPLIT_STR, + DATE_FORMAT_STR, + DateType, + date_range_to_str, + date_to_str, + iter_to_tuple_strs, + path_iterdir, ) +DATA_PATH_DEFAULT: Final[Path] = Path( + "/mnt/vmfileshare/ClimateData/Cropped/three.cities/" +) -DATA_PATH_DEFAULT: Final[Path] = Path('/mnt/vmfileshare/ClimateData/Cropped/three.cities/') - -COMMAND_DIR_DEFAULT: Final[Path] = Path('debiasing').resolve() +COMMAND_DIR_DEFAULT: Final[Path] = Path("debiasing").resolve() PREPROCESS_FILE_NAME: Final[Path] = Path("preprocess_data.py") CMETHODS_FILE_NAME: Final[Path] = Path("run_cmethods.py") class VariableOptions(StrEnum): """Supported options for variables""" + TASMAX = auto() RAINFALL = auto() TASMIN = auto() @@ -38,10 +45,11 @@ def default(cls) -> str: class RunOptions(StrEnum): """Supported options for variables""" - FIVE = '05' - SEVEN = '07' - EIGHT = '08' - SIX = '06' + + FIVE = "05" + SEVEN = "07" + EIGHT = "08" + SIX = "06" @classmethod def default(cls) -> str: @@ -51,6 +59,7 @@ def default(cls) -> str: class CityOptions(StrEnum): """Supported options for variables.""" + GLASGOW = "Glasgow" MANCHESTER = "Manchester" LONDON = "London" @@ -63,6 +72,7 @@ def default(cls) -> str: class MethodOptions(StrEnum): """Supported options for methods.""" + QUANTILE_DELTA_MAPPING = auto() QUANTILE_MAPPING = auto() VARIANCE_SCALING = auto() @@ -78,13 +88,14 @@ def default_method_2(cls) -> str: """Default method_2 option.""" return cls.VARIANCE_SCALING.value + PROCESSESORS_DEFAULT: Final[int] = 32 RUN_PREFIX_DEFAULT: Final[str] = "python" -MOD_FOLDER_DEFAULT: Final[Path] = Path('CPM') -OBS_FOLDER_DEFAULT: Final[Path] = Path('Hads.updated360') -PREPROCESS_OUT_FOLDER_DEFAULT: Final[Path] = Path('Preprocessed') -CMETHODS_OUT_FOLDER_DEFAULT: Final[Path] = Path('../../Debiased/three.cities.cropped') +MOD_FOLDER_DEFAULT: Final[Path] = Path("CPM") +OBS_FOLDER_DEFAULT: Final[Path] = Path("Hads.updated360") +PREPROCESS_OUT_FOLDER_DEFAULT: 
Final[Path] = Path("Preprocessed") +CMETHODS_OUT_FOLDER_DEFAULT: Final[Path] = Path("../../Debiased/three.cities.cropped") CALIB_DATE_START_DEFAULT: DateType = date(1981, 1, 1) CALIB_DATE_END_DEFAULT: DateType = date(1981, 12, 30) @@ -101,39 +112,62 @@ def default_method_2(cls) -> str: CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str, ...]] = ( - "python", PREPROCESS_FILE_NAME, - "--mod", DATA_PATH_DEFAULT / MOD_FOLDER_DEFAULT / CityOptions.default(), - "--obs", DATA_PATH_DEFAULT / OBS_FOLDER_DEFAULT / CityOptions.default(), - "-v", VariableOptions.default(), - "-r", RunOptions.default(), - "--out", (DATA_PATH_DEFAULT / PREPROCESS_OUT_FOLDER_DEFAULT / CityOptions.default() / - RunOptions.default() / VariableOptions.default()), - "--calib_dates", CALIB_DATES_STR_DEFAULT, - "--valid_dates", VALID_DATES_STR_DEFAULT, + "python", + PREPROCESS_FILE_NAME, + "--mod", + DATA_PATH_DEFAULT / MOD_FOLDER_DEFAULT / CityOptions.default(), + "--obs", + DATA_PATH_DEFAULT / OBS_FOLDER_DEFAULT / CityOptions.default(), + "-v", + VariableOptions.default(), + "-r", + RunOptions.default(), + "--out", + ( + DATA_PATH_DEFAULT + / PREPROCESS_OUT_FOLDER_DEFAULT + / CityOptions.default() + / RunOptions.default() + / VariableOptions.default() + ), + "--calib_dates", + CALIB_DATES_STR_DEFAULT, + "--valid_dates", + VALID_DATES_STR_DEFAULT, ) -CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_STR_CORRECT: Final[tuple[str, ...]] = ( - iter_to_tuple_strs(CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT) -) +CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_STR_CORRECT: Final[ + tuple[str, ...] 
+] = iter_to_tuple_strs(CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT) -CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join( +CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = " ".join( CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_STR_CORRECT ) CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( - "python", CMETHODS_FILE_NAME, - "--input_data_folder", CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT[11], - "--out", (DATA_PATH_DEFAULT / CMETHODS_OUT_FOLDER_DEFAULT / - CityOptions.default() / RunOptions.default()).resolve(), - "--method", MethodOptions.default_method_1(), - "-v", VariableOptions.default(), - "-p", PROCESSESORS_DEFAULT, + "python", + CMETHODS_FILE_NAME, + "--input_data_folder", + CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_CORRECT[11], + "--out", + ( + DATA_PATH_DEFAULT + / CMETHODS_OUT_FOLDER_DEFAULT + / CityOptions.default() + / RunOptions.default() + ).resolve(), + "--method", + MethodOptions.default_method_1(), + "-v", + VariableOptions.default(), + "-p", + PROCESSESORS_DEFAULT, ) -CLI_CMEHTODS_DEFAULT_COMMAND_TUPLE_STR_CORRECT: Final[tuple[str, ...]] = ( - iter_to_tuple_strs(CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT) -) -CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = ' '.join( +CLI_CMEHTODS_DEFAULT_COMMAND_TUPLE_STR_CORRECT: Final[ + tuple[str, ...] 
+] = iter_to_tuple_strs(CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT) +CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT: Final[str] = " ".join( CLI_CMEHTODS_DEFAULT_COMMAND_TUPLE_STR_CORRECT ) @@ -168,19 +202,21 @@ class RunConfig: calib_date_end: DateType = CALIB_DATE_END_DEFAULT valid_date_start: DateType = VALID_DATE_START_DEFAULT - valid_date_end: DateType = VALID_DATE_END_DEFAULT + valid_date_end: DateType = VALID_DATE_END_DEFAULT processors: int = PROCESSESORS_DEFAULT date_format_str: str = DATE_FORMAT_STR date_split_str: str = DATE_FORMAT_SPLIT_STR - def calib_dates_to_str(self, - start_date: DateType, - end_date: DateType, - in_format_str: str | None = None, - out_format_str: str | None = None, - split_str: str | None = None) -> str: + def calib_dates_to_str( + self, + start_date: DateType, + end_date: DateType, + in_format_str: str | None = None, + out_format_str: str | None = None, + split_str: str | None = None, + ) -> str: """Return date range as `str` from `calib_date_start` to `calib_date_end`. Example @@ -195,14 +231,18 @@ def calib_dates_to_str(self, """ start_date = start_date if start_date else self.calib_date_start end_date = end_date if end_date else self.calib_date_end - return self._date_range_to_str(start_date, end_date, in_format_str, out_format_str, split_str) - - def valid_dates_to_str(self, - start_date: DateType, - end_date: DateType, - in_format_str: str | None = None, - out_format_str: str | None = None, - split_str: str | None = None) -> str: + return self._date_range_to_str( + start_date, end_date, in_format_str, out_format_str, split_str + ) + + def valid_dates_to_str( + self, + start_date: DateType, + end_date: DateType, + in_format_str: str | None = None, + out_format_str: str | None = None, + split_str: str | None = None, + ) -> str: """Return date range as `str` from `valid_date_start` to `valid_date_end`. 
Example @@ -217,14 +257,18 @@ def valid_dates_to_str(self, """ start_date = start_date if start_date else self.valid_date_start end_date = end_date if end_date else self.valid_date_end - return self._date_range_to_str(start_date, end_date, in_format_str, out_format_str, split_str) - - def _date_range_to_str(self, - start_date: DateType, - end_date: DateType, - in_format_str: str | None = None, - out_format_str: str | None = None, - split_str: str | None = None) -> str: + return self._date_range_to_str( + start_date, end_date, in_format_str, out_format_str, split_str + ) + + def _date_range_to_str( + self, + start_date: DateType, + end_date: DateType, + in_format_str: str | None = None, + out_format_str: str | None = None, + split_str: str | None = None, + ) -> str: """Return date range as `str` from `calib_date_start` to `calib_date_end`. Example @@ -241,11 +285,12 @@ def _date_range_to_str(self, out_format_str = out_format_str if out_format_str else self.date_format_str split_str = split_str if split_str else self.date_split_str return date_range_to_str( - start_date=start_date, - end_date=end_date, - in_format_str=in_format_str, - out_format_str=out_format_str, - split_str=split_str) + start_date=start_date, + end_date=end_date, + in_format_str=in_format_str, + out_format_str=out_format_str, + split_str=split_str, + ) def mod_path(self, city: str | None = None) -> Path: """Return city estimates path. @@ -253,7 +298,7 @@ def mod_path(self, city: str | None = None) -> Path: Example ------- >>> if is_platform_darwin: - ... pytest.skip('paths fail if not linux') + ... 
pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> config.mod_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/CPM/Manchester') @@ -262,14 +307,14 @@ def mod_path(self, city: str | None = None) -> Path: """ city = city if city else self.city return self.data_path / self.mod_folder / city - + def obs_path(self, city: str | None = None) -> Path: """Return city observations path. Example ------- >>> if is_platform_darwin: - ... pytest.skip('paths fail if not linux') + ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> config.obs_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Hads.updated360/Manchester') @@ -278,19 +323,19 @@ def obs_path(self, city: str | None = None) -> Path: """ city = city if city else self.city return self.data_path / self.obs_folder / city - + def preprocess_out_path( - self, - city: str | None = None, - run: str | None = None, - variable: str | None = None - ) -> Path: + self, + city: str | None = None, + run: str | None = None, + variable: str | None = None, + ) -> Path: """Return path to save results. Example ------- >>> if is_platform_darwin: - ... pytest.skip('paths fail if not linux') + ... 
pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> config.preprocess_out_path() PosixPath('/mnt/vmfileshare/ClimateData/Cropped/three.cities/Preprocessed/Manchester/05/tasmax') @@ -300,13 +345,15 @@ def preprocess_out_path( city = city if city else self.city run = run if run else self.run variable = variable if variable else self.variable - return (self.data_path / self.preprocess_out_folder / city / run / variable).resolve() - + return ( + self.data_path / self.preprocess_out_folder / city / run / variable + ).resolve() + def cmethods_out_path( - self, - city: str | None = None, - run: str | None = None, - ) -> Path: + self, + city: str | None = None, + run: str | None = None, + ) -> Path: """Return path to save cmethods results. Example @@ -331,17 +378,18 @@ def run_prefix_tuple(self) -> tuple[str, ...]: >>> config.run_prefix_tuple ('python', '-m') """ - return tuple(self.run_prefix.split(' ')) - - def to_cli_preprocess_tuple(self, - variable: str | None = None, - run: str | None = None, - city: str | None = None, - calib_start: DateType | None = None, - calib_end: DateType | None = None, - valid_start: DateType | None = None, - valid_end: DateType | None = None, - ) -> tuple[str | PathLike, ...]: + return tuple(self.run_prefix.split(" ")) + + def to_cli_preprocess_tuple( + self, + variable: str | None = None, + run: str | None = None, + city: str | None = None, + calib_start: DateType | None = None, + calib_end: DateType | None = None, + valid_start: DateType | None = None, + valid_end: DateType | None = None, + ) -> tuple[str | PathLike, ...]: """Generate a `tuple` of `str` for a command line command. 
Note @@ -361,31 +409,45 @@ def to_cli_preprocess_tuple(self, mod_path: Path = self.mod_path(city=city) obs_path: Path = self.obs_path(city=city) - preprocess_out_path: Path = self.preprocess_out_path(city=city, run=run, variable=variable) - calib_dates_str: str = self.calib_dates_to_str(start_date=calib_start, end_date=calib_end) - valid_dates_str: str = self.valid_dates_to_str(start_date=valid_start, end_date=valid_end) + preprocess_out_path: Path = self.preprocess_out_path( + city=city, run=run, variable=variable + ) + calib_dates_str: str = self.calib_dates_to_str( + start_date=calib_start, end_date=calib_end + ) + valid_dates_str: str = self.valid_dates_to_str( + start_date=valid_start, end_date=valid_end + ) return ( - *self.run_prefix_tuple, + *self.run_prefix_tuple, self.preprocess_data_file, - '--mod', mod_path, - '--obs', obs_path, - '-v', variable, - '-r', run, - '--out', preprocess_out_path, - '--calib_dates', calib_dates_str, - '--valid_dates', valid_dates_str, + "--mod", + mod_path, + "--obs", + obs_path, + "-v", + variable, + "-r", + run, + "--out", + preprocess_out_path, + "--calib_dates", + calib_dates_str, + "--valid_dates", + valid_dates_str, ) - def to_cli_preprocess_tuple_strs(self, - variable: str | None = None, - run: str | None = None, - city: str | None = None, - calib_start: DateType | None = None, - calib_end: DateType | None = None, - valid_start: DateType | None = None, - valid_end: DateType | None = None, - ) -> tuple[str, ...]: + def to_cli_preprocess_tuple_strs( + self, + variable: str | None = None, + run: str | None = None, + city: str | None = None, + calib_start: DateType | None = None, + calib_end: DateType | None = None, + valid_start: DateType | None = None, + valid_end: DateType | None = None, + ) -> tuple[str, ...]: """Generate a command line interface `str` `tuple` a test example. Example @@ -394,26 +456,28 @@ def to_cli_preprocess_tuple_strs(self, >>> command_str_tuple: tuple[str, ...] 
= config.to_cli_preprocess_tuple_strs() >>> assert command_str_tuple == CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_STR_CORRECT """ - return iter_to_tuple_strs(self.to_cli_preprocess_tuple( - variable=variable, - run=run, - city=city, - calib_start=calib_start, - calib_end=calib_end, - valid_start=valid_start, - valid_end=valid_end, - )) - - - def to_cli_preprocess_str(self, - variable: str | None = None, - run: str | None = None, - city: str | None = None, - calib_start: DateType | None = None, - calib_end: DateType | None = None, - valid_start: DateType | None = None, - valid_end: DateType | None = None, - ) -> str: + return iter_to_tuple_strs( + self.to_cli_preprocess_tuple( + variable=variable, + run=run, + city=city, + calib_start=calib_start, + calib_end=calib_end, + valid_start=valid_start, + valid_end=valid_end, + ) + ) + + def to_cli_preprocess_str( + self, + variable: str | None = None, + run: str | None = None, + city: str | None = None, + calib_start: DateType | None = None, + calib_end: DateType | None = None, + valid_start: DateType | None = None, + valid_end: DateType | None = None, + ) -> str: """Generate a command line interface str as a test example. Example @@ -424,21 +488,25 @@ def to_cli_preprocess_str(self, >>> CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT[:96] #doctest: +ELLIPSIS 'python preprocess_data.py --mod /.../CPM/Manchester' """ - return ' '.join(self.to_cli_preprocess_tuple_strs( - variable=variable, - run=run, - city=city, - calib_start=calib_start, - calib_end=calib_end, - valid_start=valid_start, - valid_end=valid_end, - )) + return " ".join( + self.to_cli_preprocess_tuple_strs( + variable=variable, + run=run, + city=city, + calib_start=calib_start, + calib_end=calib_end, + valid_start=valid_start, + valid_end=valid_end, + ) + ) def list_mod_folder(self, city: str | None = None) -> Generator[Path, None, None]: """`Iterable` of all `Path`s in `self.mod_folder`. Example ------- + >>> if is_platform_darwin: + ... 
pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> len(tuple(config.list_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT True @@ -450,42 +518,50 @@ def list_obs_folder(self, city: str | None = None) -> Generator[Path, None, None Example ------- + >>> if is_platform_darwin: + ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> len(tuple(config.list_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT True """ return path_iterdir(self.obs_path(city=city)) - def list_preprocess_out_folder(self, - city: str | None = None, - run: str | None = None, - variable: str | None = None - ) -> Generator[Path, None, None]: + def list_preprocess_out_folder( + self, + city: str | None = None, + run: str | None = None, + variable: str | None = None, + ) -> Generator[Path, None, None]: """`Iterable` of all `Path`s in `self.preprocess_out_folder`. Example ------- + >>> if is_platform_darwin: + ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> (len(tuple(config.list_preprocess_out_folder())) == ... 
PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT) True """ - return path_iterdir(self.preprocess_out_path(city=city, run=run, variable=variable)) + return path_iterdir( + self.preprocess_out_path(city=city, run=run, variable=variable) + ) @property def command_path(self) -> Path: """Return command path relative to running tests.""" return (Path() / self.command_dir).absolute() - def to_cli_run_cmethods_1_tuple(self, - city: str | None = None, - run: str | None = None, - variable: str | None = None, - method_1: str | None = None, - input_data_path: PathLike | None = None, - cmethods_out_path: PathLike | None = None, - processors: int | None = None, - ) -> tuple[str | PathLike, ...]: + def to_cli_run_cmethods_1_tuple( + self, + city: str | None = None, + run: str | None = None, + variable: str | None = None, + method_1: str | None = None, + input_data_path: PathLike | None = None, + cmethods_out_path: PathLike | None = None, + processors: int | None = None, + ) -> tuple[str | PathLike, ...]: """Generate a `tuple` of `str` for a command line command. 
Note @@ -505,36 +581,44 @@ def to_cli_run_cmethods_1_tuple(self, method_1 = method_1 if method_1 else self.method_1 input_data_path: PathLike = ( - input_data_path if input_data_path + input_data_path + if input_data_path else self.preprocess_out_path(city=city, run=run, variable=variable) ) - + cmethods_out_path = ( - cmethods_out_path if cmethods_out_path else - self.cmethods_out_path(city=city, run=run) + cmethods_out_path + if cmethods_out_path + else self.cmethods_out_path(city=city, run=run) ) processors = processors if processors else self.processors return ( - *self.run_prefix_tuple, + *self.run_prefix_tuple, self.run_cmethods_file, - '--input_data_folder', input_data_path, - '--out', cmethods_out_path, - '--method', method_1, - '-v', variable, - '-p', processors, + "--input_data_folder", + input_data_path, + "--out", + cmethods_out_path, + "--method", + method_1, + "-v", + variable, + "-p", + processors, ) - def to_cli_run_cmethods_1_tuple_strs(self, - city: str | None = None, - run: str | None = None, - variable: str | None = None, - method_1: str | None = None, - input_data_path: PathLike | None = None, - cmethods_out_path: PathLike | None = None, - processors: int | None = None, - ) -> tuple[str, ...]: + def to_cli_run_cmethods_1_tuple_strs( + self, + city: str | None = None, + run: str | None = None, + variable: str | None = None, + method_1: str | None = None, + input_data_path: PathLike | None = None, + cmethods_out_path: PathLike | None = None, + processors: int | None = None, + ) -> tuple[str, ...]: """Generate a command line interface `str` `tuple` a test example. Example @@ -543,26 +627,28 @@ def to_cli_run_cmethods_1_tuple_strs(self, >>> command_str_tuple: tuple[str, ...] 
= config.to_cli_run_cmethods_1_tuple_strs() >>> assert command_str_tuple == CLI_CMEHTODS_DEFAULT_COMMAND_TUPLE_STR_CORRECT """ - return iter_to_tuple_strs(self.to_cli_run_cmethods_1_tuple( - city=city, - run=run, - variable=variable, - method_1=method_1, - input_data_path=input_data_path, - cmethods_out_path=cmethods_out_path, - processors=processors, - )) - - - def to_cli_run_cmethods_1_str(self, - city: str | None = None, - run: str | None = None, - variable: str | None = None, - method_1: str | None = None, - input_data_path: PathLike | None = None, - cmethods_out_path: PathLike | None = None, - processors: int | None = None, - ) -> str: + return iter_to_tuple_strs( + self.to_cli_run_cmethods_1_tuple( + city=city, + run=run, + variable=variable, + method_1=method_1, + input_data_path=input_data_path, + cmethods_out_path=cmethods_out_path, + processors=processors, + ) + ) + + def to_cli_run_cmethods_1_str( + self, + city: str | None = None, + run: str | None = None, + variable: str | None = None, + method_1: str | None = None, + input_data_path: PathLike | None = None, + cmethods_out_path: PathLike | None = None, + processors: int | None = None, + ) -> str: """Generate a command line interface str as a test example. Example @@ -573,15 +659,17 @@ def to_cli_run_cmethods_1_str(self, >>> CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT #doctest: +ELLIPSIS 'python run_cmethods.py...--method quantile_delta_mapping...' 
""" - return ' '.join(self.to_cli_run_cmethods_1_tuple_strs( - city=city, - run=run, - variable=variable, - method_1=method_1, - input_data_path=input_data_path, - cmethods_out_path=cmethods_out_path, - processors=processors, - )) + return " ".join( + self.to_cli_run_cmethods_1_tuple_strs( + city=city, + run=run, + variable=variable, + method_1=method_1, + input_data_path=input_data_path, + cmethods_out_path=cmethods_out_path, + processors=processors, + ) + ) @pytest.fixture @@ -593,48 +681,60 @@ def run_config(tmp_path: Path) -> RunConfig: def test_command_line_default() -> None: """Test default generated cli `str`.""" run_config: RunConfig = RunConfig() - assert run_config.to_cli_preprocess_str() == CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT + assert ( + run_config.to_cli_preprocess_str() == CLI_PREPROCESS_DEFAULT_COMMAND_STR_CORRECT + ) @pytest.mark.server @pytest.mark.slow @pytest.mark.parametrize( - 'city', (None, 'Glasgow',) + "city", + ( + None, + "Glasgow", + ), ) def test_run(run_config, city) -> None: """Test running generated command script via a subprocess.""" chdir(run_config.command_path) assert PREPROCESS_FILE_NAME in tuple(Path().iterdir()) - preprocess_run: subprocess.CompletedProcess = ( - subprocess.run( - run_config.to_cli_preprocess_tuple_strs(city=city), - capture_output=True, text=True - ) + preprocess_run: subprocess.CompletedProcess = subprocess.run( + run_config.to_cli_preprocess_tuple_strs(city=city), + capture_output=True, + text=True, ) assert preprocess_run.returncode == 0 - assert len(tuple(run_config.list_mod_folder(city=city))) == MOD_FOLDER_FILES_COUNT_CORRECT - assert len(tuple(run_config.list_obs_folder(city=city))) == OBS_FOLDER_FILES_COUNT_CORRECT - assert (len(tuple(run_config.list_preprocess_out_folder(city=city))) == - PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT) + assert ( + len(tuple(run_config.list_mod_folder(city=city))) + == MOD_FOLDER_FILES_COUNT_CORRECT + ) + assert ( + len(tuple(run_config.list_obs_folder(city=city))) 
+ == OBS_FOLDER_FILES_COUNT_CORRECT + ) + assert ( + len(tuple(run_config.list_preprocess_out_folder(city=city))) + == PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT + ) test_city = CityOptions.default() if city is None else city for log_txt in ( - "Saved observed (HADs) data for validation, period ('2010-01-01', '2010-12-30')", - f"{test_city}/05/tasmax/modv_var-tasmax_run-05_20100101_20101230.nc"): + "Saved observed (HADs) data for validation, period ('2010-01-01', '2010-12-30')", + f"{test_city}/05/tasmax/modv_var-tasmax_run-05_20100101_20101230.nc", + ): assert log_txt in preprocess_run.stdout - cmethods_run: subprocess.CompletedProcess = ( - subprocess.run( - run_config.to_cli_run_cmethods_1_tuple_strs(city=city), - capture_output=True, text=True - ) + cmethods_run: subprocess.CompletedProcess = subprocess.run( + run_config.to_cli_run_cmethods_1_tuple_strs(city=city), + capture_output=True, + text=True, ) assert cmethods_run.returncode == 0 for log_txt in ( - "Loading modelled calibration data (CPM)", - + "Loading modelled calibration data (CPM)", ( f"Debiased/three.cities.cropped/{test_city}/05/tasmax/" "debiased_quantile_delta_mapping_result_var" "-tasmax_quantiles-1000_kind-+_group-None_20100101_20101229.nc" ), - ): + ): assert log_txt in cmethods_run.stdout From 88bfb874bfc0103fdd3deccfacf9f02efbabdcc4 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 25 Oct 2023 14:38:49 +0100 Subject: [PATCH 105/146] feat(test): set `PROCESSESORS_DEFAULT` to 2 in `test_debiasing.py` --- python/tests/test_debiasing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 3bb58356..e8e57f03 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -89,7 +89,7 @@ def default_method_2(cls) -> str: return cls.VARIANCE_SCALING.value -PROCESSESORS_DEFAULT: Final[int] = 32 +PROCESSESORS_DEFAULT: Final[int] = 2 RUN_PREFIX_DEFAULT: Final[str] = "python" 
MOD_FOLDER_DEFAULT: Final[Path] = Path("CPM") From a1d8dac9905d0f0104660e2edcdd160f619f9279 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 25 Oct 2023 14:59:45 +0100 Subject: [PATCH 106/146] feat(test): add `variable`, `run`, `city`, `method_1` and `method_2` config for `test_debiasing` --- python/tests/test_debiasing.py | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index e8e57f03..fa1d4c62 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -76,7 +76,7 @@ class MethodOptions(StrEnum): QUANTILE_DELTA_MAPPING = auto() QUANTILE_MAPPING = auto() VARIANCE_SCALING = auto() - DELTA_METHODS = auto() + DELTA_METHOD = auto() @classmethod def default_method_1(cls) -> str: @@ -689,18 +689,28 @@ def test_command_line_default() -> None: @pytest.mark.server @pytest.mark.slow @pytest.mark.parametrize( - "city", + "city, variable, run, method_1, method_2", ( - None, - "Glasgow", + (None, None, None, None, None), + ("Glasgow", None, None, None, None), + ( + "London", + VariableOptions.RAINFALL, + RunOptions.SIX, + None, + MethodOptions.DELTA_METHOD, + ), ), ) -def test_run(run_config, city) -> None: +def test_run(run_config, city, variable, run, method_1, method_2) -> None: """Test running generated command script via a subprocess.""" chdir(run_config.command_path) assert PREPROCESS_FILE_NAME in tuple(Path().iterdir()) + if method_1 or method_2: + run_config.method_1 = method_1 + run_config.method_2 = method_2 preprocess_run: subprocess.CompletedProcess = subprocess.run( - run_config.to_cli_preprocess_tuple_strs(city=city), + run_config.to_cli_preprocess_tuple_strs(city=city, variable=variable, run=run), capture_output=True, text=True, ) @@ -720,7 +730,7 @@ def test_run(run_config, city) -> None: test_city = CityOptions.default() if city is None else city for log_txt in ( "Saved observed (HADs) data for validation, period 
('2010-01-01', '2010-12-30')", - f"{test_city}/05/tasmax/modv_var-tasmax_run-05_20100101_20101230.nc", + f"{test_city}/{run}/{variable}/modv_var-{variable}_run-{run}_20100101_20101230.nc", ): assert log_txt in preprocess_run.stdout cmethods_run: subprocess.CompletedProcess = subprocess.run( @@ -732,9 +742,9 @@ def test_run(run_config, city) -> None: for log_txt in ( "Loading modelled calibration data (CPM)", ( - f"Debiased/three.cities.cropped/{test_city}/05/tasmax/" + f"Debiased/three.cities.cropped/{test_city}/{run}/{variable}/" "debiased_quantile_delta_mapping_result_var" - "-tasmax_quantiles-1000_kind-+_group-None_20100101_20101229.nc" + f"-{variable}_quantiles-1000_kind-+_group-None_20100101_20101229.nc" ), ): assert log_txt in cmethods_run.stdout From 52f7ac8c865d9b6d803e628dedd7ab3161740560 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 25 Oct 2023 18:18:30 +0000 Subject: [PATCH 107/146] feat(test): refactor test_debiasing.py to cover more model parameters --- python/tests/test_debiasing.py | 129 ++++++++++++++++++++------------- 1 file changed, 77 insertions(+), 52 deletions(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index fa1d4c62..48883b0d 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -79,15 +79,10 @@ class MethodOptions(StrEnum): DELTA_METHOD = auto() @classmethod - def default_method_1(cls) -> str: - """Default method_1 option.""" + def default(cls) -> str: + """Default method option.""" return cls.QUANTILE_DELTA_MAPPING.value - @classmethod - def default_method_2(cls) -> str: - """Default method_2 option.""" - return cls.VARIANCE_SCALING.value - PROCESSESORS_DEFAULT: Final[int] = 2 RUN_PREFIX_DEFAULT: Final[str] = "python" @@ -157,7 +152,7 @@ def default_method_2(cls) -> str: / RunOptions.default() ).resolve(), "--method", - MethodOptions.default_method_1(), + MethodOptions.default(), "-v", VariableOptions.default(), "-p", @@ -186,8 +181,7 @@ class RunConfig: 
variable: str = VariableOptions.default() run: str = RunOptions.default() city: str = CityOptions.default() - method_1: str = MethodOptions.default_method_1() - method_2: str = MethodOptions.default_method_2() + method: str = MethodOptions.default() run_prefix: str = RUN_PREFIX_DEFAULT preprocess_data_file: PathLike = PREPROCESS_FILE_NAME run_cmethods_file: PathLike = CMETHODS_FILE_NAME @@ -500,7 +494,7 @@ def to_cli_preprocess_str( ) ) - def list_mod_folder(self, city: str | None = None) -> Generator[Path, None, None]: + def yield_mod_folder(self, city: str | None = None) -> Generator[Path, None, None]: """`Iterable` of all `Path`s in `self.mod_folder`. Example @@ -508,12 +502,12 @@ def list_mod_folder(self, city: str | None = None) -> Generator[Path, None, None >>> if is_platform_darwin: ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() - >>> len(tuple(config.list_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT + >>> len(tuple(config.yield_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT True """ return path_iterdir(self.obs_path(city=city)) - def list_obs_folder(self, city: str | None = None) -> Generator[Path, None, None]: + def yield_obs_folder(self, city: str | None = None) -> Generator[Path, None, None]: """`Iterable` of all `Path`s in `self.obs_folder`. Example @@ -521,12 +515,12 @@ def list_obs_folder(self, city: str | None = None) -> Generator[Path, None, None >>> if is_platform_darwin: ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() - >>> len(tuple(config.list_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT + >>> len(tuple(config.yield_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT True """ return path_iterdir(self.obs_path(city=city)) - def list_preprocess_out_folder( + def yield_preprocess_out_folder( self, city: str | None = None, run: str | None = None, @@ -539,7 +533,7 @@ def list_preprocess_out_folder( >>> if is_platform_darwin: ... 
pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() - >>> (len(tuple(config.list_preprocess_out_folder())) == + >>> (len(tuple(config.yield_preprocess_out_folder())) == ... PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT) True """ @@ -552,12 +546,12 @@ def command_path(self) -> Path: """Return command path relative to running tests.""" return (Path() / self.command_dir).absolute() - def to_cli_run_cmethods_1_tuple( + def to_cli_run_cmethods_tuple( self, city: str | None = None, run: str | None = None, variable: str | None = None, - method_1: str | None = None, + method: str | None = None, input_data_path: PathLike | None = None, cmethods_out_path: PathLike | None = None, processors: int | None = None, @@ -572,13 +566,13 @@ def to_cli_run_cmethods_1_tuple( Example ------- >>> config: RunConfig = RunConfig() - >>> command_str_tuple: tuple[str, ...] = config.to_cli_run_cmethods_1_tuple() + >>> command_str_tuple: tuple[str, ...] = config.to_cli_run_cmethods_tuple() >>> assert command_str_tuple == CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT """ city = city if city else self.city variable = variable if variable else self.variable run = run if run else self.run - method_1 = method_1 if method_1 else self.method_1 + method = method if method else self.method input_data_path: PathLike = ( input_data_path @@ -602,19 +596,19 @@ def to_cli_run_cmethods_1_tuple( "--out", cmethods_out_path, "--method", - method_1, + method, "-v", variable, "-p", processors, ) - def to_cli_run_cmethods_1_tuple_strs( + def to_cli_run_cmethods_tuple_strs( self, city: str | None = None, run: str | None = None, variable: str | None = None, - method_1: str | None = None, + method: str | None = None, input_data_path: PathLike | None = None, cmethods_out_path: PathLike | None = None, processors: int | None = None, @@ -624,27 +618,27 @@ def to_cli_run_cmethods_1_tuple_strs( Example ------- >>> config: RunConfig = RunConfig() - >>> command_str_tuple: tuple[str, ...] 
= config.to_cli_run_cmethods_1_tuple_strs() + >>> command_str_tuple: tuple[str, ...] = config.to_cli_run_cmethods_tuple_strs() >>> assert command_str_tuple == CLI_CMEHTODS_DEFAULT_COMMAND_TUPLE_STR_CORRECT """ return iter_to_tuple_strs( - self.to_cli_run_cmethods_1_tuple( + self.to_cli_run_cmethods_tuple( city=city, run=run, variable=variable, - method_1=method_1, + method=method, input_data_path=input_data_path, cmethods_out_path=cmethods_out_path, processors=processors, ) ) - def to_cli_run_cmethods_1_str( + def to_cli_run_cmethods_str( self, city: str | None = None, run: str | None = None, variable: str | None = None, - method_1: str | None = None, + method: str | None = None, input_data_path: PathLike | None = None, cmethods_out_path: PathLike | None = None, processors: int | None = None, @@ -654,17 +648,17 @@ def to_cli_run_cmethods_1_str( Example ------- >>> config: RunConfig = RunConfig() - >>> config.to_cli_run_cmethods_1_str() == CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT + >>> config.to_cli_run_cmethods_str() == CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT True >>> CLI_CMETHODS_DEFAULT_COMMAND_STR_CORRECT #doctest: +ELLIPSIS 'python run_cmethods.py...--method quantile_delta_mapping...' 
""" return " ".join( - self.to_cli_run_cmethods_1_tuple_strs( + self.to_cli_run_cmethods_tuple_strs( city=city, run=run, variable=variable, - method_1=method_1, + method=method, input_data_path=input_data_path, cmethods_out_path=cmethods_out_path, processors=processors, @@ -689,26 +683,42 @@ def test_command_line_default() -> None: @pytest.mark.server @pytest.mark.slow @pytest.mark.parametrize( - "city, variable, run, method_1, method_2", + "city, variable, run, method", ( - (None, None, None, None, None), - ("Glasgow", None, None, None, None), ( - "London", + CityOptions.default(), # 'Manchester' + VariableOptions.default(), # 'tasmax` + RunOptions.default(), # '05' + MethodOptions.default(), # 'quantile_delta_mapping' + ), + ( + CityOptions.GLASGOW, + VariableOptions.default(), + RunOptions.default(), + MethodOptions.default(), + ), + pytest.param( + CityOptions.LONDON, + VariableOptions.default(), + RunOptions.default(), + MethodOptions.default(), + marks=pytest.mark.slow, + ), + pytest.param( + CityOptions.LONDON, VariableOptions.RAINFALL, RunOptions.SIX, - None, MethodOptions.DELTA_METHOD, + marks=pytest.mark.slow, ), ), ) -def test_run(run_config, city, variable, run, method_1, method_2) -> None: +def test_run(run_config, city, variable, run, method) -> None: """Test running generated command script via a subprocess.""" + initial_folder: path = Path().resolve() chdir(run_config.command_path) assert PREPROCESS_FILE_NAME in tuple(Path().iterdir()) - if method_1 or method_2: - run_config.method_1 = method_1 - run_config.method_2 = method_2 + # run_config.method = method preprocess_run: subprocess.CompletedProcess = subprocess.run( run_config.to_cli_preprocess_tuple_strs(city=city, variable=variable, run=run), capture_output=True, @@ -716,35 +726,50 @@ def test_run(run_config, city, variable, run, method_1, method_2) -> None: ) assert preprocess_run.returncode == 0 assert ( - len(tuple(run_config.list_mod_folder(city=city))) + 
len(tuple(run_config.yield_mod_folder(city=city))) == MOD_FOLDER_FILES_COUNT_CORRECT ) assert ( - len(tuple(run_config.list_obs_folder(city=city))) + len(tuple(run_config.yield_obs_folder(city=city))) == OBS_FOLDER_FILES_COUNT_CORRECT ) - assert ( - len(tuple(run_config.list_preprocess_out_folder(city=city))) - == PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT - ) - test_city = CityOptions.default() if city is None else city + + if method == MethodOptions.default(): + assert ( + len(tuple(run_config.yield_preprocess_out_folder(city=city))) + == PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT + ) for log_txt in ( "Saved observed (HADs) data for validation, period ('2010-01-01', '2010-12-30')", - f"{test_city}/{run}/{variable}/modv_var-{variable}_run-{run}_20100101_20101230.nc", + f"{city}/{run}/{variable}/modv_var-{variable}_run-{run}_20100101_20101230.nc", ): assert log_txt in preprocess_run.stdout cmethods_run: subprocess.CompletedProcess = subprocess.run( - run_config.to_cli_run_cmethods_1_tuple_strs(city=city), + run_config.to_cli_run_cmethods_tuple_strs( + city=city, run=run, variable=variable, method=method + ), capture_output=True, text=True, ) assert cmethods_run.returncode == 0 for log_txt in ( "Loading modelled calibration data (CPM)", + # ( + # f"Debiased/three.cities.cropped/{city}/{run}/{variable}/" + # f"debiased_{method}_result_var" + # f"-{variable}_quantiles-1000_kind-+_group-None_20100101_20101229.nc" + # ), ( - f"Debiased/three.cities.cropped/{test_city}/{run}/{variable}/" - "debiased_quantile_delta_mapping_result_var" - f"-{variable}_quantiles-1000_kind-+_group-None_20100101_20101229.nc" + f"Debiased/three.cities.cropped/{city}/{run}/{variable}/" + f"debiased_{method}_result_var" ), + "Saving to", + # ( + # f"Saving to {DATA_PATH_DEFAULT}/{city}/{run}/{variable}/" + # f"debiased_{method}_result_var-{variable}_kind-+None_20100101_20101229.nc" + # ), + (f"{city}/{run}/{variable}/debiased_{method}_result_var-"), ): assert log_txt in cmethods_run.stdout + + 
chdir(initial_folder) From 0ef355fb729bbe89a89af0c2d7abf1c2afdd8d90 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 26 Oct 2023 13:55:07 +0100 Subject: [PATCH 108/146] ENH readme - improve welcome text Co-authored-by: Greg Mingas --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9aa258a1..0e046ab4 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ # Welcome to the `clim-recal` repository! -Welcome to clim-recal, a specialized resource designed to tackle systematic errors or biases in Regional Climate Models (RCMs). As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. Clim-recal provides both a **broad review** of available bias correction methods as well as **practical tutorials** and **guidance** on how to easily apply those methods to various datasets. +Welcome to clim-recal, a specialized resource designed to tackle systematic errors or biases in Regional Climate Models (RCMs). As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. + +Clim-recal provides both a **broad review** of available bias correction methods as well as **software**, **practical tutorials** and **guidance** that helps users apply these methods methods to various datasets. 
Clim-recal is an **Extensive guide to application of BC methods**: From 9805571fa52e82fe0483f8971bb7abf435922a23 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 26 Oct 2023 13:55:31 +0100 Subject: [PATCH 109/146] ENH readme - improve description Co-authored-by: Greg Mingas --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0e046ab4..643a86d7 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Welcome to clim-recal, a specialized resource designed to tackle systematic erro Clim-recal provides both a **broad review** of available bias correction methods as well as **software**, **practical tutorials** and **guidance** that helps users apply these methods methods to various datasets. -Clim-recal is an **Extensive guide to application of BC methods**: +Clim-recal is an **extensive software library and guide to application of BC methods.** Clim-recal: - Accessible information about the [why and how of bias correction for climate data](#why-bias-correction) - Technical resource for application BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)) From 046bcf236a7496d7675c1dd2728d24af708d8179 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 26 Oct 2023 13:58:59 +0100 Subject: [PATCH 110/146] add line for installation of gdal --- docs/pipeline_guidance.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/pipeline_guidance.md b/docs/pipeline_guidance.md index 12fc8847..25e85c9d 100644 --- a/docs/pipeline_guidance.md +++ b/docs/pipeline_guidance.md @@ -42,8 +42,9 @@ conda env create -f environment.yml ``` > **Warning**: -> To reproduce our exact outputs, you will require GDAL version 3.4. 
Please be aware that this specific version of GDAL requires a different Python version than the one specified in our environment file. Therefore, we have not included it in the environment file and instead, for the reprojection step, you'll need to set up a new environment: +> To reproduce our exact outputs, you will require GDAL version 3.4. Please be aware that this specific version of GDAL requires a different Python version than the one specified in our environment file. Therefore, we have not included it in the environment file and instead, for the reprojection step, you'll need to install GDAL (for example using conda) and set up a new environment: > ``` +> conda install -c conda-forge gdal > conda create -n gdal_env python=3.10 gdal=3.4 > ``` From 6467a9ea25d434d78052049bc1ff9b8096edb7f8 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 26 Oct 2023 14:06:29 +0100 Subject: [PATCH 111/146] Update README.md Co-authored-by: Greg Mingas --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 643a86d7..685b41ae 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Clim-recal is an **extensive software library and guide to application of BC met - Accessible information about the [why and how of bias correction for climate data](#why-bias-correction) - Technical resource for application BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)) - In partnership with the MetOffice to ensure the propriety, quality, and usability of our work -- Framework for open additions (in planning) +- Provides a framework for open additions of new software libraries/bias correction methods (in planning) ## Table of Contents From eac5b367c516122ac00757737b11ef29070322db Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 
26 Oct 2023 14:08:44 +0100 Subject: [PATCH 112/146] ENH readme - improve description Co-authored-by: Greg Mingas --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 685b41ae..93295bee 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Clim-recal provides both a **broad review** of available bias correction methods Clim-recal is an **extensive software library and guide to application of BC methods.** Clim-recal: - Accessible information about the [why and how of bias correction for climate data](#why-bias-correction) -- Technical resource for application BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)) +- Is a software library for for the application of BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)). Clim-recal brings together different software packages in Python and R that implement a variety of bias correction methods, making it easy to apply them to data and compare their outputs. 
- In partnership with the MetOffice to ensure the propriety, quality, and usability of our work - Provides a framework for open additions of new software libraries/bias correction methods (in planning) From 1b1f5f6bd595ea769c9d92489057a908b89e8ae7 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 26 Oct 2023 14:09:10 +0100 Subject: [PATCH 113/146] ENH readme Co-authored-by: Greg Mingas --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 93295bee..a0b9dfe7 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Clim-recal provides both a **broad review** of available bias correction methods Clim-recal is an **extensive software library and guide to application of BC methods.** Clim-recal: -- Accessible information about the [why and how of bias correction for climate data](#why-bias-correction) +- Contains accessible information about the [why and how of bias correction for climate data](#why-bias-correction) - Is a software library for for the application of BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)). Clim-recal brings together different software packages in Python and R that implement a variety of bias correction methods, making it easy to apply them to data and compare their outputs. 
- In partnership with the MetOffice to ensure the propriety, quality, and usability of our work - Provides a framework for open additions of new software libraries/bias correction methods (in planning) From f79a84b9044251debda96dcb34403d1667571d58 Mon Sep 17 00:00:00 2001 From: Sophie Arana Date: Thu, 26 Oct 2023 14:09:21 +0100 Subject: [PATCH 114/146] ENH readme Co-authored-by: Greg Mingas --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a0b9dfe7..0683a810 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ Clim-recal is an **extensive software library and guide to application of BC met - Contains accessible information about the [why and how of bias correction for climate data](#why-bias-correction) - Is a software library for for the application of BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km)[Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)). Clim-recal brings together different software packages in Python and R that implement a variety of bias correction methods, making it easy to apply them to data and compare their outputs. 
-- In partnership with the MetOffice to ensure the propriety, quality, and usability of our work +- Was developed in partnership with the MetOffice to ensure the propriety, quality, and usability of our work - Provides a framework for open additions of new software libraries/bias correction methods (in planning) ## Table of Contents From 9c2537b6690e697100dedef96af956bca7d054c7 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 26 Oct 2023 15:36:44 +0100 Subject: [PATCH 115/146] fix: typos in `test_debiasing.py` --- python/tests/test_debiasing.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 48883b0d..930e3307 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -139,7 +139,7 @@ def default(cls) -> str: CLI_PREPROCESS_DEFAULT_COMMAND_TUPLE_STR_CORRECT ) -CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str]] = ( +CLI_CMETHODS_DEFAULT_COMMAND_TUPLE_CORRECT: Final[tuple[str, ...]] = ( "python", CMETHODS_FILE_NAME, "--input_data_folder", @@ -505,6 +505,7 @@ def yield_mod_folder(self, city: str | None = None) -> Generator[Path, None, Non >>> len(tuple(config.yield_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT True """ + city = city if city else self.city return path_iterdir(self.obs_path(city=city)) def yield_obs_folder(self, city: str | None = None) -> Generator[Path, None, None]: @@ -518,6 +519,7 @@ def yield_obs_folder(self, city: str | None = None) -> Generator[Path, None, Non >>> len(tuple(config.yield_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT True """ + city = city if city else self.city return path_iterdir(self.obs_path(city=city)) def yield_preprocess_out_folder( @@ -537,6 +539,9 @@ def yield_preprocess_out_folder( ... 
PREPROCESS_OUT_FOLDER_FILES_COUNT_CORRECT) True """ + city = city if city else self.city + run = run if run else self.run + variable = variable if variable else self.variable return path_iterdir( self.preprocess_out_path(city=city, run=run, variable=variable) ) @@ -573,8 +578,9 @@ def to_cli_run_cmethods_tuple( variable = variable if variable else self.variable run = run if run else self.run method = method if method else self.method + processors = processors if processors else self.processors - input_data_path: PathLike = ( + input_data_path = ( input_data_path if input_data_path else self.preprocess_out_path(city=city, run=run, variable=variable) @@ -586,8 +592,6 @@ def to_cli_run_cmethods_tuple( else self.cmethods_out_path(city=city, run=run) ) - processors = processors if processors else self.processors - return ( *self.run_prefix_tuple, self.run_cmethods_file, @@ -715,7 +719,7 @@ def test_command_line_default() -> None: ) def test_run(run_config, city, variable, run, method) -> None: """Test running generated command script via a subprocess.""" - initial_folder: path = Path().resolve() + initial_folder: Path = Path().resolve() chdir(run_config.command_path) assert PREPROCESS_FILE_NAME in tuple(Path().iterdir()) # run_config.method = method From 659c148ae3b8a4886f421504d7d3e7fecd83e38d Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 30 Oct 2023 18:12:13 +0000 Subject: [PATCH 116/146] feat(doc): add quarto to environment.yml --- environment.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/environment.yml b/environment.yml index 35a835ca..3821d8c2 100644 --- a/environment.yml +++ b/environment.yml @@ -24,6 +24,7 @@ dependencies: - openssl=3.0.5 - pip=22.3 - python=3.11.0 + - r-quarto=1.3 - readline=8.1.2 - setuptools=65.5.0 - tk=8.6.12 From 45a19a5c40f7718b8d21a2984e190c95bb373064 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 30 Oct 2023 18:49:37 +0000 Subject: [PATCH 117/146] feat(doc): add quarto local render instructions to 
README.md --- README.md | 53 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 3af02492..45034359 100644 --- a/README.md +++ b/README.md @@ -1,36 +1,36 @@ # Welcome to the `clim-recal` repository! -Welcome to clim-recal, a specialized resource designed to tackle systematic errors or biases in Regional Climate Models (RCMs). As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. +Welcome to `clim-recal`, a specialized resource designed to tackle systematic errors or biases in Regional Climate Models (RCMs). As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. -Clim-recal provides both a **broad review** of available bias correction methods as well as **software**, **practical tutorials** and **guidance** that helps users apply these methods methods to various datasets. +`clim-recal` provides both a **broad review** of available bias correction methods as well as **software**, **practical tutorials** and **guidance** that helps users apply these methods methods to various datasets. 
-Clim-recal is an **extensive software library and guide to application of BC methods.** Clim-recal: +`clim-recal` is an **extensive software library and guide to application of Bias Correction (BC) methods.** `clim-recal`: - Contains accessible information about the [why and how of bias correction for climate data](#why-bias-correction) -- Is a software library for for the application of BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km) [Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)). Clim-recal brings together different software packages in Python and R that implement a variety of bias correction methods, making it easy to apply them to data and compare their outputs. +- Is a software library for for the application of BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km) [Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)). `clim-recal` brings together different software packages in `python` and `R` that implement a variety of bias correction methods, making it easy to apply them to data and compare their outputs. - Was developed in partnership with the MetOffice to ensure the propriety, quality, and usability of our work - Provides a framework for open additions of new software libraries/bias correction methods (in planning) ## Table of Contents 1. [Overview: Bias Correction Pipeline](#overview-bias-correction-pipeline) -3. [Documentation](#documentation) -4. [The dataset](#the-dataset) +2. [Documentation](#documentation) +3. [The dataset](#the-dataset) 4. [Why bias correction?](#why-bias-correction) -8. [License](#license) -9. [Contributors](#contributors) +5. [License](#license) +6. 
[Contributors](#contributors) ## Overview: Bias Correction Pipeline -Here we provide an example of how to run a debiasing pipeline starting. The pipeline has the following steps: +`clim-recal` is a debiasing pipeline, with the following steps: 1. **Set-up & data download** *We provide custom scripts to facilitate download of data* 2. **Preprocessing** *This includes reprojecting, resampling & splitting the data prior to bias correction* -5. **Apply bias correction** +3. **Apply bias correction** *Our pipeline embeds two distinct methods of bias correction* -6. **Assess the debiased data** +4. **Assess the debiased data** *We have developed a way to assess the quality of the debiasing step across multiple alternative methods* For a quick start on bias correction, refer to our [comprehensive analysis pipeline guide](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline_guidance.md). @@ -38,7 +38,16 @@ For a quick start on bias correction, refer to our [comprehensive analysis pipel ## Documentation 🚧 In Progress -We are in the process of developing comprehensive documentation for our codebase to supplement the guidance provided in this document. In the interim, for Python scripts, you can leverage the inline documentation (docstrings) available within the code. To access a summary of the available options and usage information for any Python script, you can use the `--help` flag in the command line as follows: +We are in the process of developing comprehensive documentation for our codebase to supplement the guidance provided in this document and other `README.md` files in this code base. 
In the interim, you can: + +- Comments within `R` scripts +- See command line `--help` documentation for some of our `python` scripts +- See documentation within `python` function and class [`docstrings`](https://docs.python.org/3/library/doctest.html) +- Locally render documentation via [`quarto`](https://quarto.org/) + +For `R` scripts, please refer to contextual information and usage guidelines, and feel free to reach out with any specific queries. + +To access a summary of the available options and usage information for any `python` script, you can use the `--help` flag in the command line as follows: ```sh $ python resampling_hads.py --help @@ -54,7 +63,23 @@ options: This will display all available options for the script, including their purposes. -For R scripts, please refer to the comments within the R scripts for contextual information and usage guidelines, and feel free to reach out with any specific queries. +We also hope to provide comprehensive documentation via [`quarto`](https://quarto.org/). This is a work in progress, but if you would like to render that locally you can do so via [`conda`](https://docs.conda.io): + +1. Ensure you have a local installation of [`conda`](https://docs.conda.io). +1. Checkout a copy of our `git` repository +1. Create a local `conda` `environment` via our `environment.yml` file. This should install `quarto`. +1. Activate that environment +1. Run `quarto preview`. + +Below are example `bash` shell commands to render locally (assuming you have a `conda` install): + +```sh +$ git clone https://github.com/alan-turing-institute/clim-recal +$ cd clim-recal +$ conda create -n clim-recal -f environment.yml +$ conda activate clim-recal +$ quarto preview +``` We appreciate your patience and encourage you to check back for updates on our ongoing documentation efforts. 
@@ -74,7 +99,7 @@ Researchers, policy-makers and other stakeholders wishing to use publicly availa Part of the `clim-recal` project is to review several bias correction methods. This work is ongoing and you can find our initial [taxonomy here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. -Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the **`clim-recal`** repository. +Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the `clim-recal` repository. 1. Senatore et al., 2022, https://doi.org/10.1016/j.ejrh.2022.101120 2. Ayar et al., 2021, https://doi.org/10.1038/s41598-021-82715-1 From ef07d8e200378387f173128d1fae9c26614969ef Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 30 Oct 2023 18:57:29 +0000 Subject: [PATCH 118/146] feat(ci): add commented out `R` config to `.pre-commit-config.yaml` --- .pre-commit-config.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 96e9cee0..6d9f367b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,3 +37,10 @@ repos: - id: isort name: isort (python) args: ["--profile", "black"] + + # Currently fails to build. 
+ # See: https://github.com/lorenzwalthert/precommit/issues/476 + # - repo: https://github.com/lorenzwalthert/precommit + # rev: v0.3.2 + # hooks: + # - id: readme-rmd-rendered From 24457f5ab0c13a23212a1b319b38db881a37827a Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 30 Oct 2023 19:32:06 +0000 Subject: [PATCH 119/146] fix(doc): fix typos in navigation and some rephrasing --- README.md | 52 +++++++++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 45034359..8cc43e9f 100644 --- a/README.md +++ b/README.md @@ -1,24 +1,25 @@ # Welcome to the `clim-recal` repository! -Welcome to `clim-recal`, a specialized resource designed to tackle systematic errors or biases in Regional Climate Models (RCMs). As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. +Welcome to `clim-recal`, a specialized resource designed to tackle systematic errors or biases in **Regional Climate Models (RCMs)**. As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. `clim-recal` provides both a **broad review** of available bias correction methods as well as **software**, **practical tutorials** and **guidance** that helps users apply these methods methods to various datasets. 
-`clim-recal` is an **extensive software library and guide to application of Bias Correction (BC) methods.** `clim-recal`: +`clim-recal` is an **extensive software library and guide to application of Bias Correction (BC) methods**: - Contains accessible information about the [why and how of bias correction for climate data](#why-bias-correction) -- Is a software library for for the application of BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km) [Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf)). `clim-recal` brings together different software packages in `python` and `R` that implement a variety of bias correction methods, making it easy to apply them to data and compare their outputs. +- Is a software library for for the application of BC methods (see our full pipeline for bias-correction of the ground-breaking local-scale (2.2km) [Convection Permitting Model (CPM)](https://www.metoffice.gov.uk/pub/data/weather/uk/ukcp18/science-reports/UKCP-Convection-permitting-model-projections-report.pdf). `clim-recal` brings together different software packages in `python` and `R` that implement a variety of bias correction methods, making it easy to apply them to data and compare their outputs. - Was developed in partnership with the MetOffice to ensure the propriety, quality, and usability of our work - Provides a framework for open additions of new software libraries/bias correction methods (in planning) ## Table of Contents 1. [Overview: Bias Correction Pipeline](#overview-bias-correction-pipeline) -2. [Documentation](#documentation) -3. [The dataset](#the-dataset) -4. [Why bias correction?](#why-bias-correction) -5. [License](#license) -6. [Contributors](#contributors) +1. [Documentation](#documentation) +1. [The Datasets](#the-datasets) +1. [Why Bias Correction?](#why-bias-correction) +1. 
[Contributing](#contributing) +1. [Future Plans](#future-plans) +1. [License](/LICENCE) ## Overview: Bias Correction Pipeline @@ -35,19 +36,22 @@ Welcome to `clim-recal`, a specialized resource designed to tackle systematic er For a quick start on bias correction, refer to our [comprehensive analysis pipeline guide](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline_guidance.md). -## Documentation -🚧 In Progress +## 🚧 Documentation -We are in the process of developing comprehensive documentation for our codebase to supplement the guidance provided in this document and other `README.md` files in this code base. In the interim, you can: +We are in the process of developing comprehensive documentation for our code base to supplement the guidance provided in this and other `README.md` files. In the interim, there is documentation available in the following forms: - Comments within `R` scripts -- See command line `--help` documentation for some of our `python` scripts -- See documentation within `python` function and class [`docstrings`](https://docs.python.org/3/library/doctest.html) -- Locally render documentation via [`quarto`](https://quarto.org/) +- Command line `--help` documentation for some of our `python` scripts +- `python` `function` and `class` [`docstrings`](https://docs.python.org/3/library/doctest.html) +- Local render of documentation via [`quarto`](https://quarto.org/) + +### `R` For `R` scripts, please refer to contextual information and usage guidelines, and feel free to reach out with any specific queries. 
-To access a summary of the available options and usage information for any `python` script, you can use the `--help` flag in the command line as follows: +### `python` + +For many of our `python` command line scripts, you can use the `--help` flag to access a summary of the available options and usage information: ```sh $ python resampling_hads.py --help @@ -63,15 +67,17 @@ options: This will display all available options for the script, including their purposes. -We also hope to provide comprehensive documentation via [`quarto`](https://quarto.org/). This is a work in progress, but if you would like to render that locally you can do so via [`conda`](https://docs.conda.io): +### Quarto + +We also hope to provide comprehensive documentation via [`quarto`](https://quarto.org/). This is a work in progress, but if you would like to render documentation locally you can do so via `quarto` and [`conda`](https://docs.conda.io): -1. Ensure you have a local installation of [`conda`](https://docs.conda.io). +1. Ensure you have a [local installation](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) of `conda` or [`anaconda`](https://www.anaconda.com/download) . 1. Checkout a copy of our `git` repository 1. Create a local `conda` `environment` via our `environment.yml` file. This should install `quarto`. 1. Activate that environment 1. Run `quarto preview`. -Below are example `bash` shell commands to render locally (assuming you have a `conda` install): +Below are example `bash` shell commands to render locally after installing `conda`: ```sh $ git clone https://github.com/alan-turing-institute/clim-recal @@ -83,7 +89,7 @@ $ quarto preview We appreciate your patience and encourage you to check back for updates on our ongoing documentation efforts. -## The datasets +## The Datasets ### UKCP18 The UK Climate Projections 2018 (UKCP18) dataset offers insights into the potential climate changes in the UK. 
UKCP18 is an advancement of the UKCP09 projections and delivers the latest evaluations of the UK's possible climate alterations in land and marine regions throughout the 21st century. This crucial information aids in future Climate Change Risk Assessments and supports the UK’s adaptation to climate change challenges and opportunities as per the National Adaptation Programme. @@ -91,9 +97,9 @@ The UK Climate Projections 2018 (UKCP18) dataset offers insights into the potent ### HADS [HadUK-Grid](https://www.metoffice.gov.uk/research/climate/maps-and-data/data/haduk-grid/haduk-grid) is a comprehensive collection of climate data for the UK, compiled from various land surface observations across the country. This data is organized into a uniform grid to ensure consistent coverage throughout the UK at up to 1km x 1km resolution. The dataset, spanning from 1836 to the present, includes a variety of climate variables such as air temperature, precipitation, sunshine, and wind speed, available on daily, monthly, seasonal, and annual timescales. -## Why bias correction? +## Why Bias Correction? -Regional climate models (RCMs) contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. +Regional climate models contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). 
Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. @@ -105,13 +111,13 @@ Our work is however, just like climate data, intended to be dynamic, and we are 2. Ayar et al., 2021, https://doi.org/10.1038/s41598-021-82715-1 -### Let's collaborate! +## Contributing We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! ### Adding to the conda environment file -To use `R` in anaconda you may need to specify the `conda-forge` channel: +To use `R` in `anaconda` you may need to specify the `conda-forge` channel: ```sh $ conda config --env --add channels conda-forge From 0fac4f3067dde19d0df98c7cedd1e6c5f7167860 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 30 Oct 2023 19:46:23 +0000 Subject: [PATCH 120/146] fix(doc): README.md navigation tweaks for GitHub compatibility --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 8cc43e9f..e91be540 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ Welcome to `clim-recal`, a specialized resource designed to tackle systematic er 1. [Why Bias Correction?](#why-bias-correction) 1. [Contributing](#contributing) 1. [Future Plans](#future-plans) -1. [License](/LICENCE) +1. [License](/LICENSE) ## Overview: Bias Correction Pipeline @@ -36,7 +36,7 @@ Welcome to `clim-recal`, a specialized resource designed to tackle systematic er For a quick start on bias correction, refer to our [comprehensive analysis pipeline guide](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline_guidance.md). 
-## 🚧 Documentation +## Documentation 🚧 We are in the process of developing comprehensive documentation for our code base to supplement the guidance provided in this and other `README.md` files. In the interim, there is documentation available in the following forms: @@ -136,6 +136,6 @@ and installing with: $ pip install -r requirements.txt ``` -## 🚧 Future plans +## Future plans 🚧 - **More BC Methods**: Further bias correction of UKCP18 products. *This is planned for a future release and is not available yet.* - **Pipeline for adding new methods**: *This is planned for a future release and is not available yet.* From d21646beef87d543ce1e7738c50785ce8705c6af Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 30 Oct 2023 20:07:33 +0000 Subject: [PATCH 121/146] feat(doc): add `docs/workflow.qmd` --- docs/workflow.qmd | 191 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 docs/workflow.qmd diff --git a/docs/workflow.qmd b/docs/workflow.qmd new file mode 100644 index 00000000..437343b1 --- /dev/null +++ b/docs/workflow.qmd @@ -0,0 +1,191 @@ +--- +title: "**Workflow**" +output: + github_document +--- + +Workflow diagram + + + +```{mermaid} +graph TB + +subgraph Legend + direction RL + data_external[(external data)] + data_fileshare[path to fileshare] + script_r([R script]) + script_py([Python script]) + script_bash([Bash script]) + var[parameter]:::var +end + +%%% INPUT DATA +subgraph CEDA + data_hads[(HADS)] + data_cpm[(UKCP2.2)] + data_hads --> script_load + data_cpm --> script_load + data_hads --> script_load +end + +subgraph Core pipeline + subgraph Data Ingress + %%% Loading data to disk + script_load([ceda_ftp_download.py]) + data_hads_raw[RAW/HadsUKgrid/../*.nc] + data_cpm_raw[RAW/UKCP2.2/../*.nc] + script_load --> data_hads_raw + script_load --> data_cpm_raw + end + subgraph Preprocessing + %% resampling & reprojecting + script_resampling([resampling_hads.py]) + script_reproject([reproject_all.sh]) + + 
data_hads_res[Processed/HadsUKgrid/../*.nc] + data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] + + script_resampling --> data_hads_res + script_reproject --> data_cpm_rep + + %% cropping + script_crop_city([Cropping_Rasters_to_three_cities.R]) + + data_cropped_cpm[Cropped/cpm/..] + data_cropped_hads[Cropped/hads/..] + script_crop_city --> data_cropped_cpm + script_crop_city --> data_cropped_hads + + + end + + subgraph Data Splitting + data_outdir[Cropped/preprocessed/..] + + script_preproc([preprocess_data.py]) + + data_out_train[../simh..] + data_out_calibrate[../simp..] + data_out_groundtruth_h[../obsh..] + data_out_groundtruth_p[../obsp..] + + script_preproc --> data_outdir + + data_outdir --> data_out_train + data_outdir --> data_out_calibrate + data_outdir --> data_out_groundtruth_h + data_outdir --> data_out_groundtruth_p + end + + subgraph bc[Bias Correction] + script_bc_py([run_cmethods.py]) + script_bc_r([run_cmethods.R]) + function_bc_r[[fitQmapQUANT.R]] + + + data_out_py[Debiased/...] 
+ data_out_r[Debiased/R/QuantileMapping/resultsL*] + + data_out_train --> script_bc_py + data_out_calibrate --> script_bc_py + data_out_groundtruth_h --> script_bc_py + data_out_train --> script_bc_r + data_out_calibrate --> script_bc_r + data_out_groundtruth_h --> script_bc_r + script_bc_r --> function_bc_r + + script_bc_py-->data_out_py + function_bc_r-->data_out_r + end + + subgraph Assessment + script_asses[tbc] + data_out_groundtruth_p --> script_asses + end + data_out_py --> script_asses + data_out_r --> script_asses +end + + +subgraph nner_py[Execute Python pipeline for MO dataset] + data_shape_uk[(shape London)] + data_shape_gl[(shape Glasgow)] + data_shape_ma[(shape Manchester)] + + + script_BC_wrapper[three_cities_debiasing.sh] + param1["metric (eg tasmax)"]:::var + param2["runs (eg 05)"]:::var + param3["BC method (eg quantile_mapping)"]:::var + param4[city]:::var + + script_BC_wrapper --> param1 + param1 --> param2 + param2 --> param3 + param3 --> param4 + param4 -- for loop --> script_preproc + + %% Looping connections + param4 -.-> param3 + param3 -.-> param2 + param2 -.-> param1 +end + +subgraph nner_jupyter[Jupyter Notebook for Guidance] + direction BT + data_shape_gl2[(shape Glasgow)] + data_cpm2[(UKCP2.2_Monthly)] + + param5["tasmax"]:::var + param6["quantile_mapping"]:::var + param7[Glasgow]:::var + + script_BC_wrapper --> param1 + param5 --> script_preproc + param6 --> script_preproc + param7 --> script_preproc + + data_cpm2 --> script_load + data_shape_gl2 --> script_crop_city +end + +%% between block connections +%% input preproc 1 +data_hads_raw --> script_resampling +data_cpm_raw --> script_reproject +%% input cropping +data_cpm_rep --> script_crop_city + +data_hads_res --> script_crop_city +data_shape_uk --> script_crop_city +data_shape_ma --> script_crop_city +data_shape_gl --> script_crop_city + +%% input preproc2 +data_cropped_cpm --> script_preproc +data_cropped_hads --> script_preproc + +param4 -- for loop --> script_bc_py + + +%% class 
styles +classDef python fill:#4CAF50; +classDef r fill:#FF5722; +classDef bash fill:#f9f +classDef var fill:none,stroke:#0f0; +classDef dashed stroke-dasharray: 5 5; + +class script_crop_city,script_crop_uk,function_bc_r,script_r,script_df_uk,function_bc,function_crop_bc,fn_crop_cpm,fn_crop_hads,fn_bc,script_bc_r r; +class script_load,script_resampling,script_preproc,script_bc_py,script_py python; +class script_reproject,script_BC_wrapper,script_bash bash; +class inner_py dashed; +class inner_r dashed; +``` From 1d439ba3bab613d92308016b977e1a81c9ab9aa2 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 30 Oct 2023 20:26:30 +0000 Subject: [PATCH 122/146] fix(doc): fix `README.md` navigation and `docs/pipeline.qmd` --- README.md | 4 +- _quarto.yml | 2 +- docs/pipeline.md | 180 ---------------------------- docs/{workflow.qmd => pipeline.qmd} | 0 4 files changed, 3 insertions(+), 183 deletions(-) delete mode 100644 docs/pipeline.md rename docs/{workflow.qmd => pipeline.qmd} (100%) diff --git a/README.md b/README.md index e91be540..b74a6a5e 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Welcome to `clim-recal`, a specialized resource designed to tackle systematic er For a quick start on bias correction, refer to our [comprehensive analysis pipeline guide](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline_guidance.md). -## Documentation 🚧 +## Documentation [🚧](#documentation) We are in the process of developing comprehensive documentation for our code base to supplement the guidance provided in this and other `README.md` files. In the interim, there is documentation available in the following forms: @@ -136,6 +136,6 @@ and installing with: $ pip install -r requirements.txt ``` -## Future plans 🚧 +## Future plans [🚧](#future-plans) - **More BC Methods**: Further bias correction of UKCP18 products. 
*This is planned for a future release and is not available yet.* - **Pipeline for adding new methods**: *This is planned for a future release and is not available yet.* diff --git a/_quarto.yml b/_quarto.yml index ac720181..cf086ca0 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -11,7 +11,7 @@ project: - "R/README.md" - "R/misc/Identifying_Runs.md" - "docs/reference" - - "docs/workflow.qmd" + - "docs/pipeline.qmd" # - "R/comparing-r-and-python/HADs-reprojection/WIP-Comparing-HADs-grids.md" - "python/README.md" diff --git a/docs/pipeline.md b/docs/pipeline.md deleted file mode 100644 index 285ac4c5..00000000 --- a/docs/pipeline.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Analysis pipeline ---- -```mermaid - -graph TB - -subgraph Legend - direction RL - data_external[(external data)] - data_fileshare[path to fileshare] - script_r([R script]) - script_py([Python script]) - script_bash([Bash script]) - var[parameter]:::var -end - -%%% INPUT DATA -subgraph CEDA - data_hads[(HADS)] - data_cpm[(UKCP2.2)] - data_hads --> script_load - data_cpm --> script_load - data_hads --> script_load -end - -subgraph Core pipeline - subgraph Data Ingress - %%% Loading data to disk - script_load([ceda_ftp_download.py]) - data_hads_raw[RAW/HadsUKgrid/../*.nc] - data_cpm_raw[RAW/UKCP2.2/../*.nc] - script_load --> data_hads_raw - script_load --> data_cpm_raw - end - subgraph Preprocessing - %% resampling & reprojecting - script_resampling([resampling_hads.py]) - script_reproject([reproject_all.sh]) - - data_hads_res[Processed/HadsUKgrid/../*.nc] - data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] - - script_resampling --> data_hads_res - script_reproject --> data_cpm_rep - - %% cropping - script_crop_city([Cropping_Rasters_to_three_cities.R]) - - data_cropped_cpm[Cropped/cpm/..] - data_cropped_hads[Cropped/hads/..] - script_crop_city --> data_cropped_cpm - script_crop_city --> data_cropped_hads - - - end - - subgraph Data Splitting - data_outdir[Cropped/preprocessed/..] 
- - script_preproc([preprocess_data.py]) - - data_out_train[../simh..] - data_out_calibrate[../simp..] - data_out_groundtruth_h[../obsh..] - data_out_groundtruth_p[../obsp..] - - script_preproc --> data_outdir - - data_outdir --> data_out_train - data_outdir --> data_out_calibrate - data_outdir --> data_out_groundtruth_h - data_outdir --> data_out_groundtruth_p - end - - subgraph bc[Bias Correction] - script_bc_py([run_cmethods.py]) - script_bc_r([run_cmethods.R]) - function_bc_r[[fitQmapQUANT.R]] - - - data_out_py[Debiased/...] - data_out_r[Debiased/R/QuantileMapping/resultsL*] - - data_out_train --> script_bc_py - data_out_calibrate --> script_bc_py - data_out_groundtruth_h --> script_bc_py - data_out_train --> script_bc_r - data_out_calibrate --> script_bc_r - data_out_groundtruth_h --> script_bc_r - script_bc_r --> function_bc_r - - script_bc_py-->data_out_py - function_bc_r-->data_out_r - end - - subgraph Assessment - script_asses[tbc] - data_out_groundtruth_p --> script_asses - end - data_out_py --> script_asses - data_out_r --> script_asses -end - - -subgraph nner_py[Execute Python pipeline for MO dataset] - data_shape_uk[(shape London)] - data_shape_gl[(shape Glasgow)] - data_shape_ma[(shape Manchester)] - - - script_BC_wrapper[three_cities_debiasing.sh] - param1["metric (eg tasmax)"]:::var - param2["runs (eg 05)"]:::var - param3["BC method (eg quantile_mapping)"]:::var - param4[city]:::var - - script_BC_wrapper --> param1 - param1 --> param2 - param2 --> param3 - param3 --> param4 - param4 -- for loop --> script_preproc - - %% Looping connections - param4 -.-> param3 - param3 -.-> param2 - param2 -.-> param1 -end - -subgraph nner_jupyter[Jupyter Notebook for Guidance] - direction BT - data_shape_gl2[(shape Glasgow)] - data_cpm2[(UKCP2.2_Monthly)] - - param5["tasmax"]:::var - param6["quantile_mapping"]:::var - param7[Glasgow]:::var - - script_BC_wrapper --> param1 - param5 --> script_preproc - param6 --> script_preproc - param7 --> script_preproc - - 
data_cpm2 --> script_load - data_shape_gl2 --> script_crop_city -end - -%% between block connections -%% input preproc 1 -data_hads_raw --> script_resampling -data_cpm_raw --> script_reproject -%% input cropping -data_cpm_rep --> script_crop_city - -data_hads_res --> script_crop_city -data_shape_uk --> script_crop_city -data_shape_ma --> script_crop_city -data_shape_gl --> script_crop_city - -%% input preproc2 -data_cropped_cpm --> script_preproc -data_cropped_hads --> script_preproc - -param4 -- for loop --> script_bc_py - - -%% class styles -classDef python fill:#4CAF50; -classDef r fill:#FF5722; -classDef bash fill:#f9f -classDef var fill:none,stroke:#0f0; -classDef dashed stroke-dasharray: 5 5; - -class script_crop_city,script_crop_uk,function_bc_r,script_r,script_df_uk,function_bc,function_crop_bc,fn_crop_cpm,fn_crop_hads,fn_bc,script_bc_r r; -class script_load,script_resampling,script_preproc,script_bc_py,script_py python; -class script_reproject,script_BC_wrapper,script_bash bash; -class inner_py dashed; -class inner_r dashed; -``` - diff --git a/docs/workflow.qmd b/docs/pipeline.qmd similarity index 100% rename from docs/workflow.qmd rename to docs/pipeline.qmd From c88d05a526248a53f2c579cb5570b815f125b8a8 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Mon, 30 Oct 2023 20:32:29 +0000 Subject: [PATCH 123/146] =?UTF-8?q?fix(doc):=20remove=20=F0=9F=9A=A7=20cha?= =?UTF-8?q?r=20from=20`doc`=20headers=20for=20nav=20Git/Quarto=20compatibi?= =?UTF-8?q?lity?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b74a6a5e..60d1bbac 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Welcome to `clim-recal`, a specialized resource designed to tackle systematic er For a quick start on bias correction, refer to our [comprehensive analysis pipeline 
guide](https://github.com/alan-turing-institute/clim-recal/blob/documentation/docs/pipeline_guidance.md). -## Documentation [🚧](#documentation) +## Documentation We are in the process of developing comprehensive documentation for our code base to supplement the guidance provided in this and other `README.md` files. In the interim, there is documentation available in the following forms: @@ -136,6 +136,6 @@ and installing with: $ pip install -r requirements.txt ``` -## Future plans [🚧](#future-plans) +## Future plans - **More BC Methods**: Further bias correction of UKCP18 products. *This is planned for a future release and is not available yet.* - **Pipeline for adding new methods**: *This is planned for a future release and is not available yet.* From 61fb458e536816cee675caafe13defd1d77b2cfe Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 1 Nov 2023 03:53:51 +0000 Subject: [PATCH 124/146] feat(doc): add docs for running tests in `python/README.md` --- python/README.md | 79 +++++++++++++++++++++++++++++------------------- 1 file changed, 48 insertions(+), 31 deletions(-) diff --git a/python/README.md b/python/README.md index 0ad0f261..98068676 100644 --- a/python/README.md +++ b/python/README.md @@ -2,18 +2,18 @@ ## Resampling HADs grid from 1 km to 2.2 km -The raw [UKHAD observational data](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) +The raw [UKHAD observational data](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km) needs to be resampled to the same grid of the [RCP8.5 data](https://data.ceda.ac.uk/badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/). This can be done with the `python/resampling/resampling_hads.py` script, which takes an input grid and uses to resample the data using [linear interpolation](https://docs.xarray.dev/en/stable/generated/xarray.DataArray.interp.html) (for simplicity have added a default grid in `data/rcp85_land-cpm_uk_2.2km_grid.nc`). 
-The script runs under the conda environment created on the main [README.md](../README.md) and has several options that can be understood by +The script runs under the conda environment created on the main [README.md](../README.md) and has several options that can be understood by running the following from the `resampling` directory: -``` -python resampling_hads.py --help +```console +$ python resampling_hads.py --help usage: resampling_hads.py [-h] --input INPUT [--output OUTPUT] [--grid_data GRID_DATA] @@ -30,16 +30,16 @@ The script expects the data to be files of `.nc` extension, have dimensions name of the [CEDA Archive](https://data.ceda.ac.uk/badc/ukmo-hadobs/data/insitu/MOHC/HadOBS/HadUK-Grid/v1.1.0.0/1km). Furthermore, the layer/variable to be resampled must be on the beginning of the name of the file before any `_` (e.g for `tasmax` is `tasmax_hadukgrid_uk_1km_day_19930501-19930531.nc`). -### Quickstart +### Quickstart For example, to run the resampling on `tasmax` daily data found in the fileshare (https://dymestorage1.file.core.windows.net/vmfileshare). -``` -cd python/resampling -python resampling_hads.py --input /Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day --output +```console +$ cd python/resampling +$ python resampling_hads.py --input /Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day --output ``` -as there is not a `--grid_data` flag, the default file described above is used. +as there is not a `--grid_data` flag, the default file described above is used. ## Loading UKCP and HADs data @@ -47,9 +47,9 @@ as there is not a `--grid_data` flag, the default file described above is used. In [python/load_data/data_loader.py] we have written a few functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. Instructions in how to use these functions can be found in [python/notebooks/load_data_python.ipynb](../notebooks/load_data_python.ipynb). 
-## Running debiasing methods +## Running debiasing methods -The code in the [debiasing](debiasing) directory contains scripts that interface with implementations of the debiasing methods +The code in the [debiasing](debiasing) directory contains scripts that interface with implementations of the debiasing methods implemented by different libraries. Note: By March 2023 we have only implemented the [python-cmethods](https://github.com/alan-turing-institute/python-cmethods) library. @@ -57,37 +57,37 @@ Note: By March 2023 we have only implemented the [python-cmethods](https://githu ### The cmethods library -This repository contains two python scripts one for preprocessing/grouping data and one to run -debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) -module written by Benjamin Thomas Schwertfeger's , which has -been modified to function with the dataset used in the clim-recal project. This library has been included as a +This repository contains two python scripts one for preprocessing/grouping data and one to run +debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) +module written by Benjamin Thomas Schwertfeger's , which has +been modified to function with the dataset used in the clim-recal project. This library has been included as a submodule to this project, so you must run the following command to pull the submodules required. -``` -cd debiasing -git submodule update --init --recursive +```console +$ cd debiasing +$ git submodule update --init --recursive ``` - The [preprocess_data.py](debiasing/preprocess_data.py) script allows the user to specify directories from which the modelled (CPM/UKCP) data and observation (HADs) data should be loaded, as well as time periods to use for calibration and validation. 
The script parses the necessary files and combines them into two files for calibration (modelled and observed), and two files for validation (modelled and observed) - with the option to specify multiple validation periods. These can then be used as inputs to `run_cmethods.py`.
-- The [run_cmethods.py](debiasing/run_cmethods.py) script allow us to adjust climate biases in climate data using the python-cmethods library. 
+- The [run_cmethods.py](debiasing/run_cmethods.py) script allows us to adjust climate biases in climate data using the python-cmethods library.
 It takes as input observation data (HADs data) and modelled data (historical CPM/UKCP data) for calibration, as well as observation and modelled data for validation (generated by `preprocess_data.py`).
It calibrates the debiasing method using the calibration period data and debiases the modelled data for the validation period.
The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data.
 
 **Usage**:
 
 The scripts can be run from the command line using the following arguments:
 
-```
-python3 preprocess_data.py --mod --obs --shp --out -v -u -r --calib_dates --valid_dates
+```console
+$ python3 preprocess_data.py --mod --obs --shp --out -v -u -r --calib_dates --valid_dates
 
-python3 run_cmethods.py --input_data_folder --out -m -v -g -k -n -p
+$ python3 run_cmethods.py --input_data_folder --out -m -v -g -k -n -p
 ```
 
 For more details on the scripts and options you can run:
-```
-python3 preprocess_data.py --help
+```console
+$ python3 preprocess_data.py --help
 ```
 and
-```
+```console
 python3 run_cmethods.py --help
 ```
 **Main Functionality**:
@@ -104,7 +104,7 @@ The `preprocess_data.py` script performs the following steps:
 - Aligns and saves the datasets to the output directory.
 
 The `run_cmethods.py` script performs the following steps:
-  - Reads the input calibration and validation datasets from the input directory. 
+  - Reads the input calibration and validation datasets from the input directory.
   - Applies the specified debiasing method, combining the calibration and validation data.
   - Saves the resulting output to the specified directory.
   - Creates diagnostic figures of the output dataset (time series and time dependent maps) and saves it into the specified directory.
@@ -112,11 +112,28 @@ The `run_cmethods.py` script performs the following steps:
 **Working example**.
 
 Example of how to run the two scripts using data stored in the Azure fileshare, running the scripts locally (uses input data that have been cropped to contain only the city of Glasgow). The two scripts will debias only the `tasmax` variable, run 05 of the CPM, for calibration years 1980-2009 and validation years 2010-2019. It uses the `quantile_delta_mapping` debiasing method:
 
-```
-python3 preprocess_data.py --mod /Volumes/vmfileshare/ClimateData/Cropped/three.cities/CPM/Glasgow/ --obs /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Glasgow/ -v tasmax --out ./preprocessed_data/ --calib_dates 19800101-20091230 --valid_dates 20100101-20191230 --run_number 05
+```console
+$ python3 preprocess_data.py --mod /Volumes/vmfileshare/ClimateData/Cropped/three.cities/CPM/Glasgow/ --obs /Volumes/vmfileshare/ClimateData/Cropped/three.cities/Hads.original360/Glasgow/ -v tasmax --out ./preprocessed_data/ --calib_dates 19800101-20091230 --valid_dates 20100101-20191230 --run_number 05
 
-python3 run_cmethods.py --input_data_folder ./preprocessed_data/ --out ./debiased_data/ --method quantile_delta_mapping --v tasmax -p 4
+$ python3 run_cmethods.py --input_data_folder ./preprocessed_data/ --out ./debiased_data/ --method quantile_delta_mapping --v tasmax -p 4
 ```
- 
- 
+## Testing
+
+Testing for `python` components uses `pytest`, with configuration specified in `clim-recal/python/.pytest.ini`.
To run tests, ensure the `environment.yml` environment is installed and activated, then run `pytest` from within the `clim-recal/python` checkout directory. Note: tests are skipped unless run on a specific linux server wth data mounted to a specific path. + +```console +$ cd clim-recal +$ conda activate clim-recal +$ cd python +$ pytest +...sss........sss..... [100%] +============================== short test summary info =============================== +SKIPPED [1] :2: requires linux server mount paths +SKIPPED [1] :2: requires linux server mount paths +SKIPPED [1] :2: requires linux server mount paths +SKIPPED [1] :2: requires linux server mount paths +SKIPPED [1] :2: requires linux server mount paths +SKIPPED [1] :2: requires linux server mount paths +16 passed, 6 skipped, 4 deselected in 0.26s +``` From 8264b06abbd5011b6afc937e70a7706d0db4e17e Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 8 Nov 2023 00:15:18 +0000 Subject: [PATCH 125/146] fix(test): fix test dependency and filtering on server --- environment.yml | 48 +++++++++++++++++++++++----------------------- python/conftest.py | 4 ++-- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/environment.yml b/environment.yml index ea0424ff..5f3da1f8 100644 --- a/environment.yml +++ b/environment.yml @@ -5,58 +5,58 @@ channels: - conda-forge dependencies: - bzip2=1.0.8 - - c-ares=1.18.1 - - ca-certificates=2022.9.24 - - hdf5=1.12.2 - - krb5=1.19.3 + - c-ares=1.19.1 + - ca-certificates=2023.08.22 + - hdf5=1.12.1 + - krb5=1.20.1 - libcurl=7.86.0 - libcxx=14.0.6 - - libedit=3.1.20191231 - - libev=4.33 - - libffi=3.4.2 + - libedit=3.1.20221030 + - libev=5.33 + - libffi=3.4.4 - libgfortran5=11.3.0 - - libnghttp2=1.47.0 + - libnghttp2=1.57.0 - libsqlite=3.39.4 - libssh2=1.10.0 - libzlib=1.2.13 - - llvm-openmp=14.0.4 - - ncurses=6.3 - - openssl=3.0.5 - - pip=22.3 - - python=3.11.0 + - llvm-openmp=14.0.6 + - ncurses=6.4 + - openssl=3.0.12 + - pip=23.3 + - python=3.11.5 - r-quarto=1.3 - - 
readline=8.1.2 - - setuptools=65.5.0 + - readline=8.2 + - setuptools=68.0.0 - tk=8.6.12 - - tzdata=2022e - - wheel=0.37.1 - - xz=5.2.6 + - tzdata=2023c + - wheel=0.41.2 + - xz=5.4.2 - pip: - affine==2.3.1 - attrs==22.1.0 - - certifi==2022.9.24 + - certifi==2023.07.22 - cftime==1.6.2 - click==8.1.3 - click-plugins==1.1.1 - cligj==0.7.2 - geopandas==0.12.2 - ipython==8.15.0 + - matplotlib==3.6.1 - netcdf4==1.6.1 - numpy==1.23.4 - packaging==21.3 - pandas==1.5.1 + - pillow==9.4.0 + - quartodoc==0.6.3 - pyparsing==3.0.9 - pyproj==3.4.0 - python-dateutil==2.8.2 - - pytz==2022.5 - pytest-sugar==0.9.7 + - pytz==2022.5 - rasterio==1.3.3 - rioxarray==0.12.3 + - scipy==1.10.0 - six==1.16.0 - snuggs==1.4.7 - tqdm==4.64.1 - xarray==2023.2.0 - - matplotlib==3.6.1 - - scipy==1.10.0 - - pillow==9.4.0 - - quartodoc==0.6.3 diff --git a/python/conftest.py b/python/conftest.py index 3026cfa4..c5183d64 100644 --- a/python/conftest.py +++ b/python/conftest.py @@ -6,6 +6,7 @@ import pytest +TEST_PATH = Path().absolute() PYTHON_DIR_NAME: Final[Path] = Path("python") MODULE_NAMES: Final[tuple[PathLike, ...]] = ( "debiasing", @@ -24,8 +25,7 @@ def is_platform_darwin() -> bool: @pytest.fixture(autouse=True) def ensure_python_path() -> None: """Return path for test running.""" - path: Path = Path() - if not set(Path(p) for p in MODULE_NAMES) <= set(path.iterdir()): + if not set(MODULE_NAMES) <= set(path.name for path in TEST_PATH.iterdir()): raise ValueError( f"'clim-recal' python tests must be " f"run in 'clim-recal/{PYTHON_DIR_NAME}', " From 77045f886fd971d2ddd95d25fccb80774df2cd7a Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 9 Nov 2023 13:27:58 +0000 Subject: [PATCH 126/146] feat(ci): add `compose/local` and `compose.yml` --- _quarto.yml | 6 +- compose.yml | 23 +++++++ compose/local/Dockerfile | 124 ++++++++++++++++++++++++++++++++++ compose/local/docs/Dockerfile | 58 ++++++++++++++++ environment.yml | 4 +- 5 files changed, 210 insertions(+), 5 deletions(-) create mode 
100644 compose.yml create mode 100644 compose/local/Dockerfile create mode 100644 compose/local/docs/Dockerfile diff --git a/_quarto.yml b/_quarto.yml index cf086ca0..fd47a895 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -3,7 +3,7 @@ project: type: website output-dir: _site preview: - port: 8888 + port: 8080 browser: false render: - "README.md" @@ -13,7 +13,7 @@ project: - "docs/reference" - "docs/pipeline.qmd" # - "R/comparing-r-and-python/HADs-reprojection/WIP-Comparing-HADs-grids.md" - - "python/README.md" + # - "python/README.md" toc: True number-sections: True @@ -44,7 +44,7 @@ website: # contents: # - href: "R/comparing-r-and-python/HADs-reprojection/WIP-Comparing-HADs-grids.md" # text: "WIP Comparing HADs grids" - + - section: "python" contents: - "python/README.md" diff --git a/compose.yml b/compose.yml new file mode 100644 index 00000000..a0b93115 --- /dev/null +++ b/compose.yml @@ -0,0 +1,23 @@ +version: "3" + +# volumes: +# mount + +services: + + jupyter: + build: + context: . + dockerfile: ./compose/local/Dockerfile + target: clim-recal-base + ports: + - "8888:8888" + + docs: + build: + context: . 
+ dockerfile: ./compose/local/docs/Dockerfile + # target: clim-recal-docs + ports: + - "8080:80" + # command: quarto preview --port 8080 diff --git a/compose/local/Dockerfile b/compose/local/Dockerfile new file mode 100644 index 00000000..6f7ea83e --- /dev/null +++ b/compose/local/Dockerfile @@ -0,0 +1,124 @@ +FROM jupyter/r-notebook as clim-recal-base + +# This is derived from documentation available at +# https://jupyter-docker-stacks.readthedocs.io/en/latest/ + +# Example run command: + + +# This will require a mount of `vmfileshare` from `dymestorage1` +# On macOS this can be solved via: +# open smb://dymestorage1.file.core.windows.net/vmfileshare +# Using user: dymestorage1 +# And password specified via: +# https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys + +# Example run: +# cd clim-recal +# docker build --tag 'clim-recal' . +# docker run -it -p 8888:8888 -v /Volumes/vmfileshare:/home/jovyan/work/vmfileshare clim-recal + +ENV LC_ALL en_GB.UTF-8 +ENV LANG en_GB.UTF-8 +ENV LANGUAGE en_GB.UTF-8 +ENV SHELL /bin/bash +ARG env_name=clim-recal + +# `py_ver` is not currently used below and is specified in `environment.yaml` +# here as reminder and clarity if future change needed. 
+ARG py_ver=3.11 + +# The local_data_path is an absolute local path to ClimateData on the machine hosting running `docker` +ARG local_data_path=/Volumes/vmfileshare/ClimateData + +# The local_data_path is an absolute path to mount ClimateData within `docker` +ARG docker_data_path=/Volumes/vmfileshare/ClimateData + + +USER root + +# Generate the locales +RUN echo "en_GB.UTF-8 UTF-8" > /etc/locale.gen && locale-gen + + +RUN apt-get update && apt-get -y install gdal-bin python3-gdal libgdal-dev build-essential +RUN conda update -n base -c conda-forge conda + +# Ensure correct GDAL paths +RUN export CPLUS_INCLUDE_PATH=/usr/include/gdal && export C_INCLUDE_PATH=/usr/include/gdal + +# Create custom environment from environment.yml +# Add ipykernel for environment build as necessary +COPY --chown=${NB_UID}:${NB_GID} environment.yml /tmp/ +RUN mamba env create -p "${CONDA_DIR}/envs/${env_name}" -f /tmp/environment.yml && \ + mamba clean --all -f -y + +# Any additional `pip` installs can be added by using the following line +# Using `mamba` is highly recommended though +RUN "${CONDA_DIR}/envs/${env_name}/bin/pip" install --no-cache-dir \ + 'ipykernel' + +# Create kernel from custome `environment.yml` +RUN "${CONDA_DIR}/envs/${env_name}/bin/python" -m ipykernel install --user --name="${env_name}" && \ + fix-permissions "${CONDA_DIR}" && \ + fix-permissions "/home/${NB_USER}" + +# Copy the rest of the clim-recal code to volume +COPY --chown=${NB_UID}:${NB_GID} . . 
+ + +# Add custom activate script to reflect environment +USER root +RUN activate_custom_env_script=/usr/local/bin/before-notebook.d/activate_custom_env.sh && \ + echo "#!/bin/bash" > ${activate_custom_env_script} && \ + echo "eval \"$(conda shell.bash activate "${env_name}")\"" >> ${activate_custom_env_script} && \ + chmod +x ${activate_custom_env_script} + +# Switch to default jupyter user +USER ${NB_UID} + +# Set this for default `conda activate` configuration +# You can comment this line to keep the default environment in Terminal +RUN echo "conda activate ${env_name}" >> "${HOME}/.bashrc" + +RUN cd python/debiasing && git submodule update --init --recursive + + +# This will use the default launch as discussed in +# https://jupyter-docker-stacks.readthedocs.io/en/latest/using/running.html + +FROM clim-recal-base as clim-recal-docs + +ARG port=4200 + +USER root + +RUN sudo apt-get -y install \ + pandoc \ + pandoc-citeproc \ + curl \ + gdebi-core \ + r-base \ + r-base-dev \ + r-cran-littler \ + && rm -rf /var/lib/apt/lists/* + +# RUN Rscript -e \ +# "install.packages(c('shiny', 'jsonlite', 'renv', 'knitr', 'rmarkdown', 'quarto'), repos='https://cran.rstudio.com')" +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ + curl -LO https://quarto.org/download/latest/quarto-linux-${arch}.deb && \ + gdebi --non-interactive quarto-linux-${arch}.deb + +# wget "http://...../${arch}.deb" + +# RUN curl -LO https://quarto.org/download/latest/quarto-linux-amd64.deb +# RUN gdebi --non-interactive quarto-linux-${arch}.deb + +EXPOSE ${port}:${port} + +USER ${NB_UID} + +# CMD ["quarto", "preview", "--port={port}"] +CMD ["bash"] diff --git a/compose/local/docs/Dockerfile b/compose/local/docs/Dockerfile new file mode 100644 index 00000000..652d883d --- /dev/null +++ b/compose/local/docs/Dockerfile @@ -0,0 +1,58 @@ +# FROM rocker/rstudio +# +# ENV LC_ALL en_GB.UTF-8 +# ENV LANG en_GB.UTF-8 +# ENV LANGUAGE en_GB.UTF-8 +# 
ENV SHELL /bin/bash +# ARG env_name=clim-recal +# +# ARG py_ver=3.11 +# +# # The local_data_path is an absolute local path to ClimateData on the machine hosting running `docker` +# ARG local_data_path=/Volumes/vmfileshare/ClimateData +# +# # The local_data_path is an absolute path to mount ClimateData within `docker` +# ARG docker_data_path=/Volumes/vmfileshare/ClimateData +# +# +# USER root +# +# # Generate the locales +# RUN echo "en_GB.UTF-8 UTF-8" > /etc/locale.gen && locale-gen +# +# +# RUN apt-get update && apt-get -y install gdal-bin python3-gdal libgdal-dev build-essential +# +# # Ensure correct GDAL paths +# RUN export CPLUS_INCLUDE_PATH=/usr/include/gdal && export C_INCLUDE_PATH=/usr/include/gdal +# +# # Switch to default jupyter user +# USER ${NB_UID} +# +# ENTRYPOINT ['bash'] +ARG QUARTO_VERSION="1.3.450" + +FROM ghcr.io/quarto-dev/quarto:${QUARTO_VERSION} AS builder + +ARG PORT=8080 + +# ARG RIG_VERSION="latest" +# ARG R_VERSION="release" +# COPY install-rig.sh /tmp/install-rig.sh +# RUN bash /tmp/install-rig.sh "${RIG_VERSION}" +# RUN rig add ${R_VERSION} # && Rscript -e 'pak::pkg_install("renv")' + +# COPY mywebsite /app +# WORKDIR /app +# RUN Rscript -e "renv::restore()" +# RUN quarto render . +COPY . 
/app +WORKDIR /app +# RUN Rscript -e "renv::restore()" +EXPOSE ${PORT}:${PORT} + +# RUN quarto preview --port ${PORT}:${PORT} +RUN quarto render + +FROM httpd:alpine +COPY --from=builder /app/_site/ /usr/local/apache2/htdocs/ diff --git a/environment.yml b/environment.yml index 5f3da1f8..107c8bb9 100644 --- a/environment.yml +++ b/environment.yml @@ -12,7 +12,7 @@ dependencies: - libcurl=7.86.0 - libcxx=14.0.6 - libedit=3.1.20221030 - - libev=5.33 + - libev=4.33 - libffi=3.4.4 - libgfortran5=11.3.0 - libnghttp2=1.57.0 @@ -24,7 +24,7 @@ dependencies: - openssl=3.0.12 - pip=23.3 - python=3.11.5 - - r-quarto=1.3 + # - r-quarto=1.3 - readline=8.2 - setuptools=68.0.0 - tk=8.6.12 From 480f492ce9b78097a161645e71a31f36fb9ca776 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Thu, 9 Nov 2023 15:18:08 +0000 Subject: [PATCH 127/146] feat(ci): add `volumes` `mnt` config to replicate server config locally --- compose.yml | 18 +++++++--- compose/local/Dockerfile | 74 ++++++++++++++++++++-------------------- 2 files changed, 51 insertions(+), 41 deletions(-) diff --git a/compose.yml b/compose.yml index a0b93115..7b8af07c 100644 --- a/compose.yml +++ b/compose.yml @@ -1,7 +1,4 @@ -version: "3" - -# volumes: -# mount +version: "3.8" services: @@ -12,6 +9,11 @@ services: target: clim-recal-base ports: - "8888:8888" + volumes: + - climate_data:/mnt/vmfileshare + # - type: bind + # source: /Volumes/vmfileshare + # target: /mnt/vmfileshare docs: build: @@ -21,3 +23,11 @@ services: ports: - "8080:80" # command: quarto preview --port 8080 + +volumes: + climate_data: + driver: local + driver_opts: + type: none + device: /Volumes/vmfileshare + o: bind diff --git a/compose/local/Dockerfile b/compose/local/Dockerfile index 6f7ea83e..bb64af11 100644 --- a/compose/local/Dockerfile +++ b/compose/local/Dockerfile @@ -29,10 +29,10 @@ ARG env_name=clim-recal ARG py_ver=3.11 # The local_data_path is an absolute local path to ClimateData on the machine hosting running `docker` -ARG 
local_data_path=/Volumes/vmfileshare/ClimateData +ARG HOST_DATA_PATH=/Volumes/vmfileshare # The local_data_path is an absolute path to mount ClimateData within `docker` -ARG docker_data_path=/Volumes/vmfileshare/ClimateData +ARG DOCKER_DATA_PATH=/mnt/vmfileshare USER root @@ -87,38 +87,38 @@ RUN cd python/debiasing && git submodule update --init --recursive # This will use the default launch as discussed in # https://jupyter-docker-stacks.readthedocs.io/en/latest/using/running.html -FROM clim-recal-base as clim-recal-docs - -ARG port=4200 - -USER root - -RUN sudo apt-get -y install \ - pandoc \ - pandoc-citeproc \ - curl \ - gdebi-core \ - r-base \ - r-base-dev \ - r-cran-littler \ - && rm -rf /var/lib/apt/lists/* - -# RUN Rscript -e \ -# "install.packages(c('shiny', 'jsonlite', 'renv', 'knitr', 'rmarkdown', 'quarto'), repos='https://cran.rstudio.com')" -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ - curl -LO https://quarto.org/download/latest/quarto-linux-${arch}.deb && \ - gdebi --non-interactive quarto-linux-${arch}.deb - -# wget "http://...../${arch}.deb" - -# RUN curl -LO https://quarto.org/download/latest/quarto-linux-amd64.deb -# RUN gdebi --non-interactive quarto-linux-${arch}.deb - -EXPOSE ${port}:${port} - -USER ${NB_UID} - -# CMD ["quarto", "preview", "--port={port}"] -CMD ["bash"] +# FROM clim-recal-base as clim-recal-docs +# +# ARG port=4200 +# +# USER root +# +# RUN sudo apt-get -y install \ +# pandoc \ +# pandoc-citeproc \ +# curl \ +# gdebi-core \ +# r-base \ +# r-base-dev \ +# r-cran-littler \ +# && rm -rf /var/lib/apt/lists/* +# +# # RUN Rscript -e \ +# # "install.packages(c('shiny', 'jsonlite', 'renv', 'knitr', 'rmarkdown', 'quarto'), repos='https://cran.rstudio.com')" +# SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# +# RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ +# curl -LO https://quarto.org/download/latest/quarto-linux-${arch}.deb && \ +# gdebi 
--non-interactive quarto-linux-${arch}.deb +# +# # wget "http://...../${arch}.deb" +# +# # RUN curl -LO https://quarto.org/download/latest/quarto-linux-amd64.deb +# # RUN gdebi --non-interactive quarto-linux-${arch}.deb +# +# EXPOSE ${port}:${port} +# +# USER ${NB_UID} +# +# # CMD ["quarto", "preview", "--port={port}"] +# CMD ["bash"] From 7fb8a4d33ebf129750023c4f5bd2e673ca0b9680 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 14 Nov 2023 19:46:51 +0100 Subject: [PATCH 128/146] feat(test): refactor to run tests in local vs server mount --- compose.yml | 16 ++-- compose/local/Dockerfile | 2 +- environment.yml | 12 +-- python/conftest.py | 12 ++- python/tests/test_debiasing.py | 157 +++++++++++++++++---------------- python/utils.py | 37 ++++---- 6 files changed, 127 insertions(+), 109 deletions(-) diff --git a/compose.yml b/compose.yml index 7b8af07c..826350a7 100644 --- a/compose.yml +++ b/compose.yml @@ -10,7 +10,7 @@ services: ports: - "8888:8888" volumes: - - climate_data:/mnt/vmfileshare + # - climate_data:/mnt/vmfileshare # - type: bind # source: /Volumes/vmfileshare # target: /mnt/vmfileshare @@ -24,10 +24,10 @@ services: - "8080:80" # command: quarto preview --port 8080 -volumes: - climate_data: - driver: local - driver_opts: - type: none - device: /Volumes/vmfileshare - o: bind +# volumes: +# climate_data: +# driver: local +# driver_opts: +# type: none +# device: /Volumes/vmfileshare +# o: bind diff --git a/compose/local/Dockerfile b/compose/local/Dockerfile index bb64af11..4f35f24b 100644 --- a/compose/local/Dockerfile +++ b/compose/local/Dockerfile @@ -64,7 +64,7 @@ RUN "${CONDA_DIR}/envs/${env_name}/bin/python" -m ipykernel install --user --nam fix-permissions "/home/${NB_USER}" # Copy the rest of the clim-recal code to volume -COPY --chown=${NB_UID}:${NB_GID} . . +ADD --chown=${NB_UID}:${NB_GID} . . 
# Add custom activate script to reflect environment diff --git a/environment.yml b/environment.yml index 107c8bb9..b92d3779 100644 --- a/environment.yml +++ b/environment.yml @@ -7,23 +7,16 @@ dependencies: - bzip2=1.0.8 - c-ares=1.19.1 - ca-certificates=2023.08.22 - - hdf5=1.12.1 - - krb5=1.20.1 - - libcurl=7.86.0 - - libcxx=14.0.6 - libedit=3.1.20221030 - libev=4.33 - - libffi=3.4.4 - libgfortran5=11.3.0 - - libnghttp2=1.57.0 - - libsqlite=3.39.4 - libssh2=1.10.0 - libzlib=1.2.13 - llvm-openmp=14.0.6 - ncurses=6.4 - - openssl=3.0.12 - pip=23.3 - - python=3.11.5 + - python=3.9 + - gdal=3.3.2 # - r-quarto=1.3 - readline=8.2 - setuptools=68.0.0 @@ -34,6 +27,7 @@ dependencies: - pip: - affine==2.3.1 - attrs==22.1.0 + - backports.strenum==1.2.8 - certifi==2023.07.22 - cftime==1.6.2 - click==8.1.3 diff --git a/python/conftest.py b/python/conftest.py index c5183d64..dfcd2ff1 100644 --- a/python/conftest.py +++ b/python/conftest.py @@ -6,6 +6,7 @@ import pytest +CLIMATE_DATA_MOUNT_PATH = Path("/mnt/vmfileshare/ClimateData") TEST_PATH = Path().absolute() PYTHON_DIR_NAME: Final[Path] = Path("python") MODULE_NAMES: Final[tuple[PathLike, ...]] = ( @@ -22,6 +23,12 @@ def is_platform_darwin() -> bool: return sys.platform.startswith("darwin") +@pytest.fixture() +def is_climate_data_mounted() -> bool: + """Check if `sys.platform` is `Darwin` (macOS).""" + return CLIMATE_DATA_MOUNT_PATH.exists() + + @pytest.fixture(autouse=True) def ensure_python_path() -> None: """Return path for test running.""" @@ -34,8 +41,11 @@ def ensure_python_path() -> None: @pytest.fixture(autouse=True) -def doctest_auto_fixtures(doctest_namespace: dict, is_platform_darwin: bool) -> None: +def doctest_auto_fixtures( + doctest_namespace: dict, is_platform_darwin: bool, is_climate_data_mounted: bool +) -> None: """Elements to add to default `doctest` namespace.""" doctest_namespace["is_platform_darwin"] = is_platform_darwin + doctest_namespace["is_climate_data_mounted"] = is_climate_data_mounted 
doctest_namespace["pprint"] = pprint doctest_namespace["pytest"] = pytest diff --git a/python/tests/test_debiasing.py b/python/tests/test_debiasing.py index 930e3307..5cf8c1d4 100644 --- a/python/tests/test_debiasing.py +++ b/python/tests/test_debiasing.py @@ -3,12 +3,13 @@ """ import subprocess +import sys from dataclasses import dataclass from datetime import date -from enum import StrEnum, auto +from enum import auto from os import PathLike, chdir from pathlib import Path -from typing import Final, Generator +from typing import Final, Generator, Optional, Union import pytest from utils import ( @@ -16,11 +17,15 @@ DATE_FORMAT_STR, DateType, date_range_to_str, - date_to_str, iter_to_tuple_strs, path_iterdir, ) +if sys.version_info >= (3, 11): + from enum import StrEnum +else: + from backports.strenum import StrEnum + DATA_PATH_DEFAULT: Final[Path] = Path( "/mnt/vmfileshare/ClimateData/Cropped/three.cities/" ) @@ -207,9 +212,9 @@ def calib_dates_to_str( self, start_date: DateType, end_date: DateType, - in_format_str: str | None = None, - out_format_str: str | None = None, - split_str: str | None = None, + in_format_str: Optional[str] = None, + out_format_str: Optional[str] = None, + split_str: Optional[str] = None, ) -> str: """Return date range as `str` from `calib_date_start` to `calib_date_end`. @@ -233,9 +238,9 @@ def valid_dates_to_str( self, start_date: DateType, end_date: DateType, - in_format_str: str | None = None, - out_format_str: str | None = None, - split_str: str | None = None, + in_format_str: Optional[str] = None, + out_format_str: Optional[str] = None, + split_str: Optional[str] = None, ) -> str: """Return date range as `str` from `valid_date_start` to `valid_date_end`. 
@@ -259,9 +264,9 @@ def _date_range_to_str( self, start_date: DateType, end_date: DateType, - in_format_str: str | None = None, - out_format_str: str | None = None, - split_str: str | None = None, + in_format_str: Optional[str] = None, + out_format_str: Optional[str] = None, + split_str: Optional[str] = None, ) -> str: """Return date range as `str` from `calib_date_start` to `calib_date_end`. @@ -286,12 +291,12 @@ def _date_range_to_str( split_str=split_str, ) - def mod_path(self, city: str | None = None) -> Path: + def mod_path(self, city: Optional[str] = None) -> Path: """Return city estimates path. Example ------- - >>> if is_platform_darwin: + >>> if not is_climate_data_mounted: ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> config.mod_path() @@ -302,12 +307,12 @@ def mod_path(self, city: str | None = None) -> Path: city = city if city else self.city return self.data_path / self.mod_folder / city - def obs_path(self, city: str | None = None) -> Path: + def obs_path(self, city: Optional[str] = None) -> Path: """Return city observations path. Example ------- - >>> if is_platform_darwin: + >>> if not is_climate_data_mounted: ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> config.obs_path() @@ -320,15 +325,15 @@ def obs_path(self, city: str | None = None) -> Path: def preprocess_out_path( self, - city: str | None = None, - run: str | None = None, - variable: str | None = None, + city: Optional[str] = None, + run: Optional[str] = None, + variable: Optional[str] = None, ) -> Path: """Return path to save results. Example ------- - >>> if is_platform_darwin: + >>> if not is_climate_data_mounted: ... 
pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> config.preprocess_out_path() @@ -345,8 +350,8 @@ def preprocess_out_path( def cmethods_out_path( self, - city: str | None = None, - run: str | None = None, + city: Optional[str] = None, + run: Optional[str] = None, ) -> Path: """Return path to save cmethods results. @@ -376,14 +381,14 @@ def run_prefix_tuple(self) -> tuple[str, ...]: def to_cli_preprocess_tuple( self, - variable: str | None = None, - run: str | None = None, - city: str | None = None, - calib_start: DateType | None = None, - calib_end: DateType | None = None, - valid_start: DateType | None = None, - valid_end: DateType | None = None, - ) -> tuple[str | PathLike, ...]: + variable: Optional[str] = None, + run: Optional[str] = None, + city: Optional[str] = None, + calib_start: Optional[DateType] = None, + calib_end: Optional[DateType] = None, + valid_start: Optional[DateType] = None, + valid_end: Optional[DateType] = None, + ) -> tuple[Union[str, PathLike], ...]: """Generate a `tuple` of `str` for a command line command. Note @@ -434,13 +439,13 @@ def to_cli_preprocess_tuple( def to_cli_preprocess_tuple_strs( self, - variable: str | None = None, - run: str | None = None, - city: str | None = None, - calib_start: DateType | None = None, - calib_end: DateType | None = None, - valid_start: DateType | None = None, - valid_end: DateType | None = None, + variable: Optional[str] = None, + run: Optional[str] = None, + city: Optional[str] = None, + calib_start: Optional[DateType] = None, + calib_end: Optional[DateType] = None, + valid_start: Optional[DateType] = None, + valid_end: Optional[DateType] = None, ) -> tuple[str, ...]: """Generate a command line interface `str` `tuple` a test example. 
@@ -464,13 +469,13 @@ def to_cli_preprocess_tuple_strs( def to_cli_preprocess_str( self, - variable: str | None = None, - run: str | None = None, - city: str | None = None, - calib_start: DateType | None = None, - calib_end: DateType | None = None, - valid_start: DateType | None = None, - valid_end: DateType | None = None, + variable: Optional[str] = None, + run: Optional[str] = None, + city: Optional[str] = None, + calib_start: Optional[DateType] = None, + calib_end: Optional[DateType] = None, + valid_start: Optional[DateType] = None, + valid_end: Optional[DateType] = None, ) -> str: """Generate a command line interface str as a test example. @@ -494,12 +499,14 @@ def to_cli_preprocess_str( ) ) - def yield_mod_folder(self, city: str | None = None) -> Generator[Path, None, None]: + def yield_mod_folder( + self, city: Optional[str] = None + ) -> Generator[Path, None, None]: """`Iterable` of all `Path`s in `self.mod_folder`. Example ------- - >>> if is_platform_darwin: + >>> if not is_climate_data_mounted: ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> len(tuple(config.yield_mod_folder())) == MOD_FOLDER_FILES_COUNT_CORRECT @@ -508,12 +515,14 @@ def yield_mod_folder(self, city: str | None = None) -> Generator[Path, None, Non city = city if city else self.city return path_iterdir(self.obs_path(city=city)) - def yield_obs_folder(self, city: str | None = None) -> Generator[Path, None, None]: + def yield_obs_folder( + self, city: Optional[str] = None + ) -> Generator[Path, None, None]: """`Iterable` of all `Path`s in `self.obs_folder`. Example ------- - >>> if is_platform_darwin: + >>> if not is_climate_data_mounted: ... 
pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> len(tuple(config.yield_obs_folder())) == OBS_FOLDER_FILES_COUNT_CORRECT @@ -524,15 +533,15 @@ def yield_obs_folder(self, city: str | None = None) -> Generator[Path, None, Non def yield_preprocess_out_folder( self, - city: str | None = None, - run: str | None = None, - variable: str | None = None, + city: Optional[str] = None, + run: Optional[str] = None, + variable: Optional[str] = None, ) -> Generator[Path, None, None]: """`Iterable` of all `Path`s in `self.preprocess_out_folder`. Example ------- - >>> if is_platform_darwin: + >>> if not is_climate_data_mounted: ... pytest.skip('requires linux server mount paths') >>> config: RunConfig = RunConfig() >>> (len(tuple(config.yield_preprocess_out_folder())) == @@ -553,14 +562,14 @@ def command_path(self) -> Path: def to_cli_run_cmethods_tuple( self, - city: str | None = None, - run: str | None = None, - variable: str | None = None, - method: str | None = None, - input_data_path: PathLike | None = None, - cmethods_out_path: PathLike | None = None, - processors: int | None = None, - ) -> tuple[str | PathLike, ...]: + city: Optional[str] = None, + run: Optional[str] = None, + variable: Optional[str] = None, + method: Optional[str] = None, + input_data_path: Optional[PathLike] = None, + cmethods_out_path: Optional[PathLike] = None, + processors: Optional[int] = None, + ) -> tuple[Union[str, PathLike], ...]: """Generate a `tuple` of `str` for a command line command. 
Note @@ -609,13 +618,13 @@ def to_cli_run_cmethods_tuple( def to_cli_run_cmethods_tuple_strs( self, - city: str | None = None, - run: str | None = None, - variable: str | None = None, - method: str | None = None, - input_data_path: PathLike | None = None, - cmethods_out_path: PathLike | None = None, - processors: int | None = None, + city: Optional[str] = None, + run: Optional[str] = None, + variable: Optional[str] = None, + method: Optional[str] = None, + input_data_path: Optional[PathLike] = None, + cmethods_out_path: Optional[PathLike] = None, + processors: Optional[int] = None, ) -> tuple[str, ...]: """Generate a command line interface `str` `tuple` a test example. @@ -639,13 +648,13 @@ def to_cli_run_cmethods_tuple_strs( def to_cli_run_cmethods_str( self, - city: str | None = None, - run: str | None = None, - variable: str | None = None, - method: str | None = None, - input_data_path: PathLike | None = None, - cmethods_out_path: PathLike | None = None, - processors: int | None = None, + city: Optional[str] = None, + run: Optional[str] = None, + variable: Optional[str] = None, + method: Optional[str] = None, + input_data_path: Optional[PathLike] = None, + cmethods_out_path: Optional[PathLike] = None, + processors: Optional[int] = None, ) -> str: """Generate a command line interface str as a test example. diff --git a/python/utils.py b/python/utils.py index 8c6123f4..d1737da2 100644 --- a/python/utils.py +++ b/python/utils.py @@ -2,17 +2,20 @@ Utility functions. 
""" -from typing import Final, Any, Iterable, Generator from datetime import date, datetime from pathlib import Path +from typing import Any, Final, Generator, Iterable, Optional, Union +DateType = Union[date, str] +DATE_FORMAT_STR: Final[str] = "%Y%m%d" +DATE_FORMAT_SPLIT_STR: Final[str] = "-" -DateType = date | str -DATE_FORMAT_STR: Final[str] = '%Y%m%d' -DATE_FORMAT_SPLIT_STR: Final[str] = '-' - -def date_to_str(date_obj: DateType, in_format_str: str = DATE_FORMAT_STR, out_format_str: str = DATE_FORMAT_STR) -> str: +def date_to_str( + date_obj: DateType, + in_format_str: str = DATE_FORMAT_STR, + out_format_str: str = DATE_FORMAT_STR, +) -> str: """Return a `str` in `date_format_str` of `date_obj`. Example @@ -31,7 +34,7 @@ def date_to_str(date_obj: DateType, in_format_str: str = DATE_FORMAT_STR, out_fo def date_range_to_str( start_date: DateType, - end_date: DateType, + end_date: DateType, split_str: str = DATE_FORMAT_SPLIT_STR, in_format_str: str = DATE_FORMAT_STR, out_format_str: str = DATE_FORMAT_STR, @@ -47,13 +50,13 @@ def date_range_to_str( '20100101-20100330' """ - start_date = date_to_str(start_date, - in_format_str=in_format_str, - out_format_str=out_format_str) - end_date = date_to_str(end_date, - in_format_str=in_format_str, - out_format_str=out_format_str) - return f'{start_date}{split_str}{end_date}' + start_date = date_to_str( + start_date, in_format_str=in_format_str, out_format_str=out_format_str + ) + end_date = date_to_str( + end_date, in_format_str=in_format_str, out_format_str=out_format_str + ) + return f"{start_date}{split_str}{end_date}" def iter_to_tuple_strs(iter_var: Iterable[Any]) -> tuple[str, ...]: @@ -69,7 +72,9 @@ def iter_to_tuple_strs(iter_var: Iterable[Any]) -> tuple[str, ...]: return tuple(str(obj) for obj in iter_var) -def path_iterdir(path: Path, strict: bool = False) -> Generator[Path | None, None, None]: +def path_iterdir( + path: Path, strict: bool = False +) -> Generator[Optional[Path], None, None]: """Return an 
`Generator` after ensuring `path` exists. Examples @@ -102,4 +107,4 @@ def path_iterdir(path: Path, strict: bool = False) -> Generator[Path | None, Non if strict: raise error else: - return + return From d2eb0756c5a2b621c293cad095d106a56b0f9570 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Tue, 14 Nov 2023 19:50:34 +0100 Subject: [PATCH 129/146] fix(ci): comment out `volumes` portion of `clim-recal` in `compose.yml` --- compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compose.yml b/compose.yml index 826350a7..a53218b9 100644 --- a/compose.yml +++ b/compose.yml @@ -9,7 +9,7 @@ services: target: clim-recal-base ports: - "8888:8888" - volumes: + # volumes: # - climate_data:/mnt/vmfileshare # - type: bind # source: /Volumes/vmfileshare From b4e4f0505c339198ea2a5bc9f3799e33c62c7f1b Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 12:54:21 +0000 Subject: [PATCH 130/146] feat(doc): add `python/utils` to `quarto` rendering --- _quarto.yml | 9 ++++++--- python/utils.py | 13 +++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/_quarto.yml b/_quarto.yml index fd47a895..492071f0 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -50,8 +50,10 @@ website: - "python/README.md" - section: "Reference" contents: - - href: "docs/reference/ceda_ftp_download.download_ftp.qmd" + - href: "docs/reference/data_download.ceda_ftp_download.download_ftp.qmd" text: "download_ftp" + - href: "docs/reference/utils.qmd" + text: "Utilities" quartodoc: @@ -61,7 +63,7 @@ quartodoc: # write sidebar data to this file sidebar: _sidebar.yml - source_dir: ./python/data_download/ + source_dir: ./python/ sections: - title: Data Source Management @@ -69,4 +71,5 @@ quartodoc: contents: # the functions being documented in the package. # you can refer to anything: class methods, modules, etc.. 
- - ceda_ftp_download.download_ftp + - data_download.ceda_ftp_download.download_ftp + - utils diff --git a/python/utils.py b/python/utils.py index d1737da2..9bba22ae 100644 --- a/python/utils.py +++ b/python/utils.py @@ -1,7 +1,4 @@ -""" -Utility functions. - -""" +"""Utility functions.""" from datetime import date, datetime from pathlib import Path from typing import Any, Final, Generator, Iterable, Optional, Union @@ -18,8 +15,8 @@ def date_to_str( ) -> str: """Return a `str` in `date_format_str` of `date_obj`. - Example - ------- + Examples + -------- >>> date_to_str('20100101') '20100101' @@ -41,8 +38,8 @@ def date_range_to_str( ) -> str: """Take `start_date` and `end_date` `str` or `date` instances and return a range `str`. - Example - ------- + Examples + -------- >>> date_range_to_str('20100101', '20100330') '20100101-20100330' From 18002181b81aa62654ed19ab7268f87f166495d3 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 13:55:35 +0000 Subject: [PATCH 131/146] feat(ci): apply `linting` `workflow` to whole repo excluding `R` --- .github/workflows/ci.yaml | 126 +++++++++ .pre-commit-config.yaml | 1 + Dockerfile | 6 +- README.md | 26 +- bash/README.md | 1 - bash/reproject_one.sh | 2 +- bash/ubuntu_install.sh | 6 +- compose/local/docs/Dockerfile | 5 +- docs/assets/pipeline.mermaid | 24 +- docs/pipeline.qmd | 24 +- docs/pipeline_guidance.md | 15 +- internal_docs/INTERNAL.md | 10 +- notebooks/load_data_python.ipynb | 58 ++-- python/README.md | 2 +- python/data_download/ceda_ftp_download.py | 122 ++++++--- python/debiasing/preprocess_data.py | 314 +++++++++++++++------- python/debiasing/run_cmethods.py | 225 ++++++++++------ python/load_data/data_loader.py | 150 +++++++---- python/resampling/check_calendar.py | 80 +++--- python/resampling/resampling_hads.py | 116 +++++--- setup-instructions.md | 10 +- 21 files changed, 897 insertions(+), 426 deletions(-) create mode 100644 .github/workflows/ci.yaml diff --git a/.github/workflows/ci.yaml 
b/.github/workflows/ci.yaml new file mode 100644 index 00000000..e0e1309b --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,126 @@ +name: CI + +# Enable Buildkit and let compose use it to speed up image building +env: + DOCKER_BUILDKIT: 1 + COMPOSE_DOCKER_CLI_BUILD: 1 + +on: + pull_request: + branches: ['main', 'docker-config'] + paths-ignore: ['docs/**'] + + push: + branches: ['main', 'docker-config'] + paths-ignore: ['docs/**'] + +concurrency: + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: write + +jobs: + linter: + runs-on: ubuntu-latest + defaults: + run: + working-directory: config/ + steps: + - name: Checkout Code Repository + uses: actions/checkout@main + + - name: Set up Python + uses: actions/setup-python@main + with: + python-version: '3.9' + + - name: Run pre-commit + uses: pre-commit/action@main + + - name: Update pre-commit + uses: pre-commit-ci/lite-action@v1.0.1 + if: always() + + # With no caching at all the entire ci process takes 4m 30s to complete! 
+ pytest: + runs-on: ubuntu-latest + + steps: + - name: Checkout Code Repository + uses: actions/checkout@main + + - name: Build, Test and Save Test Coverage + run: | + docker compose build + docker compose up --detach + docker compose run django pytest -p no:sugar + export JUPYTER_ID=$(docker compose -f local.yml ps -q jupyter) + echo "jupyter_id=$JUPYTER_ID" >> $GITHUB_ENV + echo "jupyter_id=$JUPYTER_ID" + + - name: Check accessing saved jupyter_id + run: | + echo ${{ env.jupyter_id }} + + - name: Copy test coverage results + run: | + docker cp ${{ env.jupyter_id }}:app/docs/assets/coverage.svg docs/assets/ + + - name: Archive coverage svg + uses: actions/upload-artifact@v3 + with: + name: coverage-badge + path: docs/assets/coverage.svg + + - name: Tear down the Stack + run: docker compose -f local.yml down + + docs: + needs: [linter, pytest] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-python@v4 + with: + python-version: '3.9' + + - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV + name: Update cache_id + + - name: Build quarto + run: | + docker compose build + # docker cp ${{ env.jupyter_id }}:app/docs/assets/coverage.svg docs/assets/ + docker compose up --detach + docker cp $(docker compose ps -q docs):/usr/local/apache2/htdocs/ . 
+ + - name: Download coverage svg + uses: actions/download-artifact@v3 + with: + name: coverage-badge + path: assets/ + # - name: Build docker quarto + # run: | + # docker compose build + # docker cp ${{ env.jupyter_id }}:app/docs/assets/coverage.svg docs/assets/ + # docker compose up --detach + # docker cp /app/_site/ /usr/local/apache2/htdocs/ + # - name: Apply mkdocs cache + # uses: actions/cache@v3 + # with: + # key: mkdocs-material-${{ env.cache_id }} + # path: .cache + # restore-keys: | + # mkdocs-material- + # + # - name: Install doc dependencies via poetry + # run: | + # pip install poetry + # poetry install --with docs + # + # - name: Build docs with gh-deploy --force + # run: | + # poetry run mkdocs gh-deploy --force diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6d9f367b..4723a0bd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,4 @@ +exclude: "R" repos: - repo: https://github.com/psf/black rev: "23.9.1" diff --git a/Dockerfile b/Dockerfile index 09610747..323670d0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ FROM jupyter/r-notebook # This will require a mount of `vmfileshare` from `dymestorage1` # On macOS this can be solved via: # open smb://dymestorage1.file.core.windows.net/vmfileshare -# Using user: dymestorage1 +# Using user: dymestorage1 # And password specified via: # https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys @@ -26,7 +26,7 @@ ARG env_name=clim-recal # `py_ver` is not currently used below and is specified in `environment.yaml` # here as reminder and clarity if future change needed. 
-ARG py_ver=3.11 +ARG py_ver=3.11 # The local_data_path is an absolute local path to ClimateData on the machine hosting running `docker` ARG local_data_path=/Volumes/vmfileshare/ClimateData @@ -74,7 +74,7 @@ RUN activate_custom_env_script=/usr/local/bin/before-notebook.d/activate_custom_ echo "eval \"$(conda shell.bash activate "${env_name}")\"" >> ${activate_custom_env_script} && \ chmod +x ${activate_custom_env_script} -# Switch to default jupyter user +# Switch to default jupyter user USER ${NB_UID} # Set this for default `conda activate` configuration diff --git a/README.md b/README.md index 60d1bbac..71e866f8 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# Welcome to the `clim-recal` repository! +# Welcome to the `clim-recal` repository! -Welcome to `clim-recal`, a specialized resource designed to tackle systematic errors or biases in **Regional Climate Models (RCMs)**. As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. +Welcome to `clim-recal`, a specialized resource designed to tackle systematic errors or biases in **Regional Climate Models (RCMs)**. As researchers, policy-makers, and various stakeholders explore publicly available RCMs, they need to consider the challenge of biases that can affect the accurate representation of climate change signals. `clim-recal` provides both a **broad review** of available bias correction methods as well as **software**, **practical tutorials** and **guidance** that helps users apply these methods methods to various datasets. @@ -69,15 +69,15 @@ This will display all available options for the script, including their purposes ### Quarto -We also hope to provide comprehensive documentation via [`quarto`](https://quarto.org/). 
This is a work in progress, but if you would like to render documentation locally you can do so via `quarto` and [`conda`](https://docs.conda.io): +We also hope to provide comprehensive documentation via [`quarto`](https://quarto.org/). This is a work in progress, but if you would like to render documentation locally you can do so via `quarto` and [`conda`](https://docs.conda.io): 1. Ensure you have a [local installation](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) of `conda` or [`anaconda`](https://www.anaconda.com/download) . 1. Checkout a copy of our `git` repository -1. Create a local `conda` `environment` via our `environment.yml` file. This should install `quarto`. +1. Create a local `conda` `environment` via our `environment.yml` file. This should install `quarto`. 1. Activate that environment 1. Run `quarto preview`. -Below are example `bash` shell commands to render locally after installing `conda`: +Below are example `bash` shell commands to render locally after installing `conda`: ```sh $ git clone https://github.com/alan-turing-institute/clim-recal @@ -95,27 +95,27 @@ We appreciate your patience and encourage you to check back for updates on our o The UK Climate Projections 2018 (UKCP18) dataset offers insights into the potential climate changes in the UK. UKCP18 is an advancement of the UKCP09 projections and delivers the latest evaluations of the UK's possible climate alterations in land and marine regions throughout the 21st century. This crucial information aids in future Climate Change Risk Assessments and supports the UK’s adaptation to climate change challenges and opportunities as per the National Adaptation Programme. ### HADS -[HadUK-Grid](https://www.metoffice.gov.uk/research/climate/maps-and-data/data/haduk-grid/haduk-grid) is a comprehensive collection of climate data for the UK, compiled from various land surface observations across the country. 
This data is organized into a uniform grid to ensure consistent coverage throughout the UK at up to 1km x 1km resolution. The dataset, spanning from 1836 to the present, includes a variety of climate variables such as air temperature, precipitation, sunshine, and wind speed, available on daily, monthly, seasonal, and annual timescales. +[HadUK-Grid](https://www.metoffice.gov.uk/research/climate/maps-and-data/data/haduk-grid/haduk-grid) is a comprehensive collection of climate data for the UK, compiled from various land surface observations across the country. This data is organized into a uniform grid to ensure consistent coverage throughout the UK at up to 1km x 1km resolution. The dataset, spanning from 1836 to the present, includes a variety of climate variables such as air temperature, precipitation, sunshine, and wind speed, available on daily, monthly, seasonal, and annual timescales. ## Why Bias Correction? Regional climate models contain systematic errors, or biases in their output [1]. Biases arise in RCMs for a number of reasons, such as the assumptions in the general circulation models (GCMs), and in the downscaling process from GCM to RCM [1,2]. -Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. +Researchers, policy-makers and other stakeholders wishing to use publicly available RCMs need to consider a range of "bias correction” methods (sometimes referred to as "bias adjustment" or "recalibration"). 
Bias correction methods offer a means of adjusting the outputs of RCM in a manner that might better reflect future climate change signals whilst preserving the natural and internal variability of climate [2]. -Part of the `clim-recal` project is to review several bias correction methods. This work is ongoing and you can find our initial [taxonomy here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. +Part of the `clim-recal` project is to review several bias correction methods. This work is ongoing and you can find our initial [taxonomy here](https://docs.google.com/spreadsheets/d/18LIc8omSMTzOWM60aFNv1EZUl1qQN_DG8HFy1_0NdWk/edit?usp=sharing). When we've completed our literature review, it will be submitted for publication in an open peer-reviewed journal. -Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the `clim-recal` repository. +Our work is however, just like climate data, intended to be dynamic, and we are in the process of setting up a pipeline for researchers creating new methods of bias correction to be able to submit their methods for inclusion on in the `clim-recal` repository. - 1. Senatore et al., 2022, https://doi.org/10.1016/j.ejrh.2022.101120 - 2. Ayar et al., 2021, https://doi.org/10.1038/s41598-021-82715-1 + 1. Senatore et al., 2022, https://doi.org/10.1016/j.ejrh.2022.101120 + 2. Ayar et al., 2021, https://doi.org/10.1038/s41598-021-82715-1 ## Contributing -We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. 
If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! +We hope to bring together the extensive work already undertaken by the climate science community and showcase a range of libraries and techniques. If you have suggestions on the repository, or would like to include a new method (see below) or library, please raise an issue or [get in touch](mailto:clim-recal@turing.ac.uk)! -### Adding to the conda environment file +### Adding to the conda environment file To use `R` in `anaconda` you may need to specify the `conda-forge` channel: diff --git a/bash/README.md b/bash/README.md index 28982ef2..7ca3a146 100644 --- a/bash/README.md +++ b/bash/README.md @@ -1,4 +1,3 @@ # Bash Here you find scripts to reproject the UKCP datasets to the British National Grid coordinate system. - diff --git a/bash/reproject_one.sh b/bash/reproject_one.sh index bda5ca49..152136a8 100755 --- a/bash/reproject_one.sh +++ b/bash/reproject_one.sh @@ -2,4 +2,4 @@ f=$1 # The first argument is the file to reproject fn=${f/Raw/Reprojected_infill} # Replace Raw with Reprojected_infill in the filename folder=`dirname $fn` # Get the folder name mkdir -p $folder # Create the folder if it doesn't exist -gdalwarp -t_srs 'EPSG:27700' -tr 2200 2200 -r near -overwrite $f "${fn%.nc}.tif" # Reproject the file \ No newline at end of file +gdalwarp -t_srs 'EPSG:27700' -tr 2200 2200 -r near -overwrite $f "${fn%.nc}.tif" # Reproject the file diff --git a/bash/ubuntu_install.sh b/bash/ubuntu_install.sh index b3f8c647..04b6b320 100755 --- a/bash/ubuntu_install.sh +++ b/bash/ubuntu_install.sh @@ -5,7 +5,7 @@ CHECKOUT_PATH=$HOME/code/clim-recal ANACONDA_INSTALL_FOLDER=$HOME/code/anaconda-install ANACONDA_INSTALL_SCRIPT_FILE_NAME=Anaconda3-2023.07-2-Linux-x86_64.sh -ANACONDA_INSTALL_URL=https://repo.anaconda.com/archive/$ANACONDA_INSTALL_SCRIPT_FILE_NAME 
+ANACONDA_INSTALL_URL=https://repo.anaconda.com/archive/$ANACONDA_INSTALL_SCRIPT_FILE_NAME VMFILESHARE_PATH=/mnt/vmfileshare AZURE_STORAGE_NAME=dymestorage1 @@ -37,7 +37,7 @@ function set_azure_credentials { function mount_vmfileshare { echo $VMFILESHARE_PATH is needed to run default model configurations - echo + echo while true; do read -p "Would you like to mount vmfileshare to $VMFILESHARE_PATH (needed for running models)? " yn @@ -54,7 +54,7 @@ function mount_vmfileshare { read -s -p "Access key for $AZURE_STORAGE_NAME: " PASSWORD echo - + if [ ! -d "/etc/smbcredentials" ]; then echo Createing /etc/smbcredentials sudo mkdir /etc/smbcredentials diff --git a/compose/local/docs/Dockerfile b/compose/local/docs/Dockerfile index 652d883d..93c5d218 100644 --- a/compose/local/docs/Dockerfile +++ b/compose/local/docs/Dockerfile @@ -35,6 +35,7 @@ ARG QUARTO_VERSION="1.3.450" FROM ghcr.io/quarto-dev/quarto:${QUARTO_VERSION} AS builder ARG PORT=8080 +ARG py_ver=3.9 # ARG RIG_VERSION="latest" # ARG R_VERSION="release" @@ -46,12 +47,14 @@ ARG PORT=8080 # WORKDIR /app # RUN Rscript -e "renv::restore()" # RUN quarto render . -COPY . /app +ADD . 
/app WORKDIR /app # RUN Rscript -e "renv::restore()" EXPOSE ${PORT}:${PORT} # RUN quarto preview --port ${PORT}:${PORT} +RUN apt-get update && apt-get install -y python${py_ver} python3-pip +RUN pip3 install quartodoc && quartodoc build RUN quarto render FROM httpd:alpine diff --git a/docs/assets/pipeline.mermaid b/docs/assets/pipeline.mermaid index 1cd65000..fa450610 100644 --- a/docs/assets/pipeline.mermaid +++ b/docs/assets/pipeline.mermaid @@ -10,8 +10,8 @@ subgraph Legend var[parameter]:::var end -%%% INPUT DATA -subgraph CEDA +%%% INPUT DATA +subgraph CEDA data_hads[(HADS)] data_cpm[(UKCP2.2)] data_hads --> script_load @@ -31,14 +31,14 @@ subgraph Core pipeline subgraph Preprocessing %% resampling & reprojecting script_resampling([resampling_hads.py]) - script_reproject([reproject_all.sh]) + script_reproject([reproject_all.sh]) data_hads_res[Processed/HadsUKgrid/../*.nc] data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] script_resampling --> data_hads_res script_reproject --> data_cpm_rep - + %% cropping script_crop_city([Cropping_Rasters_to_three_cities.R]) @@ -67,7 +67,7 @@ subgraph Core pipeline data_outdir --> data_out_groundtruth_h data_outdir --> data_out_groundtruth_p end - + subgraph bc[Bias Correction] script_bc_py([run_cmethods.py]) script_bc_r([run_cmethods.R]) @@ -88,7 +88,7 @@ subgraph Core pipeline script_bc_py-->data_out_py function_bc_r-->data_out_r end - + subgraph Assessment script_asses[tbc] data_out_groundtruth_p --> script_asses @@ -102,8 +102,8 @@ subgraph nner_py[Execute Python pipeline for MO dataset] data_shape_uk[(shape London)] data_shape_gl[(shape Glasgow)] data_shape_ma[(shape Manchester)] - - + + script_BC_wrapper[three_cities_debiasing.sh] param1["metric (eg tasmax)"]:::var param2["runs (eg 05)"]:::var @@ -124,9 +124,9 @@ end subgraph nner_jupyter[Jupyter Notebook for Guidance] direction BT - data_shape_gl2[(shape Glasgow)] + data_shape_gl2[(shape Glasgow)] data_cpm2[(UKCP2.2_Monthly)] - + param5["tasmax"]:::var 
param6["quantile_mapping"]:::var param7[Glasgow]:::var @@ -146,7 +146,7 @@ data_hads_raw --> script_resampling data_cpm_raw --> script_reproject %% input cropping data_cpm_rep --> script_crop_city - + data_hads_res --> script_crop_city data_shape_uk --> script_crop_city data_shape_ma --> script_crop_city @@ -164,7 +164,7 @@ classDef python fill:#4CAF50; classDef r fill:#FF5722; classDef bash fill:#f9f classDef var fill:none,stroke:#0f0; -classDef dashed stroke-dasharray: 5 5; +classDef dashed stroke-dasharray: 5 5; class script_crop_city,script_crop_uk,function_bc_r,script_r,script_df_uk,function_bc,function_crop_bc,fn_crop_cpm,fn_crop_hads,fn_bc,script_bc_r r; class script_load,script_resampling,script_preproc,script_bc_py,script_py python; diff --git a/docs/pipeline.qmd b/docs/pipeline.qmd index 437343b1..4ac0bd8c 100644 --- a/docs/pipeline.qmd +++ b/docs/pipeline.qmd @@ -27,8 +27,8 @@ subgraph Legend var[parameter]:::var end -%%% INPUT DATA -subgraph CEDA +%%% INPUT DATA +subgraph CEDA data_hads[(HADS)] data_cpm[(UKCP2.2)] data_hads --> script_load @@ -48,14 +48,14 @@ subgraph Core pipeline subgraph Preprocessing %% resampling & reprojecting script_resampling([resampling_hads.py]) - script_reproject([reproject_all.sh]) + script_reproject([reproject_all.sh]) data_hads_res[Processed/HadsUKgrid/../*.nc] data_cpm_rep[Reprojected/UKCP2.2/../*.tiff] script_resampling --> data_hads_res script_reproject --> data_cpm_rep - + %% cropping script_crop_city([Cropping_Rasters_to_three_cities.R]) @@ -84,7 +84,7 @@ subgraph Core pipeline data_outdir --> data_out_groundtruth_h data_outdir --> data_out_groundtruth_p end - + subgraph bc[Bias Correction] script_bc_py([run_cmethods.py]) script_bc_r([run_cmethods.R]) @@ -105,7 +105,7 @@ subgraph Core pipeline script_bc_py-->data_out_py function_bc_r-->data_out_r end - + subgraph Assessment script_asses[tbc] data_out_groundtruth_p --> script_asses @@ -119,8 +119,8 @@ subgraph nner_py[Execute Python pipeline for MO dataset] 
data_shape_uk[(shape London)] data_shape_gl[(shape Glasgow)] data_shape_ma[(shape Manchester)] - - + + script_BC_wrapper[three_cities_debiasing.sh] param1["metric (eg tasmax)"]:::var param2["runs (eg 05)"]:::var @@ -141,9 +141,9 @@ end subgraph nner_jupyter[Jupyter Notebook for Guidance] direction BT - data_shape_gl2[(shape Glasgow)] + data_shape_gl2[(shape Glasgow)] data_cpm2[(UKCP2.2_Monthly)] - + param5["tasmax"]:::var param6["quantile_mapping"]:::var param7[Glasgow]:::var @@ -163,7 +163,7 @@ data_hads_raw --> script_resampling data_cpm_raw --> script_reproject %% input cropping data_cpm_rep --> script_crop_city - + data_hads_res --> script_crop_city data_shape_uk --> script_crop_city data_shape_ma --> script_crop_city @@ -181,7 +181,7 @@ classDef python fill:#4CAF50; classDef r fill:#FF5722; classDef bash fill:#f9f classDef var fill:none,stroke:#0f0; -classDef dashed stroke-dasharray: 5 5; +classDef dashed stroke-dasharray: 5 5; class script_crop_city,script_crop_uk,function_bc_r,script_r,script_df_uk,function_bc,function_crop_bc,fn_crop_cpm,fn_crop_hads,fn_bc,script_bc_r r; class script_load,script_resampling,script_preproc,script_bc_py,script_py python; diff --git a/docs/pipeline_guidance.md b/docs/pipeline_guidance.md index 25e85c9d..afea7f2e 100644 --- a/docs/pipeline_guidance.md +++ b/docs/pipeline_guidance.md @@ -36,7 +36,7 @@ R --version #### Setting up your python environment -For your python environment, we provide an Anaconda environment file for ease-of-use. +For your python environment, we provide an Anaconda environment file for ease-of-use. 
``` conda env create -f environment.yml ``` @@ -56,8 +56,8 @@ parallel --version #### The cmethods library -This repository contains a python script used to run debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) module written by Benjamin Thomas Schwertfeger's , which has -been modified to function with the dataset used in the clim-recal project. This library has been included as a +This repository contains a python script used to run debiasing in climate data using a fork of the [original python-cmethods](https://github.com/btschwertfeger/python-cmethods) module written by Benjamin Thomas Schwertfeger's , which has +been modified to function with the dataset used in the clim-recal project. This library has been included as a submodule to this project, so you must run the following command to pull the submodules required. ``` @@ -100,7 +100,7 @@ The HADs data and the UKCP projections have different resolution and coordinate The first step in our analysis pipeline is to reproject the UKCP datasets to the British National Grid coordinate system. For this purpose, we utilize the Geospatial Data Abstraction Library (GDAL), designed for reading and writing raster and vector geospatial data formats. > **Warning**: -> Note that, to reproduce our exact pipeline, we switch environments here as explained in the requirements. +> Note that, to reproduce our exact pipeline, we switch environments here as explained in the requirements. > ``` > conda activate gdal_env > ``` @@ -127,7 +127,7 @@ python resampling_hads.py --input path_to_reprojected --grid path_to_grid_file - ### Preparing the bias correction and assessment **Spatial cropping** -Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. Therefore, to make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. 
For the purposes of our example pipeline, we've opted for reducing the data to individual city boundaries. To crop you need to adjust the paths in `Cropping_Rasters_to_three_cities.R` script to fit 1your own directory sturcture. The cropping is implemented in the `cpm_read_crop` and `hads_read_crop` functions. +Because the bias correction process is computationally intensive, handling large datasets can be challenging and time-consuming. Therefore, to make the pipeline more manageable and efficient, it is recommended to split the data into smaller subsets. For the purposes of our example pipeline, we've opted for reducing the data to individual city boundaries. To crop you need to adjust the paths in `Cropping_Rasters_to_three_cities.R` script to fit 1your own directory sturcture. The cropping is implemented in the `cpm_read_crop` and `hads_read_crop` functions. ``` Rscript Cropping_Rasters_to_three_cities.R @@ -150,7 +150,7 @@ The preprocess_data.py script also aligns the calendars of the historical simula Note: By March 2023 we have only implemented the [python-cmethods](https://github.com/alan-turing-institute/python-cmethods) library. -The [run_cmethods.py](../debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), +The [run_cmethods.py](../debiasing/run_cmethods.py) allow us to adjusts climate biases in climate data using the python-cmethods library. It takes as input observation data (HADs data), control data (historical UKCP data), and scenario data (future UKCP data), and applies a correction method to the scenario data. The resulting output is saved as a `.nc` to a specified directory. The script will also produce a time-series and a map plot of the debiased data. 
To run this you need to replace `path_to_validation_data` with the output directories of the previous step and specify `path_to_corrected_data` as your output directory for the bias corrected data. You can also specify your preferred `bias_correction_method` (e.g. quantile_delta_mapping). ``` @@ -163,5 +163,4 @@ The run_cmethods.py script loops over the time periods and applies debiasing in - Saves the resulting output to the specified directory. - Creates diagnotic figues of the output dataset (time series and time dependent maps) and saves it into the specified directory. -For each 10 year time period it will produce an `.nc` output file with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. - \ No newline at end of file +For each 10 year time period it will produce an `.nc` output file with the adjusted data and a time-series plot and a time dependent map plot of the adjusted data. diff --git a/internal_docs/INTERNAL.md b/internal_docs/INTERNAL.md index 44adfee8..423f1c2a 100644 --- a/internal_docs/INTERNAL.md +++ b/internal_docs/INTERNAL.md @@ -19,19 +19,19 @@ The fileshare will be mounted under `/Volumes/vmfileshare/` -Instructions on how the mount in other operating systems can be found in [the azure how-tos](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-linux?tabs=smb311). +Instructions on how the mount in other operating systems can be found in [the azure how-tos](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-linux?tabs=smb311). Alternatively, you can access the Azure Portal, go to the dymestorage1 fileshare and click the "Connect" button to get an automatically generated script. This script can be used from within an Azure VM to mount the drive. ### Pre-downloaded/pre-processed data description -All the data used in this project can be found in the `/Volumes/vmfileshare/ClimateData/` directory. 
+All the data used in this project can be found in the `/Volumes/vmfileshare/ClimateData/` directory. ``` . ├── Debiased # Directory where debiased datasets are stored. │   └── tasmax -├── Processed # Directory where processed climate datasets are stored. +├── Processed # Directory where processed climate datasets are stored. │   ├── CHESS-SCAPE │   ├── HadsUKgrid # Resampled HADs grid. │   └── UKCP2.2_Reproj # Old reprojections (to delete). @@ -39,7 +39,7 @@ All the data used in this project can be found in the `/Volumes/vmfileshare/Clim │   ├── CHESS-SCAPE │   ├── HadsUKgrid │   ├── UKCP2.2 -│   └── ceda_fpt_download.py # script to download data from CEDA database. +│   └── ceda_fpt_download.py # script to download data from CEDA database. ├── Reprojected # Directory where reprojected UKCP datasets are stored. │   └── UKCP2.2 ├── Reprojected_infill # Directory where reprojected UKCP datasets are stored, including the newest infill UKCP2.2 data published in May 2023. @@ -62,4 +62,4 @@ chmod +x ./reproject_one.sh chmod +x ./reproject_all.sh sudo apt-get update sudo apt-get install parallel -``` \ No newline at end of file +``` diff --git a/notebooks/load_data_python.ipynb b/notebooks/load_data_python.ipynb index e1f3a74b..c4e51a58 100644 --- a/notebooks/load_data_python.ipynb +++ b/notebooks/load_data_python.ipynb @@ -15,7 +15,8 @@ "source": [ "# need to be in the clim-recal root director\n", "import os\n", - "os.chdir('../')" + "\n", + "os.chdir(\"../\")" ] }, { @@ -26,7 +27,7 @@ "source": [ "import matplotlib.pyplot as plt\n", "\n", - "#importing the python functions to load data\n", + "# importing the python functions to load data\n", "from python.load_data.data_loader import load_data" ] }, @@ -55,9 +56,9 @@ "outputs": [], "source": [ "# difining variables for loader\n", - "scotland_shapefile = 'data/Scotland/Scotland.bbox.shp'\n", - "date_period = ('1980-01-01', '2000-01-01')\n", - "variable = 'tasmax'" + "scotland_shapefile = 
\"data/Scotland/Scotland.bbox.shp\"\n", + "date_period = (\"1980-01-01\", \"2000-01-01\")\n", + "variable = \"tasmax\"" ] }, { @@ -70,10 +71,17 @@ }, "outputs": [], "source": [ - "input_files = '/Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day'\n", + "input_files = (\n", + " \"/Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day\"\n", + ")\n", "\n", "# data loader\n", - "hads = load_data(input_files, date_range=date_period, variable=variable, shapefile_path=scotland_shapefile)" + "hads = load_data(\n", + " input_files,\n", + " date_range=date_period,\n", + " variable=variable,\n", + " shapefile_path=scotland_shapefile,\n", + ")" ] }, { @@ -108,7 +116,7 @@ } ], "source": [ - "print (hads)" + "print(hads)" ] }, { @@ -138,7 +146,7 @@ } ], "source": [ - "hads['tasmax'].isel(time=1).plot()" + "hads[\"tasmax\"].isel(time=1).plot()" ] }, { @@ -168,9 +176,15 @@ ], "source": [ "# if extension is .tif file we should specify it in the inputs (default is .nc)\n", - "input_files = '/Volumes/vmfileshare/ClimateData/Reprojected/UKCP2.2/tasmax/01/latest'\n", - "ext = 'tif'\n", - "ukcp = load_data(input_files, date_range=date_period, variable=variable, shapefile_path=scotland_shapefile, extension=ext)" + "input_files = \"/Volumes/vmfileshare/ClimateData/Reprojected/UKCP2.2/tasmax/01/latest\"\n", + "ext = \"tif\"\n", + "ukcp = load_data(\n", + " input_files,\n", + " date_range=date_period,\n", + " variable=variable,\n", + " shapefile_path=scotland_shapefile,\n", + " extension=ext,\n", + ")" ] }, { @@ -202,7 +216,7 @@ } ], "source": [ - "ukcp.where(ukcp['tasmax']<1000)['tasmax'].isel(time=1).plot()" + "ukcp.where(ukcp[\"tasmax\"] < 1000)[\"tasmax\"].isel(time=1).plot()" ] }, { @@ -232,13 +246,17 @@ } ], "source": [ - "plt.figure(figsize=(10,5),dpi=216)\n", - "ukcp.where(ukcp['tasmax']<1000)['tasmax'].groupby('time.dayofyear').mean(...).plot(label='$T_{sim,h}$')\n", - 
"hads['tasmax'].groupby('time.dayofyear').mean(...).plot(label='$T_{obs,h}$')\n", - "#simp['tasmax'].groupby('time.dayofyear').mean(...).plot(label='$T_{sim,p}$')\n", + "plt.figure(figsize=(10, 5), dpi=216)\n", + "ukcp.where(ukcp[\"tasmax\"] < 1000)[\"tasmax\"].groupby(\"time.dayofyear\").mean(...).plot(\n", + " label=\"$T_{sim,h}$\"\n", + ")\n", + "hads[\"tasmax\"].groupby(\"time.dayofyear\").mean(...).plot(label=\"$T_{obs,h}$\")\n", + "# simp['tasmax'].groupby('time.dayofyear').mean(...).plot(label='$T_{sim,p}$')\n", "\n", - "plt.title('Historical modeled and obseved temperatures between December 1980 and November 1981')#; and predicted temperatures')\n", - "plt.gca().grid(alpha=.3)\n", + "plt.title(\n", + " \"Historical modeled and obseved temperatures between December 1980 and November 1981\"\n", + ") # ; and predicted temperatures')\n", + "plt.gca().grid(alpha=0.3)\n", "plt.legend();" ] } @@ -264,4 +282,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} diff --git a/python/README.md b/python/README.md index ce898f25..d3d6f62a 100644 --- a/python/README.md +++ b/python/README.md @@ -146,7 +146,7 @@ In the `python` subdirectory you can find code for the different data download, - **Resampling** for the HADsUK datasets from 1km to a 2.2 km grid to match the UKCP re-projected grid. - **Data loaders** functions for loading and concatenating data into a single xarray which can be used for running debiasing methods. - **Debiasing scripts** that interface with implementations of the debiasing (bias correction) methods implemented by different libraries (by March 2023 we have only implemented the python-cmethods library). - + More details in how to use this code can be found in [the python README file](python/README.md) and the environment used in this [environment setup file](setup-instructions.md). 
--> diff --git a/python/data_download/ceda_ftp_download.py b/python/data_download/ceda_ftp_download.py index 845eaebf..5d95b66e 100644 --- a/python/data_download/ceda_ftp_download.py +++ b/python/data_download/ceda_ftp_download.py @@ -1,10 +1,10 @@ #!/usr/bin/env python +import argparse import ftplib import os import random from datetime import datetime from pathlib import Path -import argparse def download_ftp(input, output, username, password, order): @@ -58,7 +58,7 @@ def download_ftp(input, output, username, password, order): for file in filelist: download = True - print('Downloading', file) + print("Downloading", file) current_time = datetime.now().strftime("%H:%M:%S") print("Current Time =", current_time) @@ -77,39 +77,68 @@ def download_ftp(input, output, username, password, order): f.retrbinary("RETR %s" % file, open(file, "wb").write) counter += 1 - print(counter, 'file downloaded out of', len(filelist)) + print(counter, "file downloaded out of", len(filelist)) - print('Finished: ', counter, ' files dowloaded from ', input) + print("Finished: ", counter, " files dowloaded from ", input) # Close FTP connection f.close() if __name__ == "__main__": """ - Script to download CEDA data from the command line. Note you need to have a user account and + Script to download CEDA data from the command line. Note you need to have a user account and provide your username and FTP password. - + """ # Initialize parser parser = argparse.ArgumentParser() # Adding optional argument - parser.add_argument("--input", help="Path where the CEDA data to download is located. This can be a path with" - "or without subdirectories. 
Set to `/badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/`" - " to download all the raw UKCP2.2 climate projection data used in clim-recal.", - required=True, type=str) - parser.add_argument("--output", help="Path to save the downloaded data", required=False, default=".", type=str) - parser.add_argument("--username", help="Username to connect to the CEDA servers", required=True, type=str) - parser.add_argument("--psw", help="FTP password to authenticate to the CEDA servers", required=True, type=str) - parser.add_argument("--reverse", help="Run download in reverse (useful to run downloads in parallel)", - action='store_true') - parser.add_argument("--shuffle", help="Run download in shuffle mode (useful to run downloads in parallel)", - action='store_true') - parser.add_argument("--change_hierarchy", help="Change the output sub-directories' hierarchy to fit the Turing " - "Azure fileshare hierarchy (only applicable to UKCP climate " - "projection data, i.e. when --input is set to " - "`/badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/`).", - action='store_true') + parser.add_argument( + "--input", + help="Path where the CEDA data to download is located. This can be a path with" + "or without subdirectories. 
Set to `/badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/`" + " to download all the raw UKCP2.2 climate projection data used in clim-recal.", + required=True, + type=str, + ) + parser.add_argument( + "--output", + help="Path to save the downloaded data", + required=False, + default=".", + type=str, + ) + parser.add_argument( + "--username", + help="Username to connect to the CEDA servers", + required=True, + type=str, + ) + parser.add_argument( + "--psw", + help="FTP password to authenticate to the CEDA servers", + required=True, + type=str, + ) + parser.add_argument( + "--reverse", + help="Run download in reverse (useful to run downloads in parallel)", + action="store_true", + ) + parser.add_argument( + "--shuffle", + help="Run download in shuffle mode (useful to run downloads in parallel)", + action="store_true", + ) + parser.add_argument( + "--change_hierarchy", + help="Change the output sub-directories' hierarchy to fit the Turing " + "Azure fileshare hierarchy (only applicable to UKCP climate " + "projection data, i.e. when --input is set to " + "`/badc/ukcp18/data/land-cpm/uk/2.2km/rcp85/`).", + action="store_true", + ) # Read arguments from command line args = parser.parse_args() @@ -132,12 +161,43 @@ def download_ftp(input, output, username, password, order): # this calls the download_ftp function multiple times to download all the CEDA UKCP data. 
# It reads them in the hierarchy that CEDA uses and converts them to a different hierarchy in # the destination fileshare (reverting run number and variable name and removing the "day" level) - for n in ["01", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "15"]: - for v in ["clt", "flashrate", "hurs", "huss", "pr", "prsn", "psl", "rls", "rss", "sfcWind", - "snw", "tas", "tasmax", "tasmin", "uas", "vas", "wsgmax10m"]: - download_ftp(os.path.join(args.input, n, v, "day", "latest"), - os.path.join(args.output, v, n, "latest"), - args.username, - args.psw, - order) - + for n in [ + "01", + "04", + "05", + "06", + "07", + "08", + "09", + "10", + "11", + "12", + "13", + "15", + ]: + for v in [ + "clt", + "flashrate", + "hurs", + "huss", + "pr", + "prsn", + "psl", + "rls", + "rss", + "sfcWind", + "snw", + "tas", + "tasmax", + "tasmin", + "uas", + "vas", + "wsgmax10m", + ]: + download_ftp( + os.path.join(args.input, n, v, "day", "latest"), + os.path.join(args.output, v, n, "latest"), + args.username, + args.psw, + order, + ) diff --git a/python/debiasing/preprocess_data.py b/python/debiasing/preprocess_data.py index 869c3e30..fe829f5c 100644 --- a/python/debiasing/preprocess_data.py +++ b/python/debiasing/preprocess_data.py @@ -9,17 +9,18 @@ import os import sys import time -import numpy as np from datetime import datetime from pathlib import Path -sys.path.insert(1, '../load_data') +import numpy as np + +sys.path.insert(1, "../load_data") from data_loader import load_data # * ----- L O G G I N G ----- formatter = logging.Formatter( - fmt='%(asctime)s %(module)s,line: %(lineno)d %(levelname)8s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S' + fmt="%(asctime)s %(module)s,line: %(lineno)d %(levelname)8s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", ) log = logging.getLogger() @@ -29,48 +30,103 @@ logging.getLogger().addHandler(screen_handler) # * ----- I N P U T - H A N D L I N G ----- -parser = argparse.ArgumentParser(description='Pre-process data before bias 
correction.') -parser.add_argument('--mod', '--modelled', dest='mod_fpath', type=str, - help='Path to modelled (CPM) datasets') -parser.add_argument('--obs', '--observed', dest='obs_fpath', type=str, - help='Path to observation (HADs) datasets') -parser.add_argument('--calib_dates', '--calibration_date_range', dest='calibration_date_range', type=str, - help='Start and end dates for calibration (historic CPM/HADs data used to ' - 'calibrate the debiasing model) - in YYYYMMDD-YYYYMMDD format', - default='19801201-19991130') -parser.add_argument('--valid_dates', '--validation_date_range', dest='validation_date_range', type=str, - help='Start and end dates for validation data (CPM data to be debiased using the ' - 'calibrated debiasing model, and HADs data as referece) - multiple date ranges can be passed, ' - 'separated by "_", each in YYYYMMDD-YYYYMMDD format e.g., ' - '"20100101-20191231_20200101-20291231"', - default='20201201-20291130_20301201-20391130') -parser.add_argument('--shp', '--shapefile', dest='shapefile_fpath', type=str, help='Path to shapefile', default=None) -parser.add_argument('--out', '--output', dest='output_fpath', type=str, help='Path to save output files', default='.') -parser.add_argument('-v', '--variable', dest='var', type=str, default='tasmax', help='Variable to adjust') -parser.add_argument('-u', '--unit', dest='unit', type=str, default='°C', help='Unit of the varible') -parser.add_argument('-r', '--run_number', dest='run_number', type=str, default=None, - help='Run number to process (out of 13 runs in the CPM data)') +parser = argparse.ArgumentParser(description="Pre-process data before bias correction.") +parser.add_argument( + "--mod", + "--modelled", + dest="mod_fpath", + type=str, + help="Path to modelled (CPM) datasets", +) +parser.add_argument( + "--obs", + "--observed", + dest="obs_fpath", + type=str, + help="Path to observation (HADs) datasets", +) +parser.add_argument( + "--calib_dates", + "--calibration_date_range", + 
dest="calibration_date_range", + type=str, + help="Start and end dates for calibration (historic CPM/HADs data used to " + "calibrate the debiasing model) - in YYYYMMDD-YYYYMMDD format", + default="19801201-19991130", +) +parser.add_argument( + "--valid_dates", + "--validation_date_range", + dest="validation_date_range", + type=str, + help="Start and end dates for validation data (CPM data to be debiased using the " + "calibrated debiasing model, and HADs data as referece) - multiple date ranges can be passed, " + 'separated by "_", each in YYYYMMDD-YYYYMMDD format e.g., ' + '"20100101-20191231_20200101-20291231"', + default="20201201-20291130_20301201-20391130", +) +parser.add_argument( + "--shp", + "--shapefile", + dest="shapefile_fpath", + type=str, + help="Path to shapefile", + default=None, +) +parser.add_argument( + "--out", + "--output", + dest="output_fpath", + type=str, + help="Path to save output files", + default=".", +) +parser.add_argument( + "-v", + "--variable", + dest="var", + type=str, + default="tasmax", + help="Variable to adjust", +) +parser.add_argument( + "-u", "--unit", dest="unit", type=str, default="°C", help="Unit of the varible" +) +parser.add_argument( + "-r", + "--run_number", + dest="run_number", + type=str, + default=None, + help="Run number to process (out of 13 runs in the CPM data)", +) params = vars(parser.parse_args()) -obs_fpath = params['obs_fpath'] -mod_fpath = params['mod_fpath'] -calibration_date_range = params['calibration_date_range'] -validation_date_range = params['validation_date_range'] -shape_fpath = params['shapefile_fpath'] -out_fpath = params['output_fpath'] -var = params['var'] -unit = params['unit'] -run_number = params['run_number'] - -calib_list = calibration_date_range.split('-') -h_date_period = (datetime.strptime(calib_list[0], '%Y%m%d').strftime('%Y-%m-%d'), - datetime.strptime(calib_list[1], '%Y%m%d').strftime('%Y-%m-%d')) -val_list = validation_date_range.split('_') -future_time_periods = 
[(p.split('-')[0], p.split('-')[1]) for p in val_list] -future_time_periods = [(datetime.strptime(p[0], '%Y%m%d').strftime('%Y-%m-%d'), - datetime.strptime(p[1], '%Y%m%d').strftime('%Y-%m-%d')) - for p in future_time_periods] +obs_fpath = params["obs_fpath"] +mod_fpath = params["mod_fpath"] +calibration_date_range = params["calibration_date_range"] +validation_date_range = params["validation_date_range"] +shape_fpath = params["shapefile_fpath"] +out_fpath = params["output_fpath"] +var = params["var"] +unit = params["unit"] +run_number = params["run_number"] + +calib_list = calibration_date_range.split("-") +h_date_period = ( + datetime.strptime(calib_list[0], "%Y%m%d").strftime("%Y-%m-%d"), + datetime.strptime(calib_list[1], "%Y%m%d").strftime("%Y-%m-%d"), +) +val_list = validation_date_range.split("_") +future_time_periods = [(p.split("-")[0], p.split("-")[1]) for p in val_list] +future_time_periods = [ + ( + datetime.strptime(p[0], "%Y%m%d").strftime("%Y-%m-%d"), + datetime.strptime(p[1], "%Y%m%d").strftime("%Y-%m-%d"), + ) + for p in future_time_periods +] # * ----- ----- -----M A I N ----- ----- ----- @@ -85,80 +141,103 @@ def preprocess_data() -> None: # load modelled data (CPM) for calibration period and place into ds_modc if run_number is not None: - ds_modc = \ - load_data(mod_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, - run_number=run_number, filter_filenames_on_run_number=True, use_pr=use_pr, - shapefile_path=shape_fpath, - extension='tif')[var].rename({"projection_x_coordinate": "lon", - "projection_y_coordinate": "lat"}) + ds_modc = load_data( + mod_fpath, + date_range=h_date_period, + variable=var, + filter_filenames_on_variable=True, + run_number=run_number, + filter_filenames_on_run_number=True, + use_pr=use_pr, + shapefile_path=shape_fpath, + extension="tif", + )[var].rename( + {"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"} + ) else: - ds_modc = \ - load_data(mod_fpath, 
date_range=h_date_period, variable=var, filter_filenames_on_variable=True, - use_pr=use_pr, shapefile_path=shape_fpath, - extension='tif')[var].rename({"projection_x_coordinate": "lon", - "projection_y_coordinate": "lat"}) + ds_modc = load_data( + mod_fpath, + date_range=h_date_period, + variable=var, + filter_filenames_on_variable=True, + use_pr=use_pr, + shapefile_path=shape_fpath, + extension="tif", + )[var].rename( + {"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"} + ) # find file extensions for observation data files_obs_nc = glob.glob(f"{obs_fpath}/*.nc", recursive=True) files_obs_tif = glob.glob(f"{obs_fpath}/*.tif", recursive=True) if len(files_obs_nc) > 0 and len(files_obs_tif) == 0: - ext = 'nc' + ext = "nc" elif len(files_obs_nc) == 0 and len(files_obs_tif) > 0: - ext = 'tif' + ext = "tif" elif len(files_obs_nc) == 0 and len(files_obs_tif) == 0: - raise Exception(f"No observation files found in {obs_fpath} with extensions .nc or .tif") + raise Exception( + f"No observation files found in {obs_fpath} with extensions .nc or .tif" + ) else: - raise Exception(f"A mix of .nc and .tif observation files found in {obs_fpath}, file extension should be the " - f"same for all files in the directory.") + raise Exception( + f"A mix of .nc and .tif observation files found in {obs_fpath}, file extension should be the " + f"same for all files in the directory." 
+ ) # load observation data (HADs) for calibration period and place into ds_obsc - ds_obsc = load_data(obs_fpath, date_range=h_date_period, variable=var, filter_filenames_on_variable=True, - shapefile_path=shape_fpath, extension=ext)[var].rename({"projection_x_coordinate": "lon", - "projection_y_coordinate": "lat"}) - log.info('Calibration data (modelled and observed) loaded.') + ds_obsc = load_data( + obs_fpath, + date_range=h_date_period, + variable=var, + filter_filenames_on_variable=True, + shapefile_path=shape_fpath, + extension=ext, + )[var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) + log.info("Calibration data (modelled and observed) loaded.") # aligning calendars, there might be extra days in the modelled data that need to be dropped - ds_modc = ds_modc.sel(time=ds_obsc.time, method='nearest') + ds_modc = ds_modc.sel(time=ds_obsc.time, method="nearest") if ds_obsc.shape != ds_modc.shape: - raise RuntimeError('Error, observed and modelled calibration data must have same dimensions.') + raise RuntimeError( + "Error, observed and modelled calibration data must have same dimensions." 
+ ) - log.info('Resulting calibration datasets with shape') + log.info("Resulting calibration datasets with shape") log.info(ds_obsc.shape) # masking coordinates where the observed data has no x, y values ds_modc = ds_modc.where(~np.isnan(ds_obsc.isel(time=0))) ds_modc = ds_modc.where(ds_modc.values < 1000) - log.info('Calibration data masked') + log.info("Calibration data masked") - ds_obsc.attrs['unit'] = unit - ds_modc.attrs['unit'] = unit + ds_obsc.attrs["unit"] = unit + ds_modc.attrs["unit"] = unit # write modc to .nc file in output directory - modc_filename = f'modc_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}' - modc_path = os.path.join(out_fpath, f'{modc_filename}.nc') + modc_filename = f"modc_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}" + modc_path = os.path.join(out_fpath, f"{modc_filename}.nc") if not os.path.exists(os.path.dirname(modc_path)): folder_path = Path(os.path.dirname(modc_path)) folder_path.mkdir(parents=True) print(f"Saving modelled (CPM) data for calibration to {modc_path}") ds_modc.to_netcdf(modc_path) - log.info(f'Saved modelled (CPM) data for calibration to {modc_path}') + log.info(f"Saved modelled (CPM) data for calibration to {modc_path}") # write ds_obsc to .nc file in output directory - obsc_filename = f'obsc_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}' - obsc_path = os.path.join(out_fpath, f'{obsc_filename}.nc') + obsc_filename = f"obsc_var-{var}_run-{run_number}_{calib_list[0]}_{calib_list[1]}" + obsc_path = os.path.join(out_fpath, f"{obsc_filename}.nc") if not os.path.exists(os.path.dirname(obsc_path)): folder_path = Path(os.path.dirname(obsc_path)) folder_path.mkdir(parents=True) print(f"Saving observation data (HADs) for calibration to {obsc_path}") ds_obsc.to_netcdf(obsc_path) - log.info(f'Saved observation data (HADs) for calibration period to {obsc_path}') + log.info(f"Saved observation data (HADs) for calibration period to {obsc_path}") # looping over validation time periods for 
f_date_period in future_time_periods: - - log.info(f'Running for {f_date_period} time period') + log.info(f"Running for {f_date_period} time period") # load modelled (CPM) data for validation period and store in ds_modv try: @@ -168,51 +247,74 @@ def preprocess_data() -> None: # load if run_number is not None: - ds_modv = \ - load_data(mod_fpath, date_range=f_date_period, variable=var, run_number=run_number, - filter_filenames_on_run_number=True, use_pr=use_pr, shapefile_path=shape_fpath, - filter_filenames_on_variable=True, extension='tif')[ - var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) + ds_modv = load_data( + mod_fpath, + date_range=f_date_period, + variable=var, + run_number=run_number, + filter_filenames_on_run_number=True, + use_pr=use_pr, + shapefile_path=shape_fpath, + filter_filenames_on_variable=True, + extension="tif", + )[var].rename( + {"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"} + ) else: - ds_modv = \ - load_data(mod_fpath, date_range=f_date_period, variable=var, filter_filenames_on_variable=True, - use_pr=use_pr, shapefile_path=shape_fpath, extension='tif')[ - var].rename({"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"}) + ds_modv = load_data( + mod_fpath, + date_range=f_date_period, + variable=var, + filter_filenames_on_variable=True, + use_pr=use_pr, + shapefile_path=shape_fpath, + extension="tif", + )[var].rename( + {"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"} + ) except Exception as e: - log.info(f'No modelled data available for {f_date_period} time period') + log.info(f"No modelled data available for {f_date_period} time period") continue # load observed (HADs) data for validation period and store in ds_obsv try: - ds_obsv = load_data(obs_fpath, date_range=f_date_period, variable=var, filter_filenames_on_variable=True, - shapefile_path=shape_fpath, extension=ext)[var].rename( - {"projection_x_coordinate": "lon", - "projection_y_coordinate": 
"lat"}) + ds_obsv = load_data( + obs_fpath, + date_range=f_date_period, + variable=var, + filter_filenames_on_variable=True, + shapefile_path=shape_fpath, + extension=ext, + )[var].rename( + {"projection_x_coordinate": "lon", "projection_y_coordinate": "lat"} + ) except Exception as e: - log.info(f'No observed data available for {f_date_period} time period') + log.info(f"No observed data available for {f_date_period} time period") continue # aligning calendars, there might be extra days in the modelled data that need to be dropped - ds_modv = ds_modv.sel(time=ds_obsv.time, method='nearest') + ds_modv = ds_modv.sel(time=ds_obsv.time, method="nearest") if ds_obsv.shape != ds_modv.shape: - raise RuntimeError('Error, observed and modelled validation data must have same dimensions.') + raise RuntimeError( + "Error, observed and modelled validation data must have same dimensions." + ) - log.info('Resulting validation datasets with shape') + log.info("Resulting validation datasets with shape") log.info(ds_obsv.shape) # masking coordinates where the observed data has no x, y values ds_modv = ds_modv.where(~np.isnan(ds_obsv.isel(time=0))) ds_modv = ds_modv.where(ds_modv.values < 1000) - ds_obsv.attrs['unit'] = unit - ds_modv.attrs['unit'] = unit + ds_obsv.attrs["unit"] = unit + ds_modv.attrs["unit"] = unit # write ds_modv and ds_obsv to .nc files in output directory ds_modv_filename = f'modv_var-{var}_run-{run_number}_{f_date_period[0].replace("-","")}_{f_date_period[1].replace("-","")}' ds_obsv_filename = f'obsv_var-{var}_run-{run_number}_{f_date_period[0].replace("-","")}_{f_date_period[1].replace("-","")}' - ds_modv_path = os.path.join(out_fpath, f'{ds_modv_filename}.nc') - ds_obsv_path = os.path.join(out_fpath, f'{ds_obsv_filename}.nc') + ds_modv_path = os.path.join(out_fpath, f"{ds_modv_filename}.nc") + ds_obsv_path = os.path.join(out_fpath, f"{ds_obsv_filename}.nc") if not os.path.exists(os.path.dirname(ds_modv_path)): folder_path = Path(os.path.dirname(ds_modv_path)) 
folder_path.mkdir(parents=True) @@ -221,17 +323,21 @@ def preprocess_data() -> None: folder_path.mkdir(parents=True) print(f"Saving modelled (CPM) data for validation to {ds_modv_path}") ds_modv.to_netcdf(ds_modv_path) - log.info(f'Saved modelled (CPM) data for validation, period {f_date_period} to {ds_modv_path}') + log.info( + f"Saved modelled (CPM) data for validation, period {f_date_period} to {ds_modv_path}" + ) print(f"Saving observed (HADs) data for validation to {ds_obsv_path}") ds_obsv.to_netcdf(ds_obsv_path) - log.info(f'Saved observed (HADs) data for validation, period {f_date_period} to {ds_modv_path}') + log.info( + f"Saved observed (HADs) data for validation, period {f_date_period} to {ds_modv_path}" + ) end = time.time() - log.info(f'total time in seconds: {end - start}') - log.info('Done') + log.info(f"total time in seconds: {end - start}") + log.info("Done") -if __name__ == '__main__': +if __name__ == "__main__": preprocess_data() # * ----- ----- E O F ----- ----- diff --git a/python/debiasing/run_cmethods.py b/python/debiasing/run_cmethods.py index 028a833a..850c3e9b 100755 --- a/python/debiasing/run_cmethods.py +++ b/python/debiasing/run_cmethods.py @@ -8,20 +8,21 @@ import argparse import glob import logging +import os import sys import time -import numpy as np + import matplotlib.pyplot as plt -import os +import numpy as np import xarray as xr -sys.path.insert(1, 'python-cmethods') +sys.path.insert(1, "python-cmethods") from cmethods.CMethods import CMethods # * ----- L O G G I N G ----- formatter = logging.Formatter( - fmt='%(asctime)s %(module)s,line: %(lineno)d %(levelname)8s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S' + fmt="%(asctime)s %(module)s,line: %(lineno)d %(levelname)8s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", ) log = logging.getLogger() @@ -31,35 +32,78 @@ logging.getLogger().addHandler(screen_handler) # * ----- I N P U T - H A N D L I N G ----- -parser = argparse.ArgumentParser(description='Adjust climate data based on 
bias correction algorithms.') -parser.add_argument('--input_data_folder', '--input_data_folder', dest='input_dir', type=str, - help='Directory that contains all data files. NetCDF (.nc) files with names starting with ' - '`modc` and `obsc` should be found in the directory (containing ' - 'modelled calibration data (CPM) and observed calibration data (HADs) respectively), ' - 'as well as at least one file with name ' - 'starting with `modv` (containing modelled validation data (CPM). Calibration data ' - 'are used to calibrate the debiasing methods and validation data are debiased.') -parser.add_argument('--out', '--output', dest='output_fpath', type=str, help='Path to save output files', default='.') -parser.add_argument('-m', '--method', dest='method', type=str, help='Correction method', - default='quantile_delta_mapping') -parser.add_argument('-v', '--variable', dest='var', type=str, default='tas', help='Variable to adjust') -parser.add_argument('-g', '--group', dest='group', type=str, default=None, - help='Value grouping, default: time, (options: time.month, time.dayofyear, time.year') -parser.add_argument('-k', '--kind', dest='kind', type=str, default='+', help='+ or *, default: +') -parser.add_argument('-n', '--nquantiles', dest='n_quantiles', type=int, default=1000, help='Nr. of Quantiles to use') -parser.add_argument('-p', '--processes', dest='p', type=int, default=1, - help='Multiprocessing with n processes, default: 1') +parser = argparse.ArgumentParser( + description="Adjust climate data based on bias correction algorithms." +) +parser.add_argument( + "--input_data_folder", + "--input_data_folder", + dest="input_dir", + type=str, + help="Directory that contains all data files. 
NetCDF (.nc) files with names starting with " + "`modc` and `obsc` should be found in the directory (containing " + "modelled calibration data (CPM) and observed calibration data (HADs) respectively), " + "as well as at least one file with name " + "starting with `modv` (containing modelled validation data (CPM). Calibration data " + "are used to calibrate the debiasing methods and validation data are debiased.", +) +parser.add_argument( + "--out", + "--output", + dest="output_fpath", + type=str, + help="Path to save output files", + default=".", +) +parser.add_argument( + "-m", + "--method", + dest="method", + type=str, + help="Correction method", + default="quantile_delta_mapping", +) +parser.add_argument( + "-v", "--variable", dest="var", type=str, default="tas", help="Variable to adjust" +) +parser.add_argument( + "-g", + "--group", + dest="group", + type=str, + default=None, + help="Value grouping, default: time, (options: time.month, time.dayofyear, time.year", +) +parser.add_argument( + "-k", "--kind", dest="kind", type=str, default="+", help="+ or *, default: +" +) +parser.add_argument( + "-n", + "--nquantiles", + dest="n_quantiles", + type=int, + default=1000, + help="Nr. 
of Quantiles to use", +) +parser.add_argument( + "-p", + "--processes", + dest="p", + type=int, + default=1, + help="Multiprocessing with n processes, default: 1", +) params = vars(parser.parse_args()) -input_dir = params['input_dir'] -out_fpath = params['output_fpath'] +input_dir = params["input_dir"] +out_fpath = params["output_fpath"] -method = params['method'] -var = params['var'] -group = params['group'] -kind = params['kind'] -n_quantiles = params['n_quantiles'] -n_jobs = params['p'] +method = params["method"] +var = params["var"] +group = params["group"] +kind = params["kind"] +n_quantiles = params["n_quantiles"] +n_jobs = params["p"] # * ----- ----- -----M A I N ----- ----- ----- @@ -68,55 +112,71 @@ def run_debiasing() -> None: cm = CMethods() if method not in cm.get_available_methods(): - raise ValueError(f'Unknown method {method}. Available methods: {cm.get_available_methods()}') + raise ValueError( + f"Unknown method {method}. Available methods: {cm.get_available_methods()}" + ) modc_files = glob.glob(f"{input_dir}/modc*.nc") if len(modc_files) == 0: - raise Exception(f"No .nc files with filename starting with modc were " - f"found in the input directory {input_dir}") + raise Exception( + f"No .nc files with filename starting with modc were " + f"found in the input directory {input_dir}" + ) elif len(modc_files) > 1: - raise Exception(f"More than one .nc file with filenames starting with modc were " - f"found in the input directory {input_dir}") + raise Exception( + f"More than one .nc file with filenames starting with modc were " + f"found in the input directory {input_dir}" + ) else: - print('Loading modelled calibration data (CPM) from ', modc_files[0], "...") - with xr.open_dataset(modc_files[0], engine='netcdf4') as ds: + print("Loading modelled calibration data (CPM) from ", modc_files[0], "...") + with xr.open_dataset(modc_files[0], engine="netcdf4") as ds: ds_modc = ds.load()[var] - log.info(f'Modelled calibration data (CPM) loaded with shape 
{ds_modc.shape}.') + log.info(f"Modelled calibration data (CPM) loaded with shape {ds_modc.shape}.") obsc_files = glob.glob(f"{input_dir}/obsc*.nc") if len(obsc_files) == 0: - raise Exception(f"No .nc files with filename starting with obsc were " - f"found in the input directory {input_dir}") + raise Exception( + f"No .nc files with filename starting with obsc were " + f"found in the input directory {input_dir}" + ) elif len(obsc_files) > 1: - raise Exception(f"More than one .nc file with filenames starting with obsc were " - f"found in the input directory {input_dir}") + raise Exception( + f"More than one .nc file with filenames starting with obsc were " + f"found in the input directory {input_dir}" + ) else: - print('Loading observation data for calibration from ', obsc_files[0], "...") - with xr.open_dataset(obsc_files[0], engine='netcdf4') as ds: + print("Loading observation data for calibration from ", obsc_files[0], "...") + with xr.open_dataset(obsc_files[0], engine="netcdf4") as ds: ds_obsc = ds.load()[var] - log.info(f'Observation data for calibration loaded with shape {ds_obsc.shape}.') + log.info(f"Observation data for calibration loaded with shape {ds_obsc.shape}.") if ds_obsc.shape != ds_modc.shape: - raise RuntimeError('Error, observed and modelled calibration data must have same dimensions.') + raise RuntimeError( + "Error, observed and modelled calibration data must have same dimensions." 
+ ) # looping over future time periods for which debiased data need to be generated modv_files = glob.glob(f"{input_dir}/modv*.nc") if len(modv_files) == 0: - raise Exception(f"No .nc files with filename starting with modv were " - f"found in the input directory {input_dir}") + raise Exception( + f"No .nc files with filename starting with modv were " + f"found in the input directory {input_dir}" + ) else: for modv_file in modv_files: - print('Loading modelled data (CPM) for validation from ', modv_file, "...") - with xr.open_dataset(modv_file, engine='netcdf4') as ds: + print("Loading modelled data (CPM) for validation from ", modv_file, "...") + with xr.open_dataset(modv_file, engine="netcdf4") as ds: ds_modv = ds.load()[var] - log.info(f'Modelled data (CPM) for validation loaded with shape {ds_modv.shape}.') + log.info( + f"Modelled data (CPM) for validation loaded with shape {ds_modv.shape}." + ) - start_date: str = ds_modv['time'][0].dt.strftime('%Y%m%d').values.ravel()[0] - end_date: str = ds_modv['time'][-1].dt.strftime('%Y%m%d').values.ravel()[0] + start_date: str = ds_modv["time"][0].dt.strftime("%Y%m%d").values.ravel()[0] + end_date: str = ds_modv["time"][-1].dt.strftime("%Y%m%d").values.ravel()[0] - descr1, descr2 = '', '' + descr1, descr2 = "", "" if method in cm.DISTRIBUTION_METHODS: - descr1 = f'_quantiles-{n_quantiles}' + descr1 = f"_quantiles-{n_quantiles}" # If output file do not exist create it result_path = os.path.join(out_fpath, var) @@ -124,7 +184,7 @@ def run_debiasing() -> None: os.makedirs(result_path) # ----- Adjustment ----- - log.info(f'Starting {method} adjustment') + log.info(f"Starting {method} adjustment") result = cm.adjust_3d( method=method, obs=ds_obsc, @@ -133,48 +193,55 @@ def run_debiasing() -> None: n_quantiles=n_quantiles, kind=kind, group=group, - n_jobs=n_jobs + n_jobs=n_jobs, ) - log.info('Saving now') + log.info("Saving now") result.name = var - result['time'] = ds_modv['time'] - result = result.rename({"lon": 
"projection_x_coordinate", "lat": "projection_y_coordinate"}) + result["time"] = ds_modv["time"] + result = result.rename( + {"lon": "projection_x_coordinate", "lat": "projection_y_coordinate"} + ) # define output name - output_name = f'{method}_result_var-{var}{descr1}_kind-{kind}_group-{group}{descr2}_{start_date}_{end_date}' - file_name = os.path.join(result_path, f'debiased_{output_name}.nc') + output_name = f"{method}_result_var-{var}{descr1}_kind-{kind}_group-{group}{descr2}_{start_date}_{end_date}" + file_name = os.path.join(result_path, f"debiased_{output_name}.nc") - log.info('Results') + log.info("Results") log.info(result.head()) plt.figure(figsize=(10, 5), dpi=216) - ds_modc.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,h}$') - ds_obsc.groupby('time.dayofyear').mean(...).plot(label='$T_{obs,h}$') - ds_modv.groupby('time.dayofyear').mean(...).plot(label='$T_{sim,p}$') - result.groupby('time.dayofyear').mean(...).plot(label='$T^{*Debiased}_{sim,p}$') - plt.title( - f'Debiased {var} projected to {start_date} and {end_date}') - plt.gca().grid(alpha=.3) + ds_modc.groupby("time.dayofyear").mean(...).plot(label="$T_{sim,h}$") + ds_obsc.groupby("time.dayofyear").mean(...).plot(label="$T_{obs,h}$") + ds_modv.groupby("time.dayofyear").mean(...).plot(label="$T_{sim,p}$") + result.groupby("time.dayofyear").mean(...).plot( + label="$T^{*Debiased}_{sim,p}$" + ) + plt.title(f"Debiased {var} projected to {start_date} and {end_date}") + plt.gca().grid(alpha=0.3) plt.legend() - fig_name = os.path.join(result_path, f'time-series-{output_name}.png') + fig_name = os.path.join(result_path, f"time-series-{output_name}.png") plt.savefig(fig_name) index = list(np.linspace(0, len(result.time.values) - 1, 6, dtype=int)) plt.figure(figsize=(10, 5), dpi=216) - g_simple = result.isel(time=index).plot(x='projection_x_coordinate', y='projection_y_coordinate', col='time', - col_wrap=3) - fig_name = os.path.join(result_path, f'maps-{output_name}.png') + g_simple = 
result.isel(time=index).plot( + x="projection_x_coordinate", + y="projection_y_coordinate", + col="time", + col_wrap=3, + ) + fig_name = os.path.join(result_path, f"maps-{output_name}.png") plt.savefig(fig_name) - print('Saving to', file_name) + print("Saving to", file_name) result.to_netcdf(file_name) end = time.time() - log.info(f'total time in seconds: {end - start}') - log.info('Done') + log.info(f"total time in seconds: {end - start}") + log.info("Done") -if __name__ == '__main__': +if __name__ == "__main__": run_debiasing() # * ----- ----- E O F ----- ----- diff --git a/python/load_data/data_loader.py b/python/load_data/data_loader.py index b99a3d50..bf57fb60 100644 --- a/python/load_data/data_loader.py +++ b/python/load_data/data_loader.py @@ -1,13 +1,22 @@ -import xarray as xr import glob -import geopandas as gp import os from datetime import datetime +import geopandas as gp +import xarray as xr -def load_data(input_path, date_range, variable, filter_filenames_on_variable=False, - run_number=None, filter_filenames_on_run_number=False, use_pr=False, - shapefile_path=None, extension='nc'): + +def load_data( + input_path, + date_range, + variable, + filter_filenames_on_variable=False, + run_number=None, + filter_filenames_on_run_number=False, + use_pr=False, + shapefile_path=None, + extension="nc", +): """ This function takes a date range and a variable and loads and merges xarrays based on those parameters. If shapefile is provided it crops the data to that region. 
@@ -44,28 +53,37 @@ def load_data(input_path, date_range, variable, filter_filenames_on_variable=Fal An xarray containing all loaded and merged and clipped data """ - if extension not in ('nc', 'tif'): + if extension not in ("nc", "tif"): raise Exception("We only accept .nc or .tif extension for the input data") if filter_filenames_on_variable: if filter_filenames_on_run_number: if use_pr: # when run_number is used, use it to select files from CPM file list - files = glob.glob(f"{input_path}/pr*2.2km_{run_number}_*.{extension}", recursive=True) + files = glob.glob( + f"{input_path}/pr*2.2km_{run_number}_*.{extension}", recursive=True + ) else: # when run_number is used, use it to select files from CPM file list - files = glob.glob(f"{input_path}/{variable}*2.2km_{run_number}_*.{extension}", recursive=True) + files = glob.glob( + f"{input_path}/{variable}*2.2km_{run_number}_*.{extension}", + recursive=True, + ) else: if use_pr: # when run_number is not used, select files only based on variable (either CPM or HADs) files = glob.glob(f"{input_path}/pr*.{extension}", recursive=True) else: # when run_number is not used, select files only based on variable (either CPM or HADs) - files = glob.glob(f"{input_path}/{variable}*.{extension}", recursive=True) + files = glob.glob( + f"{input_path}/{variable}*.{extension}", recursive=True + ) else: if filter_filenames_on_run_number: # when run_number is used, use it to select files from CPM file list - files = glob.glob(f"{input_path}/*2.2km_{run_number}_*.{extension}", recursive=True) + files = glob.glob( + f"{input_path}/*2.2km_{run_number}_*.{extension}", recursive=True + ) else: # when run_number is not used, select files only based on variable (either CPM or HADs) files = glob.glob(f"{input_path}/*.{extension}", recursive=True) @@ -103,18 +121,27 @@ def clip_dataset(xa, variable, shapefile): geodf = gp.read_file(shapefile) # assign projection - xa_mask = xa[variable].rename({"projection_x_coordinate": "x", 
"projection_y_coordinate": "y"}) \ - .rio.write_crs('epsg:27700') + xa_mask = ( + xa[variable] + .rename({"projection_x_coordinate": "x", "projection_y_coordinate": "y"}) + .rio.write_crs("epsg:27700") + ) # clip and turn back to Dataset with original coordinate names - xa = xa_mask.rio.clip(geodf['geometry']).to_dataset().rename({ - "x": "projection_x_coordinate", - "y": "projection_y_coordinate", - }) + xa = ( + xa_mask.rio.clip(geodf["geometry"]) + .to_dataset() + .rename( + { + "x": "projection_x_coordinate", + "y": "projection_y_coordinate", + } + ) + ) try: # this is creating issues after clipping for hads - del xa[variable].attrs['grid_mapping'] + del xa[variable].attrs["grid_mapping"] except: pass @@ -126,27 +153,40 @@ def reformat_file(file, variable): Load tif file and reformat xarray into expected format. """ print(f"File: {file} needs rasterio library, trying...") - filename = os.path.basename(file).split('_') + filename = os.path.basename(file).split("_") - start = filename[-1].split('-')[0] - stop = filename[-1].split('-')[1].split('.')[0] - time_index = xr.cftime_range(start, stop, freq='D', calendar='360_day', inclusive='both') + start = filename[-1].split("-")[0] + stop = filename[-1].split("-")[1].split(".")[0] + time_index = xr.cftime_range( + start, stop, freq="D", calendar="360_day", inclusive="both" + ) try: - with xr.open_dataset(file, engine='rasterio') as x: - xa = x.rename({"x": "projection_x_coordinate", "y": "projection_y_coordinate", "band": "time", 'band_data': variable}) \ - .rio.write_crs('epsg:27700') - xa.coords['time'] = time_index + with xr.open_dataset(file, engine="rasterio") as x: + xa = x.rename( + { + "x": "projection_x_coordinate", + "y": "projection_y_coordinate", + "band": "time", + "band_data": variable, + } + ).rio.write_crs("epsg:27700") + xa.coords["time"] = time_index except Exception as e: with xr.open_rasterio(file) as x: - xa = x.rename({"x": "projection_x_coordinate", "y": "projection_y_coordinate", "band": 
"time"}) \ - .rio.write_crs('epsg:27700') - xa.coords['time'] = time_index - - xa = xa.transpose('time', 'projection_y_coordinate', - 'projection_x_coordinate').to_dataset( - name=variable) + xa = x.rename( + { + "x": "projection_x_coordinate", + "y": "projection_y_coordinate", + "band": "time", + } + ).rio.write_crs("epsg:27700") + xa.coords["time"] = time_index + + xa = xa.transpose( + "time", "projection_y_coordinate", "projection_x_coordinate" + ).to_dataset(name=variable) return xa @@ -175,13 +215,14 @@ def load_and_merge(date_range, files, variable): xarray_list = [] # Iterate through the variables for file in files: + filename = os.path.basename(file).split("_") + start_file = datetime.strptime(filename[-1].split("-")[0], "%Y%m%d") + stop_file = datetime.strptime( + filename[-1].split("-")[1].split(".")[0], "%Y%m%d" + ) - filename = os.path.basename(file).split('_') - start_file = datetime.strptime(filename[-1].split('-')[0], '%Y%m%d') - stop_file = datetime.strptime(filename[-1].split('-')[1].split('.')[0], '%Y%m%d') - - start_range = datetime.strptime(date_range[0], '%Y-%m-%d') - stop_range = datetime.strptime(date_range[1], '%Y-%m-%d') + start_range = datetime.strptime(date_range[0], "%Y-%m-%d") + stop_range = datetime.strptime(date_range[1], "%Y-%m-%d") if (stop_file < start_range) | (start_file > stop_range): continue @@ -189,16 +230,25 @@ def load_and_merge(date_range, files, variable): # Load the xarray try: try: - print('Loading and selecting ', file) - with xr.open_dataset(file, engine='netcdf4') as ds: + print("Loading and selecting ", file) + with xr.open_dataset(file, engine="netcdf4") as ds: x = ds.load() dv = list(x.data_vars) - if len(dv) > 1 and dv[0] == os.path.basename(file)[:-3] and dv[1] == "crs": - x = x.rename({"northing": "projection_y_coordinate", - "easting": "projection_x_coordinate", - os.path.basename(file)[:-3]: variable}) \ - .rio.write_crs('epsg:27700') - x = x.convert_calendar(dim='time', calendar='360_day', align_on='year') + 
if ( + len(dv) > 1 + and dv[0] == os.path.basename(file)[:-3] + and dv[1] == "crs" + ): + x = x.rename( + { + "northing": "projection_y_coordinate", + "easting": "projection_x_coordinate", + os.path.basename(file)[:-3]: variable, + } + ).rio.write_crs("epsg:27700") + x = x.convert_calendar( + dim="time", calendar="360_day", align_on="year" + ) x = x.sel(time=slice(*date_range)) except Exception as e: x = reformat_file(file, variable).sel(time=slice(*date_range)) @@ -213,9 +263,13 @@ def load_and_merge(date_range, files, variable): # Merge all xarrays in the list if len(xarray_list) == 0: - raise RuntimeError('No files passed the time selection. No merged output produced.') + raise RuntimeError( + "No files passed the time selection. No merged output produced." + ) else: print("Merging arrays from different files...") - merged_xarray = xr.concat(xarray_list, dim="time", coords='minimal').sortby('time') + merged_xarray = xr.concat(xarray_list, dim="time", coords="minimal").sortby( + "time" + ) return merged_xarray diff --git a/python/resampling/check_calendar.py b/python/resampling/check_calendar.py index 945ce89a..f743aacc 100644 --- a/python/resampling/check_calendar.py +++ b/python/resampling/check_calendar.py @@ -1,43 +1,48 @@ -import os -import xarray as xr import glob -import numpy as np +import os from collections import Counter -path_raw = '/Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day' -path_preproc = '/Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day' -#example files to be compared : +import numpy as np +import xarray as xr + +path_raw = "/Volumes/vmfileshare/ClimateData/Raw/HadsUKgrid/tasmax/day" +path_preproc = ( + "/Volumes/vmfileshare/ClimateData/Processed/HadsUKgrid/resampled_2.2km/tasmax/day" +) +# example files to be compared : # after resampling: tasmax_hadukgrid_uk_1km_day_2.2km_resampled_19800101-19800131.ncr # before resampling: tasmax_hadukgrid_uk_1km_day_20211201-20211231.nc # open log file and write 
both input paths on top: -with open('check_calendar_log.txt', 'w') as f: +with open("check_calendar_log.txt", "w") as f: f.write(f"{'*'*20} Comparing raw data: {path_raw} {'*'*20}\n") f.write(f"{'*'*20} to resampled data: {path_preproc} {'*'*20}\n") -#iterate through dir at path and loop through files +# iterate through dir at path and loop through files files = [os.path.basename(f) for f in glob.glob(path_raw + "**/*.nc", recursive=True)] -all_dates = np.array([], dtype='datetime64[ns]') # Specify the correct data type -for i,file in enumerate(files): - #separate filename from flag '2.2km_resamples' from date - output_name = f"{'_'.join(file.split('_')[:-1])}_2.2km_resampled_{file.split('_')[-1]}" +all_dates = np.array([], dtype="datetime64[ns]") # Specify the correct data type +for i, file in enumerate(files): + # separate filename from flag '2.2km_resamples' from date + output_name = ( + f"{'_'.join(file.split('_')[:-1])}_2.2km_resampled_{file.split('_')[-1]}" + ) raw_f = os.path.join(path_raw, file) preproc_f = os.path.join(path_preproc, output_name) - #load before and after resampling files + # load before and after resampling files try: data_raw = xr.open_dataset(raw_f, decode_coords="all") data_preproc = xr.open_dataset(preproc_f, decode_coords="all") # catch OSError and KeyError except (OSError, KeyError) as e: - with open('check_calendar_log.txt', 'a') as f: + with open("check_calendar_log.txt", "a") as f: f.write(f"File: {file} produced errors: {e}\n") continue - #convert to string - time_raw = [str(t).split('T')[0] for t in data_raw.coords['time'].values] - time_pre = [str(t).split(' ')[0] for t in data_preproc.coords['time'].values] + # convert to string + time_raw = [str(t).split("T")[0] for t in data_raw.coords["time"].values] + time_pre = [str(t).split(" ")[0] for t in data_preproc.coords["time"].values] # Use sets to find differences dates_in_raw_not_in_pre = set(time_raw) - set(time_pre) @@ -46,38 +51,33 @@ # check if dates are empty if 
dates_in_raw_not_in_pre | dates_in_pre_not_in_raw: # write to log file - with open('check_calendar_log.txt', 'a') as f: - f.write(f"raw # days: {len(set(time_raw))} - resampled # days: {len(set(time_pre))}\n") + with open("check_calendar_log.txt", "a") as f: + f.write( + f"raw # days: {len(set(time_raw))} - resampled # days: {len(set(time_pre))}\n" + ) f.write(f"Dates in raw not in resampled: {dates_in_raw_not_in_pre}\n") f.write(f"Dates in resampled not in raw: {dates_in_pre_not_in_raw}\n") # save dates for later overall comparison - all_dates = np.concatenate((all_dates, data_preproc.coords['time'].values)) + all_dates = np.concatenate((all_dates, data_preproc.coords["time"].values)) # generating expected dates -start = files[0].split('_')[-1].split('-')[0] -stop = files[-1].split('_')[-1].split('-')[1][:-5]+'30' -time_index = xr.cftime_range(start, stop, freq='D', calendar='360_day', inclusive='both') +start = files[0].split("_")[-1].split("-")[0] +stop = files[-1].split("_")[-1].split("-")[1][:-5] + "30" +time_index = xr.cftime_range( + start, stop, freq="D", calendar="360_day", inclusive="both" +) # convert to strings x_dates_str = [f"{date.year}-{date.month:02d}-{date.day:02d}" for date in time_index] y_dates_str = [f"{date.year}-{date.month:02d}-{date.day:02d}" for date in all_dates] # compare if all present not_in_y = [date_x for date_x in x_dates_str if date_x not in y_dates_str] -with open('check_calendar_log.txt', 'a') as f: - f.write(f'______________________________\n') - f.write(f'missing dates: {len(not_in_y)}\n') - # find duplicates - counts = Counter(y_dates_str) - for string, count in counts.items(): - if count > 1: - f.write(f"date '{string}' appears {count} times.\n") - - - - - - - - - +with open("check_calendar_log.txt", "a") as f: + f.write(f"______________________________\n") + f.write(f"missing dates: {len(not_in_y)}\n") + # find duplicates + counts = Counter(y_dates_str) + for string, count in counts.items(): + if count > 1: + f.write(f"date 
'{string}' appears {count} times.\n") diff --git a/python/resampling/resampling_hads.py b/python/resampling/resampling_hads.py index 53eaf789..48b91183 100644 --- a/python/resampling/resampling_hads.py +++ b/python/resampling/resampling_hads.py @@ -1,59 +1,72 @@ -''' +""" This script resamples the UKHADS data to match UKCP18 data. It resamples spatially, from 1km to 2.2km It resamples temporally to a 360 day calendar. -''' +""" import argparse -import pandas as pd -import xarray as xr #requires rioxarray extension -import os import glob import multiprocessing +import os from os import cpu_count -from tqdm import tqdm -import scipy + import netCDF4 +import pandas as pd +import scipy +import xarray as xr # requires rioxarray extension +from tqdm import tqdm -def enforce_date_dropping(raw_data: xr.Dataset, converted_data: xr.Dataset) -> xr.Dataset: + +def enforce_date_dropping( + raw_data: xr.Dataset, converted_data: xr.Dataset +) -> xr.Dataset: """ Workaround to avoid convert_calendar misbehavior with monthly data files. - + For leap years, the conversion assigns dropped data to the previous date instead of deleting it. Here we manually delete those dates to avoid duplicates later in the pipeline. - + Args: raw_data (xr.Dataset): The original data. converted_data (xr.Dataset): The data after conversion. - + Returns: xr.Dataset: The converted data with specific dates dropped. 
""" month_day_drop = {(1, 31), (4, 1), (6, 1), (8, 1), (10, 1), (12, 1)} - time_values = pd.DatetimeIndex(raw_data.coords['time'].values) - + time_values = pd.DatetimeIndex(raw_data.coords["time"].values) + # Get the indices of the dates to be dropped - index_to_drop = [i for i, (m, d) in enumerate(zip(time_values.month, time_values.day)) if (m, d) in month_day_drop] - + index_to_drop = [ + i + for i, (m, d) in enumerate(zip(time_values.month, time_values.day)) + if (m, d) in month_day_drop + ] + # Filter indices that are within the bounds of the converted_data - index_to_drop = [i for i in index_to_drop if i < len(converted_data.coords['time'].values)] - + index_to_drop = [ + i for i in index_to_drop if i < len(converted_data.coords["time"].values) + ] + if index_to_drop: - converted_data = converted_data.drop_sel(time=converted_data.coords['time'].values[index_to_drop]) - + converted_data = converted_data.drop_sel( + time=converted_data.coords["time"].values[index_to_drop] + ) + return converted_data - + + def resample_hadukgrid(x): - ''' + """ Resamples the UKHADs data to match UKCP18 data both spatially and temporally and saves the resampled data to the output directory. inputs: x: list of inputs x[0]: file to be resampled - x[1]: x_grid - x[2]: y_grid - x[3]: output_dir - ''' + x[1]: x_grid + x[2]: y_grid + x[3]: output_dir + """ try: # due to the multiprocessing implementations inputs come as list file = x[0] @@ -62,30 +75,38 @@ def resample_hadukgrid(x): output_dir = x[3] name = os.path.basename(file) - output_name = f"{'_'.join(name.split('_')[:-1])}_2.2km_resampled_{name.split('_')[-1]}" - if os.path.exists(os.path.join(output_dir,output_name)): + output_name = ( + f"{'_'.join(name.split('_')[:-1])}_2.2km_resampled_{name.split('_')[-1]}" + ) + if os.path.exists(os.path.join(output_dir, output_name)): print(f"File: {output_name} already exists in this directory. Skipping.") return 0 # files have the variable name as input (e.g. 
tasmax_hadukgrid_uk_1km_day_20211101-20211130.nc) - variable = os.path.basename(file).split('_')[0] + variable = os.path.basename(file).split("_")[0] data = xr.open_dataset(file, decode_coords="all") # convert to 360 day calendar. - data_360 = data.convert_calendar(dim='time', calendar='360_day', align_on='year') + data_360 = data.convert_calendar( + dim="time", calendar="360_day", align_on="year" + ) # apply correction if leap year if data.time.dt.is_leap_year.any(): - data_360 = enforce_date_dropping(data,data_360) + data_360 = enforce_date_dropping(data, data_360) # the dataset to be resample must have dimensions named projection_x_coordinate and projection_y_coordinate . - resampled = data_360[[variable]].interp(projection_x_coordinate=x_grid, projection_y_coordinate=y_grid, method="linear") + resampled = data_360[[variable]].interp( + projection_x_coordinate=x_grid, + projection_y_coordinate=y_grid, + method="linear", + ) - #make sure we keep the original CRS - resampled.rio.write_crs(data_360.rio.crs,inplace=True) + # make sure we keep the original CRS + resampled.rio.write_crs(data_360.rio.crs, inplace=True) # save resampled file - resampled.to_netcdf(os.path.join(output_dir,output_name)) + resampled.to_netcdf(os.path.join(output_dir, output_name)) except Exception as e: print(f"File: {file} produced errors: {e}") @@ -100,9 +121,26 @@ def resample_hadukgrid(x): parser = argparse.ArgumentParser() # Adding arguments - parser.add_argument("--input", help="Path where the .nc files to resample is located", required=True, type=str) - parser.add_argument("--grid_data", help="Path where the .nc file with the grid to resample is located", required=False,type=str, default='../../data/rcp85_land-cpm_uk_2.2km_grid.nc') - parser.add_argument("--output", help="Path to save the resampled data data", required=False, default=".", type=str) + parser.add_argument( + "--input", + help="Path where the .nc files to resample is located", + required=True, + type=str, + ) + 
parser.add_argument( + "--grid_data", + help="Path where the .nc file with the grid to resample is located", + required=False, + type=str, + default="../../data/rcp85_land-cpm_uk_2.2km_grid.nc", + ) + parser.add_argument( + "--output", + help="Path to save the resampled data data", + required=False, + default=".", + type=str, + ) parser_args = parser.parse_args() @@ -111,8 +149,8 @@ def resample_hadukgrid(x): try: # must have dimensions named projection_x_coordinate and projection_y_coordinate - x = grid['projection_x_coordinate'][:].values - y = grid['projection_y_coordinate'][:].values + x = grid["projection_x_coordinate"][:].values + y = grid["projection_y_coordinate"][:].values except Exception as e: print(f"Grid file: {parser_args.grid_data} produced errors: {e}") diff --git a/setup-instructions.md b/setup-instructions.md index f66bed3a..63d72399 100644 --- a/setup-instructions.md +++ b/setup-instructions.md @@ -1,15 +1,15 @@ # clim-recal setup -## Setup +## Setup Methods can be used with a custom environment, here we provide a Anaconda -environment file for ease-of-use. +environment file for ease-of-use. 
``` conda env create -f environment.yml ``` -## Contributing +## Contributing -### Adding to the conda environment file +### Adding to the conda environment file To use `R` in anaconda you may need to specify the `conda-forge` channel: @@ -29,4 +29,4 @@ pip freeze > requirements.txt and installing with: ``` -pip install -r requirements.txt \ No newline at end of file +pip install -r requirements.txt From 61e6ecaa5af01f2fe5ae5c1f1b782d98cb0656c8 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 19:23:39 +0000 Subject: [PATCH 132/146] feat(ci): add `.github/workflows/ci.yaml` for `lint` and `pytest` --- .github/workflows/ci.yaml | 12 +++++++----- compose.yml | 4 ++-- compose/{local => }/Dockerfile | 7 ++++++- compose/{local => }/docs/Dockerfile | 0 environment.yml | 5 ++++- python/.pytest.ini | 3 +++ 6 files changed, 22 insertions(+), 9 deletions(-) rename compose/{local => }/Dockerfile (95%) rename compose/{local => }/docs/Dockerfile (100%) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e0e1309b..0e7541b3 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -4,6 +4,8 @@ name: CI env: DOCKER_BUILDKIT: 1 COMPOSE_DOCKER_CLI_BUILD: 1 + CONDA_ENV_NAME: clim-recal + MIN_PYTHON_VERSION: 3.9 on: pull_request: @@ -34,7 +36,7 @@ jobs: - name: Set up Python uses: actions/setup-python@main with: - python-version: '3.9' + python-version: $MIN_PYTHON_VERSION - name: Run pre-commit uses: pre-commit/action@main @@ -55,8 +57,8 @@ jobs: run: | docker compose build docker compose up --detach - docker compose run django pytest -p no:sugar - export JUPYTER_ID=$(docker compose -f local.yml ps -q jupyter) + docker compose exec jupyter bash -c "conda run -n $CONDA_ENV_NAME --cwd python pytest -p no:sugar" + export JUPYTER_ID=$(docker compose ps -q jupyter) echo "jupyter_id=$JUPYTER_ID" >> $GITHUB_ENV echo "jupyter_id=$JUPYTER_ID" @@ -75,7 +77,7 @@ jobs: path: docs/assets/coverage.svg - name: Tear down the Stack - run: 
docker compose -f local.yml down + run: docker compose down docs: needs: [linter, pytest] @@ -85,7 +87,7 @@ jobs: - uses: actions/setup-python@v4 with: - python-version: '3.9' + python-version: MIN_PYTHON_VERSION - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV name: Update cache_id diff --git a/compose.yml b/compose.yml index a53218b9..2021fcb4 100644 --- a/compose.yml +++ b/compose.yml @@ -5,7 +5,7 @@ services: jupyter: build: context: . - dockerfile: ./compose/local/Dockerfile + dockerfile: ./compose/Dockerfile target: clim-recal-base ports: - "8888:8888" @@ -18,7 +18,7 @@ services: docs: build: context: . - dockerfile: ./compose/local/docs/Dockerfile + dockerfile: ./compose/docs/Dockerfile # target: clim-recal-docs ports: - "8080:80" diff --git a/compose/local/Dockerfile b/compose/Dockerfile similarity index 95% rename from compose/local/Dockerfile rename to compose/Dockerfile index 4f35f24b..0bdef25e 100644 --- a/compose/local/Dockerfile +++ b/compose/Dockerfile @@ -26,7 +26,7 @@ ARG env_name=clim-recal # `py_ver` is not currently used below and is specified in `environment.yaml` # here as reminder and clarity if future change needed. 
-ARG py_ver=3.11 +ARG py_ver=3.9 # The local_data_path is an absolute local path to ClimateData on the machine hosting running `docker` ARG HOST_DATA_PATH=/Volumes/vmfileshare @@ -77,12 +77,17 @@ RUN activate_custom_env_script=/usr/local/bin/before-notebook.d/activate_custom_ # Switch to default jupyter user USER ${NB_UID} +# This eases running shell commands outside docker following: +# https://pythonspeed.com/articles/activate-conda-dockerfile/ + # Set this for default `conda activate` configuration # You can comment this line to keep the default environment in Terminal RUN echo "conda activate ${env_name}" >> "${HOME}/.bashrc" RUN cd python/debiasing && git submodule update --init --recursive +# SHELL ["conda", "run", "-n", "clim-recal", "/bin/bash", "-c"] + # This will use the default launch as discussed in # https://jupyter-docker-stacks.readthedocs.io/en/latest/using/running.html diff --git a/compose/local/docs/Dockerfile b/compose/docs/Dockerfile similarity index 100% rename from compose/local/docs/Dockerfile rename to compose/docs/Dockerfile diff --git a/environment.yml b/environment.yml index b92d3779..737eb843 100644 --- a/environment.yml +++ b/environment.yml @@ -29,6 +29,7 @@ dependencies: - attrs==22.1.0 - backports.strenum==1.2.8 - certifi==2023.07.22 + - coverage-badge==1.1.0 - cftime==1.6.2 - click==8.1.3 - click-plugins==1.1.1 @@ -41,12 +42,14 @@ dependencies: - packaging==21.3 - pandas==1.5.1 - pillow==9.4.0 - - quartodoc==0.6.3 + - pre-commit==3.5.0 - pyparsing==3.0.9 - pyproj==3.4.0 - python-dateutil==2.8.2 + - pytest-cov==4.1.0 - pytest-sugar==0.9.7 - pytz==2022.5 + - quartodoc==0.6.3 - rasterio==1.3.3 - rioxarray==0.12.3 - scipy==1.10.0 diff --git a/python/.pytest.ini b/python/.pytest.ini index 2a0de9a6..5e0f35be 100644 --- a/python/.pytest.ini +++ b/python/.pytest.ini @@ -1,4 +1,5 @@ # pytest.ini or .pytest.ini +# --cov=. 
collects all python within ./python, to replaced in future [pytest] minversion = 6.0 addopts = -ra -q @@ -6,6 +7,8 @@ addopts = -ra -q --ignore=python/debiasing/python-cmethods -m "not server" --pdbcls=IPython.terminal.debugger:TerminalPdb + --cov=. + --cov-report=term:skip-covered pythonpath = . testpaths = From 38eb01e689f39ef7d318c2b70c5245a20ff000cb Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 19:41:53 +0000 Subject: [PATCH 133/146] fix(ci): correct linting and badge config --- .github/workflows/ci.yaml | 5 +---- python/conftest.py | 9 +++++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0e7541b3..59898e7a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -26,9 +26,6 @@ permissions: jobs: linter: runs-on: ubuntu-latest - defaults: - run: - working-directory: config/ steps: - name: Checkout Code Repository uses: actions/checkout@main @@ -36,7 +33,7 @@ jobs: - name: Set up Python uses: actions/setup-python@main with: - python-version: $MIN_PYTHON_VERSION + python-version: ${{ env.MIN_PYTHON_VERSION }} - name: Run pre-commit uses: pre-commit/action@main diff --git a/python/conftest.py b/python/conftest.py index dfcd2ff1..237d6751 100644 --- a/python/conftest.py +++ b/python/conftest.py @@ -5,7 +5,9 @@ from typing import Final import pytest +from coverage_badge.__main__ import main as gen_cov_badge +BADGE_PATH: Final[Path] = Path("docs") / "assets" / "coverage.svg" CLIMATE_DATA_MOUNT_PATH = Path("/mnt/vmfileshare/ClimateData") TEST_PATH = Path().absolute() PYTHON_DIR_NAME: Final[Path] = Path("python") @@ -49,3 +51,10 @@ def doctest_auto_fixtures( doctest_namespace["is_climate_data_mounted"] = is_climate_data_mounted doctest_namespace["pprint"] = pprint doctest_namespace["pytest"] = pytest + + +def pytest_sessionfinish(session, exitstatus): + """Generate badges for docs after tests finish.""" + if exitstatus == 0: + 
BADGE_PATH.parent.mkdir(parents=True, exist_ok=True) + gen_cov_badge(["-o", f"{BADGE_PATH}", "-f"]) From abff6ccf9aa09d84b2468efafa0c9bba1c73d192 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 19:50:53 +0000 Subject: [PATCH 134/146] fix(ci): exclude `data/` from `pre-commit` --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4723a0bd..bd658c3a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -exclude: "R" +exclude: "R|data" repos: - repo: https://github.com/psf/black rev: "23.9.1" From 857bed0308053adda87fafa81d9ed4b8bb9f4869 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 20:25:39 +0000 Subject: [PATCH 135/146] fix(ci): correct `coverage.svg` caching --- .github/workflows/ci.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 59898e7a..e0a56bfa 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -6,6 +6,9 @@ env: COMPOSE_DOCKER_CLI_BUILD: 1 CONDA_ENV_NAME: clim-recal MIN_PYTHON_VERSION: 3.9 + COVERAGE_SVG_FOLDER: docs/assets/ + COVERAGE_SVG_FILE_NAME: coverage.svg + COVERAGE_SVG_PATH: ${{ env.COVERAGE_SVG_FOLDER }}${{ env.COVERAGE_SVG_FILE_NAME }} on: pull_request: @@ -65,13 +68,13 @@ jobs: - name: Copy test coverage results run: | - docker cp ${{ env.jupyter_id }}:app/docs/assets/coverage.svg docs/assets/ + docker cp ${{ env.jupyter_id }}:${{ env.COVERAGE_SVG_PATH }} ${{ env.COVERAGE_SVG_FOLDER }} - name: Archive coverage svg uses: actions/upload-artifact@v3 with: name: coverage-badge - path: docs/assets/coverage.svg + path: ${{ env.COVERAGE_SVG_PATH }} - name: Tear down the Stack run: docker compose down From 94ed0d1217059c140c5efb9f72d348e699580b6f Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 21:00:02 +0000 Subject: [PATCH 136/146] fix(ci): use 
`python/` path for `coverage` badge --- .github/workflows/ci.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e0a56bfa..5ef4f88a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -6,9 +6,10 @@ env: COMPOSE_DOCKER_CLI_BUILD: 1 CONDA_ENV_NAME: clim-recal MIN_PYTHON_VERSION: 3.9 - COVERAGE_SVG_FOLDER: docs/assets/ + COVERAGE_SVG_FOLDER: python/docs/assets/ COVERAGE_SVG_FILE_NAME: coverage.svg - COVERAGE_SVG_PATH: ${{ env.COVERAGE_SVG_FOLDER }}${{ env.COVERAGE_SVG_FILE_NAME }} + # replace below with references to previous config lines + COVERAGE_SVG_PATH: python/docs/assets/coverage.svg on: pull_request: From be61962872847cd5bff532b0595a33877897a4bb Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 22:03:55 +0000 Subject: [PATCH 137/146] fix(ci): use absolute path for `coverage` `artifact` --- .github/workflows/ci.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 5ef4f88a..6ec508df 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -6,10 +6,11 @@ env: COMPOSE_DOCKER_CLI_BUILD: 1 CONDA_ENV_NAME: clim-recal MIN_PYTHON_VERSION: 3.9 - COVERAGE_SVG_FOLDER: python/docs/assets/ + PYTHON_MODULE_FOLDER: /home/jovyan/python/ + COVERAGE_SVG_FOLDER: docs/assets/ COVERAGE_SVG_FILE_NAME: coverage.svg # replace below with references to previous config lines - COVERAGE_SVG_PATH: python/docs/assets/coverage.svg + COVERAGE_SVG_PATH: docs/assets/coverage.svg on: pull_request: @@ -69,7 +70,7 @@ jobs: - name: Copy test coverage results run: | - docker cp ${{ env.jupyter_id }}:${{ env.COVERAGE_SVG_PATH }} ${{ env.COVERAGE_SVG_FOLDER }} + docker cp ${{ env.jupyter_id }}:${{ env.PYTHON_MODULE_FOLDER }}${{ env.COVERAGE_SVG_PATH }} ${{ env.COVERAGE_SVG_FOLDER }} - name: Archive coverage svg uses: actions/upload-artifact@v3 From 
2b8659a39ada2b36383336b71d19e8de21edd288 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 15 Nov 2023 22:17:28 +0000 Subject: [PATCH 138/146] fix(ci): refactor `converage.svg` artifiact and add `quarto` build --- .github/workflows/ci.yaml | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 6ec508df..8ca3d40a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -89,11 +89,17 @@ jobs: - uses: actions/setup-python@v4 with: - python-version: MIN_PYTHON_VERSION + python-version: ${{ env.MIN_PYTHON_VERSION }} - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV name: Update cache_id + - name: Download coverage svg + uses: actions/download-artifact@v3 + with: + name: coverage-badge + path: ${{ env.COVERAGE_SVG_PATH }} + - name: Build quarto run: | docker compose build @@ -101,17 +107,13 @@ jobs: docker compose up --detach docker cp $(docker compose ps -q docs):/usr/local/apache2/htdocs/ . - - name: Download coverage svg - uses: actions/download-artifact@v3 - with: - name: coverage-badge - path: assets/ # - name: Build docker quarto # run: | # docker compose build # docker cp ${{ env.jupyter_id }}:app/docs/assets/coverage.svg docs/assets/ # docker compose up --detach - # docker cp /app/_site/ /usr/local/apache2/htdocs/ + # docker cp $(docker compose ps -q docs):/usr/local/apache2/htdocs/ . 
+ # docker cp $(docker compose ps -q docs)/app/_site/ /usr/local/apache2/htdocs/ # - name: Apply mkdocs cache # uses: actions/cache@v3 # with: From b665a22ac334d0ee514a05d11889fdad856a9048 Mon Sep 17 00:00:00 2001 From: BZhang666 Date: Wed, 15 Nov 2023 23:15:30 +0000 Subject: [PATCH 139/146] Adding Geodata in the data folder --- .gitignore | 2 +- ...January_2018_FCB_in_the_United_Kingdom.cpg | 1 + ...January_2018_FCB_in_the_United_Kingdom.dbf | Bin 0 -> 1258 bytes ...January_2018_FCB_in_the_United_Kingdom.prj | 1 + ...January_2018_FCB_in_the_United_Kingdom.shp | Bin 0 -> 45369096 bytes ...ary_2018_FCB_in_the_United_Kingdom.shp.xml | 2 ++ ...January_2018_FCB_in_the_United_Kingdom.shx | Bin 0 -> 196 bytes data/Geofiles/three.cities/London/London.cpg | 1 + data/Geofiles/three.cities/London/London.dbf | Bin 0 -> 375 bytes data/Geofiles/three.cities/London/London.prj | 1 + data/Geofiles/three.cities/London/London.shp | Bin 0 -> 278908 bytes data/Geofiles/three.cities/London/London.shx | Bin 0 -> 108 bytes 12 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.cpg create mode 100644 data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.dbf create mode 100644 data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.prj create mode 100644 data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shp create mode 100644 data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shp.xml create mode 100644 data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.shx create mode 100755 data/Geofiles/three.cities/London/London.cpg create mode 100755 data/Geofiles/three.cities/London/London.dbf create mode 100755 data/Geofiles/three.cities/London/London.prj create mode 100755 data/Geofiles/three.cities/London/London.shp create mode 100755 data/Geofiles/three.cities/London/London.shx diff --git a/.gitignore b/.gitignore index 
58419692..c0a6630f 100644 --- a/.gitignore +++ b/.gitignore @@ -265,6 +265,6 @@ rsconnect/ # End of https://www.toptal.com/developers/gitignore/api/macos,python,r # Some custom adds -data/* +#data/* /.quarto/ diff --git a/data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.cpg b/data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.cpg new file mode 100644 index 00000000..3ad133c0 --- /dev/null +++ b/data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.cpg @@ -0,0 +1 @@ +UTF-8 \ No newline at end of file diff --git a/data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.dbf b/data/Geofiles/UK/NUTS_Level_1_January_2018_FCB_in_the_United_Kingdom.dbf new file mode 100644 index 0000000000000000000000000000000000000000..1f7bc4e37f39fe5c7e9b53b816a8a90933007277 GIT binary patch literal 1258 zcmaKrJ#X7E5QYVK>&Mik$i%TlA%S;1e%zcoO`60@+6J8pCsysCjs(c|)L)x_(j!G% ziQ|-mB;Lu7-+TFL^mlahrYMSs;)h(HuV%Ae0fgIGQ5?taXz!KWZ|-yd{a*j3ng6Q$ z+9cjy9q2dz=`Y)6o_}%@@9$pcUA`@+>uup+|E>CdemrTr^+JuOtF`(tZsyBrGyACW zBJc)G+&Ha`4XT97V+hPF1zmsAS}9vuWUmcSUjm_i?y1-HA*!)jgN;P2EKnkz<{-5(RHu)QZ6aFo#D} z!v>uwm0G5rq}ea;W^we@o(bf=b26k-2GJR-%1{wICk`a7r3e!T(6jZxFlhafW`BWd zftiBnBn}biLJz zq-6jcw4PnG&8%&9peGL&yag9a;DcdQrKxm?7N)hO0x_Zo!kyNySMBEIWO*%T-&2v1 zRLK<^NC6P;K((5kq-#e#@2Q8Ag~8@zfkx}ZS5rA-$YibAR~kGYbqIA_Dl;Z%9yd#r(`UZ-H>dF31g6D5Fs*A*%`#x4MJp@>{|>X z>lj-|7)51CL#dQ8_wzZ!_x8tq9QXG)9G~CgcV5?fuJ?7F%jr`CkmZT;!A7qdN8lcLyzb3*;!|G)nW49Gck zu%DH9vVGO2Ul*uk;r#2(zghg-$iHm-oof86%fCVV8_&OG{0qMFW7TtS6s;M&@pg2BYfsh;N_{S6 z22T2PaeNh=^7k7Z?=&k~Gj;6K2Oq`39Xy&Cu@fE zToCwYQ;xs&;l33(xc&#(FXOaa?WZN+w4@h1uCe^vX*1umoX?wwBi?%|{#~5ZqjEs~ zh@v$UBi^0y&s9GE#Db72I3a9)IybIQ&rO zDW!1A@fHU@#Bsk3&bZYWcRn8738$@Jnf)sE@3+OlO~0SH9w$Ba$^P0nCG?w0r*S}| zj}JbLBUfJz8_4Gc=lZD1!7F^;=`I0daq5aPop#}n{8evH$6RjwcAU5}d-NKd)~42- zuW{sr9aUy~{qvukegh}H6&&~j4*ujyr90T`slfLSs6Mkwe*Rw2=+YerG!@{uN z`)?f6Vn}i%j@Xksy$Sa_;mhUIKE?s%M@((+^EmMK{xLYNU+(3T<-M>?me<%l>nIK${7&dz9PrVU?3H=`;R9QBxrBog`*fLuV@?zZ 
zEy(i;KbcnfE1Xy`rBg8+@yK(brJM78KiONkGL8(cS!phgxLq`?0S@`{mH0C_sMOx5 zW;ixw_4J!Ksa(GO@8HDG8wX^2lh3al8{Zix1t*43W$TGk4jecTfsWWX1PRU(;=0M9sp6yz)1#Z5*%UYatw0u+t9I`(s`hew^b5tIOV+Sof zaKduxTiKRef8VuLZsCN6g%32wiB%5HOvfoh>t-y(LI3Sf&cMOxe^oh%Q?~q6IS2JW zt>WhBR2(qlftk5+>O+&e-oQTRhixzU9;(u{72hv)R;5Zfp+fCh@!0zQG7eh!bzq9+ z%f`%n4aaaJpJ+{az1ab+J^s~oVbyR2V>n%TUZExAO+>fyv_pimNqtm12H2&{Md}nzv7q@ zc>_vd>qouzJf9D4g#5|#NbBArJqo8*A2{s-j_lmI^CFyd^XXZuu-|_jww}zxiO=`! zn1n-qUJ*4N`~QC&2h58O8;64*?RKz#B=_S=(`kiqY@4IE$Ki-(>sul>D&gVRQ`QhSM2v{i6b^V7F7XTA8O)|*!2PV?LRhtzb^(Seerp6 zAdY+bw}azxTHdW8e_iJICpX@ni&GcQ?DQM<{w>787lz;Y6^9pmC;L(yetuBpotD#H zzW4mWHL7gHX&n#De!C;(wEIdpxZ|}Mld;dQ6pqO@W6xD=yAJgHak)Zzb)tQyN5wa} z#OJTsQ}rusf0zC%&*$itj)^$+(v;9Yad?GeGe&9p;}jfIV0ggG*ynQ-`}bDz`H9(o zIk1_}vwgkXfzRuEB6KjeKe&V=s*MVq=;!f$E_=P#J06I_A%DfsF5%~MAJ^i@dEZrz z#&NaB99W`5-wY`d#pnOq_TYhEcz*4t9oUBhmR=8Sh{GrC&v?9ZR{O}0L*5MOwg?C0 z8J)4?XRdF@r_l|gd7hL07uXrc-bhSI#X(J0RxX7jM|Pb3dKd2JoR<&WxXAsvJ)-hk zIPSuYj4RmsdaNtg_urCA@8Q@VznykiPaYBVF1CH1#g1oDIQ-SuL(f}IJJr_jfNs>E zC%RATg%hSvpI#ovCHzw<2B(dGqw?#P4;U2H9VgX$I9q33{;&9c9#4Na7<>Iw^!u|q zjK)D@>+ky(hd+3t<5V2*cfWv(ICgl&1JiKGvrEEsYTBRszQ9SHmInOj`PA!8*#2%a zj_dgLtSdOR_vqwtIQZ!g4}`zV^LeuV%(^(X)fZJ}YwAH`98~q?^qJV_-xa5J38=Ud zN9+sjG{p0BeKh4@cm5vzYXlA%@#yr8*zx!UY`pS756++d?6l`_T)sh7YvP2jx5tNK z@52skyDyJ}h|Ti#v3wUBJn;i$BkTli3i^Pq4QjvSopvFb#P3c zTLH~*Oq0i^zK@Ng0&&WKOP$wve6b&=#b3DlJaN$0km-|g?E3I*-{F8_edD9BfA1nr zd*RjSmvMO4ULol?w(hp{VC=Y7^S!M0xBVxs_w=nh6EXcrdSe`$W7nZ%YN9RH~BI5S6k93VHcdnf0{t_>(!EtTx zbX-v_T<@yVc-S;{6do;lz2VY9wqp3%s*zw^yc04KU@k3Tr%|3|dQK&}e>>f}3sep|K z{yIbZ@BDYC4%q8EjQ#$fe8BNto;dIVwmpA_t+y2|XM9_s`MV`>TEEm;JF)F6HxBtN z@14Vz)BgVWf&2f;uhY-qfCZCg{D1=nJk;q=Z2Y$oCyqJY<$=LmPwx^3=VJSjr*Xi< z4`+_T#v3o;;F{Mv4%PHW9dT^cFYoliUe^R1^77ZAov{5xBDVgv#EFNFPd$vizx8p# z=BX*6L#XdHKF=uW`F!s>*zx&M9DAv3rIy(5nGdISD_?a4Hjd7LjpxsKKI7r#)6}2d z%k~%gkox$;+D?}+_aR4PO5*AsA7MXlD~@d5EP4mF{jIQ^ z^A6y&rW@|e#wm?Dq+}b)^-&)`!p?_Y!Pd7I@45|+yw)aLS#162iS2(%;=~R^rw_s5 
zjSp8UXgTr!2#@ddQpSTgE^1`Q&#>*|zwdcIL%ZFcjbj!B&N`P1%;fRm(9=eL-#$N~>h>a(1>>hhx9zV4OO9&cP?K{l^HLFe)*yD0Up2j6*h; zJM@UAUti|=f1ZvHz-d2LopBJy#rF*T;}rLoe(|r5sPDHDq8s9vMt>wfF&r-|IpgD# zd>`V^*RkV%KkRrDjZ^Y;$@VUGJ~|QyBp0jH0mrn@ojf0h)GoQdt;ere*I|=pJZbIm z#6^d(@nK6GvZP?j10Qq!Z_eHSrsbzk1eV11yAjxN=Sgh4ZisEqm2k?=S_f-*KJEDx z&!;|x;@E{1ve&`(vrprcYkRZT$IgeI(tO`$o?qu=NU+CqU*5vO@fjhHY2vURICAKz z^pZF(@qE|MvGXhKIJ63T{rPe5oOWHm!_L9=eEQep{Qai&W>3c{ANB2=f&*TP zNnV5#uOB(M0~7XiPQuPNR^jk5-J=g+`-R0gX!4S5N3i44G#tCQ<-Qb;C!UJa+`qlp zdNvF@U)Dayb~t3!?23oA^F(Ysasr3H-e>yjnttaRcD#K7J5RZVjnB(t$J;yD{^D_L z|C@aT^@Dcy`!Svu`GrSt%G(XPT*StI|HH8_d>6GHThCiz$Du@=ws`mcF4%r|iN`bl z7>;d^b8*7%2Gthg%s37kcO3G3#=mhMPaKndB-cZ{ITRZ|KY^Wh_r+;-pU%~^H# zc;-npu>El)_WRYw5!;5OH^Fha(x%qaJl9&DPkU*K?XRj?zUtvh?JcLjuHgBMAN{fI zt|WH;Hx7qH70Z?%JHFq+&T}surT-wlYBY-bL0oneJ3e>TgHyuJVdMReu=Awz*!lIO z`<(FLYf-bX-zSxF=Yxwhd6F~O_PYWb*Pq0x?fV}}#34neR6eeWCsyHv?~#;tDQ8pne}@yISMM93^K|J@at!yM@hsBg>7N_p zxJvIIY=(`yKEdWUUd86u)@k~$mpq<%_>b839g1zAx3O{J^VoQ-+*tlT`JrcUV)(EQ z^|ASYC$P_R8xC&MX=eHnO#bIDoHV4=vnd@>I^?_7l)|CZs1@PdI$alp<_-7aIFX9w&&p->!VeqL46Pgm5m*Mivl5su?( zMn>IC$-2MwaN_IdXB@|g)&9xW4%?sa#u2k)J9okMcS~__zicylW3OWvj@xmwODy(z z^zwZ2dXsU;nWs9p$L8s#dOV-g2K)S{Ve^+w@3ZZ-E)IHb)$A#j|1!4AE7-Vg88+T8 zft|m9g^m02WAhe?IPSy$W@N+mJL|Ch+3iD=Z+&FeW}Gsy?VXF*xOO8pUvXYDuB^uP zFQ;)_(Hv9PVHd4q<)ZZ-HMr4O?e#K->k!qo8>Thi!L*8#7_%nmciyHKE|;X8+I;> ztuF(yg)C()4Gs^*<7OfAd*xJcrG<{Cf~Ho({vQ zsSkx+!p3)NE&s7q{5BkOD__@r*ztD}wm-Omokv7t$E`e*_&&sAEwKGhacn%<2*-{1 zWX4d-$%{N`Iq~hB$ywL?)dBtoOrd@e|L4Z3%l@5} zb1EkO*}tFboA}%9C$aI&Rvet8MRI*?Jhu!R_jJRt?}S#Fg(JVHa&Qv1J`Bg!zZc`P zzW?*sc}QEF@N3=VBG`7(9cSjzaAMc9m4;hRzkPEb-;ee?9UDKK#^#Z>W5w=BT%3<4QN6VSt z)yB@p+G6|B!0B8c@!wN8vd6AFBXDZU6EidRa({TPMP~4Q$QK>M&I8`TmT$zaZ(PL2 zgTt_KOwpNK594CACSD80*0Yw_{--&%pQw%<2fxCRL9M0-XyV6bXK{aiJ#}E+9!$Qb zJ2qc82HWo~$Mz4=*mialo8NvDd*7d#&EF%R5r!i_ePQYpY(D)VO@I6+PJ830sH@*{ zedjMtsWyl2|8j-OC$agxKG^!d2YY|FWBZ4-*!XSv+^qiNQ*69*852BEZv^Y*nKQ^rsj+pv*@>AG$`WCjmJ&VIX`>S)a`|Nr|9Zg(umCrZt^Zjn>H|_ct 
zoVaLU^fqj~mW}hNS2I^&pHDt){%a{V|54O(@|lZpLd$)jrE$c=+4d)R{N3xZfBOtO zo|d6JWK_+7PcU&_wdZhbsTZP$VC#E0cKxg$Hb2x78^`v>wwEN`>!VI>vE#rIZ2!>$ zJAXNcQ*V?GZHk@8UBc$U>S6O`*RlEJT6d5C58HoN#6g$VS1dS>&nItR5Qp#np-LFG zKfSe!&l}cs?<+Vp`R}kB*yqwxGrnKY%s0nk@imb;myEhS>Z_TO2&)*xvtPj%|5|J^*(L+`Wsqq5le`X$O^3$2JQhK(0%VeZqRhp}t44RFegpVI?1?XHvM^eex8 z!}nu;Iusl4oWss%-&=r*n_tEDgMVPxgKqES?=emd{G8|U%~#2*vHk1Rg$$vx&u2a15;p#O51X$TwK!`$c^Vu42Y7%>5mnw66tDYBn`68Jn;E7@G&! zg3TXI-pKuAURMu0Psz238AB?dcDXr+?gzt#^N6*GD#C<0Q+SpRB~@Q;&N*?P(T{>scWCG|Q>yQ?Yr6 z2{`VrTp5$FosaavUT;lo z`ako0`jrZvfA{l^8%tsHApZTd#EAWmVB@dZ*#06AJI?)v&DZbS$mbD1ci5cOzGAWQ zOAK~hst>l`?~l#XcEGlm0XUBNSs9#CK5BYL91!|*@*?=_JLX{L&EI3^VGFS9F^6&N zl-m10$HX=1pJUgjmSOV&Be3(yL~MR87W;g5VC(1m*ynQyJHP3R9Z%2T#M!?`$6(jF z|Hk$&J+b4&qhE7XR-0n%Ud~~dYrM2 z&tu;D^fv0(?~6MpV&lyY*m^$~yPi-WDeHdq#NO|xvGcYlZ2wmsyMLsW=M%3t!se-7 zwVdmZ!mewDVV}qQ*!?2;vFr68Th98yrnTHJ@(NWG*EJnXvmZ0vEf zvF-F%Y+kxQwqMD+oxjigti9#rqXy%o4dXgLf`hs>3fqfq4`&U$1m?D~HnZ2bwr#$`RR{mAy!S@B6% zY+N`8yS~{28_!J8FRs}WgUxr2#IN{vZl3jJU(zsly-l>8f-r3isd}7&DiyyYuM+Q zgv}S;z~*muSiZdLoxiZ{H6A-Y6yWcf=c{Wu^TMjw@gWeKPi}!750>5ceZ8J|zaR7A z1j=nE6S3`Z8TR_)u<`RMZ2UGEJOA2-J#IBNe!PsCSN!)aw*UUioq);R2X z-Y0jpxEmXveS(eOzQ(Q>O}3ozWGxPUBWK5T*!@mlVEgNw$$TE| zv94x6N&#$tTN^u{cm|udu7-{8!?5vJ2=?z*ww&?sPGVO53daF;{yq3Bb{yTIuU!dP zgso2(vFq;BuzAUUu<_$KY(DhiLmba@kHwke9X7suU(=r##VP%_X7s{de-Z3@VdwkK zH!s%&$IP1F@iw0yvHgoquVCjP*Rk{Us@VH;6x$CxgU#0-z_#CV*nVj}c0W^DZ2UhG z8=n=#aT#%U`r*Vs(knlLGy8KKo43iK9GbZOacsX9h#fZ`!ND&?ce=fj^NGi@WADSC z*!8&dgP8gJ1?)V!xaTwfIIX!a&tunrzr)s_+M53Q*j=~Bu1_Dq&R<4h<2B1;e)+HK z7v4YO`GeT?o@^=H5Aqz^{7nVyJY_HTe!qqzr!}nd4K{x2kImz*#YsOEntjvb!-r)X zh@I#6JuyI%@ z_B!ig_X{o6^yg3C^=|C`)yJ{*q3Czq57uMyWAn$&apJbCcXHvh(nl(e!{!VBU5+2E zn-Y40`$N6kg_9UJx?=m2O*o~+)ru!D_x;c$?7CK=lhilnUjsGcWhZR=ZRh#KKl8D9 zora#j^w5;wvHORrVf)obPjP?9Usb^Nr%kc_L22xIS`7AiJ&Mg2#bfiU4`Acz;~vkt z$JJ$+_$VF6e$poFEVkd>`91fSeA9ev{rnZXuVM<0to2HlM^1A-`S;PDPkWh*onN-V z;U%whX!8TdlYe~}Ck*_y>oM$pofBVT#)CFzxSxzii?Q{x6Lx>{d~Dnkjh+9`#n$^# 
z*!8on*!e`f#}ntZ#*Ww9u>ETYb|2-u+Lc=Ij!vEy+M%b$x2IJt=WOnf#QyZ+b@yPlJQ-A|Idko!yCuV`x4{oIDh z<5hbGJ6~Cao&Q(GuE&0kQwx>a`wDjdYrJOsd)0FCGoNDDo7!N<_jj;y%usBcQxF@E zkH)dzJ~QLm=Ufl*{6y^ddmfvgpN$<4Qn2yq9?R)JKEaLyh0bTy_d?kHd@o|xdwyKN z{b!v#9J`)$5F0Zrue2Py{t|}Ghb7>o7oXhs6gFQz9%uFk*!)l|HlEFGc}#Mb z0oZZkqSwdwiorhbGdS}2FZ;S<`}@P#{qmi#{nKVmKhy^Md}dHMvGaim*!xivJ74OFjsIW9 zw#(zxSNHpr!S2^xY<*^a)ef7lT7dnWex6VMejzrln1pRlmb+fN7W;i|kCFYG+*yh3 z&xYWLaq-FC&wK6F&*_Mhz8{@#x$XTe%zfUIfE{njVdrBXW8;CRvHf2k?0$o?*!*)F z&!;~rjeRaPu=BkV*zw?o{So`=pTa)>CD`kIMw8FZPk(ste%ju0+h1OICor6H|K6k6 zao|NikNzYSvoCR02#(wM?%w*?zxyn9JnQE1_x>L1dlT`!>vR6K5gX^0;&}7Mm$Bo? zW7zQ_r`Jbbr6e}r)L%Ca4|^E9pMDm$f6IZ*hpoZ^>_5wxpLM-;uz9JA*!4U=FLhS_ zs8iVe%a8H*BA(9(T&Br4{>6AE%hKHqqEgy$2_euT}>PqCc*;!(@#2Nq-N>Y@`fhxfoBKsSIJ>v)f$Mt2 z`$zj-iQO++2-_aA+dl8sAFsEj?Jq82T5-yat0rLcCy#nOan1p3f1M9IzevZ%Wq!V& zSD608c}QOD`#0q^>mUze_alzN=1*>Mz0Lzx{mA{`Ii18lpCj1zcmx}-Uc|1qrC`?! zi~NLpr9~a|eEQ9@*nV|Cj;zzQVib1&_BP9jmq&U$ZEmwZyK!R*FMiwY(8-Xb{=#LhxfU4Xc0Cp z{T4glpN)+Nw_>0F6m0%)gXPQvhGOT_%dpR-yT_B4o`T(vuZ?FSu>E)sZ2$Hmc73J~ zcAj1or|sRDP20Z~^nBV+=JDLOJf2VcjilW8^6xpEPu{&ccKkSvQ;Id6Q39KvNWsqM z0Pem)<>Vk#(USW?W80&A95R;4|!CR4|ve)c`805KeoLFWA9U5%)a>S5!i9( z0qj1(t=Kp$7k2;VS?v78^3=5--%iJ_pXSCs-^Y1=KG$s6_SXRW|9@*1=aYY%uZdsI zW8=eZ*mm|kcAfGd_I@10&L?kRK!U;3K3Ak)I zorJDzCcv5}UO5BLLneKFYhzPG+R ze_4#pTc7cM^M2_B?BCys&DW2{&Ii|E-=FvtJ3pI=?T?4x%=i&o?>k`E!+T@rH*Z+Z z_|+6UkE?~9e>K3qe^DJ9r_{%ei&e4f3yrb$zaq|zKe2IcIc$6T6uX}H`wYsBkFoLi zVeI|1eHwr5z_#aP&*wgTjor`tyXTWH+KlbrZ(;k34cPr+dAMKBHxjXVZTo-s4}F1M zpDK>sZ?VARSwDIf`@Onj+fl6NGoHPK&Bsi$oc^~wHovjb^QmvevHkM_Z2bmbRQcS0ik^niJbU4Z+Swk51$B>HlY7`-ctK zcw!B9f9nG5b$y4O?~T*c?{b$ppL}{0c32_jo1V}3 zFbkVct%r?Y6R>gIE0(iAashTd=tb-}bqw2X^I0CBk#g2@-v3IULj7XC_9M1`yMg_j zpRjqfli2m6pEcvwZp+zEbR9cSn2LQL_Yds)e;juG;$LiDa|GtOcYE-+tol0;JHKp> zZ9fgL`yN|h=2@L;V#n>4*!8B0_Z=Vd;n?XtJwYI zW3lhwl*ZP}&#?J{9N2ZtS(d*uZ|Y>8r~Adf$F`3Tu<@wh$G_7QyPxqYw*8gC?tjU^ z?l&u8IrEX6-VfrfoY;80xc7_v+B(}i`Q}%!su|7_^ 
zI_LHX9RBZb`|Duy4%+!|IJTc3<@vn-|3A;Cy^P21*Q2<}vJj^zWHpJY+fRh#Ro$>M5G%cp963KZ?zZJj(sB zou9@&|0>vhnMXXnT*BU|*nH$y*!MO5!OllM!PbXDJg?N(vQHcA`OF_mVZYBH&u4sH zkL`D(vFH7ST~F(Vt)I6&pK+!ac04IgeeymIz~;ffFxu=DZGn)l)sW1s&-?0)Mn zEhipck6k}oW;yX*8a7|B9Q!^^VcP@o>I!VWpbR!1UE=xdtE!Ei2hYal4clScNgQ@P zwjcKWhvA-2d^AdvFYk|SkDp`v<4ElH%A3aZvi~^;Hh&a`-G6aqBIf12UdSUZuld$vk z&X~BP;sTE+Pu14*Y47W?{YyRUcw6WO_akrW!GGhj>R);6`soGi_*x6QA8-%0zDHr> zsd#KWI1!uwdl%b3e}T=1weWc6H^cwn`}02emI<^6@>CnK?L?DaRt-k(RX{ZR~d+&(s*`@{PMgRs}T0{cGrIP89w zSnR&fsn~p1J)9XYVB_WT*m=iV>^g47xUBlT1v`FU!M2xu*!lEk?0Vr|Y<^}LcDy@` zgBSnWWgfO3o%VR{&rIw(;vd-lVvy(4&i~Tn&3j|>bvLo=61N5HdpZANO#IENwzRCAvKAJ<5 zU+I92kJHEU`Lw5t*!*)(Y`)_fcKs|GyT7x*U)*o@BfsJCj3WcFao;o8emNeSk1c|o zKP|w~uhPLF4Q;0f&aIE1}FvDi512zGy5J#0Vv zgU2&pcpm%vH0jv)wF0s0)nEM0-(&rv(`f1!dHO5Z`D!S(eYN|C>u0?&eH7Qj{Np4x zFTNVPAGz%<{vL6A47T4of!(L^0ybat_P<4mLF z<1A<1@=5k$nd==te1h3evl6?%eihg6$tx zVfWRv(ew|yvHf91&nMq{5W9Zx9QJ*>>)85zdpOru{lWO0Ik`UOormxGacn*#0h@p7 zid`>gfNifsvHQ1Q#9q&4Y&*?|tzWrwaXsv>O&v!4WFAmKv!0oPUC*k4okxC!?JwTM zjuR8H`*-@_$iFWH4#f8VpLjg;rJmS+bS3uv<968lvj-c`G{vqDUBRvwhib07a&GPi z>*Kkw{bEh*`v$i^;`uSZu8Zxz{=~)+Z)4;1BbGD2?}Ke0^Re@dLD;x>GIstu9J?QU zB=)_bNt*pAgR%3wE!h2c?LD6OE7@}HLkny?aRNL4Xo6i&{=stM_1Ca*)){Pn`U>`a z!XuWm&#tWI|GHuG4au>59`X09*nRa|vH6^5u>JX3Y`x8cecx%B<-}_zxnI`D>GLcx=AE0OiiBds@zZt18%jOnoVick=SO_H8JkaOqsgz2!p`^lW9K71u;b7u z?EK*!k7pg`Gi-fojGf0U!;YWNVCzq2{pI-;)zr5iDfjzk$L3dVVB6m>^nd0Fa?n1! zPbaYL=^-3cH{$j#9P`}c83nNM@&fGq?=ft=G7+ccs9fm@%UP$XkF7^#u=~bmYTADW zf6x6h`>^wq-?9Dub?kimsMo{q#XZ3F8UJs??!T*my)V-|p7~ZQ?7Y0E<@^p&UynCV z#>TxfJ_kDiCzAxDd+iw@6J@}q-Wo-Q05F0-~W;yQ*bi=-XV*7LcJq8<> zW!U~$@A?w^eSXKzFVAB4i=W2MUvt|2d7t+vw!bWnjh~O;d+p!f^ErUce|4kW{CXm` zKih|0KY9n7kB*@KF@C=DA?Fi6^|e1DzmtkxuUUz0hX*w4ayzlt^9{D%@5in`ZN0e#dHDOpjnUXRcK|j%?u;Fe zQnCGEJM8*Nt_S&i@}+HXVwnrspTN#*Be37A1~y+?6FWXfV(U|7>^ja_?EIhrcHJuW zA?_FR>a-zTFZ218*!yw`dw&n(@b7jVJdf>HZ+SfXMZUwvim>kD?? 
zauK^;bR64HeusS@J1~g*$$H5)P5f0EyB;tHTi?fH_o=kR=J97@^Se#3@x*FuKCmXX zzU{}(XR2V=1MXb9{H6**naCGc7M|KL9{pG;UNXMAH;hZeC`&{r$3mFU61`5d!N!U-DIWh*tn=z zVV)QJ71v|q_gdIEcA;im8;!+=;fB%l< z#7nI_p5IR@P=vovJ!*+vp9{mj?`gUFDF$NK)f!^Q^F`RdSJ!f$OP->9KlZJ@j(z`U z6sF$ptB#%D{e+EoD`N9m-HK(kha#TO{GnFytonU=Ag2D@!NyNtV%ueC3GO%RJ@c{i zua?;OYz{U)nW0(Vn}+RI*J1Myld$WAxl2;cdi49)-=8UhiF>Q`#pdUq$ByqkvGb(n z*gRNQ&!--wVb}i~VfQmvDaGfJ@2Y`q2aU1$lNT%}zw)+be{U6R{=7T(J<~93K76|6 z%vVFP^X(1T{L}N;elr;xr$3F&Z>3`M)TOceVXkAx*CLwzV?mGc`OKFCu-~^lcHVS# z0R0#9h>6&CJ{%h_uE(i^3MEHt=A%cj?X$JVlUK=K8nfS}0(SpK7!nTiOswo?sM2PTrYX}D9c%I+=acb1F`F2cd+yFPq6K> z*t1#nYdp5TjK%&w%oOZ#-(d5@GqL^EDeSm17bme#vRHY(A9=*(*!NUgVB@iE9?$xJ z!3rEd=it5vy#LJC@;t}qlW*#e%>$Qtp6|o>oq>H{Qe0dvks>=&S3XnKV|!5e5w?Z|zzhklEWjV{4&wS>1!hXog z*!ANrmh+xd7wo(@knzm+cM?0FAB7zs9|+_7vcG*DHjXThT~9iKyk7M%_%{`uYq#E}7w8Gwxx3S|$ zYmX1V#5@n#%$9?$@r55A0@rxwPJQ>9<#c%FM8Hh=d&?EdvXxqs$&YGdCg_zhdXBCz?% zAF%s-`eWyB&Yzuc$6k4~uVMSshiXvHdf5w}&$?X=?Ea2Qmeaqyjg2=dVAt12 zVdKpwETvHP&Hhx7c2AOE2}xSm)7yWVug_Cda2 zD|Xx-j_rqzVE1c8W9Qwcad4#v4z$4b$LF#8IqP8K*9+MCPyst{Jb|5G2jY95=lt>v z?KgAXjdH)=A#A^N#?K?qv;~{TeY7Uen|$O@&3qyPdmZ(#>p6q4`NSgF{U)DT&imu( z_BV|8Ut#m%KV##QL)bX-Fm}8sSnThsIzMp!w*osKNvFT}_dwQR#zGQXZ%UQe$R#2{^Sg{Kd6JvH-CryKI<57oQI#s?)R95eLh#Q>nG!|&*xuk zetsx6zwqE|e7})L?)1U#H!Onf?;O9JpTDG8e`vF$ocZt~jbJei<>e-ePx4er}OAHPi zg6(&U)WhWYjxm1wzGw{g_xK&(U7wkQZNH7N{m@$M{;}%VeBc#q`zwKcJ`dOD`!GMs zgKdBHu>J96;tBhQ4D9ze{xE*5+ko@Q_YBd@hZbw%*M=TXd%c5g|An#n)At&3y~K~- z5Koy$If{*M2WaB6e2qAs{qgm&{cR`B_s@@gzhpnQ{vGMZ-=iO|(3tN>oG}W!-qrxS ze$X4+e#c__!%%Eouphgh{PKHT5A)-OO?V!>-&PYlzI}_G5AW#9=dlmqmZrT-z{W*2 zBRHS`tB<|jA=vs?NYgJK#g4Dv^x^LjkMC|udnf;RzBl!O{KLb|_&)3(o{F9CG{C-J zQ4PC3vH?3^{vsx;|4hf$*XOZu=>u=@_lW!RV)ws1g6&Ul_R6|n1+ew-BsTwE&~ow# z+pzDGLx{W9Os8M~gG zf}LNq#Lh>(zs5x=?^0ih@0Rg=Y?oEB_w5;M`}w0A<>dP(@w}biO~LlV4`b)EHL&fb zAN9#R-j%Ldpd+@vJcjKr+Il?Wb9KfG*E1Vy@{PH%zc*LkWee7$-bJw#gcs}b~yNDLq`{NBN2?09+{d;dma$Bz@(e9my}`v3RX z&;JlRp9$jg98X4L$CpC*|Bh#$=g%DP`!3xn_xl&OocUY?HvWDV+yB(V?vt&A-EUVH 
zXU>nX{g8J3sf{L%Z{YE~kGTYUf8Ms7^{@8aAMeLN?Eb+7?-%n~uh05?9Q*shL$K@n zMY+HB1Mgwie`{;DQk3c@w|n)%2@%u>DaU>^{opsUNog%bmF&tPjRv^GDxf=L5&E z`+AOIuO~P0oAvpm$Me3Sk>IpaJ&xJokD&dAJ&$PyRX?J5Q^IjjxQ)%u7ClGsj!({*02? z{N{Y@eCc8o&x3ein&r%24`SEzzQWd@<(hi%J@)q%dSde`jcAX~hx1_9*Oy@P(X%^o z{j4W#vc1y(jKJpe_F>n@2jY9}-|;90d%ZtW?s)dD$FqL>2loC)Sx!Exp#2Z|n|9dv zt2MU%zHT}F<%ihzR0~@_7GvX`aP0Fr8=nlsk#}m(=#GtFKGf_lXlprnoKLX*S##|A(Rgh9(+C^q&A{## zecj{PZ?z1YU#^Lr4+eX`*l&^xo3E*f&HMb_f%C~9H^I)2uVLo}QP|(hIb%8f+Y)Si zw;Ve^$jf+YzuCj%dHzMQac)PC=RQ})=C5AC?w5bd@t5(cFm|1(ugCKq#e>*=z_Hl8 zYc_1XAA@c0|GdNJG44#p_Al44`OWFr`jU#hf3vZ9y2IE!`z-AGND}rs=VIrj8?ocT z=bHJ^YHU6J5?g;)VE3o5#_r!=itV5F;RM#D7h>1TUn1TxpYjp*_v_bT^8szJ|N9Mg zzSkVvucc!5ztzEhemeH~K8MY(EX z>G6z@rLpthO?S^PfL)KleD?q4j8?WZ9c7WRFlX4vmL61(5z zY3%sl6T4nh0DGU`_IU2wG47A+1tuf{-LJj?5o&|ov&5L z+^0@EG}rSoHeOth%|lv0{QaCSvFjauvGKud&3>Fy*x#EQhwU$YKE9v%f#=_SKGySo z*#4_J<@anH*v{`mJUWkZ$Nebm`v;$UKKn9yV8@rm*m=PF*!Lg4z|MC^VB@Go*!8(@ zJ)d#zL(ga4bi(ub{pvp0d6BmNYKHGUFYDcF*!jX9%B?@OvGLY1Y<;Yb?Z?ksPQU!3 z<@7(lWA}en!^XRJJf8RUUcin=4>^Cod%XGaLfGF&dztdg{5baavFc#wpUtr2aC7YQ zZ-LEAdOfZW^uf-nN@4R%pJU^j!r1n<3Hv^7<-5m^$Hu8|VE=nsL$LL{FZTD_KeU|w zdN8*B#d<#Z*kK;e?hw^Te%x%tHkh*>)TB1e!dgf-(Q%DT@O8l?PvV{&dbi= zd-ub9%6aVg`WfYZ&IRoIQPVx1dh`o6|L5~Ee!GOtJFdZw!~fkqel0fM%})FE{gGAJ z_W1zLT<`FF;*WgTeE&R;=l9l1VE0cS@_5FXSFq1LMHBzMfsOk!u=B!uEYgo7nrY37dCmh0R}GG`~e0R^M{o|Eo{_%ioi4fsOY!;>`d4 z5w<>^!meMo#^%lbz>X)G=Dy`2UuRtZ7UkyC9>Wn=E6!|(ecrXP`ITtw@B44T*5AoE zbN<;p>;4_{c=GOVV1MuJI(EGv5}QZ)4|_jmV)Gr97!T~f4thNC?Hi6C8TWnmvAKQLsI9`!YeY_?2oB8!_oSDzX=4(=MW`YuM*ef%wq)wYBBsQyOB|X``_F8Tw)KqwTQqhw-ibS1oM+@;f$eejdC3 zF^Krv`~ADuPkdSjn|IlVolo>KKBxa!fdlyOnI>Szn>E<}aW{6pv9AMAa2GLrAhJgz_XJ>-Yq z;r`PW8;om=0}LP&SIZ)7wo#>ddrFb24MI9jMl`{ zA7cM|4qfiE>*e9t`N8{?`?-a&`%3!ydGw>%u>FRgXFl;3-`Dms3fuqZq}={(26n&6 zYnpuRCeOcnJ;rmtV87>E9r=E&?}U=SvL9=UeLtW&Hg1c;KDXNDyU1(x#{QmWO^;{1 z?~kpYHL&}kKJxrY{|l^!oyV=d&#q6`?L;|ocXP^(Q%+*nJNskf!*`;1zRa8EVfQsX z(HXPP{0E%b?y>DAJNZ%j=VRFUTP5uJYPo3cKfj0g4t89rjm>ZM#F_gTEa!83W7l_j 
zVdwWBVB@C+*!DCFyZ>!D_V>rW#P&BUvA^4#h+W_0zZ{S`FI$VvPtM22KfAI2{isRU z{Q5EMf8Qn6a<0?-zWXx=LcT6 zCDs>gxBpRY{9Fh7`M|ByXzyy`M-ER^=b^|e*df3`!)faul^0Y9y=M|JKlJCF*bjDj&k4caXrR$&Hp@} z`SKp@dTFVyd_Matk74sWb#dmp9=3nFg3X(JargXZy7Bk#uICs}j>f(}G1GF!r*iLd zJb96+*!wXQ`}=wmvGL2l*mWYW*Y(rj?%Yp)_iQ>gA5#sxP85$_|7eP>KOXP=bSrj$ z_yFwnRqv5izuRN?8Jxw&s}-^H;ubymd%Vw>!*b&DzSwop+dN;N+gxn^_Xc);pNw7q zyo{aiUci3tFWCK;wR>@W#0O_I`zQawzF%SeGk^1749}N%+Vvy9e+le5`$24cTpruM zyPo9!{|1(`ZnN3rnWwbIu6Jy>TON(gqgns$_Xc3|C))iWW3lh^ti#stc#o&uWu8ww zyc;`D*h{(j+Edv1#(r#lxsIK0TF(D=DA1ed!TPl8TdsQtW9LDeu;ayZ*!~)?EM{sZ7=<>>z6a|z58$fyacEZ`TKb6dPofA#%n%r^WzgdpS+yy)B3d%o1YnvZGUI5^WhJ% z`PO^`v-+19>^vn8c6@mo`}<=Bv2k8w?D|SoY(Bj$_WL)$&fnT;)@`EhD|g+YAHKK# z@4DzG*!O|n;CR2^1blD(-1VU;o=HIL`H?ZCc|?s~b`|M(!D&)xNL z+h-$eKJ{hFo&SD={k?^n*mZ-FA8b$*KiWM-|-~&K3>Mg zFUPU-*<0B7@)&kLlHKn|e$sOH>*vG9rzf!U_a`;u=UHrj(E|J5=lB&Hf3(Gp&*!o0 zS+8U7{~^m6FWO=EAML|F-@%p>A0Dur_k)&T^IG3y*A4e#^JRV?^QZfP*!%N?<&0xJvGslj zcHhBD?DOf4ohNU@&hOiKKKtMgW8=zZn*QdZ<^1L zT?ei65#{V(ziK|geS+9{|15TXwGo>i+J}u_u42c(jULZ>$Roq3kK{kT)Vw!Q8@vDF zGiftzFZ z!=J;M>yg-a_Qc0rKjUvM&3{kr>?hnm;++HJTQcLP5nK=Xg?MaUulY!x2YIMS?0lx= zD4y@Po2I>l9e*cd_Ys$|ocLqa7>*~OxSxEG&!yHl{yyz?DE2+Vn&Y`%?n^^#e6?)? 
z=aZk#fqk!H?_{oz`By3ORko)NpHUxqpX)F4XXG*7#rN`MzVGxNcD`_#a_3)fPNkgR z`FRgJZ#{wC*Xnwt`IT3vW!0Cz$>&+WcVYYgqvrqEuU&6C$Fp9s#BzS$sqYNFFY~k7 z*nPO0vGuYHHh**zd);}k@%?2AfA8mX!3cLPO8XK>)n#1SQ zkG=O6<-`XUvHM!GW82>y?ESsjhI0DHVsm+ZtmFQIoiA3y_SdJe^s3fo^!#jaP4#J0zk9?yJSJ3l;TIrGbLn)=^t9?zG0 z*B`r&ThUP#C{t!Y0CVyeet z){np3gq=@5w}AUWUin?ic`sxLwjI>Q<~ep?_v4qr&hH9+p4I>7#g12VvGK#TR(wCk z?^~8L&p3gtZ%r0v)!!s+{%#O<-{op-zVMMnd>_WOkFjxI6YTXyW8>V@n(;RnyMF)J zVm^<3R{yljst+$>^=TEhzx@FFdm}$#e=n{+_I?-m0<#|~2-{w& zW8F-~sG@;TN&%4gbB#^Pyff#>T(jWAip|dpzHNFLoWK2e!U%#P-LN zJpPBZo#tWdP4%T5&wQpGb{w6Jz0dWq>uWE3N&R8ISsL5F&&7`4IX#~7^Ogvsc+=}Dq{PKQP}ru z{(U2>zV626F%MzeQ|T3aKicgF*f_K$cD_ZEBQRu)4pn!Ri9_#*uJk-{s6lU zc}=s9PzyV*%t)j@FuzP`%JboS*8PgV&wilJ*m?T|Z2PN-jeBaY;&`6JhKQ{CHU)dX zKES@8u^bz}bi&42H?jG+;@Ejdi`AItyS)jY&wjzt*zvL#Hhw*UeedxBYEzdafoZ&t(R2mE~Fj|13x*b}?o_JYUrdw}m? z^Q`$dVB(Rc*nE03?08$(a@I?xVe4aU?EB>Vv2kda#}n`Ti2eSLYuaDojaluv5O$pI zgq_D0#$Mn1*!M*PvGeja*!Fs>A;%M+jK%gZSFrEXZ^iC!KZ7&-mrc|k)6^U1H&-o^9a{jy`8&;EpF z*!7)l*!fBo?E9se*H8RZ9UC9bqulkzBKY3*8@~l&^99Q(Hy`@`PCk!(Pgm^wfd#N} zTPtk4ct*3|Gz9xTO<#{^KL0HKf0#P!IIW7mfiB(BDIFr+9ZE_gpnxFVf}luuBO$4T zbcm!h0)l{acc(~7x4;7LInVC%d)I&X9B1yc_s*U9_RKZ@5taMZAI@ky_3mk4{n>P| z`!PO%L->A18rXGPP$nOf1orvz!TPJQ?a$xOZ2R_>`=Y_lmzVrK>px`u+h1V)o%k?v zEJ`TjslSjm{QBpx^3cDpzjYc`?pBrYhtt63%W}c$gAmwtk{mYwewX*9evAuipKbH~ zF&>F*r_TRJSo_uccYoe6Si9E#>akw1{!SEFJ$DS&PPkfy-y^RSo4@D!J`S7D`T^E| zSqEERP>1I;kA4xhj-ngv_bBem%%_>K{_+jjc*SVPgL!vaCXUqsR&MTvUDp@+eb0kf z4SOEOUf6lK6xNm6YImuIkfuvW14dZfKvkDuERHa{CmyX&WHg#N_w-sFeXrm7eLaA!C;GP%?bul%jF#N zVExatu=DLZ+ws3H!tQsd2fN=O(>}%nd6);*zbFoCzr}?E@*Z{{{j*P)5BOs}_lLFb zg7xqA!0yl63tLZdMP}XZftymF`WiOh_Xljerpf`X=RB=>UXML816KZgZ##LCwXk}8 zAnfsN-V46M`Me!r?VTmC@})WKcx{J`V>hrre(+h_@o#Fw z#;ac0&hM77KmJs*LyR}^gW|C7 zf1km|501*zfA_MT`2JSd`1S}`|44RzkAhu~D{W`KR)(E7vilXX!PZAChtZp%v0&qw zOJU=Ir+GixyK`)3VAsRet4@Tq-&ereA3wmZtHp5Ma;f(Z^Lp%&IWqfAzJr~=Ghp*N z9lf6Y8{fhH{dO|nBWMC!kJu8{&rJ=xj%&#HEf<-O)(4e=jk`2*{t}-n2W!`T3R}-w 
z309w{aQ@;ySA*3X(P8)1)eh&UtS|kx`mplhjO&lOmwK@CaUtw{tYv@ZPg~a``mZ$X z{HOw(@6HEXfAEy`tzQyDMsDs_9)T=VS1s7{H0r|IhtXv6713es zfceNH-^ccHJRkmACD`*8Cc&;(<)iW2QLy>`c(C@zcd+|wUaOy2H?3jgM^|CxNiA6a zXBX_>FAeJ-uZ69r%?fKzslWVQQGD2Z-6&XpImG_Fm+xTb*?(pE``F8kVdu*YSbMH6 ztXw5{7hp_MO zAgo;=YdPAADY3|m28I96Cc9*OY7ml_oeNR zeX$O9z1_k7v`#`c{%{o5o?8wF%z|aMX>X%jn}iD|Mq&` z$2YKY<^#stdi23?Aie}!XXSVszibOT&!@rxdj!@V{7vTkqRTSly4CB+kHvO=QvZ1v zHh)+)!u->Is{v<=pdJ`206h(-)wh>vcYWTB$m9o7ArFkl`S;AveWLszzjqu~e++|t zFPC7~^(L8o(H$9o?V*f3i_ZGgf60zK^!tYSVe6wlhqWs{g|!nVDo>FgJz@Q!*~(A$ zuS|yBKOj3!>tW+CzrgCt(_YW~U2Qw@_4}~%?;NcC_zupyDDjd1VB;gP)eqzc)IXlD zkq0*alMFV`Tmp7I<%PB1YscRng2-a^4_&@lcJz?#g1+-i5-5E9xw9o7D3!2!@ zcwL3{Gpl$#_TVGfcyj^RdbSt#*N%YAhrNN#e@3-Ge${iYr#^6p&%^qB2b%|;3hO@y z{iPhR^$qdR&+bQ7zUo({fSu>gSM!?bVEwTPu=T0sVe4J9z{X2j$=C-ET)*TyM#Jj; zUGP@?y_K-%lTL$;zeT~m(9irFHXhp^R<9+8{k`R|{=$Wim=E}84`A~!(_z;~TI_%A zugjGU%|egZ(-|g^T3{0ti9;@@OMh``}*^+`fn?2zT^(9 z-#!aY&;HMp*r)2#Ua;|yFSTdcFV(>Q)VEC6eq}#)QCRtR3AV01C2Tw;*@%Y&^ds?7Hg= zD~~S1%8A^t=N}|IihSlgtmv?MaSUwUVpeg+3xD|!Sotvk_V3?^jhi)teV)9>VDf<_ zVBOcZ#ICp zmmfA>vJSRB?>MYHj0|h9+=TVh9~2JLUvFUj=L@iQPRf&E>v0dPJjx3jPu&P>S66|R z*UMn@R}JjX`dSFX5VdD#NVb}k5*gRw$Sp9a={?sePfVrckX<`dZbOHNpQRO4LO`z$UK2j~Xt-;{^V zKMaFC-=d=J*b}p0^Gn88$we0(RbvhLyul96#*C z&amt4yyL_Egf?DJK7Fs_$NFq$f6m+a6?UF}0lOYb+n@2w2Wxkwg!Ma9!s>y*!<-l*!jH}c7Me`u<_lYu=>IKb^mBZ*!NK$_WcyIo%-44u<^DWUQd3l z8Em~rCRqQf1*~108aCh7UM9|;7*;NBgtdPzSx&Z8|?zWho3hK*8a@_E5ByI zuET<``D6c{=MH=fYmd!__20gN^$*s<#utZqJ@K5)GWD&0z@9I+7j`{dhLwA#VC&GH zdOdaC=V9w9V_ggzzl*T%Cl#zc==kW*W`wOzy&$8nGQs+Hr(yFr>0#yl39rX*ILhyP z-XCuykgwS9-wVHA{nR}&cF9WGu?vsvgY_4dz{WGr!M*r*J-A8fBcI?ySu=Zof z<*@k|nuq7ZFO3O1e;>ly1u0)^EqMbx6i}IP1?fx`}<(W>pNI`X%FoD?`J!8 z2D@!X9{dbj&tkjrsPk~(94pxRf&XCT>RH>VA1Hk#?0GN2_8SkY&+o#nmnE?Kxt_x6 zyX~;@|GC%WKOTdfFE3%`)dSePd{mxaz5NQ-o`?<`2h8wyn0$#1t8bgZ#+l>TAH6&Z zw!R~t?bxH4ukwDd8&kvTkKVBB?St^^$HMBHY_Rd(xv=r}+_q!?v=yv8^!=J2scSp)^lw;sTLX6fUV;6)$zlD03$XEo{)~tFwRghW59wj+pH{-! 
zpDP@H=J`Ze{h3DQdn!#}*Gnkx+kIW1!}=>*VB_o+Vg13Lu<^Shwo{K^6IS2ng!QwF z!up4qVCx|h$<${jfPKFkneXmTxtp8kMgOgZja&T(&!&!Qx$6PB9181S&xO@b?_l#~ z(_rKHvUX;7*!T4c4&*6e4bz}9OXhqXUW!~VSuu=$*0u=^v&z|M~Y zu=)F#ucDX?*?@vw35Zm@pXUi6FcwOO$Gu?wus0_u>Rifu2?0P&5`+fuciD&G9 zwYLt_?)>=McIsrV!shQs^83!08?f<%$*}Rkr?%sdPKSfHtv&@`J??h23Bqr zgIy0pV8^=>?D%)}dg@o4zv}T8UcdeL{${Xo%Hpv3+-|V)DJ!h~(hv50%h<5$$zd2b12@?V`{*Yjij zAMU?BY+S!J92_6xa+zV{8m?#8%?bQ3_gf@~m0zX9{k0RkALETr@kc%XWCg5!w-dH* ze-doGVJ>Wa(g0XH?<-jQsUEC;N(<|UCV`DhjKts8U+I%8?0M?LKJO>6`HEt&^EMMa zoBbg<^w05Y62Qh|Ul0$_{@$K5Y`vX_jVFwUl|SojCw^T9w%%u;jJf zUf6Zj4%S{?1P9|M=I=(y*mE^#w|=fYY<#9DtQ`9Y_C05ZjejJ7m9v>)<9iRY(;xl# zg!qi{pQf;N1pgS%VI35Qw;KP0wHJ?PV}4M-HNgJN?@wUu<-V}zVaA57%kKeOzjpn@ zu>LLW&-+-})Ax2YX}+tX=jAtX((?cKz0Zjo%J{^`|@8pYKCehTTWf7q&hsll`%)hr#Y= zO>8^%&_vk%@gXwjjV*(fn^&{&e#oaifvt~k2?zXL-iP(>QDObl4zO~eWoE|1JS?pK zC=3VuDOmd}7OX$91@?I#Wa9Z4hl{ZG;%V48Q5xqz^63{DyR#Z>ez_m4em6d;zt;x# zy59CD{#hM%UK$@XzL3xBv7a`<)*n91$lqtZT!!5*w+`0-xdU6jIR!R8_FN{u+Z$G2 z#z20AP(PRpHa?vcRxd<_t*7`Mrrva) z@|^LQ06S0XA^&}!AHe2Qs-aJekM&H=_z@3Ys=mUH8}~lrMSkOB^&S3TUf8%nV^}?$ z23FrJg&nsCsklG%%V_K!-&-$O|Ly^7zP&c=d|rlqrG1_kc0MG>zSDos;PuqmcGDgt z?~?*np1j1qbU$1a+ljkn(_Tluy-msek#87Zzl5;&Um5m1o9M9iLsMA$!glSmez1Ay z1hDqvKp8z63)X+{4F~T}dwUCP9L?X;-Z>1LCynIqVP{@}&8xrQdGu3n!^+|NGUNIH zHh%jLY(Dyt{jp0g`ux0?e_-?L`(f*KR>O|-7W*5YgN@g%wVnN`-DTF>XxRCgAJ*Th z4SOF?7(eB4T39*012(?(Iz`y}nCkeFU%e#b?=_c++pL21f6Kwzr;}jgD8*pq^HA7% zo=GN;*ud+Rzp(!O)8vc~{&7Lr_jevv?^S?3KVcKBeNhp1z5f9R{Uhz-!LWII@6Ywx z1a{s;=lP7+-b@CwKK0*}uRCD-diIlMC4>pU?-+8hE zc3)>v+t~+u+5Xr|wPEGTD_A*L*LLC(32*Rx#48%Z*7s$Qk>|}|_0=b^@s9?u`I~mI z{>SIA@w?%${zOIC`14Fy|FNKqf4d0QZ^~tV-uHIcJc`e&zjzV$Jtu+#b^fsWI0md; zn&2k)!}k*2B@L5@Szz|7S>PA1Y7SXtG_>j%_sZ!JXd50?D<{(p6h$LjK6XkHXfVs4);f# z|Cs%;mvh3}`zK)SsZy}{Qv3V&tH9br=V9~ZO=0aT$H#cVKv@6A@iu-m64noj!uv4( z{1a^aEIMpm)^ymsgzwAooExFP@^_hxynM;;S+BOv{`hTxY=?dR*0BE3RM_*~n#=er zV`1fPOW64DKo~hxq#bO%T5nkUu`BG~{l@EA*F9nF!N#!pi$1V=pc?EsHof3LU5<>L 
zaD1I7?~?rgcv*iF3M>B|FXi`3*!}ZOVExH0u=e|xu=4GX2yDE69sIxNVL#e(ucyAb zmDf|xv&{a;-+r+6=2F{vzM-&w>0(%WV+3qGX#uRh9wn16mA9n>-&pn1ccVe&d1UYlfcH+mAVdvoynfmKC_GkSag^ja+W;^=$r0vwx*MOCC zXKW|FS_!sZ`z);A{xR%+HQTN8DB|_uzpsCh8&-~Pr(OM=&i=@Uwf3hzDhKR)TMnzI zlfve$7si}3ecT;&wLccp~{_-%apVdxgzsGN| z`m+wKz8nTyFIx;&4txunKgt7p{zOCAdZ{e1^%X_Eo;Yt(*!{6-WZqwF*tl;L7&$ck zT|%zs+<`~x5A4(Duh1{*JK2`g_# z!q)jzfIUy7qt_E>&ksA_8`*BX3+#DQHDKeBNnrDRpUAA&cj!~=v5UdVg=27FzQeA^ zHMXNqV!_rYPJ`Xw{3ro`kMC8Cf?faDVD))l*!;p7SpD*~{n>xLA2xs90QNpN%GCQ+ zgY|zF!P>VKVB=Aq#ZD94yW9$>{i^j12>{ZzP z5cOo_*+y8sUs=ZAm?~rMeFQuHLtyjMg<$1u57>I++_3qN=CE=zHSGJT4r|xNg{||; z2b(X91Y3`l3fA9!5ufYX-yQ{azt6w0e$QR(E8{gMVe_IFVeP{mwj)t_#vweOC==G*(h%8L{BC%)eU*3P>CYu|K)jTc{oy}wql_0!t>evhGv{V{MK!^R)# z!P?`|7$4-0R*a0hF%D~2}HpA+VNU-tbHL&aHW-NZ6`1W|)vEP1#ozLxH^NL@=+V5px z-$!xSdXy79uk}1vV)Fc)A2$P5KmHCozdFMD^P_F2zPvW9es2bA&qjhB-_RKRJ?x9i zjIVy;Y1n$*{jl_;&_k^vVtPZ<>CX@Xc_X6Slqr>jAeari_ zK5|*~u=#ZhcD?t3jjvsW&G)v3tuHtOt2eTS_dfs|Z@e3g`$aGBf|ajVVEuzFuz7|P zwqqCchOLiW0sn7)F@JxyKlOx*ydHmLJgoh37gmq-t-+$6wk58(-fB2jX$}`1{nE4TH_&x4O^xQs46_Y`k|b?0P9-f9o3_z~t?6z}m4T zVf9*enR)artX|3tYZsn|&BweC8~={^koT>ev7P$9*|2ex46yag+hOH@Rybg9KH`2k zm&m`T92^Y???-*J5LUmGqTPD^L$LZ-R(`*P&7YTqmDd>`GakH$qB8M{7O-*MT(EJ| zp0M%2oUqTg7}h_@4Qr=7lf$3iICFw0%rEAt*J}?~hRt6UgtfOj!s@Rgu;V`;HvZ@R zC|~!(?zbroYmdB!&3AtU8=uVjl>YehvUW{nIsE#7KL-Er_w@&1<0X0RkALqF+Vb7yj1skti18c7xf%TUs!>-piu=O+DVDsITpNGA# zy0Gix3alPVWPkjRG%xsl;zoBvd0ynsm$31%*|71Q(Xjq@SJ?d523UDeAJ)!E_#fkm zy;K6$-rWmpzs7_0C(FI$`O(+^zUFzc7ly#b6VJfbOYDVRKf7S-dAhvf{-|gD1vX!D zN2cDaKWu!m%xjqO>k2zRLuKrzL9pwzQz-Y3+?W6xra#*bc)zu7$k zD_?rT%HRI9YgY}CiC+(dT`!|x>))tQ4&2u`Si5HjY@B(t?Z}Ck?|5G9cKd68W`{k` zz~9sF%m+IU2f^ki3&GaY4uFkY6^5-3>H#YsKJ}#bTsL6jqGh}uyJRhFeNzZ*{;?12d)UJJSN@Pl z2>AJPVD;XYGV@?GY&@V2Y`m{K?0TpMyT7B6?dbI~u=Z7ISbIDNY(DY)D}JAPznrlC z)gjn(C<=N#_5Ukj?Qhxq+)NpNH8*TLdMNCAObx5Y8hJhPAu+6dUB~{|X>noYb4}Q| zeh94HP(`M`?=JJtcvpGY@j4GXULS|IAAzk4k(DpAVehxBOuhUR*!XuD*m~yCuyL2t zu=8&aZ2q_y?6~)W^@j_>-e))1@yr7oSL|+o)@3$05I=*pFEhffyTP#fH#KbjWE5;X 
zH6`r+{~uw;Bf0IwGiJi($CJUT$EK56r}1I; ztJQ^_f01G1*FV7eQ%_!oJ^xJDIK@5K_|isLzk4Wbd~LPYGw-|F&iyX1o%lk1*mJy= zz{X<>z}7!Zg|86zP6PY=Jz?itWY~Ct^V9it??1+mdY;a(^ZGDsy-N?+-}@C-{&t70 zi>nR?<%jPfGi<%l4BEAu_q^c!us?Y=Y<_kF?E4KoANI`z*m&}A+R41zr$ zIxTEIZ5ZtHhS;Ax|6JI5hfB}@|M|_QoP@Q<7un8x*a2(rkA}5BHp9lF2E*1ft%TK| z6Jg_l3t;0B0r`YIG#xe`bKd^c^_G!|*PMXWhs9*#!}DR|Q}tovO5U$}y(_G}+68w1 zZ+}>QQWADNd%()0l(2E`&am<=hSy^Ue;wZbhWTY2r2%Zb;R0;@`%~Dw*9hk$>-%Ha z_t#WLUgwp`_oRTee-p#Xk2mNe$2U4`{N@O3T>Q>6?iYEu5Y~SA6ZZM1!OqwDuyS}D z?0(e|u=f3M*!umNu=N0gVeOe0Pr07+gD1f5hug9g zP+!=28(`i`XV`TnYtOZ_o$>e$_I#V>uj<>`E?$4ew>AkpPYotW+#@nHR_nQ$Qg18ZMRhK;`_hxIQez{V4E zcs<`=8VZ|dYY3YU`VLl}HG$2)cY=-IIUhaGs2!|)w!ePjXL9&_HGf?RHonr<>#1)m z0xK7Xz}iWPVdc_1e-D2+y6wafSHq6~?Z>neZ#xQG*K-#3{oH`Hzc#`_d2IcC1=#(| zRatM^@kbwVKdh_IVB?MR;h_Gu{;HYn+`smd`NZ6?=Q5Osw^HYk0oJc51c&fnDmeK2 z?iYLTFl@ZMUcX=)tbf!FHa;~T_Bk8b&VJT`u<^P{<`<0pVJREFJXW4?qA!^`p*C>7e0l9_O*6rcG&!OLfV~g$zbI~d|3Z=|2=*W zJst}-9zRcJKc({B`ux$b_V9C9e|iY4{I~}@{(WKPwDy$eeRP8Lt9HW9lcuo#zvE{- zG^^L+kDP|hN5zG$8`56Wu8Rq~zO!*Z?l(;ZyFQ$6`rXN4*Lx}0`5nvt*oS#+r*8Q6 zU6}k_R@l14@8RJ3($44xD{s=!?(=nl&EIN2xu3QTY(2_ZIPiUC*m{9GGInhxSUbY^ z?f%g+u=4O4?0P8xn?F8iJMSX{tbW)7vu~?VLRi1&3~c^28mzp14eQ5;-r?_aT}JND zalQ{bUyHzj?}5Oshi_r?4S&P>tu%g;n?rx_&>U*qUJNj*t*He#D z8a9r(8`gg-DU&aI0lQDD80eu+ z+ayyry9!pHZifTs^TUC70jyp56>R;~CD`>`6E?1M1@?O;*iW24 zJLm1!hPA);!{*a!!s^p4aPar_f7jWc`owg!n_pgIfArZq#{;`$txTNzF|0kh8usrz za6HJXtdzsY&-}qW79JGI}=dJ;3M+N2&`ll-F{-Rm*_dJra5w5p>w*(xtmyMsKg{@P_d?^G66zb9e5e9xI+{n1p8AM@q;&9L!%A6DPrfVKBB!Oqhi zu=VD~3p|f!t&E;34I3vJ3oGx+$c+CmSbOSYug6~K3@hJD!Rn{4VC7X&`*WUI3)uSK z+_3WgOW5!K`1h3~bz%2wCxf+@Yk7TmJVg0i9S+X_BLlx{T@lv)IqG=w{oRjc?DboY z5AP=*tln_GI8XAzK2Jt|-~AMYV84Ifz`sxYq%ds$qYeDO{_LY^0_)Fbqh0-699C|m zgz>|_`T#Z$lnA!&$@q==u*k4-BAV^kPw#HfA3H9<)VGGh!ROOHeFiI!olL&6A#=28*ivCqklg2dg7f;;ZHd) z`_Xm!Bj;Pg+Jj?Y?UQd{_V=)-^1|lRUti<>v45p3Y<a&z~+0#!TKAMVf~wFu=;pB z?7aI6wjS<#*!<61*m{xnu=U&tBGDgvp+4+-EDc+)mIT&*s|n}ro-5MBfA~G>Q5wU3 z|8gsA9Iy*KfVzU|u=1iWtUMeJtA8fK##1WG?AO}}TTlDoD$heact5N?|0itR`6TT8 
z`3ZKuK8MZkl!C3-i4vLdBo7!H*3XOtyZ`pu-;6Ky{V(b7{hfxDx7TFu>rXh4uaSx0 zt*}4yX$7p^^%Jb#9|s%X8VV~<`@`1141_(;^J}lC{;wabU6ajr;%UQe4}X8o$FZ>E z`_}b`e=-?1UU$azhrFNQ_0*5Af{lL8Vgq=U_VePZQu=P24VDtHXy&ip(7FJI8 zgUuJkfQ`?NfgSfJSCF6N-)wh({ViBKr8jJR=savaOlR15$9`CQxgP9%Uul2d@29YK z<2*QMUzpFB37datNV|UWMA-P{M%d?U1eahOwEZF#THQ0F4 zSFrwc4cqy?h4-VKCR?AT{h_?80;?yp%FLTjVAp3HxM_m=Q}e;TpWEmQ|9)IJ7@zn1 zWEU^P;qgV^`wBRy|IPo-fvuPB$Mxo4hRfKK9sE7w=e=R;^^JF$KWPP9Z&Vw0KVL(c zIGg^2-_yT}}5%8$sjd;ZtUOZ4YFg9q5Rj@Lujb@4CkdbtZ5 zPq_jsKQF=N1N0Y+WABH9{*(FKov?PnpR`+7veo|K@n7xHb+CTs1ls+3i(%_G$J?KH z^t=fDegB3`f(rZ z`%eeEpU?P52>-pi$o(ScmcsfS2Vw8`H?LG~Qe24MZq_Fa*6YcsZabWdtSKl|! z8O8q8WA=fS!%r`8KYR~$2y8z6EbMtu>tW~DR@nNQZLoT4Z3H%cwHP)|_y_H-`(NN- z{9Jo#4(vLfM!WXzEZF$dBv}1D1GfHIdsO>+GOV2@n|~Px2jlDdn}cA_o0&|z^Z8p? z{W24FzhW!c=b8&ESDL}juNAO)p2o0txc0d5u!gYyzW$H;sXlC+_%5uSR~y#;KW=~O zk1E+e+<()bE@yxIaP4*XpUdup8|&|}&dR{*5B)#m?;b8p7?|w+f1 z`Z?d&&U+pS2lI={?|yLb{h7b`4z|8voxhL%Y6lx1cn#}+eFg{fgPw;|4G#Ki)>8$T zc%b&ab@sCRKx@IqrE0_4ggl z`HlnhH=g_M9QQ+g(M5j`JK_aw{^u6#`+f%N|G$Eb2RwnzXJ^D-aeh99mG8y0Z}@w% ze)zAj=aF2H$t&)G1K(SL&1c3&KbtpPW;^lZ=CJXXk+6P9FWC5VPguD)9oC-t4AxJW z1zQhRBLb^0io^cS&-TY3$N`%-^Lp#KGr`7bm%*OrmKIiD{|T#qGQvT7N4+E~H}7~o z_DV|oBNt;~-zewe!{(o|!s_YJv)n)5&ngFN7d?fQZ&hLIi5|h$dwdRiKNsy!{HZ4F z?;eKL2c==`iZ@$f>xoO)j=b6wetj|6ddfMl@2fm)d}{)%UE2ayFV?U>{#`fwv!9|A zto-aFqfZJ&Wc=P#uy$~F+VzJ*&xF0-p0M-g32ggB*zdAjfb~~p_3at^lV=|b8-F_l zYp;!mwWs#M?#KU8X1~F1*!;!MuzEt)?_CdT@7v#czCEz@vU}{0{q+d;y{&~kf9VZu z-2PV?J1t=p?w|b69N7F^HduW(0d|~=z}Bbqfdh6dY&@$S?7XQCyI!lp?iX!kJ90HI ztR3GUR{ti0U3Vj3>s#W(`ri{^<3Ddt^ZV$1*?iek*m|@vuz&v{tbO0l{#Ez1xdR*b zX$O10<84^|^BHU$_zJ8%FAcl?cf-Dyw6?R~aTRQSF%PU=Jr#BzM?u*A#p7Y)eT8B3 zk0b1le_seTPu2@I{^9s2cRIkHKV`fAWIH&JcZJ>G-vl;qoDQ}=qamz*N(rlH>cIZ} z6tMPaHCVgRcIy+%+0M9n|L%_}2K#&&VeNxLu=2GwYi^iV^_J^ld;W*_r+zF98yCBM zir?csCWixg3D|r{Tv)xf7FM3clKEcna@hGF16H3bl*ymlZhUz*9Q-}+t6v26`!Zc& z_md~4zvokRf{ho4z}ht}>`y-97UQA3{}fj49tj_xaM##i_ zPQm)OV`1|_&KL7oi)|+!@YdgB|I=z%x&NN;W7>|8ov{98TE`cCxE&782ldAH5!iTR 
z4OqLk4*hKpJpp5XcYw9GH^82M&^G+{mfB9eOMO`Tdyb6#`w?uvxv>6JPT2Rk47MIO z1FT*22dvyp2y3rwv7PT_@8@}pUtWRDSFM1x(=Wp6^SQ9+1?+*fV|v2YH?M@N%e z*mznRSiiI`Y`s8M*!+BLSUq3f{^U7p!1|{F`HS3@eV$>oJKxINPW*&VEw^? zu;Z7(cJjl@Bja!}Ve98Q!>)&S$61f$Bbvg-4`uy1pVxI2oBodXJM@|Maa`EA+gkM> zai%2xKI1${eTTk^1FtxIa+3Ch@9ho0XMU!n?f4tlVc&m2*m&z<+o@j+*efsl?b`wy zm&{1J`M0&O_Fx*=Jkugr{g4TEy^OS-JaTH7c~GbytUgEtm0|bS+=PSITc2?SHjdwvcIzF_!`4x?fwjZV!NwQ0 zPtA|pu3a$_*3Uj1fjwVgFRYzBjCSioHp9Vqo$q-dY`!J%Jbs4=4%)-!5gSEdA?4wjv}AJg0+8g@A?q}!(J5EDk*HcYcJ*PkBd@lxDUzFYJ+28%*Fz*xnZT?&Ne+Kq_ zo6pv7*bKWqPQuFPF|c(_hhXiQ{;+Wm;~T!`-mvHUt$;m0pr`HJr}ZD62ihGrj%NL! z^&cH!<;NtiXT7$Cm8;eh>JMjw%}4zJ6NhY@!T#a(e#XO-!NGj4=OVuI{c*30&S^%LrO zJ?pqDtbI`p4&>2c$FTrxK30F-`l__B-|y=JYd^j`!2O5&@A@eZVB;sh+8=-PFIauO z1-AZTIc!|^I2_VxV9PnM_RVcrJM1S|yTE*j=f8A^eZNcL5dJF*yS{e9-gj}^Ij<%b z^2z-v*I?PJx&1KvZEDNdheu%Zf1PE{PuT%0k4M1nquvPXUrw=|{LU)a@2RYV z&F{~EosU;x^?7etJ4yddd$~1iy;Cgoq4A@7u=YtRSUHpqcAw7swj(c7z{by#$=t`S zef)jwZ2cGY$a7eGuq15!;{~i9*4_xV8)t3e^?WbrKJ7lg_NDRrI}uoW$lq6vHL{)f z-BsB5UIEzs!Si6pHv{axzLB<*Cy}jp`3BZsDgk?*O##^Yl!>tR$dkQc>v;ogziY5^ z{W5I*=_%OppM*W;I8C;l@je6leuu;Q4=tm?-N2v+%NSmwmZM;!^*q#u=A)b zth~+)``&(pmG6aN{fp_abq%Fq?Ya4|`lXERy!XYh`QEay{>$nJY(3v5*!N<8^H1Bn z9=YW2IX`yTj(=PNR=@3nwZDtNf%Og>KgsX)>?c?b2gk#Dv!$^1O-kCG$KH?jZCu#+ zKlK8T_*=%*W*Z7`S=nx-!{np_)95chs}=xu=Qw*VfUMKg^eTLgpF@>hpmTr zYCG|p9!Uhse19;kzHAM9etLh{dYErv?W5i@`HrEs`+Y0edgG}w z-uqD4`u4oA@BK@e{b4y_^VM}=<$W&LctG_C z{hfcGz^>mywEO+;aCre~j~3`xDn}0y~bgVE_JiuzBNOWzJ_E25TQIvp@NQ8L;)u zD`E5dt6}A*^IN&S*Xz++Yhdl5tFY^L6|8^xO2$524jYe&{~rC(+lyhJGXw1JI^O1c z3&P6HDX{U6I)Ei+<#>3$)Dqb9)5^g5Z)afb zj;yftA}?U$b8+oYJ}^Oi{vO|lxW7AW{bh%Zhi`-3XIV?e-kJpK|1_{a<2l@R;yJBg z?dH~APkuwTe!2u~e5R93J#JiBzy533_}lAUj0ewM4>tdBK_*XJ238-fhmBJgfc4LQ zfvrFL!0XA6kA%%nB!Zp4U0~nC-#nk^{ndqC@7rOYuPW^Nnd*3OzJD25`+gj3{=Fn@ zzIiyT+$sVaf9(#te>Ds2diczC?D9BX&;Ice@`y4aFLu(7y_ynsKHh@$gQLRk-#I5U zZ?-c(y#MvE``t&v+EpW9bNerzpSZ_PSU+q7tp8FTw%(+q?Tk;T`hj@!<85K{cP(rleUXe`IT6<1 z?hC7L#=!co-@?`(j)h%s&BA#JY 
z)pOXqNeWo|^Db<@^U$CCKK8&7*!tY+uzBGFu>Q|Su<^gmu;+E8gUvfug8jP-Hq#$_ zJ-O}t{k}4F5V2tG&JM8iX%qU~ylEuZ@$3VKu>WW7Chm{>sttRewP5$tl!o=^vcSf} zbHK{chkwAV-{i3I!FsUrI1L=ae+gvrQAuI_$@3dopV%>}V4rs#Z2fB{*!vj`tKYN1 z#@k!L#=ok;c^yR?DlLH;#|t>;<x$Lg^5QhC=Ge&xwE z{GRy=*m{tea=1R$ejNe_@;Ua$zpW#4KBfJY7bW3K=aqdU6F;jBTgO#8y#K3J z{5{TV$_8tnUzEuoWrnTy-vT=i6T!x>$H3+l;>zfmcChxj@UZ*PgfTZ?7dHvsnhlD4#au6=aa{ZSKO^N+{z&&&tU zf%Rt>!^)KW;VdGnWz{=~yu=PiqY$v{S8-G#z*>>mu zbl7t!$;%-?vw)nMa(5Ao-nS0BQ`e6jj55*&;lXdgUY zL4V=_M~FWtXJ){DKeQmMy;=gco^1s2ih!NEoc_p{im>zRH(0wVDy$te7`DD}KJgIa zw54G6V+`2%-<;gDCdfc?I>tGse{7N=Ia%nN+ ziM>+=cKu&~wF}+vXFQ^cjJ*>BHeOnJ5sd$~lk*9@zi6=Wp$V|-=fFb#KI8HUtp7C{ z*1t>vYah3OeI5?B4cOte?a%YR#%RZ?hmWWc@x^F?ct`>hyK9%6vn5L!R|-P4O{BCOrn8xEZ3 z=y@LG1De{7{Z<$@-ccQPUFC$`fBy+=K4TZ>iqE{zF}~3f);`z*D~C(M?n@d68{duxtLNLp%Hs!fxgXDcvp>&w9#-yr z3>#nF2piuGf%PB1hLt1FJimqd?q>GKPCE;0Z&ZN;^#ic`A2Q3Vm%gz2>e(FT2Y%7f=6cWj<9r?SKMi2dZ$1qxSIfcrQ*mJB z=;~QqPd(cs&g;>CoeO*YQCPp_C)oJYa@hAb0#^RyfXxp#ht0Pg<@_K0rE2zPy%vS7 z11kV)pWNmAAmzJk{A)9;T+0J%ul@q-2jzy9C-v>mc^$dD9{({ntUZtqR(_|mop@1U zSb32EHh)+W_WfPpd?V#s6WIEtz2WB{eFdAZog`yV_l5OGZ+du+nr`lIogAC9l* zbsBd~2W!7Yww-uwRam=y66aMJFa8mB{5-Erd9w^QKA2P{zqSTe{@>v|Ed8HNuy)r5 z&&wizcijHe$qa_wk8=yQezyj!9eW?vKJ`3j^SHJv-ws}f*)MnoHvc~bR-f*G^~b(~ z&9^SHoqTaISot{+R=z&vygQ%wb6EfDAgn&E4(rzpwVm@?%XvNaQ)k$EsWPzos06Hk zQc>od2hTUu|7+s)*pGQ&?ZfG?@xGCs-_3ZPgOyX?$jI?)wo@Ym4~-pPZ$G0w0D_B{65u;&y^hkgIkVEy$UVe2P}BkgzWh&cVXj~ zJK>zj`(88RCrZd;jErw1p8sgwllGE+;uzTVz1DW**)Op1r%kYRu-9Op z=K!p|-h%Nm&UX>kzgZ_!uXzqO&X){%q5ixKTkkml_IFOp`1=!K?|(CFz4o85e((a= z`Q>@P)=N%@-H$R0dFB4xZeGv({txzh$X#LWyK>57&Qt6QYp*tdJwK%fY`o+<*m#!L zyZ`Mc*m&(3sC)yKtQ?f=`b{zp!kxWWzl zGe1(o+G~HouETh+dU&zzrc&vl_z;%?FHw9@z!Xt&pk;d zp1+XsFrP9Oc0O*GnQxs!;b#~*!8&q{+REH?S-8uo+s%1ya8+f z-G$9(zJ-mC#6y4Tuc!Crti6D|%>rvbMa3R5 zKa~dd{pEqpn@O{Dm0UJI2GJ+KxW118ZNVfR#^!wU3a` z8DPw@y>sC!Gq%~Du>mQrRvZco^FdQ;f_ja6ayY5xRU-;>n-#Gf|7 z+RyX3AM0bE%E<94GWFb1@du3;4}pEYEHe7D9_(}0k(qy4Ve2sl=|6g&G2^MeUJ3hs 
z^JTE{?&Glg&q~5gIY%ri{;2tvRoZv#k1M4A%5$`U^>03fU01Kd=SM5p{mvU<{nJjc z@rTi{@}j%_kuPtVZ~CVbVB?YtoR8F@%z=#~6_N3Se}|1r4^`f9AGcxarmMi(U!kz^ zf(FPZ^9`l(xBdO4$Uozatz`Vq2C(zKFRVYc68Y)6-vFBj?h5Pw?t#sNXNUFwj>4|D z2FP>$-(#@yHWRGfy&rbI+(F-4Keq+;eNBMP*X#dVpU@7zawSfI!LaMg@4q;|y+8fc zJJ=W2V;1y!>NSUIe^3t>IsEz(u>+UAeeivp;QP^Eo(uc;n)`dirz^tR+rDq(uW4cHuZ&k1_n&r| z-^1T41{()^d5QT#eMUB~$DSA>vp(L3tylUO)}DzE4`82316V!(fca;>W)G}A_y*Sh zuEg(a-^KR#vD4Q0_n0q9Vb|wc?#FmUN!yth7hvMT`^v-C8${uGUANU?^S=dQ*F{6v zxMM3=xz-L=ua1Y6$30-|u;w>l{q`xa6?Ik4ll1a^Pc za@hG<%lVBTG6U8={Z%HeFb-A^y?~8dcZaq6O0qtT|2KxU8^3|I)62p7%a>rU%L{AY zWMI9iH~$6|nNW1FYN{3Tu~4fUUP{?DgUD zRrwnac0XEN<9bcpt;&_fEmqx73Bz&ud}x|07}j&yle9!X?|0F9l%hm+ryt zCwqpz3D~dpXT2PPjaMIn^}koa+5u}|^VL)AAFdy?kK4lTQ!MWF)PppE$F*oQJq_&n zrgdfFQ2+3J%F!CI_MGPnYaeOPdY`|+`ps2g{gaxG2XUO{u>Ns2ug9M4WIOqVlrr&* z{;+wZoUrF1Xb)RI^@jJMe$&2iz8r_uPxE2xqCEdt`MkmFu}`MJp5MF!)_?W;C&p9$ zft{BnVeN}su-_|>1AD%}L)iVp&KLJl$;z7v%s1l%Cu}D_Fvj^uy#ETUy*5k6AAf3p z?1IVmC+`%~_s4wd3sVMFFbnMcm4rS2#CWRfKOyWsr9!axvz+;Fe6cYch;PB#sZC(# z{S(&{alFrA>&@=M>Z6LV^%QC8AHshnVDG28Ong5pY~5v7*!7nH4%(;2!~Wy`o!^&e zHxK3b=#RS|m2=m8Ug~3Fc>mN<`20crto?lnc0WNQ+mYj^>`%RdY+h{-Z2l(;ti2%{ z*E1gD{`LK^@q{z5cKTtj$3Gh3{UGnp!^*wD`R>SP$J_br`Q5?sR<3VlJbd4Q@x`y` z4yy-C@w~pTCa`g&a*hZ2p{vL<<vQB7ls4zI^?5yo_w(R#dEOoA{VUwSfl(5 zA7AY;{WaxydfL^8m0`cr5Cc}9Mz=rbr9bj{@yAXfuY=>K{Fo2>|F_Vt{oY&o&i>Ga zK0oyXbztk0et>-srC{x#Z(-$10a$(HdJBH~(Gg#+KSmyxgWVfB>l>ZMTPEBa4<-@)~5yv6Z2 z4x8^i3cH?um)TFU4tD?B9N6TT}8`X7y8?c*5U59_54 ztbG#)wtl*j{h4R)!P=)eVCA2z-TYnz_IyvrJ9xeM#6z(2JFdTvynX`0}T)@9hk%ef9zDc%OsSUy)?& zofEMBR1}%nU2oJUR)GWGKZTWRP2hk&gq2TIVfDpk>_yLu-31%B?gd-#?fVME z+hOxV@fc72=Tq3D+GT}d&xP-fvqe0NJg&?fSm`!v4>rUhhY8jHn4i)Dr|lG z`>^r%$FO;Vk748QhvC5Yu)H35_=}7@NWt%$pX&(g-+cpHXHh{W-sbl*1Nk{vJsO|; zXWr*^errFKhTYHeug{CztppnnIRzVcDg1gc{p#1XfCKR%*nYKS;-T|l_bZh3dhCntJ|Fp@9A3}*Ee~5y9|N{-)bVgX z_dVv1btKhc;}lzA<#Z3&da7?>&xctoBiD=vE1!45=I4#ynjhT*n^$#1Kl4I96> zO+4Cop#F>c>?&-0;XcefIefx)>L3z2zW7(GVdIS1VSj%rti1Ug*1u>DtIt}%>Z{LS 
z<>9xmbq2=Qou4CM?dZa`6aSiHJNvG)!sfFM!Pb!{fYrB&ct6J1u9M$zUFx422RH`n zzs!b>zx)X6Hy(lYPx`_78~?(Na|c*C^b$6%*9vxgVzNGr%QS`cAJfC;5x%fL_DWt@ zyZpAfp{0~)@MY9XOo}u`(-{yRM>os-<#1sP5?WX3J3EC<~Q%a z&ae8ko8Nr`JDx3JpEC;MAHsi~y`Fq)bXfV*AGV$|hK&AN3aiiGa((drmH*FR>t~JU zt5^Je=R@FoI@H6zgS9&j({BA#Y<`cr^df)2?(a+vn@^U#{}26p*pusE^ZkWj?Wp;% z_F;Zle|P|_KbIC(j`fDEhs@yh)Kheal^0oHTXn)SP)}BxgIzGYg$&rVfo&wfxzQO!5-e&;uh%5U?L=3BbL!QXSe_OU;Cx%;&HeBZ;$ zvmO2(>w6&V{$amgr=IF&f8=i;*nj)U*l87E{ei`>Nv-D1zQ02JedBYj zy`FWw47T2*5bS=6*t`$ruAD z40~NA*z-DS!QNLj*!@(_7yawZuyH2mm-bt0Sb3Py_VDkS85c&xdm;tpBkW)_?W;lJ0k0 z4r{;WftBC$VfAKR*!b;u*!)ai*l`&E8-H5{n?LOY8^>NEvwxvgc>8!bP+tNoZ+gO> zCt4b|?$3O`^&L53^>IyD`{;c*u%BEez7|JDKK#x7^LG!wo5A}b-g(gHr;gwiyp?+D zHL&~tZo5m-N9HLQJ`4OYJ{_j>a4sbTG^ zWw3P^e*eqAzs7dvuk{7N-_u@oL@a2VmnRFYHhJZ5gaT7m4@h zych;+zsG{j$9IFRcgzOsFIKca>$8gO#7T3(>f0}2^OqT6{T{!cY8_T`Sou5$)~^ha z@mJT%)aN{$#{I@>+x!5mU49I<{@@~PymcpRzB!Wd74~!02KneivC>i7PPH=cvl=SheMsjuh4K7VE7L*$F5!P*yJ z!|n&0BQws!VdIADVBhCd*m|8Uu=Pw!ZAahlg53{!2v+~>x1HxVziBRCS*f>~8SbJ(LY`&`y>^^{Du<|4+tliWaHeT!Zxz!J~z25u^tUp%6 z{?u=mhJAinf2*?X_-Ack?4qAqz{XRW$=IWPVdqCZ*!eRM)(-pIxHm}qcc3cj@ z)-!d3t>jJEucmOu;_{i(|oz<}Mpy;e8_2VK~e`lA=V;$R(6P@f&eRzLZ zd!{w4T$l(OH}?DW%Dw5Z_0vVYp6}JHfz4AihxO+U!q%s?g56*85Y{fWKEiq1FlsOn}_`ocKlYrL3!`&tQ`KMCj?^1zO#^YRky%D3sZvp$}{ z+7&Zo^ldcsoq4O-u<@vPu=dV!*n05~VEv9wu=@&X!NyB=*iJl9f5Ci*?^k*AHLSgQ z6%OG%`F?Ov9{3)|z>ZfH-jDP2Cs;kO4mLkD16F^&cK#6`(LT_R@_X>+UsiZM>vJ@$ zo?ZiMFRg{m%l+>4$gh~>Kdk542jlieEP%DY(pq1DejN)tuAjo{x52Ra?GCW(yeI5=58Gw(zF)w~{}Ad~JwKxa z?EFX!TW^>b*1yjPd){mo`(v+^g0260&vwoeuL7%YBFT(*7n%6YA>Oa{-yqof%q6h? 
z*kCv~e~{BN=D_+_Eoe7hHy`%=gNrNCIEg&G>t(25BDxL3I zcjo?H<{v(b^E~%Han3$_fA>D;YEL7>>Weq9Z`ND9t%e;hpngW1PAN2 z-On^Fte$+FcJvUH`ykGaM18vJ zHSSKLo%;Xs)~loEw1L$tK7*a-cTZ$I;cu>h9l!6x=6&wN`Z*gX@cfDAGgB|``qoCU z&WY`X-Aj30y`g-knw8^`Fol4-?ifz55(6)t?ws33x$mj=fm2g zQn2ss7Oa1o3HH7D`-iKl99qM^L@$?eg$&bDdI}bL3-QQ03J=TPMAI)I(;>z& ze$k!hV}C3I`~78M^C$&i{jfB!&pi$7zO~6<>oHz@#rgQVNj)Baaw{D8{RKD>x53V9 z1!VlVQn3DmpYOQ7`enGis|0Iz_Q8&iMzHH2=E3&IUadM5p(m9YJDBJ6%Z zIcz6Sa8PDGxH*FNi~WzrdMf?(Ut#mv4Po<4tzheWCq-~F*!l8r*m~I=!(ql*N!Du_ zPYi>dAA7;ZpA}%^m3^-7!hT*G7VZzlf5ZD=|E9^X`E>U?)}ED+BYwZd@wtB}*UNrl z^;j=w{aFXt@q81uKH=C9z8@Vp6YK5VPh=XbU(*M6KaiHT6Mvk9o%h}j=K8Q>=~&O_ z`rR$Caaeho_xBBKKBWol{dIt?pK^bH_j||->*qa%)iw4F;(qWC-LKC4&;r=`y*sRb z(Gu1l3+z9H|5^}MkGV&?`ueGXyifXl3HH+o;cp`B_?`p%{tLpcf9%2fM*WhB1H${) zf3W&g3fMTey!-3WuP*n;exesFb^Rps>@?W-JcIR>+Wl+&!tGfU)^lpl=ELSwdcxMX zeqwvXdQk08O&L2fll7y<$D8}|eB5uD^{4)xOLbWLU)J@g)XPPO^>cf`=5NpT;rdxm zw_K*)^A}kE>p1Lw*mGcYsTVT3(*#)iTAKB;_V*#4k3ZQ1_V>dF!0HWu!`1%u+Kj_`$xFG;SwC!PYphr@b{U2!0HOeVDs1kMwfU9Yp2f9 zZvDc%gyHK)&%o+jxf5|d{p=KMeRkc%To3i9Ct&lK!;)~n)UzLfT^}L)9{;qR@s%d& z|9@U!eKV~8cYt>Fk@B!{wbyUFu*-JVM|geeRmGBVf8@_@!{)OK!Hz#aU;EM2$TPU z2Ez825ZL|XK7@no?X}bGJfHaDi2Z~0S}kp-?$q_!`e}_|?d4CfhX+^e_?fOk9R+b zJFxcZ8ROM@#M`oVOr~Dp9&G%+&hgCqeGIEt&xWlB^ZU)`PJpc+O(dH)fUVES20QNh z!|Da)Vc*XXSbd>2Y}_&&Ha_?i)}NMx&sYEB1K2v1rM6Qa{|Nh~T{;ds56p$l7r%h@ zr$b#Ik9|+d`_(>N#2)JZ74rR~PpyZ|%U6-H>m6X9e;BOpl^FK^C&JdB48z{~`w4Sl z`)6TT|6?s2-2Y6S^H;_Ef!UJk0RwTm<(0bcMCIbzt?H z*s$@{P}u%=$o28es{>&D(M_;*exqRL*Kx4*G}C47%l)0*UtWcj*t8ctUpr-Hoo2qTVD|gcE5!X=4*c+d^P@%`GX>GfjSGn^!HYbuMWbl z5AG{7Pad@$Kd>k4csvP*@Rt!bo_^}_dQxW@JJ%f6uWbbzU-#D^!XDLzUB9^ww*IFC?EG*Ec6=AM z9Y5|RY(02ISUX-`e~kUNio^DoPWof?k94s6nGJ#Me{b}UnK#CHJio_v6jo220c#(c zz|JRM!vVb!)_?qm@6$h+1DntO4)#8Nfz1y#hK;9=z~%>1d41Q9wm&c9XWioY=x^SG zeO@2I=5-@+ecGFfGI~xySiLH#jK0#=`$4BZ!u!_mX2<)$y2^>Lc@}?f+C1e)u=<3* zAFUqR4z?a5EAQKQJOix0R2p`^P6gY)8^PXJVvomeeGj{SDXzz(r}+Cj>c=TzpT}m{ zJV{$CjOy(EGi$Nv7M^H>yE`xp4VEA09s 
z?49{5e~-($l{FFeFOv47&cglQz{cGvV09Gjulc_0u=#>8SUuC<$8-ICC)j>i2==-{ zVeMHlSU*nWp=i}5e@l`h1cs(|(o}Cib-+#$`;<|;W zT))?I6?Pu^7q;JDgpKq3eHH7=_rltr%ib^b0o!5ix%;~s2kRf2KXpG^fB$AB?D$&g z@z{~Yu)m-14Xhnn2;1-cJs98DA~=wDhs|s0f2w!;`(4%(_<4c-B4Fn$=S%IRzu#ql zKLopvgujQV-h2|a4)Kuvf%)|Xtj;_Rc3-s=JP-5Ztzq|n$_ZNJ5A(n@&&Qw1ATw{?gte<-j92|_=Qqb&XX0_~VK+Ds{}JD7 zPd35&W2t2Ft@mNqOJ7DGus$@3;|0BAGOWJhcu^;LjsBn>R@(7`zB?JVo~{vWy+lbk zIG&x))GvIVAJXo4dx)OqI64g*5BHVPabsXV91qdd+n6s)!rJp&=y}fPm2F2qII6zK ze#3QPpZ9E;`KL8(U0ZV4_v?J1&hw}GBlFJ)&nKU~7Y6}kj~@D%je_z{k&V;pZ-==*tm2vtUgxK`(^*0 zt+0N@9nLpD<$P%ze}wP1ej*W{r$4vc>!IEvjf}qY6>NXV1_$a%Ve23B!GZY-w%)Kf ztS(p0<9WZeVe`@%VdKG8o{xXK6a7}Z=kwC;Y+EhAZgz`iai>+Uw)6>wgk3->DCsl8LJ` zIRBA1y$x%hJ2*eG9xoZ6r$2lUwjMq&tbh0jR(~r2tIwrp{xv?T0jvAhl<9BHVC$)) zqSxrZj)t8dTfzGK<6-Szci8&MsUFYxnjw>)pAPE}ZFu*1^ttHhN$&SDgm(LRc33;) z^U+>4hmDIH!p=VfVe7rC!PW8)bmmVLH53_Ff8`$Ul8~HTn z5AOnzgg`8eY)nfRs?>^w72#=g3K%(~4FVdIbA$oCmfwSv9>fiSvK z(JHoM7hAwSha#~4S_+S6o=gmTeb3EDQjZp5d#mvLqx1iLpBMARA=<46`%}g~o8R>Q zHo&eY8ErfDR5M`fmHNTH&oANs?T(XM||1J+)eA2#oj8`i&ZJ-+%!D%gHK z4Ay^)2|Mrq2y5SO(O>MxYhm^DGjL$M!_Mcs;o$Y_7p=A(eg3qc$9Vby)}Hh_Aof`d_5oi!0-2ceG04hyx{unx0PY{CpZu5 zw-0#t@HZJxO9%d%2i;LgVc$u=RI?W$b=U@1J^^F0l1a zePQRhY_N4W$2=e1;x6@z+WV}$U;XN%uyIdi*!8AsVC%ix!}{k_Jf40#%;V7qKY)FH zD`EXL*H`P`?eKhbrpflPib7i=67-|@ryIB|*3lg&(A!~MG+ z-Q!(%RU1}si^}J@zPb-=y}}dPx3k~cB-s3c_DcWY0<6Dt54Jz0;Q9Oik74b6J>Lhq z=W|#)H&Z75y#qVHKY{Jv7h&hak-T5`dp_*>*wI|{2iJ$pfqlMxVe17(!qy}H09%j! z8Ek!0Ec+Y&Nln|Sd#VcSzvqJWx7~lpc>gK=Q~m4^to=U%2lpQ`Uoa4MpWAlyXYEBR z*yq_BcD&Vstv~qzHZHl(c(Z=%IP87|lVSZ+*EgxZz7GfX6NAmimw@#@E5OFDx#9n< z_lvLx##gIoSMU8t`+@&_30Cjdf3n_iG4{gWyU_kv-}n+XpEcOyiSN>2pX?94V8>G* z*yG#4>VpShzrQ)Geq9Os7w}79{pnS(cDW-Q*gp{ash-yp)_zWuBm8%-|2k}ZJeqdr z>jc^QVu(RrjZ z9IQ`tU6}r4u>McG{{RkNzj>3`_)o@_?myx@k{Y&twXx3wJt+^Y9xx4d{Xr?%da9E! 
z>x#Nogw1ng!5`HARf65`e-P~aqd#Xn+!X)P{-HnTd_D~}KUmOq>XG$79q&0{KR+q{ zs`WdOVC_|9*!_+4@3dE8u=#;&uy)w}W6WQif}Mx%*dF0O>kmZ5Up9VUNxSiPcA5K{ z0{ebj$n3}RIc$750M-t*h6DBQu=eF8?_&;i>c%JT=XMJAIbX9~UCVaro9^09oIDqH zez|Wu`rdHZ@pAz-KiEEkt*3H5hW*9-)#8ZrlfglM-+cDp^f%-G&;2~|uZv{nS@%mc zPN@pp-}1wb*R-(x$NDql`G@v*`oRgG7j|=j{hR$Pe}UaUcPOm?;eI>n#vjY%6X(I! zA-90liRQ!3W9pN^_(S`Cf$?je?=kJ_1^P$oZ87-%Kzu=$y9Jf3-ZIqdlT0uIKb{vOc>uzLD$#7jZ_S%0Gz@vQY+@6m4Eko#fkpGAV5 z5Bm^LtJ5DMzIT3o5fN`MhMo885RW^qrrVC5cER|acy2hXUmb#8pq^F|ww`^s^&%1Z zb9JA+=qt`|v(ayye?rx7)Lmii)Aw*NpYHKb)N|;!4QSWCw?qGNzR&9U{2t?4SpViV zdXn`mxzLl;5k8kA^kYA_9IXD65B(`&njhZ0LGMz3Tmrj3V<&7p=Ki_*_nl$&g4eL` zDY3`%{{6jc_nSIR{>#tz_t1>*m%)zfg|PJ~<}2+FD`5T039!1-GuZmIuVDLMZ1qR# z9zTP%gUMm-Z%8x{b3Mn{MsAVUva->>)CC0 ze657t=eavQ%Hu=D9? z*gC*Lu<_O&*!<83u>Qem*!@~t!RDhb!{)zQ!|EGPVc)06o5x8?e$jE)9`?F&!`2sd zhOO_Y0h_Pw0XzTAhmBW;!tU?*0=AxK2CV;>n0%=3|9j6TZt(Y@tmpVare3NVtlt=D z$9|blHJ+JHJMR9znXvU;Q()uWa~_ZW@fED!lahRF@cZpgnPBbJVA{RzeCBJ>t@^@_ z_Y$x=N-x;s%fcb#ue-t4g~_fbY6n|iR}$8L_IWuj3c_B0bJ%*e1RhU5sXnY;@SN*2 zZm0vRPo0MK=Viy+aM-xq^8@>H!#+2^Pd_WC?c`@W!1kMjo{!!0eYpPU1kcycX#qRm zE`r_PsiBMxFcx-x@cYd7{c0ub{r88}<$m)1_`X)K@y#8cpL%2g zSo`t}c0F%?SUolHdspZ+DPZf?wvxa1dputM`lb1P>Iurg#tU(%2hje7!ur4en*V1! zw}c(v{(gkNN3jTY9;gMYmj|ww{HgoxyB>ZT?LPm(9*Lt{Z zZo$5f9I*B2PhsQVXw3H*oQx6(iTO~!v84BP)U!S=TwVe8*Fz}BxFvpx&| zXESX6?h$PN-!5aPQ&8VzJ@D_a&$|WeeiYYW^~-Ls@!vf-P+tODPyGtE-hLVEeu$a9 z{)qZ1{n&i4b=0S5cYRVx*t*6f)LW^KX>YxsTCnwR+8cilbO3C;r@c16H3c@`HU_qy zWid>iH}ZF$kKXh%tUj^`<~g_D1Z&@R*^XcJJ?#E>y_mnOADafNPkaYEA54VRz4kc2 z;g9!+wettxVds;Zu=Z>Q?T)iM@38$QGX8}2;wklIJ|F9+oFDJO+SfF&I^Qx_|2YM$ zKX-`FGv0db_fZdY4%Tn~6V|>if~{Y%o=SgkDa`z`=m4zWz07u=|8CfP@fz6t!b~{u zdpEHAS9XStPy9af2CsQO_QSKV`S;_n@9QRPes(ErecoHor>=Dpte%jP>vzBSFJS$W za*vEbUVBjA`aR;_k747B9&pg! 
zoB#M64%&D3XBrAS-&N&!^HE{2`PfphdS@dUdr{DK;=Piv^&S~z?(aR=x{6e=`a?z; zeLjijqcgnadbQ8m3;od)v>UH?qW}9ma>MSg))w}9I>P!DC1IcQa9F=Fk;h~2zp|aY zcud&&dOqxaFfV#D-tiOvgk9fq3wAx{&g9%be)kmEIJ#^KK9By_238+y2Akh%0sB2u zVE3O0g}$N9rb+bL+g7!LGh`C;$#)+fAA)(4k|9Y054`)@Vaddj`9`fC{MygL_mKZhwY z>l;SH>N%TX>w#Ls&X43md=nh6C$nV4veBaG*Xh?f-wC@oXH} zdYp!|8^3@a!}hQ8u=;m=+IjE$OW4kJCxX>03c|*-@4;=UON#}o8Fj6``k2onC?Dc1aSH>~*|_GvW{DhOHmYlAhzKpL^be@y7nDxnc7^ z|H1BOR!C-j(LLCBsxz$Ke+oAK=>uzD4#L(S@Ai1?!4%m1R^AL8Pk*Zn8}H?ZtwX8^ z`&>g|6JVe>bOVRekru<_u1*!ia? z?0i!>6Ya#ybz$w}FR=4iW>`I@b>{HvzxolM$2?m-3*U!cH6GSqnF8AnK83B1xdLlf z8p6)!!?JR}=--KKXWVaqy`LxD!{2AFY>ZF#N8JS*KM#amk2%YB)_?qHJMrom+Zp%C zv-5q#Z(hqcF-Ve3`ScjbAa zXFq`ThYrE|qpxAFe;aK7OqZMYPaV+&SpB^oY<{w_?bL-df?Y3A4c6aj3j4fE!p3(^ zVC#Fb!`63{hrRyRUHClKzn1fS;=2;Cx@ZYl`;Y$-}1Ja%9Y9I*GW zy6_y>ykag`yYMCKJdy-9{%Py+^!I46^&y2}^{MCFuexjs*m3#<_P#=3^W}GC_J8=Z z6XOe=^bBl&+XUPHe}|nP7sKwCvrR@XnhUE_FNf8Sr^@)fi(u{lIFDyt)qL3adj#w{ z!-cTxdwajb_Rnsxddh6unTN{5#zj+L=iBTucC#I9Ty(J`_lG~&7*=oG0XuKhld-$Y zY$v}{8&#t{mtrx5T zYY$}g+bXa+rffY)R@nZO5mrBlDibG_^mzQ^mF@X{=Bu)>_IUuTzhB+su@CKSr@sAT zSUZqT#_kP+{XL+o?HKR)?XtSSF<3ug9Bf{4J*+Od7}lOofPJnTVC!MK!Rpdc=`Zg0 zx34WsJ!o-Qe=!@ZK2s6)`Tf=A|Jn8Nzre=zWo+m7sV2bc2!&vEnVxW4)+Z%`U5{BD zHs2K)_I+l9)vxT2t{+GPI}WFL{j^{Dfb)6(eQam_-4QtWe0@JZ!Nx;Xun+2yZEdG6 zWCHfZcp)$B`;H0g-$(X%)|V8g)%wryf@ucKy^m*#1!(wjQSw?EB9GTi=~c`-@H<0vkt0hRv(rXwCD&e_D4hofNgToXN?-qk{$AVp2Y+V=Y`;qdo2Q=*t7qJ1zH&W880`04gsrb?13M4z zg7pWh%G8$)fPIfQTXH=4h0L(|g;}uAAp|ym+za;m?l50k*HRT$cl{IA-sJIk>S7N> z%%=%FK4QML?&LxXJ|8()^?nojC-)f(w*KJ+tpA@2f6#nNPT2hIF8ycv*X71sFL}EM_~Yi~^TF0V zd?DkwUTnntf&ZEkHs3i2_Whs6|JR==1v{QM!TRedJ)Zj>4ZD8jM#J#&J(hUGdG1^T zu7`Ta5ZL@!eb{)ch4B>jc5Z#HpSsIR#!qi5rydGx?~}v+9(qmKd2ufBoclAatVjPL z-mVOLoq1v7^IWig%d5Kd2mGzru=aN=?7V)G_|ST^%CPh0U|2t?a~-ai_1nFSAE_fL z4C`-nh4oLP!q%m?gdKOwYID7OaYNYp>-w;MdR*A~FHy~KdmjVVo_toF`$eCS^)Jg+ z!L9G_O^`iO_)!^W4JD*gZY#{E5D^X~r=Pdl$fh1Fed8DFFK->AsvQ#XA< zrXF=4?E5$jJ1;GdXx{<*oWHdlJw`o6y?79;UN>5%zcq#R7rMdfYsF#TQ(oBodlcCI 
znH*Lhy<36LBfg7bJNn;I*f{J3`iFUvt+4e!4`uA=^mm!|KCpW7GuqYLn!v`lar`{) zvj80YKA&#_*!)E@+O?BWY>!x~lPe8R7=cH$PSU$Pw5AN?EFUmFj5eYZUxeYpv2|Mv6L3mU-c%=h5H`~wI2 zA#6SVAlUr^j=({EN_K>h)9S_3Z-(%SLD!}MJvAG}p*2A>>+)~Nt8pq(^ z`!aq#=lQ&^Lbl^CUx3Xc7KN=}xdJcRg1TiAXY3VYote7@}e&2U08p73XJSrFx2zOk8FkA-{n)-_aB@7Z~wBtsE22T9skGS zfDQ%Qug}5iB2_#d{p=L1J$v8d8HYRJ|LIxi-}X1xAM~T$I!F7H^_u-*b%y`AU-SP1 zZ6_YN?ERyE4}q8uy)_$)gK$fUgvz+`l}qU z<9QEke_v6Cg_OlO5a{tWtKfta> zX#(q4ZG^RdIbi*fov`s?2&}!n1nb`}Ey49rKNpqxM|~wd?0SrpuzAhP#bJIw<~`W> zcs^{rS^?G{`V0=_3p}3jS`aoL@HuQtXHWCfIz(R9O8dJ^q6FNwEFts?7NK5;mW|6V^_Rh4puTmdR_) zhRt&gg#G`YVC_*mSbggfY&~l-nf}%ff6d=t>yeN9BQHN6|5bnUWggBa-%$>KS^u&T zY=5qgzwdg8PPt&#OV=Wv;C-gJ_hK;{D_VrKL@p%(Aeos&kroR3xY<{S!Oq{+S z_V=Jhz{W{iVBh!8u=DvESUvm-te&zQc0Y(83&Gf{iLiR+*upTn&d0F#GY59Rzb>{@ zKk^4`ezdWSJsntt@54UFfb~z;z{WX$r{{Xn!x9wb`-z+8!Cp@W*m~G0u>SaV*tjZSK#$HiV)J^duCZu1Xpo#G)GyOX#C--n;~8*Cn>D6BvI6Rdwy({}QO3q79u84pv3 zGHWiZySpO(5Y&}hBSbeu7 zY`t$0&qvSi0=xb(CafOv4eb6&H`DO_*gBZYu=~MnfYsYx z!`k!3u>B%;Df%b%{Zl+2{jV5oJ=$1UJ*O3{UhpYwd~ppnUy~Qs?@d>lcJ$GxFgi#6 zim?6qVQRjgI*eAZ&t<2Ke*YD0K4Ktjytfv%pS6QsuY1>aexENttbdZS4A+C-8V|N# zY>?3_Hl_-HAMwg^JpROB*z2hXs|UA*^-KH175+YVu$b+9&H|4|-$??S-+Kg`A2^Vb z&!>K?R5|X4{yG`9?zSJS-`5fLeQks7FBxF{=@ zzuR1}`NQk5`_E^Cz21bCXy?5?NY44__nlztMR&m7$0(V6!U9--b1Lj}oB;bimU}+? zI1coD^tmmt*V_|ze!K;{A5Ak@JvmKf+KKmz*iL^h2s>|Qgsq1u0lVLCOxU=#1+2bx zFB#v5U(^ORAA1RQzJ4Ee-uxX7*gsf*d6Vtf%cQV%6f0oI>7QJW^$I`2>OV(e=NsAg zuz4yAQm*`*W_vK*y*aNWZce2Cgdw+nA*!i#=tRJ5eR`<*R8~e8u=tA72e?pU%Rr-yJAZ@3#at-@89C$D`lQfz|Ii!{#IA!>(_M2b;g$0Gpqi zn~3A_8}@lV`HRA^^WH^RyYeI<*N?974{SVh61Jbck?~)4!H&;(T(5bAwXpeuEU@)L z^Webz1e@0y0;>xihpi7N4XdZ+@dM-uVvfejUSnzs%>C zV8`Fru=RB(VfDM2u;cxAkH=4)Cu4Uu!TM8+JfFX%w$pDG!}hoFu)6mGSbx1S?7GKs zGS~N0d_JGg>Eij!JNaSzM<>|0Hz};2Y`gWwk!0*k80`E0D<01qy<(up6YuVYt?wTX z8=o(M^)FY!)^iVlgZ9k%EjO&bnS$}8y}urpcYlJ%V7Iu4A?sJZ?G56uaV<$f9OBkVfC;qe0yaIMS zPlJtjCcr+=6R`Wa)bn`qL`kY~|HNl0VB?SWuzvrJ7%=npUotv$GuV2;R@FJ4e8qp! 
zVfyUQ72&W96Z`hTmMd>?Up z8QA{SA9g(FupRrh40b(9JlORoF>CQY$Tyyf8h(F`Vc*{hSi98|_C9C8=JzMU#!p|s z`W44u?O7ww=RTs;ra$3t=7Ixt`LOF0qI*1g^c#*>zq%HM>tjEem$3QvBd~teYd_!h zez5g6kvU&`yT;?u5n{p4XA5BMNm4%_dof)`SI7isq(1Qr&nG`q5Vqg-gB{n^Vf%kK z*!tCvVdLNi9*@6z6LvlM{m9|{vu_>lhu^ymmC@T0*X8rjQ_e-=^YHtd%Z&f|uyOck z8GW=Ztp7L_HosRBR=?N>tHY+V9la}3J+2>p>sAQo^ZOLVWa?w~!1@=hV8_v7*!vkF zQ~x{;c0M`=`}?bPJf8mf0M@T7?(yg-8S00RuM{57_$mSGXNSn>0}WvP@@sE11?=?@ zSbu8^?E8=0fcxjYw1u^YrC{T+3b1+L8?gTD{x_Kdy5K8V`?CSoK2>bU^`W1QfYmoX zfQ_#@$;8`zVB?L(uR;O&!i0^lQxYv9>>;1ly`Tf0lu=NJdV0F^|wxdfn zZ%jM&K^-@KeUFezo-lw z?~Z`I?((qjWvcDyMWsES_3>+Lr@s|~tv9*~t52kZ^^2d$?l9_Hq%de$bkB zfr^)0ae`E^)bY8UMLy5#ZbY`??mv8Q0;!+o&z80+BR^YZ$a zcs}bxF3=vlUj4A^uzuWm+O^9!JfD34p1{WES7H5+gj~P-f1H8!M~lGrE5FZprwwfW zWwDHYI~CTR_m|PfZ^8O0WnjNAW=q~T&oK|IK9my1FPokXR)5Xfit$f;^TA8bXWoAg zcAl>O4(pc|fjurQ?dB))!s@#*JfHbFC+vKC-Op#d=Z1}sSHt=@g<ce64D@Qz@-*0RHJD;6`&8Meq=|9xF#^(F9;|t+{zboTM^n;D# zyrMK4mmsQ`^*k&SChfsUj|rv7Xtgd(%Oz6xcf2hhkRyn zSU;&A9PA(ZqlsbTwO*c2UHjul^bf}28rc4}16H>h;&`Bbt`@9rz6I8=$PC+`&%n+z zNnz`0U%}eV7ypHizYxZo{`0@EevISK_~JCIUY7(`|J?$sC!~?74_P9!UsNVoJ$f#z zKU>rD$shHRnZH9}-&+@tr(b>O`HcTsuzt!I+sSX_fz|0J%G_Ui+ZoqeVC}=Jf4N`w z8%org^Qq@K1{Jh6%guM#{_+A=-|p`5#DkgN=X!{9hWL5d>s+w$!Z6tURcTne6@-1hcVX8ZKoeKKldNl zc>M*epMDcIAN3z>JbMu~|9l(vy`P57r(J=qlROFsuh;qW0Ia^W+4IRK@AZ80<^5sf z?ESF%UE6MOZO?bzr4VC(B`cRXFPJ>q!n$Vu4weje@4 zkMm&l(D|_Wp-*Aw)77?9cXosRX#QZGpGQ7+5A6E=jj-!s=h%OVFV4XFs{>%)`(fDU zR7vK0x52*e>=DOLg0!-zr_47MpKitnEq0Id@fZb0k6&$oj>Iu1F{l|Q?2fyF^QZAT&UbGDDj<0OC zQ?FS?CO%6KTaVQWHm-^VyPrWf*m-6L_SX5S9qj%72&=!egw<<@!}{0dVdwKcu=&Tl zuyNoZ*!7P|Vb_O$?D6dH@%ACdNBDo*&lj+ECqM1(I}+XV(S;Ms=!f2~ap(=^8?U1p ztbg`1tiRt8HvZ`V+uugO>h+mn?ZZ?!7$4}jtc10zh481%*RPeCKTi;kn1B8SRuAq8 zJD;zI&Ch3t)fLyk>UYMxPhs>^AYPdD*3~`GOtBzvQo`!0P{PV86E>tUr<$wjM0I z%yW$j_uTSn;nxrNeCmJi5r3N>y#|}V`vcYtN%`maukaD(w7y z=RWqGc%d8Y`}z@f+|+{I&$+LRzmN>Jo}wCTe()yxf&N?$*!k*L*!b(kJ+4px2)5qf zG_3v{)8n!Gy=3OeUFa9)i*mt%JOFHb5Mn!iS`S#=z23 zgMH4EVeQSTJ3L?VL$hGx(=o8|<5t_rXSRo3AC{f#G2Tc6YoGJLu3w1{t6!vt%`ZoR 
zUEdws<9UydZ}a?u`U~;)9@u>BIX++iWF_qU{Hx!OeI5#1FS!&}-)jy#FHVDvZ{otP zw;ksB?5DWx7WYSdG6wed!$!frze%w5rd{CR{W)*fhpk`!iFWn9Dz-;lkK?kQ=kpxz zz0;n@Hfu`ePb-_{C6An zeQbw~r~ZMhpFAj2mn{1{&cdz_l(n0;Y-hdceOUke8LS`ux95{*jYR*`Ke-C)--W=w zuS>A=^(&574>|kJ`PN^Yg7uGY`+fLV$7JHqYa z-)H}<1P8Cj^-DQDpZI()$EzVoePXnuOl!EQADPi^Z9I*3gQrLLnAFm%BUN(Q<0``5& z?$=lgHa<-f!TDhIodmG=lL^)@hzFYwNCR6h5gXP&hz&chyt&E!;pawy&A&f|)z{RE zeGdP?&WF)q_3B%&_n!_nUOFmMua^xrj@k@+e>pv$_q7UE@6G{h=NG+mzW&w>SpVd# zdNrTh0akw-0qYl}m)Q@kflQt_1U623hF)$SV#f{o2liwxY=0OJJO6cn?O(lM-)k$_ z{Wv~^1N-R0)>DPT`tJo{`+q&yy0J*G^=(yP?fxzFdUg0Bu>Ij2Y&}(4+j*Wp!Rqyq zVe5pxf}KxpUZ=k@pALpymwO3zzA6t}S8+rpf1V9icRS(nOaa51UtO zWPT`eo-T1<{iVpT`GD8g=s)b=yNi62e(nue|8E8Cei47d){_l~%@^*5)dNCd>%kVm zzK;s9_I(O$9Fhw*-`U^uvBxQ4b?g?f<1HC%zp8IL`f74mJ-h~NeOESEevws2axKI5m8?dZdA z$Y1KW#fOdam&5uyTQ2kYtjGEk*6$n!YsZSh>IL~>-+LC=I6M}tea!$Hx4yW<`=(wp zJ?wtEmtpfunLVHU+-cbJ3c%XyEwJ-IURZngg-kr_`Sz3Mu%90V)_=|bJ6~KRpKBiX z*Na>)^}M@a{gVl>>ou0Z>XKbx{o(Im_g8KSTfY(p8?P1jc=YEsu=-*)kEgz&Iqdz0 z*pC0z7}l?Ob%Fb1T&0G!2e)AB>*kWLww`G{?DsW+fabfnQ|ufwf`l4cfxyZ$5<8yJ}g_fL{6;tlw756t!^Xq1_G8LB$NM~Hz`^Mnd>kF8l55xK|)>pXS(s`Np_807Y^*8ML!5ln4^Ab;B z^H;@S=Z}}L^}AiFGXPW z(yD$Q{$)8>|IqI@ey#=w>x;B!bzt)V$!OP~YT)_gk)pxovl@9m=lGJ<565`_ z>_^fBRyU6dyMD7KZ2jH?o}YSijR-#B^NeWMPh1V_Py79j&oQw3y)}l_ry9WOs%>EV z+cWEJ(1SjM{r$h+Ve=7#VD*kMusY{JSbsmU?d<2|@%rEQt*=9W?hRXqX1$$yqwIK^ zU6%fiKky~&xIR>d@1y=}IIRBls5Ix3zwQAWFHI@M@vLjE2x~ufmgM`;L({^>3+Ib- zKDyc=-#7ID8(^RRCfIp$F|2>K4z~VnJgmR9%68`4VX*VuN?7|_4_1GX%>$RSo%mih zFOULOk6Qw}pU$RYyiek#Ngj`$I|5cW9SiH9HMJcbaTKiHRt5I=5{JW%mwQEF{KGKV zdWD6s`MEBz@!JfU=ieFDzI^2IjH_<2{$f?wI7T*KR~gn{>I$1b?F8#*$A^6$U1jPg zLSVoX@P81Ig z_Fwg%$DFTSKTEsg_di&F;{~i9a}T!P#c@1PFMS_Y-^l~(pZo)>ua=gvdyimrdEcsvYiCy&6|#eZP+ z##ylA_9?6%JsCFc4Z$DK4txl^zrb7CeO^^z>xl~5&VJ^*&vQQYAr)ZfyYaC8LS1=& zq_hoeC*FMM_p|;tGwl8yTVby+5^UY}C0Kp$_j54g`(Ic;`3Knf>jkX-^#$yBiN)u; zeyt9yKd8T?t`iS-ejf$v_ccAs^Cb^812(?P0te!ASpVR}8O9HGFBblr{jxQze^dZ= zKa3Zrxqj?_71-z|^d*MXgXH+npNVkB6-{7+arvgriR6Z@wBZC)S`Z2Zy*HV)fzocBW>???S>@@s8i 
ze_v}8Z2fL!*!t2U#uv;d*N^c$*jKckjBb4zHohAV>$mKP;3cr@`PagM`d#81_2yDX z!|g{C<1f|={BneLbk+BX7qt_84)cES|N6n&!{)Gg-j9i2oj=e+?UVj)vWT^#j`@^cLf?%$|?`pB(+b_weEX@1N^hq`rWj zJq7mre^tNYIydd-d>qd(^r~9}+bd*!DzZd(5Oq`M(J=yx$Z(+wl zN7(1l1=bF)Rez>`WrX!Vtas9$B-%^=WuE*BecS6D{CoKK?|_|e>cQ$9Wt;MO^v9cf zI37K2I_&uT7IyrewViplC2X7#zZv&K{v!`;eg80-`#ZXu_lf>!y^e9kDp)_^Oept@ z{(55<_k;aRL4LqG#oe&i7l-_Xeo1B6=UD}I{>|a>tZ(cA`#w^_!TJH`*J!ZymwCy* z7@u7DjqA04!sa{nz}8_cgx!yIwoE*C7}m~@f%SKqkRLMNQ3}?-Y)L-K-;15SlXm`tbX2!`35J zh1Jhnb>j2U^G|J}f3Ti#3#>o!1+0JZ0JguiguRXyox`u^=|;|H{`np@AGQV7uW#0c zcI;=tX}ne1MiFY=?JVJwG~#68qk$?)>~AEjrV?m)f+OvzVC$H_&nmj`@e)g zzbvrzJy&4;%B!&ZmzKAk_$z&P&S(CJx1Q^vo+q=6eTV`(J~F}j!>`x<|MBX%&tdiE zdn{ugBE$BlC$M(t$%pg@=EIk;e%T0_{{FGw$2z1BxIW{l(OjSY zMtWHPbPH_#dmPyO%re+``wI83F8DL-`+XAeJkG)DYqD|QAF%%X23Y^`C9Hia=ke&h zm3+SFkp*o>zo-Li5B}r%tCtLfeI9FI^LtBRKleLWoo+L1ywnBOue||lpZ4&+?N2>< zzv|<^(?9gj>(W0QzX@RKv?AYw)rU{e-}HB~(ciRdN9dpC_w&&|9bc2^zy9t^Kl*Q= z9u{_;>tQ&cH`CvZKNr#8^}EJ0KJ-WHFg{$DQ5SaHedc&UkKe#}bA8PX8NKIQ*!lS- ztiIS6_WdSgJeoJkWjpq!6zqId4)!@Tgw2N+f$gvDVEqm4iT=nI*m&g-_RD-tDaOD3 zw%zuY31FZ9Anj#Dy`K8w4A^?Ld)R+N65MpV6=P!j7Y_oFDO99>CiDX0Y|Ck(uw@zpf1Id>j+De`SJw zA8}>!BOfsT>R)E}c=Zn0{MUPMpxy;`zu6+N^HqH3Yw~PmVExf?=BM#zK8KCR%(R14SK-FD}JAGT~}CrL;piRwhF9&8VXxKmjU+qck+DnphU3s`X9m8i$sCVAM}Qu ze^2PIVc)00)@Ln)eNIbYb;5G6`K!&adEPj%dfCsg^VJ{thw6v>Jf3)IIqdU22nX}m z=693gZ`ofH({85R|fq0H~_cQwk_WwVDwfE8RKdoQ=)bpt?hzHxhet@m-$_g9T zl;ZQ$yNAPp`g{Cw_k;cx|JQn=0X4(lS9jQc(;YS*&|ePdi?E;9Sbtgl1vZ{-3S0lY ztp@jpzF8SI-?*ZBc>Blr>&9GL}?DHNcQ$OAf zcKmkG@cG~4wL_g@b(8L}^WX=t z{%0T9{L^&eUB>D3ny`9v%Sz$z-}`qwe_5XM`Td)mu;Zj=Iok0<7x4W2ob<49)^yl^ z)nl~BlVEj_b7eXI+P`l{!Pet1h3$94VDo}E(08og?gKl|_lKR=Klgm{n-5EKK5@rb z*nT_|)*t@?c06T;)tA@7=5G_gKG$8a`xPXDeg9`*b%yM)>reiMonK#-;(4%t+ymHp zp>43bO&;#o@iGrKUMvr5$G`A+^r{B1&#xP7ywb*Y>|0ftd|{Z({&`7c@_oZ#=le?~ z!|ml1&u88n3A_GkIqdjXKeK-9s_p1dLuKN#C$QtPC9FRh%lC_(R2DYQ&J0`sk_q;c{`VpWhRzYddjaPT2Tol}tT-ELeMg4%U8cE*9P&QZv5PBf7%cqe}30)|;k< 
z_3PWg>d|?N^8L(TXJPYu2a520taE$=JC7BW(Km81ezm6$3iJK=zXf3X%dfEdW**q< z`3^QuniE#99{~Hl^1;?4gu?3I?|D4-q=?7ke`JQ$C*BmIoqEbtu=&bEu;bWv^~L-&nz*t;yAj}GMXaQ;gGTc3~w_WJJ@)CWLW>95o|taaUQ-OyVMB|bnWhw*vb;z}EBSfwkw)a&o=IZ;fE*%LTCOwR@_EG4GUtjjwmYj+e7JIG%oX1U9aA z{}|$i*QwCg{5^mruytnzVdJ-9u=P!)Vf$l8Sbwyp?aceNVaH=@nekQ`*4}i2jkim~ zu7Cd!wtl@JY<*T3Z2mqA?EC*jChl^7VfC2au=-1E*!o{tf9GyC#uxtMN3iu9M`80N z-C*_29kx@SC>!5zgzd-OVfCb+ZAUNj`+a^3VdKVruyy0pV8_7r<{~<@sPY{(ya7 zhhgiXFW8Qq=?`nK?!jJfUs(VD0qpk&+R4|wg4M@Ar`>rny80>K69$`)O9(q2+>g}x zDmCo*4Ta5DW`Xs;YrtN29$5QP9Cm$Q0oeYZ-Sf#86oH+mQ^5XyY;oB7=DS&V|I9P$ zxBAP=VAorg_IT#^A7J;(wB2~d{Y;G~OTg}b^EFH!X;x90{WV6*!SBZp7y{cba(X`T zb${6Bn-2DU_k$hJNn!JGgJ6H}A}(zG>=@Ymrq9dyW4Yc*^>Y&WcawanwukM6_Pi!FeS$KrCme*SdWe03h!_XB(|XP(W%cys*7>e*c#kLX;nVDpjtVe?#%v4{F2<*_&FK3C)jd+7Xn zQbw;zhW&K@`c-B>nonTu!;i3j*IZaXc?#_Kx}v@1`zOHKq1f7M{OQpVoECfTdWUD& zd-F&2VUOPiYY)51%zw*Z*AaAr)mKKs`bphkpKB{v9iS&{e3KmZ^K4fKj_L8}cOTl$ zc>9<6%l_|v(&jU-%f$cvVg20wu=aX{jQ+j@w%?A2jWagdPCjrt9L%RU|GA&H^>nLf zSD$x&)W2K?t1Dl#o&7%-dp>?rEaq3^kA)sjJnnwx>eY*2{nU1__WdW%M{j!|Q(v~( zJN}V-w7KR=Z9VIC|g%r$#(8v_W67R+n?6J+WDyX8$NHpU;Q~b?0h)|*1ko9 z9lxJ@KJn=@KaX+T6}G<|gRPIQ4y#8Tu$}#xE5P=v-(l@fDVcobL0G*spU1PF{h~}g zMqb!@o9KL>#t?UzjObPK)#Fh@S(7}*G=LDb(;P%_WDdj z{BQ_%ee5qX`dT*pN%!Ym1RIYJ(7z=AH50bp^epUn`wkA&3+TV{ynlqP;|+tY@7)HQ zFI@<$haZ3g;|YIQzw;LC`rq2H^>b0ZKl16T^`FVdWQ2_$a_OI=w^a3hiPu75^VIsk z#@&Nu#-nuzFiO;t$tLw)K4KMa#XzUQZYtJl^*+9d@1! 
zd>;DZN?4t?Ip^!pt_W72FjfWx;?|J=^VfD0Ru=^D~Bpx;H zv7W^F>Jn^xumd*VFc;RJJ`U?IO@_@Y+=SJ&M#0+Ce_->^pTIt!XvCxLuiXjuIi-fx z*P6rb#}En|M^%Q6k9)wLZ+_Lf&e^d0`^1HV^+fs~kBINJ$8%{np1W)OPaWBGnRx%Y z@jr2G3t0X5FzofkgYEAdVBhavuNPf%xs0AvNv6NdgpC`E!{+a%!0HVPVf*g{SiAWG zHeQ|#8@IUMp6e3l!{%qN!0H6Q!alEL#Pil)Y=>QsQxbMP-G12lxjSrLXrITE510zO zK4LShF0=qPex3)rkIYZ7|NkYtYUIG9$6?1sH&}al(etSvZw>qZw_xw5F6?uE3kURF zSo@qn{Q`ZiknQM{6=D4a^+@gYM%ev&Z=qkh{woH0i2dn=dM5du{IKHo?{x-Gc2OJ7GUB6?%^2=OC;eog3B;ANF{jYc<$>@DbSjKxbIJ;Sj7Y z)*lXjzt8bY&!^w6q22uSD%-K=Jz)2{+y<+Mw}AD(cEHY04d6ih=lRq_mV;eSv>i6S zE)KiT!+O~GGe2zq8V~EQ=7#k@d%)V~%&^bDDr`KR7S?}vKS1Z(gs}eMVxEWkKor}l z+iBzTA^v@Yer$a~9N78k797wAd_Tmq>d(RYJp0!f*!vwvyZdPzg?*mUeSi2XYiuV! z{XOsBxO4{Wekg-r_idXX;~%z$wMQd7pZh2Z>+cPKjYCe-zs;Amft|0d=kxO#dpzq= z7BfEdr>nv0o$g<0Tvh@0c}$P6PdVUV{halhsbF>2XV^Q}bG<^3w+{RUtQ~s+>sK7J z9sBcKy`J~~gYC?JuVvO_4YHlOM)i8*z51~AuaRK=`roz3)I)f@cHklNf!7;CyZ1W= zHvjO9cCR}stR1@nYkw~=ANiaw!OnXdou9A=XJGx0#jx|!QCR=0m&|@%YdoJgyCQ6V z{~k6!www7dm=7^t8w>k;vxPk#|GGDqoCJf1781OuPG`j)JZ4iRSmA|Hp%U{yAauf2Z&d^fS#5 zY3D}3+MTu@PunhmErv;h#9q&4jgg>GW6Fk7gFEKlT>?$MO6f z?EAh7J0Du#qJOhje~J73#^bSnLuGV|k+AhoEATJf-^l!!arEb~`HENEpZ(Y0_t77| z59?3mg&i+9y?^Q(lEcO$XJq1;bNIXN-+KVot`vif_ZGv(d(mM1!MU*ZawYMB{?tTR zoox#2{s3b=AG`LxOg&&fSbw$>?EW2Lu<=kHSbeDzY(2yc;twA4^h&VxS|^Q1(DO>e z)^qHV@dt{+j>iRXz&_cIPS6{6e}40ozK6TSN3LHeB;)^Ag^dTR!P=?Ju=c11Y<ag$aBJA_24I7_th1CPH#)=k?NK_) z*ZzI`G3gDIT zt{K)J_WR5i=Y@S9ePQFPrN-0bMa^r``SFo59B057NWvkBwpd{U2cU9p6{r_by=l>$X3zC>yz5R_O}GE@4FeS{d-P5g5#v9$5VHC z2DaWKBdmV51lCV;zjyaH@AVD`>(zXIwg>AUtaEZdc>TS^kSIQ`gfIK=i%zG{jNBy9^48xZhH^5f9HT6~JHp0$Sz*VC`^`Ild=7hGb79vfo`tO!SmODtpNqotGyZ<*@zl4K zgMGhMeSi4d-C^~sZ{R?^3asvxj`y!UTLRlZtH92eYhmVbQIIu4&?E5In_*NIa54(P(E3EyvXGXAf6Dkh>Z`7oIt5#wGr{Ar&zoWO!6UGGoX;;Xe|kRgp6q(%#Mo2k z+cmJdS7F$C(hacvw=vAR?XEsw`}c=3decss{`DQ~dh9*0^-90P>RHy?sAt`Q-S2B3 zY`s`A?7x1J?b^%6u<_h3nfEmuwjOH-?EI^Kul?EP`HbV0GWGQvZKp$TgU!3FlhNPL z!Rmq91M@CdWuE_H&!_(GKJ0$t3t`7`Eax}Y2QGjE`UY%2ng?sIy1@3|2{QBRG}~E! 
zG|uCx_gDa1AND2e_3ePw%Z9?{3y;C>-!jPanLn<0KJ_gFVfER!u=Q;NVB_x=%+KzR z^)c*x`4OyN_L0YvpBe(&4?nb>e!U&mKeFBYR4@Dm^~|=g{=p4cyVV*F>W$vN?Sc3Q ze?@!I+4J#xAHw=monZ5M`a_Pl9y0n$H`spG7goP{23vnM6%NdY=wGf^_z6~*TLima z?KfEeE(iXZI!1i#z5aMR*m){1Y`*7v*#1}^wm$eQtpDBAcHZktSbO;a>^k5Y_>0Ds z`b+lLFj&3F`O)_?47NVU`O)#R1$Mr1ehlal_@nA=&0+o3hHyZ)ww?8wLty;`zdw*~ zf~|9C2s?johwZPr#61a>`KHaIXp!q!ivhOK*^2D|@mB3OMX9saob zl=j8>E-LYXd8^B?_3_zY^Tiio0#p{+4_xQu*U`7FV|lM zHqMJfyYX0k*gRMa*nCVI*m*PrHvjVp?D~sGTu;D%mxJ|t_$ixU*Sl|`T|2oA)(`y| zHs0Cp@e$+M{fV}~uK%1#yY_Fs?bMru!P<*Su=<4kJ+PjI`!(Lr4hQew`u)tX`pj0^ z^*7RcKJ_Hu!uG$!Fy%Y#^ZC43uX~vJ&-`^-+tG`6!TRsdc>d0Rt6_EY+rA(Ct)*}< zo^;=XZ(;qubF^Fc@|DLk55Ixcd&a{0+j;nW$J=z+_-ugR$NS&o`NU<@VE41~`|Y+1BRrC#*Mgfu%$m1SCa38brDqmhO`7md*v~?h=$*x$-RMkq_@Deit+6+~>@h`S;8|hxJ3tz{bb2;qSXY#(GfeD@wu2p%58A ztGw|6>JPOi+;8&*ti9oSXvp8UEDUR3#B_c}T(3NGJt*Ie*Ba+Z0_#t;g|!Y6)t28+Es^)7h-?ShOHa<9k%X$K5RWfL*p^zGkU?= zeS_ghf5Q5z*|2t4z`ww6IuAR3f&Q%58?b+mWDwVzmq?P2?}a>`4qLyQ3U;1Mft~Mt zVe6ekW%Pfn0$h(iH6EZnQ$xmH_!zd%B6mUFhx6CFz~%=U72YgF>w))r!S~S~T@O27ZoI|jHIBl@yLQm7UAh(a zJ)gk(Tccq8i+Sbw9@qi(VdDX}VCP3QSb5?3Cg#iZ7p+?;Do6N(=Ci8F5&mG{yaHIe zC^`L&*M1KhPgFm6KEzen_{AgEoAIvCEAl?%ug|+4SuX`){o8Y}&s76N4KY}aq zeIo27I?UaVJ94G1Y2K!1Xe$kgnj5F9%~G)Q z|2>)i?`S*mj;Hu{+KWSA;?l z_4Pj3czbi((dWOy=3zR+=A(Xr^#}UH=A#b6*8lc^jo)m6&1;3hexG@8VE)6(spYW# z)$FaFG3_dhV7@4&{5ufVR)gf+Q8bvq|v{f(Hg_UCTnjvipV+Sdy9{>T%z-^WaT=_B z@ICB083n5+R>V$vqFxdEWY}kCsVfVkFFDt>;Pw%xq z->)-lUhpKWf04Q#^ArEPoJ@YCRekPHe))#`b2y*N^F{TC4!A!Dzo}F({n?NDC;NSr zW1+BmbqH*Ju~7qlk9iXd)}I{(Ysde?exbmADOkJYG;F`m8`4fZ=z#6SU%J4aUpo*s z|5y=LpSb@>Kk}&ki8Ebve-iH(zY)xL?k}UK?$hr5bHT2^?6CFdGu`ime^UmwUU)L> zc?VnVk6j;={ZF31{Tw!K6as5!B;x*_4>S(;+`HqnJ8z@P?jM64zYy-{`*((|uN?`i zUyAU)j%Qu=W9eV4k*VWo=YB5iqaywuc4jH|hiPBGrrr2#LD+d7M7#O91h9ToY1sLe z1lB&NV>@!-eb{k|#q*lq$PQ~~Wq^(Q)Pc>%#ewyA`oXS?2e9_V2H5=mW7zY`_rm)7 zS7GglJ+ON4wC$X8brDuR--4BcmtfC@_#JlM{|j4Bb`UlmaUV9`wHsFc#N_=wAM01x z{Rpw#zs9^?39GNGI6mn8sWSFJ1=x7Tcd+`Y4QyUyGVK1q_OS84!LaoeGhy{wE7&Zrwg1v2Wzigg3S{ZhW&djH(=|#zkszrpV*)E 
z-wAfSU%~3zuCV^mQ`q_37j~ZBg3TwifVI!B!{%>o*PhxAEB7bD=6iO*f&J88&;F=6 zUQfJkJgolneIwh^Tdz97#&dSk?tJNLJMj|7Te(^t*5BL;o8PGlt8cD(KkDUUdp+yy z1+2du*XtRlSdICf>`!(-W90nNUwr}x&I6>scFlFz{UUMcPh8^FO}{_+r*yF6c^Gzo zNf4~xvLDtS{~Fex9tP`Yb%Kq*jJ7}d)c&yJ76N;|?ikqo)g;(?S+EJeM?Cr#Y`&u( zYI!o~sT!RAAH!Jemh8ut5kgUxUM2`dje!OH)Ku=Rbi@B0Q; zzm9{=Po#9cvA0^;=KadeyDGkMYD9$Pew2(Xe%TRbcCf zOTzkl+hEsS0odO;4O@>?12)cf#{S6H4zTvfaoG3n3R@Sx3)X+n%kLYH8qDvTFE0U` z&*=c`uWf)mZ|XBx{eH12?Zmr+9Ut=aWnuL~TE_#uc8mVb*L#jX{@n$cy#7g8`InRa z>i1bP@!)H)dOsWN_v_m%eEl3{f3D{{#Ba{`AfDSEc0b7qSosnM_IvDw9sk$t?+xrX zht21?Us!qnUJIBwrTc+XO*(n7M@yLXb`ADBbb;0Lzrp%1J?L+|;CHRnXrDwyF8Ea!!|s><9QHP{)h2?{yny=yr1L$D{LNKRz9Y)9eFkl_V2a* z!gzVU+zHt4-VC<>do8SAmKIjt&4jgke&TytH`pAG{NBd@irS99wA}TJUb^7(5bx*) zJ0CW}t_S6hbpW$r^Na3h_xHQO&WBX6@yMpIcDee)_*E6yc=#LikKZRFjNcREG_1Y< zknvLgZ-UK_pK?4|=QCly*GAYl*%;XUFRNhlRsCV>l^4Rwokp^JL8ufygigK0OPzZKR#YYbce+#HVledTK*+u1+i z^I0Ds4|ae5&#?8>ZGQ@%zhSWTMFnB&Hy*<3+bciPAOGnCzYqDi1+e!?411p7DBC&z zCK>GWb%M?Bd}cfI`RFR17kep>%=iBZ){iLwYrijmBkeQeL2Y2itGE5p7xistANg?D zI8+fha=t1rUasW(vA<~*?bI$yM4|*dLmly&2`&p1D);m{)t*klP?T`%?3AL!rc zu=RZ{VeRKz%fp|qE$n;VfVG#}!R`l;m4BWmV7%oFterg__B_1fu=;K(tUlibyPxB< z*Aw6A3LDRlj{MVp{tDJ!=mH1o=$7$)u$S(^*0p7VjX$MDzUmJ?TFUPc=RO7NU*v_= zgE^7c`pLg7;rWnn9bn@r-^1<$>jN8y9{{`GeXvY?yE|+>@&VX*d0trg`#Y??kQz3g zasyT_#kZYt^t=x9ZO;~mug^H>5BLA_jhE5W z^*WTEOlv8DTr|-Nvx`YNJekrU2}EIR_hWNC7MVKS2Mfzi%(%e&l;H zzyUc3TMz#u?Eb`>u>QhM*uOtf0@lAc410b`epq??4{W~S6Igwm9DVEhdb$wC9u0w= zKf$o_Z<gQgM ze^&!`oepUmuK(`MWq#oAL~BPo`g1Jox?j_t@5lc0`g3S!|3pxS@cHlo>~r*lwJ&$h zrXBsd64pPPA!CnTk&!n&VSn!-tUuBLwmz^=$ME;3dSwXy%JXM$L!4SvyO+u+6^UO{k8@&_Gv}f{CzpE zH~wKeagDsNak$m6&;1^(-+TzR{^rFD#vecP4XnJp7{ST9a6NM8Fzo)suVMcl+#1_C zzht6}{WA{MK3EMKe;sUp>c8*7+7V4*=U1Ap;p@8`?ED-9yH6sMOgu3|H^v|P`}%a| z2ljetSpD`pY`$hS?EKs>Qz!ZuRbgeEoHWtp^+j`+PNF?|TH+eog~xZ_JU28-4_<$J@fn)i=}lJ>*Xe zSUcq@?D~G?^HKjDo&L`6`>^)n`>=BA1Z+Gd8Em~*e%LrvTKnUl#LI+26ds z`3l>$zfZ%?-{i1z^$@I_iVJH`Z-lMmjtQIZ8xFglDT?jruM>O^^HPs^UdLxVY<+bi 
z+LZ^j{eGN(>GjqT>>fA8Lu@b~=;*1!J^Rt|m&N8#_w9hQktCxngnO@cj7B@t|V zy1`rR)>9UPtq**McH`-ZVC}A#tT*FX&nMGPz19iX{mPqR=iOf0vCEdi=IhtO#-pad z#z%U=&Zq9SQx6pkEAJb_<}a(k#;t1DpSVIL*mzt;*!5Q)*4{4)8<+XU{;c!Vu=YhK z*z>=SPvUv-r)I*&wT8pyYoEY=uTNp)N$>K0*3-v>jc25Ut*3nXJ@SJ5LVDQWc?4^J z=5jocceh~u=T5M8@lM$NMWL|v(M(wX?g(uDJH-CT@5^4#xDJPngS>|Q{XwwnF)8y& zd$f=3#0U1n){O+i=5hapUB9(q^~!VDdS}_^h{F1CUetxP=iY&hUp9ozFXo2*9xY-0 z+;XsXP#f5~^KW6}?d|PPUU#I|Q~%QecD*lxwZCNjmdh}9_LSDJ`STa{M?W-y)enhT z|K{bY!`2<-hPB6n|00>w4o|zQ2A|SK9TnZ!likD_vmg9a6)}>&dY4D-moyXBzB&is-QZ#cWvn;0)i( z{V}Ux;|<$k@3$G&jtGIBw_!4Iy;k=*b6Hvjr9Z2js@`(vlig5Ad(hv##@(suikPs$EE4l$7z>aU`( z_EU|wSi5Dh@&voHs_)17SjS1mKi)3^?0FhL!~R|h zSUna7dp%E@(`rWX4e=}@eEf(`pd*QhKiC4#W{t{;igViV6 z6XsQ9{jb8XcEVBEI*Gcl`L9d16MtL^o4>yWt49yR+81|V<>5tGfAuQtI=%^8PktFz ze#B+{`MZC@?w>ph8;>~+D=)ia4|qOA7;HVjN!WRC0#+_Q)?UCLI3*LmPSca`gTJmm zRi9;%S>J!aj>8wQ@#QP9_FZvUxpyBnu2&D%Ux|jkmV3a;+oZ61vOlc8O9`77m<|W( zeAVyBlMt`Re=H^=U;BAI@}mfBzPA-@zQcCocqwgXeCycGc*cOOziAFT9)I(G+U-4H z%@rrY>@}hWz{dp61-OJ9)*RbPR6z1RPAMtzIvjt)E?yyVB;vV@_Z?5eRKj?e_;o#{P24H_h+znlI>pi72Uc!8@*1ni%f1a;2tY0z= zw!Xa-?0E@;;DEmgYY+E_oxgcy^i@w-Ig`%**khe+XI;N?{IDlm!rJrqVD)kn*m&hm zSbbg=HlDr|c7Ib**zY>u{_GDd2wP7!8}|3}!R9Yy^BTEe^Yim;XS~zH#gHC~6Z}DK)>txvT1be{B_nffyMsL{pmc@4JEc?Ogx1_M~ zz&Wt>6;H8ulsijc>s4;T+WVW~zMN!I9VN@1KPAm;HU?i0cP1-o#f^!1_PA2lD&a5!qnv z@shCdvd>}VT?bhGX1j8v4{UxmtNoEDXJG696T!~w%dqkOgs}SOxr~4FA?&=%J?Ou7 z>zazd>SOyG*D4KLSC9bKAE*PX&mDi`A5CE6Z-Mc^|8EW(k9xxUYbP{@-M@AVw*H_Z ztUquLHeOX0wjN*)tb8s98}Hr*`+SvPr58gpS*n=Sbu#MY#gPl{jm?n z*`IYh7B;@t7gl~vw?FHt6YP3jV1Me-n#1l7SO#ma*M*f6%VG6qC9mi2r7VKgU%6rP zz_YWq6tw(7Jo8P}Of%!(A=Q7yyt~SHkKYL;G7fWFMhbOS{ z>Kd^2!YkPC{h95^jr89!zlfKe3SqqPZwkZ8nYFO-o0hQqIYz?Prwp?{`;$7r+O5lE z>Z5DJ%CC$=xIgiMw6OCt6n4MPqw#!i?C~=)`Ha6|?a>TF!^iVH?0T&UTSpuQYYz{D zUAKo|?b3a)^K&<>{e1#vto%7@fAXx2VW0mO zSi7J-tXw$=t2cVXo)@|o*3KCS8!y}mYex>Vo%yr{)^8pO`|o<#dX`bJ`%RX^=7C#! 
zJ^LxE!{$rM!v0QP*zZ*uRzD|(wO3ofMUel$j|=C)uzGzT?DzW~_I!bzu>Q>vuP6UK z-*)2Kw_w-x7~6^OK7w8UePs006IlJ+8TLHScZPF6^0Ccfzu$+j`xhF+=DD)N%9{qT zc3~k{`A`QozETNRPG*8#&(&q@fw&Re5!SA_JvMwk_Jyr8-VZwtV_@a}ZrFUtR@n6v zYCClzf7nhvW}t{dY~+`Pd3J-rqvTAFBr|PX@r++tut( zTzQ@C#ODgY{@#69`5R<^;@7E0(vCeB4K_}k7xwoaj^TNb=LKQU54sB*zi9`nA1=Vw zV^4B>M@K5 z>wlK_!(aXyHorI+R3F3WMEGd*Q8i>lyxpJ)iyn?fN@M?T_5v0K0$e4>(X411o3F z+n;qe!FK8g&)T2+kA}@-pMkYg+QG&<4#LX+3b6Lq0$BN)0oJb`4jTuIYk%rR21WFL z$9C#^I>FjG&qnk7Rj=p>V8=wCQ)}NjX+b<97{*mr5-=XmrUXMLm71kd6681dm zAlUs0rD4DS$x*x?b&}O!>yW3wo}bm&{^SFz!TL{4VC|udu;(0WejY5WH9V`l+j@2T|HQR z`XlpAf2Ra&{=6P+JSV;F)cK@={r`8pp7>mJSpV+t5wznc-(dcu2b=DP^+%4v)*H-( z)w{=Fzh58N{c#&$^FtMFXT8pV19i`DvGJk1!}%WAeN$;S{(J;>zo%@zbOY@AnFt%N zS_~_HdfOkrSJwY)1S=O;dp+Y^)&BT%GhpX)JKK>b{b20^uXmjmg*|U-Fs!}%YS@46 z#`7k_K2IO}GjG0wwKx00?!WG2JL5VWHXmLNRv!<6^_xG1t@j)%Q-_cMR^AMQ-7g*u z*1j9-_1H6ahKBc_53A3Ah1CZ?!0wOR47>h+gq5?Qu=8V=%zl%(uzKJG?EDx6>qp#! ztygIU8;{Dx_f`I7fCKgqY`uCSug6~Q2^%lJH-zyezuOzu9=!#t|AxWV`J96T_7d#= z-}SI@=~J-h&31wHKQF?5=Qgl@?KRjuLSwIwn6I7-lMFV$7Kiz3y#2{{e1G?C!q%mn zf{kyafz=CZVD(!j`;+foVE?~vPRa_KpP2}2|Kx(_W=@hP*#5-BzL5F*b7f)WN+Yi) z{_$im-=FwYGuSxC1z7*DCv4v32y8rhH0<|U0;_js+MoPzAJ};Mb{RjT3ha8g0ILtv z*p7XkSb0FaZowe#hyRuu_Pm7tuyLJ2u<^SDuLahGKH!>69{At@`eW~99LMib zKQR>6E*}e9pBE2y{)EDw7k#=v&(AzRWjp@e8rb~7E7RjM*nHy=SpRmYjJ&>Y zf9e@p!p0v8Or)JWOIDeBzJ9Rl>|SryC;rBISif!qtbO?a)}I~&YX@ilp6BO0fMD4A zlo7D;gZE(P_emMOd!`rfiyd(mcAx%9nfkoju=`bZ!RAAQCh@(<%Pfbj%P9#5>Lg(G z?Q+=hFAOV3ufX<;4?CY?PG)`~FQ4~hJg{#%!OH(o*yp+eyAB$|*30FX!uwE9m=*T- zYs1DT62kgzOJKjx2o2J1(kgspFF4eKXug^f4Pf{pJTv7P-j zmto`eyJ79ZiqjbnzWV~1^Z#1G`b!gG$7PXB9sEGp`S=s;I_nO9yCCYzG&A^p_G{FI zjR&TNjpvty^*4vZo-dFQR_|AlN#;X#NW^0B&1{%<7g->)fWfBflB=Y@|)GTYg&UKUn&Oj=}Cb`VKbkw-7cC{3Gml9tx}Pj=`??5wi0I)}HALd!9hb z1^gcNY;~`v9wq~<{3{UAo*!1PW`))76=C;JC5C;D%CP=eY*_mvpUnRCqn-IZ;-8Ow zKKz|Ju=8T3&qsXrVJGG%_w4`&>ONrOvvpfP*5z0@e! 
z_(f~j{KjTjJGl;Q9myS7eO?;2o}nJkXMVCctbev0c6}FtwO1N14F4VlZD)OUhRyHi zgw^BYVaGK)Y&>fotUml4*3VxJn@`RPtDnz!J#|S*VEykG_D7D#gWV5t-Tv51DPZ;C zdHYBFp7G{OZ?W>_okgrS&fCgpf8HzOTbzn`d`Z~&K_=SWuhj!KpP9zrLtamWt#wzgt&gY) zyH1M0`YrY0$nnrWmGw`O((ZR`WIMn6%<*J?)`pGWgu&|X25{u}Gyl>QHlEjscGp)h zZ2rBH{fVEHvz_tI1zT@Z3U04m)ESJCK{fYOjg|*AVVC%yt!`AyQgY{EJ!{)zd!`eX| zVe=)!VD)Y$*!f@6cI4|nt{>)EPFOwuFRb638a5sjgZ}Q9iV5pK5c{q>KB!j8*3u>0pa%h=hU!TLKTyq-9Cd|0_r z99C|>tW~bV%Ye|aoGJ4Q@x&eP2hR>?h|4Cv=G{j7mb3gFW%$z z*rNkv@c%UDu!RDjBgdLA>VDsgl!q&?b zgtd2)!H)NruyL^$J}=)Z3vB)|rq{FH6T*J~1hD!%Caizo-TRSey48;HVn4tO*mH6I zf|WCQ`ToXN|A4hiW=G72Yq0*$6WI8M?0iVT`Zw=;7uHW-PrLP+-T6KB_hH!i*w^Rd z`yTUt*ik)T_h0=bvyLjm>d!RX&%f_@hx=I{^EDj#y#D*G?Zo+hr(Jt*GHkq9Hts*y z{>bk`u=et9*u39ySpV-jtbef&HedD-HvfAPR{q9Q9%x_dndxy$3JDfwNJ{! zuKT}X^8zvL&-}gvn~%8GhTo??SN8p9!rJqXVD&&Z*tmH#?ytX46;@uvgRN650&Cy< z{qFx%>F|}-T+i=}gFUZv5bXYk9kBIarD5&V&m3?3@#C%de%NL8VfTa0h0Ui8cYM)n zZDGe}53D_&1=bHLxrFz}zM0gL@n(PNK$-kg6`1AHd@-!OnHu)`O|a+HylTPsWS$(g zo&4z^u~n{} z`gg}*>lydk&flX8h1D;oVfPcQg0-Wr!rJi@VdJGwVDneQVEx?pR`9;q)4gTl%dOx* z9u#)G_t}1E-qlZF<;XEue>uMG*b!G`*2~MLTu*+v&<|XXe|8!6J9mN21D=4jGZw?@ zr=xJdpMsq?w_(@oTG)8P6Ii*rXeHM(&tm#}$g9n;@-8}TeCTIbc^U;ao_!FGd>->j z=V9Z;ySbnK(EF>9f5dBd!phIeu=$w-{vP$`%VFaff7qY>X6s?&Gk?R@qnv_~bH^UQ z+HZH^$lp`W#rTou$NzjoyZf&{gpJ2NgI$kFY{!nc1kX+S`K%z=aaaQzf6oXTFPICv zE;HDl^JFH&%I9}s`MCvler|@fw->|up9f+06O4teYdHez2M&a-Pq_gb zuYKK^>#rVf{5Pz96b5@f$T`^as>i_U%k8l8IT()o9{QQZVDm{+Y$xB8#p_wmV`1&> zw6OkmCD`+eqr(Av81{Sq-UxYs{r)3tJ;Kj2_QfF?dutPHy-FO$*ZAxpSpOjlY`o@M z*l{lno3E<}D{p$h=I6e)o%(}SUXOjB3pW1sK&IX^C2V{y=}$2JQvw;e-UfC)#D|Ta zu7=f*L2w{m0~;Sr4Qpp5UH#wveg5>Y_CR5;Cw`S5wqCR&*Q^NZPga7>la+v#%S~YOvBhEa>j2pJ zVj0*t^J>`qT4mUH#>uzZm3z_F(x3gZ-_Wkz_9^Un69r(uV}984;?ls*@3L^@_-H59 zhn;U9({B82to~1RHPb2OHn~5jL-|^Z#Mb8(l%W{&q9k z^$%sgM|0T!Zvy}CdB~sT_kQFviqfv$i~SZCnNj`C-3DAw9I*=RKF=Sp^$vAm<+N>^KjxKlX0Itvo;Vb@gEV$Bx_hUdZ`Ou<|eZj_}{VTc7nzd@TM> z#sfXL58g?=&{wefWeaSat0-)oXC175Dgyid%VF!c@_9Y+$N8{vh|ggCjv27_VG>w* 
zJWWPk$AbO)Ow(cGKo58xrgiRluy)*SSikZ|*nC>9?Tioh$V1rkVooz2`ZJSwALVyd zn7n@W#jy3?%Nbwi>ou8rm?@0E>p8yTt^VZucz#m>*!4G+@27uJ!*=Q~f?(H8Yug$B zTzqfqmqx()mzkU&)YJ8c%_}@$y=g}eg|%ajz}DN^Uw>g4?71W(Vf~Ofu=;oqto|Pd zJ8$~gpYKr%cD!tN|HBjHhxI;#ZRhv)!rD3hzUMJ5k?|LL!PciQgq`nIVCCx~uP0xT z2R2^56t>(=X8|;tWkPP;{mcp+8N6aVVOOC(idF+CnuYGJM z?y<;r?4M?^@?`>Sd^#A`UmGQ}PHMvXHA7+bwc}wNv>&V;`;_r=e@a)_dZW3p_IWMX z^)m|AA1WWg9bm_^IBb1YVc2*{L0Eg@3HraVAQwM)CIubICIV8X%>A?*819*u(z4xg^|AeOpBPnfP`(*m{Bs*z2Cxo(a}&84g>YmC^p{Ls)+_Ev%io z4*TDHVtiOTp)3A|a`aE12mO};f5v>pav8sH68@3?-UQh1Y&_09N`KgR&%3aGU^7_% zRR7%gZC%*(|+Cd^S1kK9vyIdVS;L#{Ij) z-nTdG`5KL2$D=r`{r&~4zmo%2FUEw8SLTF`lRT~UR)6XtZ^Ev(xwPZ|7F!Klk1^8z z_}hzN=j$NY@4rSSzTF76u5Lf9{ZbA#&#()vC7eGv`@$Bu>VB+`NVCyN;!1@{eVe@eDV8^{5tX~lm)-D?b8^60tK2pE0 z4{YB5G>qQNGXOSza}Ks1r>oZ!|NR+OAGLPYpZ9_F-wMO}JL_QeMh@GF1Gj+nhp$!R_o%nY47>i8!p5P;lh1V> z+^kAFdVDo(z0yVaf9=>q2jD;)i+1Pj4{)G90#;52!^Zzoz{cAfz{-Ir_GiCzaoBm9 z9QOMahCN>_2CV(`vI@+8peK`fU+l40aAbbkf5TwM`7!PKpT}V3#Sz%?s|p)Go#*w8 ze@mJ8c0bs-Z+lq(Wb60g-)|FaJUs?%9wLg*NBzr>6S+TsU-2E-`F{*{yV#~9^Gcxc`67AM|!Pdj9uE_KA-p?6N=UEy! zvOd8$+DEYQnWMC8UnPdk+ckCk(G#&?$D=>2y?VI<Eh=PSe7ogw4+edN$K zSb5kBHjer;Y~4VA*!)%;o=5-rQ`m8RA2tqpdo1ruyeko`fA)CvM?X@7-`0RC9|Krawyf5{o@A7-*Kk~uq)6sBb zeUtk{15H!|J?~&A2JCxUmKtM`#q|` zj$>upBi>he|1}&qznbT>zPB!{JvQC=N&Vb^DA*z>^JmHY4cv@5E^#=HLV z_wYZ0;K=cGo!l=QK0l7|{mkoagpC*O;rqJ(WxDOe3va>B=U-*?Lnh{na<2sUGd^ZL zk$U*7zlXeQ==UHmmzU?WKC=pJ9={lD{b*CaAO2%S*!4Eqn1-)rX<5@zlXyj~x3^X8iiX*26V})z3b!_G<%~_0u2Ljw%dW-!%YMKIez|ZZmts z`ZEvwzSz+{VeQvjuyKG|u=Zp&zMp@8uYrs_*$sO>Y%r|8O@chJp4oQSQ3u#@ZUnm? 
zcf;=IXlj4zUr)o4_j7;TpRoQ_UD~zBuffWjny~)#->~_es<3+O7OdY>*88zO$9O&R zY_jtgxx1X_b3L9d!~Dm8>MpatN7nyu00;h_4eWO+3j6#E;Xu7U9Qb=Mu;(2|h1C!9 zVCy~)Gk%^A)CP9Fc5!?;UpPJNdaeg+k9`PR@0uAl-f*up-;+2(0oZuvHP~^w!FW3# z=flPgy2IL&m0;t=#boArdf58Wo6JZ3t;gRmKKMsru;cd%R{uYQ^%D}%-}vcaSb6s; ztbaKT*4}yT_1Ndt?9bn$IS*@p(9TJ>Q*5D%*ht8eG_W= zmauhVlVRm)AN%utXTi$tL9q7VV%T*%6xOcT3!5(+4Xd}4@%#GslVIbc?jKi<&43;6 zLa_U@=6F5zq~&1s#9~-`y1wni!+wIb?*_rfbN0iL`+4;TufW>Rn`qar{}*<@(@EHQ zpOE#Zzvq5ltFSUttUR__4q;KydL}i0<66?RVMy%6LucY zg|%ZpVEkP#OX0}xVSeZb+o?Y(OuO=OBdlI&=l3EX{xhuozQuO(H)mn=|gYRL#OBdSx zcL3~s>d=e#A->ZLb{>uC87|Mt!seBJ>A~}mXUh(I&dwegeU$<>UiusCb6w(lJD+aD z<~xqS+W)b+pMK#E*!aaJII>+o<_he5K0v$ndu*OZd%PEHJxh>G{h9JOl9iX)VDmOR zxZeG*$~*H3Z~T4Ymg!*UQ#qfPcz$wNdD*Kw>kWTAp3Hq3cjNm+$OqToo36}n;!w&T z$E?fNULIy2tvciRM8ZyDN&@!((SiHpe|Lb*M_y{j{mEnXf*sceZ5coIcMXTthuL7|$;h`@IXDUSyp*MF{`)@MU-f;fzwz%{uyTD4?b>5CW!A$g zSb4av70*lE#X8u0{90K3vdDJg^S4^^`{ZX<+K!*Gr#asjxv~Y;{!0&A*SA^5o@&*M z=Of;}6}CQdOB3cVad4kke=z!6Y&}fk#{XTfojs!w_eXDSq`&sd{)V(;2X27f@BUqb z|N5J+8w9&<_eJzy5F9>V4#3ve&445CXT3vLIP&{Pvgb$ar@#Jr^0%(nZ%GO(CqlhH z^5BR1F#gXP*!ekHCcg7C?D-#KVC8`F+I(A8Sb2T{)=&Mr9`g-5Ehp>I-_Ha4{Ht7_ zj7R6X;qoyH_LXtW$};=MzJc|7(!=@-Jz?W_uj{ZL@t^m?#yjW2&i{W6`lwC za|G;o6oA$5bF{}^Ev+xz{XGN!OpL>u>Rm;pP%@^0{f$H;xiuZ zpBoQr&y{t2c%LD#@v5P)=Rfs?oo`!V^?w^!JF-Gmo;Sju*1tXsTc7nY?ViUHgYTg| z5d*e<;5_Vk5vTZG>bcB(KjUP({hrh%^oF&kR>AI1JPf;kWg2XrxFYkx`sEI=`@a{# z>eI%s_VH=h{Sf70=WA5vlk=k>tREYV`KLehl=)~pdto8ney`)OezEqodioh0DL;)L z--E5kSWdh1D=E+GI824j$9%*4xF2ifD1vajyJae5a zgpHT1hqd4K!Om~>jd7n{u;;y~kM#34z{Xq0ymh^KPxYPOGZF2^F$cqe`Oo)r{QAK@ zM_=U!c0gBHJ1RQtci9dr$5JsLv_Bf#&iM|(&M(G6)?W1QK`GyAL~thNuk|kFVC8P| zi1}OxHlKCe^@04T1zS&_8@3*(GOT?6f%T>Q`5N}QbHU~}io@=2>%#hVzwBqQ{%LF0 zx8FA<>^k^Ic|kpDa#;U47VY};@59!k6r^2$Aqs50Pfgf-O#-hcuT}*%Z<7o*uM-2- ze@_Gt;Jmio<+z@DvG}m|TPW=F$MJgfdU@D-_g5tuAL6X}VdY2++N~FOSeEg{KYU?3 z_Qg%u_4*L@?_*|yjZ+T>-P?Jnag1F!x>@i=RUA;odU4=!!EG)cfI1=k9BgcIPb^2uLbM( zT!Nji&x>(?;uia0?f7W+CqMs_jGf+%cK3Vq^ZND#EQ``VS)ZsKVBaSW?LKEM+xh-~ 
z(e8MZfweEf>`#2VBW(P5iHtut4AyTP3#;$P*pB`EF05Q!!Sn0S<|;xv{>fO_{exR! z&x2`8fBp4ou>NmT*nG)0+tE8IVC}V?u<^Rn#kfEDicr}2+a}oj{tDRrohxDG(tOzZ z*afilukB&y*^#f``}Y8E!tN&y zg3ae1hSjrgit_x_OU#G0dlS*F|2h#izF@oY!j>|10MB6U>c+6&_rC4KE2qFd&p}u} zzN_uXgLSa-r@GAg{sq>)%?C4WrmTmx_umxadB{&KgNaM$%|pBSq3_=6Z~UVxto+MJ zyK&vtu=PF3VCx~;!0O#g+|U329Ckk4hJF4FuzvL(*mM6L72$cYt9ROtJ^TvR4{GK0 z=#lSW>-)3A%JD+5>m@C$eVfJWso%O=kmo_KT`o+2?5RItMSju_gbAQfpcwGP{{!$3m54Z~(_dA}C_amNpSf;Kr zJFNWO4;#;W4>m5i19pC{{gU73@3Z{`YyZ6u>#t3L^|zwIe&3d`dTM-L-UqwRcH=Dl zVeR-?u=}xU!PawsXFK<=0jn4K!PeI%lF^@2Ve>NyVdD!EVe7X#e8Kpj7lUEPqk10R z7d>7)Ceq z`^JEccg};|Px>NX_>EfA{C$d8y}WZh!oriLig~qY7;PxfSew zj&iX1jN&r#>?>IN>`Zq0v%e%2tb7fHtzSz7Tc6e$c0bF@Y~k`V4{UzrwoJY=p6#sH zi|_#QBiBA>ywEErVC#RU!0PW%*!rRZu=dhe+gXomva!DLBU{1Bt0}O4V;xw1J3_|4 zsRkQAZ2=p1C<9w>SrzttzLK!>B@V1V@(pbK`e|1B1F;!}<$DvhaS`2iLRme&ppk!0N>-uy$<**!ubXu=D=|*tpmM*zr5_8P`)^ zu>&^Va6B{D^Y=A2+8_J1J*+>q8_v#o`mZzbzQiT>!7PjB|H9To*xz}!16ICog4M4R zVB;U&UwKm&cD^5#sb}buk^bbz-{XB;-&tY(+AOf=FYV62_olw45v+aJ88%MWANKtE zD6nz5HL&yVWO~}EliLj&?_LNS?>%ZecKR+@zy2hwJlbS`BdlCZ2?+sr<0}F4w^NO_gBfU}so+rZwz)lkIF*)m>?hZ=a{K^neryeFJ;+kn@!Ae+4>hnq@z5}@ zXTBGJ%@a*Z@QftCcoNR=KWT{%EO+p=eKr&wKoR9 z#uL7_oxhJZ6?T0jgN?^6fc0PRf6DW+|7s;1m`AYnGOJ+oH49NH&*tkhBtln-2TVE3d>z8(djX!To$#_x^-^c#Q>*wE2z<5yC z^98J&DF7R9st8-(F)n_%{nNvC*4z9KXvd!21e-691FP?D!`53rexLhsURo#YG3|?_ zuy)sZ*!ypZ%Xl!(iLvL*|L23%E1h7+bMbpH@tM}xf6AW@u=;BRY(4*)cUV8zziqJ> zT@TY^^FEw+7ZZC@xi}m)-;fg4ALs}h-zlxVNq)W-?7q1Xu=*|=Y(DuCtX@kEyMOo- z>|g!!xUl*$gG?Ro`dH!fvk2^bF9;iNuBp9DeDX8cdgi*Y-!TR3dTj*jFMI$S&us;( z2V=ml*G{nZ;{BMkV}Fl@jhEemJ)dEkOufZHSbOgZ?0nw{n-6;qJI-BT^<+NmeaGV) z*za2rwr)5q2IJ2>tOdItb{p*96R8V3-iu)4T8&`yOA}zvvup)B-@b$O_dD7j`>-0U zUo;ukzbFW+UslT4GihM!TaI`=`K(y5_Wd*1?;Zzsyk5ilThF6&J#|tk@o$V*KY%@N zxr+V|`Jt%Z5C6R`ti6y9R-d$il^+FcC%@DgHvUxwHV@GYw!U#PtR5W!TQ9s7_WREC zdcOb9u<~pPY(3Afuz!zC|I6=n8rDxfV1MS(Z?N@Rm)~ORqyB~cj%U3eaoOnjbDme^ z{fuXS0K4DC-&byD(EmeTEP>T;`C#)Dyth@2`^&~V_QK|A ze)M|m%r~(5XuQn)OV97QKV&HE`e^6#VSjw<_1OR4!>)&xu=dX?SpD1-Rv&M*o#$?B 
zf9kuEGd{*6?eG1+gq3fBcJ`BZcRX1yC203O#>3j5nPKgxm9TMyH_jK<*;-h;<{#Mc z4289aFTv`c^)mA56s$eJ8g~EDLD+b~0@!-X^|0~&!1^Pfv=CMv)uvs!Iu$k^d!6?& zAKMl-9(K_8BOg@o0PCSZ_EU*1mmAyLp4*u=aHqeowis zJW;NUfwd1yz~=LI!JdEjrHmeb5BcNoWrH2hGO*_lB!}H!T_3i-DgmtB*8w)(8w<8x zXEtp9^sw@c{d((Q6k{oE?H%dQKWQf< zq20Ji6xw}GzrXdCxnS+PR*J<8j=U1ty|6Omsc0d%KAN^6(`=QU5zX=NL@AG-IKL*0aSJJ@BgEy~(0>5MY zMt|u9tlgKw{^+HTVf~>uys!JQ_Ph!T#NGaZjei|@85GFpZjBg^t>Y{3)VlMd!P49Iizea z<4rtYAy|K;3~U|J2e9K&7B;^$>t0a6e)<&FUTJxo@nikp(;h`m&?pzyG4KMX1;}u-!}c5@k3wV!oJtuZg7+F#-1Dm zJ734$fLVXWbBqTjhK(aW#lO+Ne14thMgGSp6CbGmoRcyvFy!FC74DKaGW* z2i0KfXFj%_`{?f(Cu?*y=zsp9<6Gtm-w*$KD*mGSFE(r)-@wm$)DMDNDfSJ7&PvcjdC3^;;M={+bb1pRV~U{P`2Y=C=;Q%E{;W(~i#= zSby^&Y&@vPMZOpI>~Yw9bw*fyY&^j8aTfeRfBg8LVD0}&@bTecueZUxm!UJkR)I@6Lghd);95 z`*zrPc66D1{|Z=tXvew#`a927!`7qCqTT$=T-bQWZ2RMHjDwBmcz^xP0kHn>0$6?C z9`-vdgVi_HVCCVDUQc}V9oX;h`&chj_3VHBwU_#y;ePDrTJHUj?-x$P*pDl{o_e)X zC&TB1<7<5R3)uMV3Gau!+T{fIqaODXtp2+d#`EJ>--SItDjICvSuE!R@uN1s(x3BC zKZKQAO<>n~9N4(u+GC7A`zwpU#!Fit<@t$+wDo$%r@}AHH|l&tY$u*u<_PyAe!N*` zKY0B^;n#o2{5F3d^&t1h-meZ@kMiIE<4L`X@d^FK60mZ08f<*z$UYc*DIV)j`(iun zcT5TUd_!Q*Q^)`tpXv&mCrbr8z8CiJzU;4R11k?(!{(Lx*pA(l1or)h!Nzl5?&ki~ zXH9@FQg?k8*3Oz{JHOu+Hvecm*!fuqw*Gwsteuz;)<4+?E3fkJ;(4g2z5zQw|J=#= zvF;wp_!~7~pF1Y<$oTk%?R-!2sF`8oJ0rJ+KYu~k@te3cTwc_Kt;cV<1^I@()DiZ5 zpKNA5V5c|mdd|Z*1G_%!!P@idVCSip|I!kbb_roPa|aV3)x}U z;is_Q|7+W^BOcA=emr+Y*zbHEHeOT?Hom$a_C00oiRG~VkNx%ETEfbOrm*p&3h=kg z?*_2*|KM!;BVQA0kFoxSz}lxRVDsUPVCCRw*!5K%)_qr)$>G~S>2 z`UDP~hX^a5Khl22o~Z}x|7dS(Kc%xj&rt;S`Abb@J`txeziR(%u=1)8Z2iZP$;?0E zn-^i{dpX$p(RcAzT>r^n?cId1_VUw7ybt;!6|8-<5jGx^2{!+{9Ok>_$>sIP!zr-& z_X4nTwx9ixPeozR^UVoc$5d7(PxKLN9KABEee~pe-Y+5^W*%}Q?0JPPYwQOWDctLGob z{jtY;cs+mreb`ujAOC+gte$HQ>%Yv0l_LdY;y5c{zk6y}dtt5Zrd_%iBs_oc0F=_WXAMtzI;ct*H=GS+-9?6Ftfz=bkVdqH*OrA1& zFZ&bk8xFglrZen%90(gjHsml=*6pU8NJa?z}Q(PZ)i$6(`E>zF^r%QwQt9b~`XQrP)Ck@=?m@l{0sp)$`? 
z%=ST*59Wrgk4gf&zMa3;8$LxJdLCRi*z*=cVDpOQ;Xs^9eaZT|h&=Ik@1p;)1Ni$w z+#fx@4>m7957us72W$Tff{n)-kMaIZVf}~^u=BAjtUuBl_C0dI#sk~I+A|MXfBGYp zVg1&Iu=R*ZVe<(e!KsGa$`K3J4)}}ptbO(Zd&zSo=EKUz`>^_~F>L+g4cm!-u2DWP zZl_`Qt9FL%D4 z7~@6W^O?;4-*iL6=ktd?KlcB^AuxV>%82KG_#O9uUhkKju+LQ#Hm_Xd&HOHk)P218r_@u#J-^7 zu>RlsGUqw9fQ?(c*NgiR&t3sL9x-}yJ@rNZ%H)s6bm#uqgSDw=G%xjDH<)C#-+IRlfCAXi$2-RQN#b6GI)&?tW3chjw6OaBG;IE+LC5gemmZmcxP$qW%a9zwB`QTdvjpNzg!#U2l=flu=1})YwpM2n@dN1u5pg~ zt#}{edW~S$*MgQX`Pq@y^YUJETd)rYm?*2Bv4IkN*UXctJ%GtbFFH?e*c89`M#O3_D)D++R1y)fql+8uzo}p z*tl#B*m$k?)7~y4Q@>Lf*3T{i8%N4-JMo)5uzp!y*!X>N*!Y^y@Avt!5%YokhwrO= zivnwpWP+^&?bVR`k@x-twr-<59Qpm-znZ%N?f5%)nLp+mI|ege{Qa?$u{ZcYPt3_SfQm_z%Zn_m2d_%7@*s^vC+oZ(`PUTI|6U1O-=7TjeEUVPcJ%!k^d}xV4-V}2gN^^WzKtu5 zg?-Lh_D6oyfR$4-VdLp7s`I>@zdT#!e7}FIaXt542zx%_6^N>&ReO&)LsxTj@Z{Kb^=Zf90%=_RU?)Q59-8HcJn?o}9`>PW7 zLtdTodhCDj&ny}}E?1q((H+g>7dHUjf58{8PVC!XP!{*O-!`i=z_&xpgS+Moh(_!NtL*dBh zcR!iq<-DxS`$WoP_0a*}m%5v;Xg43%{N!!0-y;s|OaCMkb{$1w{du0qYWrJf z?Rw5y0I>&LMAaT2V564UX(FTDYqUq}FJ2jxTlX=i-`yRUyB z9H<+H_1})bbBV(gis0SIPsgu4Y+R+Q@|^XS*ze0c>505I{^@$uuZxR*h?K{+kCLg6 zu0XrrEp20-AOEwujJ^+r%}3XO)rTq3caEp>-8|wl*uS4u30D7IfVH2zpYwfoQ|^bK zqkQ*#oC9#=ekJX%_nYzjvQld(4%V~-iXzUuekxw>G#x&N!E-;a1<66`zuE$vtB zp^UJ8eLPrseHZrcFKPeI-J2?VbnHXtL+=QCxM>&KIiKW9>}mJ^U53?HAH(X=vfX%o z;*YDb$F1*7+dW*Kbccy+A5Yzr_kX=+#(nI6{gk$_@sabee)mAw_~SlU|NDgP*g>mo zM{lO@#r&XtJ_I(tGa7bWyTJd))LF-ARs0Q8|&jXZLx3>py&sGxzSjb7#Ii^H`6%QeKjO-?y;wh}6Bpzt_jG z@taw93MJP(C^|0nuC)CI4C&2Lmt(SQQD_>^7zIQtO7ybKQUXOiJ z9JYSvL)iFX8Q6N$e6abw^04*PX=L|R?$Lo3C+_3S?Fymw77xKbB-+4Kr-ThYk zVC6|x+Ku0=hSkd{yqUYe8wUnD z^*{*hd`n?}^wlKTcwk)E^*93#*4JoX?S=J2F5w@CpZ~)8CLZ-OY@J3-p3n8uE22FW ztbA?_2ggtSUDf{ByKC%^{d8G>9(!Q3{nI6gau#+zZ->=GNA%~BAKPH{?>_ru7p;WN zpP8>wf5`54S}h082jYE@)649SJlI6L{?h{3dgh(p4?ofTkMDaB_B_nlwv+EW2Ya5$ zOxqdn`1rTR7stceBUxeXg;8+8|AgI7I26{NNs0fheopB1*rQKj^~s;aKa3+~A)ev< zn+RL4FdeqOuQP1EvkmbO{k43s_TdXyzcCuD{Wz&V@`rP4P7~j8oNmL`6YnzK!*@78 z0OmZ^39xbJDFd0GoD19yHl8^Nwhp5Oto_?>5aUO@^F!D;@&?%TSO`|m?Su7~-?u;Z 
z^C9~a-QBRoAcFr$Z2eoY*m$h_ z^Rx?g!}`0sVb}X%Sot6;9}^5`z2J{LvmH4W`)8Ou%@NrAW}gw^^5$>Y{NQWY-`AZ5 ztN)|?!uKWaJQX(1+Yr_c4VCf3F2m+=mch=`*Rb)~4RA1CY5e*)Z2Y*xui@+E5*+^f z=iHAt(p%X5A(=)p-)4-Nn1b;zUhygH`pOAguf7y^el~(#Z+Bt!{wm>$z8QD@Lna<%{7`%KFZ)w}@-BQJY-jXUu=^cy!P;vFVB>u)V1Lj4CT!kq z9jyIw&+D;=Zo>NGZ(!@@askK>Q#`&$1Fn=eWS8_)XyHh!E7cHAq&%J&Mki_WZoXwo~7+2KK!#z@E>hya~p?9k0Exd8BD$cz)va_hIWH zF2d$7V<69bp25HIyu^WxZ|jfWfb~1yld&fX{m%W#my}fg5npHp2jk=V4O8U^`KY~9 zI)wR(eOHtI`VEI*>q)D_=0kIiWxUW^Rbcy%AIJN!eUC-c3Dw{LcSU{95w!t<~nslAN-m}@Hik;Cy|;}h>sqy7DE zduKTxv6o{`=lQUYA2Z*TzfENNxn8sBu)#oza*tf~y!0++E+C{IaXX5`C`_y*oSf0t8NB%YJek}J-XYGUu=?eR?Z}gSe}<3$eAs%4-mrdaC~W*b=X{tlpKO!TqP&6>Z@_{R{PF#$#^HV|l~mk6#r2pOn_%sY!?6DS23UJwJZwC4J*<2y4J%Jq!RCST$i!R!ww-)N zIye;lya+bF6(9DzqB#-mZ&{DVi>BI6{O~Dkejx^jK;>mL_^wRh6O`neUo9)Bqb?E5u>ozL-L z?WV77$DfG}D-R~Z#uHxULB5c$y$-8K9>UsVF&SUav%Uuh>iuAUU+gig9{(C<9__yY zE9d&b)?Htg`FAj^K0FUAmxs&9fs?TN6Gp)L2m4^-XUZq{DTKm)_nolyai74(hmOF` zlU%UzqkFLTdj~d;^%S-q^j>bBpZwun*f`O7SbO6ptiIg_>wld0dh&4#W#rmXSbyjb z`xED|{*AcG2P0wELr2)Un=fVb!RNMPKQypE<5t0T>Qg?3t$)Y@vrcwZggr0mnB!0V zRw4TnFZcUsPnUvy?n2B5_21{PeyIyj{k;~S$Nc)ZNNn9i zPgwh}8SUD?pUdQPyY*{P z@w+Rqa<3n(Uv>%(?4!4xIOD(eCvTA*Hh!|mcIu&TppOIV)9caK4Pk$epcd@9D+L>m zC}V%@t;*_i;v1uLhJU{zu=&ivu=@*Q!^*+dGV

;dIT8vEnVkA&45abWGouCUMl z@;$}}Ibl7W=ZW8et>23dTfcDtHtup5dqcZyBW(Pm0<51t4z{i#Bdq=yZhyw(E%u7} z#@?{`iY>7AV>6lO8v+}DD+yaa)dg1X=d&F@r-ID;#DUEh-M}8w&t9E_=SS}R0h_P* z9@gF;3TvO&gVkqgVC{>|*~8yIChR`YzhL)2-NW8;o=t;2ujg-Ad!aw9f8HGS`Rc&h zh4t*u`ltr`{JCN47jwbN(U;hJ>g_1@C!hYG_8{jS+{hL_zK3DYcUlaa57`TAfBp>n zT@S$egI(>(Q3W#Az49&c9x;@sydc_Dd(&c=T}C_Nt) z_*kbl0(Sp-CC3l>KMA&eAr?Ze%1IxKUn{un(GIDZ6B(jsA84mbMtZ(h#b+G>7SlBv?{W5mlM%O>z_ZsZF?TmaeKhzxgVfF;OMgUyG3t$ao9e*v3coeX>a^Y^fFd?M`qe}!Eimtp1iY}omk4f*f z#d@T7(Qo<(3E-gqv(7a+9FS+~Kl1&lV9#6H1e^Cr11lFF!j5lh8GoiI`q=X#z256~ z$=E+pVB;U}s?YJ4UhzEIjx#b%lDEf1T&ItVKl0`>v%_4~H-9b0%k^|NnaF>K!V4>+)1VEV z*nj4?%i_lx40_Gdn(f!zODC!c16y>?*9?m(OUx(EbKf>yfH?Vmx{aN!%+3-J=hdupXeDCS7^Z7AsedsG#e?B4pzWKzruzx?9 z%zB81zwCT!0y~bLzvO%y2J7c_k+FB@$$ZxdGIH?@tUTTiYfr>A9zmS-A*@~TF|6O2 zmUx7D$!}rv1Htou=D^mue?q(GDQ&SodayC9|91&?oqP#9uIY(~7#HgXE3coyo*!`m z_V4Fkz~9Fod1E_r@-x^vm{i11)LXy7+WX~T=k)~G?|~8wdm^a^Lx%;j34J= zdOn)EMh%S2X_!Q9@|W|p49&Azt|F) z$tUGn{J;Lr?<27J_#O7g->R~N^+G<-`#T>N!hSFRzWIoCu=ct8^^Bi)T*~uOAGZ`X z{yJnC^PTq^Wjp!!$A5D_$-~f!#|D)7>SHg$<_8MH zzVAI)xwm=^%>J5fu=e(Xwaf?n!l~``4qKa zlselA3NYZ*!;J@r)t7NM z!o+LtvK~C&C^f8}e*_zEDFhooib{X=aaCCRFE*@w)ztp{7ti~n{|Cd)zxc40$MK>n*$w@juV|QlB@2t#6v@^WblHfz3}%gw6j9g*{(%ENmQX ztk?70^I+|bRj~HaOxXIWrLcKr_sgm$+id0gVW)lzdw%3^u=a6F*!SB3tIs|E&F8uS z2lsPp?`GV_^~4AAdq3{`F6?0;+Kr2&_IU4!ce~|IPedP1yRht@cN6w1d^F zn`Qj-j&SgNxWKtru=PXk54WDW4{ZIf`>CDZ?)O!Hx!>CST!{S{zsa!n>T*~+bDaH= zBU@nOuQGP|%N?-4M>7F-f5SdFxWC)^bfpOa@+olgYi-AkaXLb zUnsyoWa8ANVfO>MKiv6O8FpT}pWJo#1+2X^3bsC=z1OpDM!?F^i?IH9S6Ka#c?aX^ z{$<#BNNL!4+XB}A`vlg0`qKX7g_^CfoalD@PXZ@_B-MC;knfbrf>(P%tz{cGV!Om;# z4f8ekVfSkchkgHNu=&VQGUr9VhK&zQgpH47+707>AB5HS@4=4uaoGCkLa_OYbFhA3 zIoNuyE3o!VP5a}Y{RivMH-Oc5vhuK%{W)J!`$RoE77pIe=gqi>=f_^#L%Z>ci!gT3 zt`)HRMGNeO$uBMNe#H0sz}|N{Y`sC|*^%Gub zUvoZ3#Yoqi?`sA7y<>Pk?B=A0!sX+A`uq1X!}{yDVe9$xz{WMM!0sQ*3+oqNkQuMs z_Gdr)Sy*|U9d?|JUz>-{0J{#4!RqN0u=@On?c^`x+D^RcFsvOAB~rWg#v`6r|L!>L z?&t63^AmTz==I#UQzTXnwTF!-`FzH)+t`kOc@EaT=?eQC0e^#dS`7Z4_VPB`m7mdN 
zQ^+y`P#sfaJ9sSZAcKk}g=C8hijVoq> zjra6|U7tzpPyBZ{?0Pc3puU)5f9ee%=zkO6S`G)})#}%?u=?vD?b>@EGakly7sBR$ z*TPY#e;B5}&U!in8-Mx{HvWALw*I>>to$Bzobe?;(iwK2;!N25RTo&jf5>*^_7AY} zfH)`ke$;dIl#$QnVe2ag!0NGXu<@6Hu=RJNV4qXg{+|JBH~Kv0Q0t7i_uuK#af^K}aVu=SP9NC#&PwB9_y;{@>W%)iKlb(a_UF8k@8ID3E7vN(%F`cd zH{P9EW_=8UtxJmnn~xm{n@@X4yiR}M57_;8=Vd-z@{_DT?B9XL^N6EXgPrd^VCQW^ z*!WmCSiij!Y`mwt*E2uY!OHtS_UC+%!}iDCl+DMyh23xD{j68{_7vkoytNH%ys0N_ zJw;3VV?Pdt^+Ug~op}7Ou<^#vVC|F;*za8r){mS3`<*|AwQt75)*V%_Kl8XBti6#M zRvvr@Yv+X#A9a4Uv_JdMFT>^&8pGN%=WIuw)U-eQ*H6R7r%J=hr&AH_1z_{8vT^&& zu=e~J*!uS;yube1d04-68SMPH1^ZmhVdFvfVdvdTzK`<#h0J+xhhgo9s64-R*eck% zp4h(c`nqGM!_KoLz90MG+QQ1=La_E-b6ENR0c`%DHtc(qfVHb#U*=Ux*`Mbr3@iW4 zzgU0t9<2XofAv>JuP1I+6t>>>rR$mfD;Z(+=Y81tY)bp{UbkJ}_-{U+^65CN9=gE$ zDNpvo=0grS9_Wc3_NU%qEv)?@E3X#7+Hree&xe}`2k&P-eHa|D2WU5*)6M?Kx5u#i z8`{G9mq~qJ&PS^ctJlAPolj+8?Vv|8a{C4GcKxY@d>{4rLs);m5^Nme2JCyc@q6N5 z+=TruySXs6NkTVJ@n5B5BZ?6CC&FJ$5bCQ}bHd8s3b5dpX<4_c-6`z~-%Ing79m`T^D-3xTco7zVo5Edy)s7Pp=FQ484Ty2STYUi9;N z^vzb-?`u9*J+KnCu4=UXndcp0<&pVD_26#joB4F+oB0>@opQ2}jGx*9_WY8Pu=4Z^ z*my}`exnavU#?HrgZg#=Y`o-8SU+MItp3~vtG9oG{r-n#@~^gAKXl9M$;S_ZwJ%=5 z#(TV<^F12tTY1(6)}CN z7|}k&{?soiuLAJ|Sh?W)d4ARxwqpmp<^7e{HDTq7?0PQ>dtOge`uqNAVB-sMVdFx5W$O#;z}64hUwf*t{dte%uIafR#TNVZQT$Y>q$j;w-Fvn2!0Ryxj(Ck5+)yYg=LS1?m&mjqLa9Arl|l z4x9HH0~^oU1N)sP!Oq+LuiB^{X$z<~LWvp7(eY_MD^5u<;Dv&$!cG zSU)p8?{9wokj(xx>rsqH9koC81l3{lkFxb!pTg=buMg(egYv=o)RlJkM{K3t`VsAG z^9?J!A9h(!*tkhD*!LI>`#i@PKmEb+_QyWh;QY1z0anlc1Zzjj&cBL|xB5+{o;n|F zetHocwEx|YunSiIYJWPvPQ%LAFux~$)pb~V>?CZx#&cMIbU$ofJ|_MKdF28-yq>s0 zQvDJ1(|X&HKY8H5IhwHFv8aqbbH3X)|m;S8l-S?;pTkl&#j;I%Kzd;4q@z&q7F0K^pdBN>r&0AeA=nuVKf35N!U-dQAQE@vz_bQ`k7cf3Wwj3oB1w!`c^Z?N59z z8RKcZy*sSDbG(g5n2$FOJ_)uS%=tNhvt8(%*OD@@denCOt!%LIhh4CKPC?lEj6~Rr#!KtS_~jd6 z<98!q{ppn0pMm@sZ2fO;SUYGg?0P)~>yPZVKla>tSbJy%?0aX#-nD*ntNp1T$qfhN zpZ=Y{VfUZCrrq;udt*PNH`^8<9&DUqI;_4)1-rlY&BgHd>j9f@K6aVwkq1v?_RFTb z%J)MqnGf;bX4k^+A8LLC|FQLT?uS2Oe$2YRJFs%udI9xm{{I*+{ASNr_I(?{#?Lat 
z)}I`M)mQ$$ll6bEZ-hV3T-bb8&RblMeP#Yry*?M#Z_5iC4<2$m{Cf<5t#>^MDnJ1Y)ja@$fxA1%}*bPt>^d|wm!2-81GA+!U5QI(-#ir z+s*gSg&mhOw5tz}z)>0`+>7Z<$G`|@ug_P@9u7sKW=yTHcFn?9f&eY+fXfA3gWd9z(czwL#!&kn=Z z8>D*}zJ4CU)<1QEt+R>CejW33f56I9_xBlJ*bFQG^1<5U_h9oO#bM=3sYi?l_1>Sr z+E*=Q?1Qhoo_yS&aNxXQ*!>|pVg1>^VE0=`d&1u*UYpMSM8vns!|Ic~GWzU>O#PDQ ziK-_uJY_zU|LYEWUe5@bczlS=e9Q5S@5wq|11qn`!=7t&0(KrPg_YlDVfEbc=Uh)d zD>?g}jAuuG$^5`Cs|%~ItGo(d-|k1#A3O$|-~9_#e++)j^J5?FhJC+eZ+Tw6m;1|< zBhO&%-mI|a_4VXDee0p}!NyO=aNfT5e<@f$bw20u>ra-qo%P<(^ZUuic>X#U-BJ^6Mk#y@Yv`X`=OX8lG?e!s`@FA3`pWq^bG6ODfrf`juz`LNCYtml86 zZ=9F&Bp&0*`I;}C@BCi(cz!R){i@?&$M-Sp_&0&6BM&*7fcA*>X8tRGLdF;W@DS@y zz52P#`Q%$*<->Z|di5EuPvXgcz{WoY!q#`blF=6}VCP#ce(%Whl|Hl`f9N!9zTjP0 zIhiGK`0tg(h0PDNhdsaO1@c3GZi(&W74E?LOBwjRCF_mO!rIBjVDT;vRAN{V4sD z(XjUV9oTv3dEwe^DN}?$&rI9d?-*Mqp7O}+@z4CdD!*SI#|QZ{7xp}rqOf%&V^T9; zuvf~#+8Y~W=4S=i_|u1J{?~4u%y!syT#M1i% zG3<0bWB_3CEZ@k1NI)~5u1 zpACEKtMrT~@@h8i>g%sz{ddR9@AeaHy@2EIcbXt0S4YC;kKVxMPkPxO|D-?$zCZCE z&v&=(Zwzd`4#C-gJ@gg7r)K?SMp*lM2yDDHC9Gat1DoH8CKDey4eM_|KtHPI9@`&z z^aytUeC$j-5B{a~Smvh+!@>G1<#rv|c>G@4okzdH#;@1I`k&Wezn{OiX}wG0%rN`! 
zN5c9oMPdD+zP2;}4#Vad3c%)b!|ab7&S8Jn&r{fO$Zmhm7l@LD@x|WG25Y}&i^S^t zrLf=U1onjY&6kz?5r?!M$@lFC`+IWMA8DT+gRR$mA69-;$;SAQPkoMkqkTI~CO*Cs zHf~-tJMx)2fp1{teFa$mz9sDU911(`AHdo-Z(-NdE9@)%p!hj>ALK<$+Wq~OUtsh8 z`)D_w7~?(0oA;gtTW47w4%TxR&-)ct|61?h^=o0Cb6gku^Y?eypZu=-?HsR7Ik_MH zMj6=k^=~fv^Z&ELo}W-YH{X+WSOj+bLSgqOl(U`v*4N-Ksb48Cvu>m0;e9#(LN@P_ z2loAPM{reGJr<~UAYa-H4%YX&F3-y3lQP<$`0oW+zo!_i{qkL2o(FmUtxR3u0oeIt zJ+1yr^?ZCk?&E$%?TJA$=K<`3)uTVd$}{&r8aF;~JMo>pu=YxY{EQFw+6mjK&uIlK zXYRnt+n%s-ym)?3=GPcl`z4c1Ty{2Wyw&<3>jl?&J^HvP?7TY-2i7NS+~9@RV;@w5 z(Th`(6yWc39%Mt<_sa{b=d5qipREQPuV@Err}co9cb#F!dp>Nwd9dx|?RLS&Dg6Bo z<2#pO$7{Cj$iD}$`R2K>`Tzq;}Wy7TEm<%V~GMT!F0@ z4*WhTap}hI^Y=M#Ww+OpH~bS;UmlF;pR^EvFQT4Kd*vqV{a(^;JR>o`kL&)fRIESk zg*&kNE(@&u%vOZIhku$KwqCA3?0n7v>)$5k_k^|Qvw1!8`a{@y6o22k-W-G%C*X zaz0NXnD1V!u}mJ&dTH0ea@c%JZJ9W0%o6-P>@k18#{7@J*X(@r_iR{pa{r#NU2lz#EVDkn39=7uMFWC5gcNlZ?tJyOCO$cm!Wgl$5WfSbWXaQ?K z?uV^s9SNI{Ev|h@JaZB3d8_?l@4pj{GP>`it=gOPI{-mE*%QF7x_X@D_?v${0XhqokUN+cyJp?u$QON$-jSFGpqGe$H-~C>XJ}quL zb%~xwZ2qhq96bNn^Q+3j#`jWVe=Bb)dOdo_^K9HNQ6KhvovN_*Rb$wBT^sg$G==@% z?O@LQ|gxwEu0(QS;S=e=W1~%V4MUMD;uAi;2{)>NKIdKy<&*Sr1@0f-2 zrnFNYz{b_O!`c}y?a%(3bewnPd<-!9yAoXc;ONP(Y1i%>0Q;T^d4J>euOr&i!0OxF zp67*KT?Y1hu8YLRC$7Tg&#KaH{YnzgA7ee&^nUF3tP1PzH-@zzR>S7YzOz4Zu>zcD zrXBP%to&~Pn-7@*yPstyY`kVSZ2UVJ=c6gl)_FbSn;O=h-2|IY$q8Fubli60;f-MB z!*$r-R~ibdA79Airx)3u_;khcydUSOOoyHKLt*Q45^-Le_Vi{s;{0FbchwJB@6@Hm z;5<0bA5H*!K5tUk@ooq^zuUvc*Gs|X$2-F22fNFR_bS-&7zewq7s1-;|G@rk$sSm{ zye^mBC*dg3U+)h*&n&(dAsH-uEFZ9C9wM^#>3X{9*3P*OJRS1 z`vR;#kg_u4OI~FWtp2D2Ye#N}oxig7^M(^&4;~$wNG+Yqd(_cX01qn-g`3ae69rR zM-Hsccu=p^6IOoosSz&!|Ae*w^L)hjCSUvz_PncNu=@K2Y`pLzSb6yh_V;X>!=C^2 z26p|ng{>cb3Tr<``Iz@5zOWB=zJCL&PqI{EJcvIfsTIB+io*JTd12!q`CzZl4{P7W zmYL@j?9YClII!{Z&am^+cH_cBV844@*ykK?JN14EVeN+rFnQnC1!3*W$*^@_rESOG zoDO?_;)k&N4d%dp&nmEa=gqM8XAM~YyJBtbN4-;P+p*8j)aCi{&wqw}@3WtVum5qd z_2ymbg@3PUu=(NdVC~HguyL?5^~2x)JJ>kM3E1rU3+$lH*=NB*Z5tRJ}$F4!tfw7#%$gm)W-kM983c-Z%_e(eZYy*wAzU-~Vgf6~VP 
zWA$NH*!5+9?f;Kp_e1{zdp_$>ICwwfV|QWmSAp-r-)Z(4f1mhqd-@yKUJkq7zlZhT zw!p#nHQqL;3F`&9Fo|~KrNcj`KlbNL*u2r8rmTPLu{p5zZslfN&p0fAm7l4;V0?)0 zEQYmTZo}?hT@LH-^lT1eZ!dza+x!vMu9^j#pWN})|5*Ea7py(>C++ShKhT1|haQ_3 zas3%seK;9*zQ=0G^E1ze!RDJjg8eT2VfER1SUu(UaX#c|^}lxOI?KY^!y#UeebyRQ zzB(S(Yqo;T%gu(hC$_@cWee?3e)0jVe`>qm=dsN15BYtqvq{#P=jS~8pJ3za^CPk2 zcpFy!j;7uE=uB8-FPVYtK3#eV@KwAMyS5hZe)eL!D2? z53999o|4~jKIrEyfStFUVE--Np7ABW_zmoP_k-2Ponifm+pzvZS6F|2YzNx$<9$Eh z_bhDOeH_2k8+(EZ~UgOY-gf3ht+3o;PUM{7Mu>dU!yJTyo=w3=VSdehmAj` zfP>?`iSrmTz}l0JukZIB92{@!3yMeTuRmN0wq7Feyv&b2UQfSPo+HW{jQf` z^T!Ect?=Eb|P^H4Uwmf3dpi!FuCXWZd>^-M8XduJc){7MGvKW>%r2Xi4R!{`w)?@lRfA|FZt}!^ST?kKXeSh;UadT=zZ|PK-;21bzc-;BPy{v}_7s1_c{~ty{{AD6dwXC3Y&?4< z?0(LgeRv<_+XPs9btoMAN7f0$VEwtHuytf#z}l}bVDkZeKku6UXEd3Sp|{t|y%{n7$9o_z{79#xF>>wfOvVCC8o*Ejo) zde}~$`8zo9`!KNQfP4U(|0xcuub(2ntS?Ot8&5t1>z~B*dhFjnlyB(UEBOD!HDXkP ztvi_on=g+KD+dPHj$L*2d#-2Rw1v$#gu>S2mxGOqO|hMImk(AR{Q?^gP75m+dcvN| z_m=pE_HSnyJ#!m&JZizN%gM0y3sr5${^<+*eX79PXFXv3lH9O%{rj-?Q+C+ATq;<5 zBO9zA6b05lNdl{fE*g))9(;?uH{NmxHeSC8c0R9wtp{EQyWanXea>;P^<6Vy=fz;y zc*l7ABX@hi<~Jt7=A*j9)_0D8wYST`<_W)sjYk)TtuOl$c7Ja3`YUhFz}APPfUP&l%iq(E z$_pE}sN&zJp1vfk{a6(?Ue^HjecQpxH=o~nh~=>L4HIPQgZ9D3N9Mqe=NVYNvj{ex zb>DXOH?D=P5bTjkkA!&6AFV&0qWk`@J5+&d+(UdNL*R)416t*!Wmh8Gm?>?a12_u=DI4)`R)T zX0YqxTUfa@LneQ}3pU<<7B*h=64riv0c&TZWPK{H60rW92RUHt#WKLo$8xayaq`&C z{B92$e=ZFhPa6sQJ*&&)OK;2MgId7G7rsGWSikA-by;uR88-jq?|CWrzlD|KLtx`u zPn9qH-sn8o@t=bHu|8%uY<%P*?0#G2k@2H!>QD5+4LGQOl?U;VXZpQ6Y1ht809!8+ zA9-m0Dlu#va2l-t5DRv`=R)3U@A>;4>bu&o@y)2TTi^OUY`*gy??*k@MEfHj{Jjt3 z3mMP{#$zkO+S?Uj=T}wO`n5)|=i|uwXA@xUVt;SUe893u?Zz`Rqu(6A^0fQ+m&3}b zJh1Vtr!w_7*0$L{nf|o14*h)>{lJy5 z^|8-+fA#e(*!g)5Hh+#?`94&wgAS^YgY?bTYa`5k}1Mf-9D>^kv#8gDE+nEvd~iUu2B zPBnz@d1c_6hxAu3Z->p-U5E8=G7Sr#|Nj1q`RWd^dSEN;eUHNdc?BSd58aRemk!_!rDLWY1jWA0jr;z!^;0j_Q#I+40ith zX@B%)9awu~HLU+y8P*Tp4hQwA`@4?9e)r6@>vx`k^#`A_z8(Kdu;1Nyx^gWHcK?*} zL-`Q*XU3npZ`pXrE83MOGhxU70j&M<7&f1K4c31s#NSiiZG^4A`xG{wHWUu7clA?d 
z`}6zN187%I?qIykA8J1sANmwF@8Ej3E-n_V{j~=6_eyf}J*>C&_xbdP{~pTq)DxVB z-9Ip!@2h=!54PSfhTorhhS#uvuPXCH`#TZy!}#I==8u2J-{a99YzONf`Fls|zdW`x z&U?K-cGU~!r|Wqo?Ea0Vuz8vVu=4qHSa}zCe)L@wSi4}Me;@rakNK~Cv=KHAP#5+* z{PyS)OSr27C!~O3=Tz}Z($`{wkbJnkNB?D}psW$9)D+zo5C#-+- zCzb3^9CQu>Np8 z*!kw~DS95mJ8&?c;yT@ny!1Qy`*!ASXTk0pGGAgmt01ht3H+WOcI`vtwe|bW@n@_j z{RdXRkATgeER@;*zd(P7dg|G*e$E@)@pqN??w2Z}Kc+ncyZ-CK)+^VQS>HWi_gCbA z9rs`L*ND&VQQr_>SPmPnnWufuJlYF;9_?7$S*Mp^>*Gel#ueh|KVm=s2CIK_!1_ZA zVC|clu=|_-f{n|yv_J9US+Mq4C)oV*U|9RT5A1&WfWLq~`3bhpz7*|#m&uXXe8K@( z|D_u3#urlIA8SW-gI#AW;GjR^`B#6z!RJx_EwPm?T7+qi5VIQad|&liw6*KU&Y0sprgtUa_A z_I!`5wo|XM5BB?KhOPfRAL)L6p9gT{cybKpV=$g>{dz&zeG|sl)!$`c>pF~22K)n9 z{hb2VKhFaH$T_e#8DIT?!mxVv3>=*Q+CwK{>kfv}?z&wM8?T-UyDpc))~m0FU4M&V z{p;(n@tC<@PrZ2}*1zXFguuQ}CfM^F`g=Y0X(;UeQRj>M*e=8ROU+@Q^AW7SSi^Sn z3dkS*w<5MPp1F`$>eI}y_Rs)WdnzGpz3xod_)8+0cx*Q0pXVWWwLfyX3LMN=Ie)%^ zwZl5n9%S`jTUdSllKXi+bpqJ=^bFQ+G``||RlfTB!OvMwu7_5z_RJI47xlrv!^W3h z!uqfKBei4ag}kpkXTRxP+RfiJR^H=R-m^b;(-Bzx6Pv$hKDi6}!}%WtFAAKkQBQvvpN@nUC5H=V9+>zRUdPTkV60{E2?- zmJ!?!IWdNI>x(D;%JUO{IqCJB5Bhv0@5{ItPqdyt=_uX@|4nvZVqsW)^OpXOTY1>{ z%wz9I9P=w!KkEqWd-Q^hV;is2A0Gni=LGUK*f*nOnhtmb2sE4S<)k?*k{^fBypD`;09MH|iY z<#`YKf!(Y`x(suP3jU8`i)5I}$6Gl&{9O-}QdX zZ=Y8=n-5msEC2Kl3c&i;%4_vWG1&24$M@F{Y6yS%^h(r4u-~l{9F!l{PyQUKzx(3) z!pf=L^f$g?yK?3wtUW)$>-oEB_0#~Ad9d*(<*Vy;6>NM>whn3=?09E@ zwX^<}@pIF`j{9!d_>%I|^C`B&#zo%p_mzixVDl$oz7O`)df52srAVyZtUT0y+(kQf zWx-~!cG_9k{rvSJ+OK&%c7FxfdjA(Pep5Nv`H^%C&qqA7qW!TKlEc;sR+fn`rn8;% zLmR-x=ZnDlo9gF)e_=c0@}KS46W))!!lb*f@-{K-{z>!m!FJbSRNh~^DJt#O5jY;o z>$g0g^(bj0wX2^UfAf(s=&zlVQN}OM04qPUc|G<-OW5(s3G2Ui^ZvvsbHUnszrn^Q zW%n_^f{hnC|IHhx{*CuxKj;P6^Nc#fu5;u6&i7uh^Qa2)Kz&ge*1l~dN8Hc+Q)gH^ zCN=lBo^lK9-w!+w_1u}Ogv-l?v>Pw{3|8-6gN@UVfPJo!{(aV0!KyI+$qd-@s4Bwh z&1JB9wLWY;+HqJr@_X1k%SXS5%hRd$$IlrAoBv+`tB-!Now&*0u=(BAwo@;+-Tu^Z zwu0S%dr*SprGw0h~wm}6Rf{C8&K5BB@@hK=L)h0TjL<$l(IJb<;M=gHIyl&Hb?q)ukJ zjD9HyYfo;3uDlcxW=% z`>%l2pC5BS&sSZ^{f!4)fz6xV@$cauC7{3K{>*mdKnYm?BxX&nCq8>_0`G%;GalB? 
z`(Pq}kNo0t*!^j>VZZAx+p%}Qf&+0e*!X@ESp9bsR==l#^{Z~czSkA*@3@_ZgX3)+ zBCAY2{0PT~^%c+ltouH&av~8N{J!R~lfdfN(X^{CbHUb=I$q}2e}z4t>lfJh|H$jn zZ^`}p%)bU7aXtE{GORxN5LREzfsLn*g0=7df%Q}7z`pks*!?yeV8`_-tR3|Rc0Oh0 zef|5lVC7^5*meE_R=(ARolhx0=KCQBbHJXzoe?%3Gy`@#cfp?jao_&zM{Zn;`9nM@ z%OswU=V}1!?-Yey=QC_)eSQR+|Bd=dxIS(IYhMq9{d>h>?YfbXn9n>e6t<432kpku zroigs0kH2q8CD+u25ZNzhLw*+VC}DMwiAym51a3PuQua}-fa#?VLxV0*m(F?u=BMd ztUf3WYY)`0zj1#1BTwqU#$Ou2*2C3>t#@iG({Gs9BVYT#<|9_Y=4ZOV#zT|WVSI?M zHiV5sWrNjcd2Pr4%o2(9r;EY*UsY&V-j#%nKiq+pgZH_=^DNP1o}cqSiqr1;$V0pG zb^>fXCo`-aILr2k=e3^fFIfA?@8P`P1v{@2!|sRQ^5HddL3A(?wp-^F4w6dz)eN{r}mYeEA`-$KJaI zo4?-)Tc39icK^-4Q+QwU6ea8PJ+L?C!N!B@!OFL`Q@KC(#m}(%Hpw)ek2=ETu=+0~ zG<>}#ZV>){KTHo_Z&hHQKiMCA5AtKJVDkxsXg6Ng+jiu}P*{6Sc9N-&{s7h77h z5wP-hKdk+@Bog~QkHgOIYqXnBNIjS5LqGaH`X6;*^+OAIQ{eXqxgYDGH|+eJMZ0m+ z5TA!SizBe|bOx;57;7Hw_|Xet>u0LL%CXh3`Hy?B@_8?8Jl4PhtJt{PSVr zV=)@hpL}9<*m|f$u=|(m+0OVThTT8W6jt6PfZadc8Mf{!I&40mD{MaJxqlD4<6Ez1 zKHQP1_h+8=-Q7Vl^M z-wIee_&RL;?P6Gca|w1H4~O-8kJyg=(7|@>siR(xeb5;8JgXD1{;2cE@xBIY=cI?V zrytlK`!5EpUWv`~`1h~yeO)){VCBs&=LhyvWmvyy66`+EzOZupbJ+R1UB*7m0lPou zJ>J*&(`DrW{^TLp?==86-?x7O>w$bzd|3Z6#$U`&_Io@ zuKXd+v;eo*`{SP{UI|l=l^|k1G=dLgIXxi<92g&1ySxPKI-3sbXVi2)u+A>P#%;TM zy}#$N3dUa_HE_LQe}BA& z`=Nh&z~<%m!q!cUhmA9hSj+e#pEkgrFSHTXuUG?H@33?ojJ&%ATff_7J@11aiQky{ ziyp7Hf%n0_FA6IkewUG#)TuRrwF9QX#+kl{jbA^2t$Y0ic77Mx#Qm{f zXTsVYgJI>_HrV>+?Xc_d0_=XJESvdz6aJwGPATm!q^B55EZe+*jc8!vKUv~pm{+5T$@1Bgr=9_lH%FA4|o5!36JHF9i$Elb7IdA1Af6x74 zZQ%0MgDr&v@(BLX-&cV>Undpp`Kk?J>vQA4>X*c@`Nj*3r}4HujIZa-ZG@e7J7E3V z^|1N;-(dCgY*>G*JnVWLW;=dgEZBJK53v5+3cj!PxL?A`+s?52#Xf=6lSyU#=Tfla zUx)dneQ<~Q=6YMe{4<}sOQ!yD2=mkYQwVHaA*M_`peO8ejAVYhzrHE#JWeVjpGv~U zJNB_YHoaLoDI2UGJp}fCalD@Rxa-L{&ws2h>tz$c`X_r}<^Dp}zjgV4u>PH2C6pJ$ z_q1>Py~2FT2mI6)uzAm?$Q$KXdD!(H3cJ7W3Gzw%bQ0W&{NH`peA!UgJm_KAJb5>- zM=t*h8@EaV`@Fxy%JXA?GhW!wmAsz3>!Rph?%1i8*sj%l0 zYrk5LI|VkrcnT)3@~`dI!5x5u_cOn;19rS-(5~OO!T#v)*|2uX3Rr*hFWA4YzAz8q z^ZED2*&n}WBW(P47;Ii{Cv3e-4_Lo`w@iNS2Ux%E3LMlg#_#XK))!W!UHSDkqJKd+ 
zI9|p#GQjG)42+-q!IQ$;-5Fu^eq5P)#>_Im7aq;)v466`?vs0ozA@jD16H3rgY_?S z!`dD9VeO}au=Nf%W!6hE*!q;qu=B4LZ2o8mtbNcPHXkwz9+jwP(Gb{iY6=_2nGEaK zzYi-fx54heO9&gkJ`XET|HEH1p79Rz&+87s`jcs3>p8Z;u9tSO@!H0)@6`=<-%d@~ z{Ods2by-TLo@9vq$!isr@s~%z)~n=zl@Aka$NouaJO0;Xug4!t4Ex@5Vdcal{7>zh zWw7%66s%ve3s#>kgq4>EydJqa16F^Xw;g};XIS}p8+JcNPdKQL-CxlPcAX_r-;y7% z16%)>Uj2&wSIPd=(^ZAt&y)r>zF0$M9;AegXZ>h@#{WP3apmD++p#k@=>OwS#XpW?#WjcsA`adBYt zH(g=lG0%yAXy5mTt^c@TJOn>+6s$Zr30r3}9kz~T7p&a)6IR}Cft_#bY)2n1gZ-Wd zY$qNt3s%pagqZo zRSd7EUL+Cn*m(Cg;z`DpGs*0?UJV;Z*bJ-pvcUS6*Wgab$9G`kCvRZm(+`PX`F%1V z|DB&FVEu)1uzvemIFx$4PhkC&6|nlYC9FJO23ucu1or*v!k%|<3AP@on*E7~Tt@%s zZ*L>M=J=IHKdI*?!N!|DgWWgr3+#I73+u1P68n?_>udmUlN`8NHX&%I#3=N(x8e=uykFb4LD^Ys_l`kR=r z_4DIk&*jMs>rYL9%{Lc>Jzr|7O#M;?*gU~(Sbw2DtbG&;d*8vZ^1<=9-gZ``>-Eq7 zfz=nkdp+}JyZwoW2fhdX&01LhtUv9p&jqmlO($5pVxrd*&uk4FU;fSOkv~oBk6k_; z);{|P*1tP%JMyPG9JD9Rzn6qvkF~J}jqetP^`E-H#;5$g*6;O))!P+e^BBLt#tF;H zH~O>bu#napxSca(ptZ9iAFCzBdx~yCj5NSDj(&Cyl4OKe``m zeB}-K0PVu=u;&Rph1LH}VC!$+!mg8gu+L}yz&fzXuScK>mLtuXS%cI~})VC7~>*nC-X z*u2ySu=$~Quy#x?SbsSwtiPHCR-ROY^^;S;+IQ_>&x?!!s}K8mJ>&6~{EhkO5wLRe znfV^#BNJflog1+GlJcCHu;+KAft`=$ z*R*e|!`9Or_j>f^XSP!>XMgJwJHqa7JuS2Ux<724#0A*%`F@9ke_wsO9Ckm^b=u8e z9QS(ahp)oM+n&MB%P?5~G4nt4XTPBB#`h}1+7n){ebNB7e&IIkd0y>c^`PzM8+*de zSD#0DGZ@yN_x+UDW9*NAyT|K^*G{n=`)oU`{W1@>-e)sxynm(riJz>4Js)cqtlnGg z^{kI;u=PnRVe2^yZRdT6N34Rai>U|ueK)}Bx9?!<#s7h|vj@TMXOW%%A+Y|Tf8Y5v z71kg1?|TlyDtJ`J7W>Q(>UV8|t!G&Rd(KH1?7A8aYcJ*4!S_VZ^n$e)>%-<3zlGI@ zGhxTKiP!V*Iaqtp{HgKeSFrJyPhjJ}1^?xK)C*RCt%oZO8}BSHlmBWeQ;#4UxBpS* zJC=m?$CtqVetBsbc@TFe<4L__4Ol&r6L!AVhmB{HhP8Vez}8Qci^TeGO1J*9R z1ACrx>xllheLvy}jbQKh8dm?6gRNhGpZn>*C4%(}cEQfeb>!Q%=T5=q9f!c~=ROUq z-`l{}ZOnu{?1XwAXIIj^72?c=!`oyW#|F+&#uF#*6)a zhpb27oQf2%^{;zj$GaBnd3c-QKs*BW?`?pUd!b%WJ=R9sk+1W;o_V(kHlDE$w*G&o z?d(TB1^asiyI|wqSG}J42HTzgF?Mr*@=*Rg;{wTG>$3cN+FNyH$FTL%f5X_Im1)6T$i){=Go_5#CgH zY*g>(cS!~t-~Ytxk*lw{UqHXW#!GL*`lrQVzfUy!ThEjo)}KfLn?JXH%Xm@;8T9W*m}n(uJgk!6Cj6 
z`)L-z`v0Y1<=z@txm^P`p0pm;pKK%(PhSU{pK1-OFV@2P*WbYYe%0TR+ReX?hqcFR z(r*3YFR=BPxnSeSBVg_M6tH=rA#gz7!qx|NgS984+MoT_U105{TfC3&*$#HSMYo+i zRef0b8_WAKuC-zPt=O>pRcgYNY;e9irwr=?*1uUfQgU!{ii4}S7`?D<%* z_VIjJ|1%A2{%MWv=+Sp!?{fpz&&mXAKPNlP`*7Y*4%=C88DP&x%noaBmVlj)_Sf$D z3bvlw{?6x)u)jZUyLp7Mu=YtD*t+96Fmmd3G}w6MW>|Ua`3BCzGqCF~39OvB3Y$+( z4r`Czvz_zQlETLSk{@CGu#et>wHM06p6}(~b6z!rjR(l;hYqlE+5XyN{p^q4c}+c~ z=VJ|k9rtIj_S_&?JJj4$pLGR`F`3{#bDzpiF{txM-|w5>v*v9p@Y}s&%WXB>nHSt%_qNv zwHN2Z`V*dK5wI6zoFev3U)u^6d8LfA=m5Q{|u|255mUJtk3rQoQ924 z%!ieqn_$;#f3GKBuoQNlcs`5sbp&ku?+e&??*Q0%XDwK{@-=K+`a@WI?Ngcg>3J3Y z|1z+C|NF4}+%-Q6j)tvYC;+jjO7Ug3Gv3mstB$y->t9jFKA_XiUA_pm2w({3IoHmn?}?)|VY9(XyDU>+cL~d?FXD{5b(T{yAjwbcbQ}aT-{AW}p4} z`zQGO+IQ<=_mixKwab^lu3zV``RJ8iPyD4jto&UJ8$X@o_5A&@u<^#Ju-|h8tQ`9Q zw*Idtto|=X|=jUQd0~T$#9K82eexXU&J5?{{F& z(>W;<-*f+~{!p0AI(GlH>n_zvzDLCVf7erX*zJfOQy^VdZ6S+O5y50;}(O%J`d2V4wFZSbwk`?0l;a`f~FQ>Ad`91>!=69j@wk&--jxEp1+5@tPbm+)rFPsrC{gZ1lzIua=`Xm1DmhU2nY1H?Z}@ruy%8i z3w*DL^K^|5)P{{0zbZ+8{DtnY{>e*N|L!N-*>8Cp*8g7)o6kEAYY&9M{$Az3u=8Q` zMaGxMSrc}Ae}}D$87{LQeKc%7W)gH`TNv$4S@A0O2CdwH`w_8=df{=uVMYvA7JC*pTWih`@_nc zk74VFLSf~7CfM_;{8die8hktwn)}A;F8{dw1CH(of$n2NR4m;o1*q=B|dDwZn z6xJ{J3bvkYKCHhqz;^WcJlOhzZLsrsA*}p+0{i{`hRvI#xXO5tuk-%;1%+Yj$v49K zLp5RhZI{UxG>47%?SaiF{QwguZhH*Yz8(x4&pHKbZ*G(E7ws=!fQ_&3hMm{1VdI*z zb85oI`mknV3o%^&~uTb9W$zPO4&n6P%+0NDN0 z4~lbt{J8b@M}F*w^YWb3>c6SpkH2@zcIu}>y&nIp<_*3V_4V^%<5#0#KVf|8_6z{@g!c zp`Eu&a=9(@qj3|&|J*Oe{g5vwV9(76gN;A?eCFLA!2x?A5*yzd3cFq|(QZ7m0jzzz z7go*|g_Y-vVC}o4uzu7uSbH<7?c~eG*iJn41>eVdksh%6EHUlcf1kqoBY9!vS#nst zS=#$iZ}hq-xT3n}{RKk>Tbu=eUi*!g*0#y$)2di4DR*z;aIA7B0Q7S`_U4C_Cq;d{A1 z;Rjg%ITx(lZUq~6DFC}~yaKFzDDL;gUo8SFhsxQXcv=D2^;;dbo-q%sJ=Gl6-^&T> zZ}*VVAK75XVKS`!pT+*fb2h@x%R;bmrQNXlswnJyJ_5U*bHV0U&dBUPN)PM*U4mU7 z(P86LCt<(Gxes_g*7pI}eL#m`$NeAJaoAxy_S#%n`MC(Tp7AqSzcvI`k2myse!pS} z>^QZAo$noO=lgeujc3<~^^Zow)~{5Bwf`2w+9##q;PY$G6o>Wa2JIl&cum3x&Jl|_O_RI$RbDr8x*za)-cE7%0-6@|4I^25gM{Cy73zx*6_KfwLMJU@PGW7zq21J?g-0INSQ!rIjZVe|LfVDk)T 
zc^>mnb712kTVVHRhQj(ED`DqH2(0|y0J|y{tUcZh z4!*B`><_SZNI##C^E&I=&b-_W>sOY61NI55-B=VhJ{IFHd&8(+Nd_+Y=zhRr`cg_ZA9W&FU5 z^j97Zg^jc2g{`0I>-E%kmxRr4wTG3bm0|6l8nDk-9@cLw2M6pc*m}%juz9UQu;;Jk zgN;wth8@q0u=&d_u+Nv)>lufku<_z}UjKhgon^dLMc+W_k}joNKtKTj>6Da|k}iQO zB}z(2BO%ft-6>sC(j5X)(kxGWM%JB}gyD#Yhti4y={@7_nAM*F`dlJFsmqTFf1b+`W;E%%U zBY$7bI=2_F@u9`A_HyAz$Y1QVDX@NhJJ|CU#=^eeY#6=R><3u6x<*D0Hk0v-hQh{Y ztH7?uMqZD7kq@>GrIP)z2U5t$-2$-d_)#|cC#yCw2W)=(0_^XTq=l{TI0zfxiUS*$ z+hjZOrX@VT^}O9-&)cd8yFan4%znW2jE~>%0O8E_CMCAera7;yJ0o# zzMXQg^+(HK=R--@`pEgP^Ct`J`OYn1e}6L;te;xXcIqeIX5oIwt;(?W+*8>4fnu=v zvwN`ia9Y@U(5v=mUt1j5c=JhE{T35ezHNk^Cvjot`zY9PssTGbYcv1vd5|-chswkw zzNcNiTU%y4`*=P1kn}SC>>#hF9^%2ryf6OlaQjn_^^c4{JQlW|Yz1up*|2esNwD_i zPT2T%2Uz`e88+Tt6L!AbwLj;_6_$w${S6y8OabdpHh_&+49mp)BtHEZec<=W0y{sC z!}?o^VfQEOfR!&Fz^=2Qu;*?)$Qb(kJz?t;_rtE&?_lk{1+ekX60r8#I9UCV(f-W) z9)j_OSXW4(vKB3%kBz!0w+c3M=34p?{6nUp0T& z0yduXJM6fWg|)}^hpi|11lIrf%Kq4!abWGM{IK6MI;=gM95#;k0j&LeTYsJW(3^BT zFZBieVB>BZVEwmNGWPcpI3Vv}?T3-Dan47?8+`uuu=d~|u=^NG!p4(kz}i3gVe3U| z!{%der{(#ucfNr23%0@L>9fL)-(1-G=Y+8Hz8~zojRvcqYTJ&yeMCIP_*pjC_}euZ zKjCp2+KDgjgUt&rfsK!DfVGEO!2x+GBM&me#@9x{)Dc$*A=$D zbTh0!))dyiTVQ|gn+7(%-451HyhZ#-yR#7N`R_Ae*Xh+%yf6Crb69_@IqdngiD1X~ zb6Ef681XUf`CBPreh*?dtUYuBHompqcH*boV8>$|?Dy(rf5v?%?C*2bgtce-!0yv6 z=Jl-q2C#97w<+k4KbaRc?>R`uU&#o&?pnjvUA-WFXnrdrY(D%F?D>WNC5PExup8Fz zSpr+1x7z;5`=4R$kg2eGp$csNZnW*#56NKjkG)~zi8GRgu8&@@^`^~X^YvYAM=#v@ zh<46h>jYbWdIHvNYGym@d<1O$Yy;T(mo~8Tp}zge|1^T}qnkE_jh}xGo6l+lJ3g6V z<;wT4bsg_MWPO=GfUPTC3HyB)!un@F!LIiMu=TC^VB`CL!8u1v|2+|my?6Go*HOQ*7ps z`sX05UDOWNKb;~YZ#Khz*PmeR*yFJFU_012*?Sn7jWqUfmdkd2{I+O!8PWVh_KBR*K`p@gpZ%JVDPnTiim2qI>4SQhgPu?dA z{XXkqe_#9+?7Era^}PQ>8GpM5tlxhdwjM7vZ2saOSi9{Mu=8uH?c|51z}9VLL*JQCEde_ox73H|ks7v##gpyV3J&UX z@tW)Ak2 za=jUBeM4mIMdf-O*u26Lne|voW<6BIzIVQS0_%@#hxO+Z$=Gv^@ISPZ?#1E#c<3YV$T-bPj9@uew5u3kHe!dv2{dFI9 z981FbEjPolcEn{^yRfMJ`Ms30u=IS+fj z*Wa*tgIloilyR{8TOu(Y#>-wXe){z#VbA~g7S^uo3f;aie&!9f!s_3Iu<~F6Y`*;?SbNa;mijPPSbJ+&xl$;seG2o- 
zxLF0*c~BlU4pJFLe74a3Hvd!w)*gxmyZ$TLAAhDT^IL!0>(#F*VeO~Vuy*Nl>`lj^ zq|A9n2VvuCMPcoS;jrJkfK2^kb=Z3D&tdKPe_3zVb7l5=^0fzMTYoNKU|mjr@l=N>;L`s0q;ZoRti}CF$~u4k)5xdkAd2R5rgnI2;?-2+#hW!l3Zit4G7J=VuIr^$+~}%%kLTaD25NvccAS zO!j%O?nH^fISbn zXgJoMECp-dHl^M8ez|b%`biaG*R}W4-m3-&@2@{;J}~(Ay?d z_!bVnkM|FOjYs&t-oGuZ{V99@A7T9>$45EV3I6}_3d`@AH)=_{^RplA*5f(8!ThZ8 zsPDqAA4$9U)Yh>6;CS!H`BZ){_0?3{$)EZ?ji1kf-Jkru{fR>a^1ICUA7JCvhiTWI z>mlQ}DsPA@T>HiA`7bT<$b7+Qucu!**!cVu*!+G?SUY2y*JF=1wVnFo8L**3<6$9fh@fW%Dd2 zV9x`RwTDi_#tS#V+E?epvHs&FSbbu<>-WEK9H?&z*FPBV@cb6GFX2nC9Hn-d-xt{V9(cC z0{flQ*-kup5v*UH0oMNh1J)m`2x}+%yyn}x!S0vX4!d6F!`5N>{guPN!N$X!Kiadc zoR8Fx#)XYTURGY=N8X7UI$wsv+B3&t*HIN%dw&~j{-6`~r+V)aY<;cq5A)3hxu1EL z=h)A#$J4Y2;<2#bV+L$qME}XS`cGcZ-`x*ukF|k)-W{-UxTdi3Y?I79YXmD#m%;%* z*>>dReA}6?4PfQtB-s6QP3%v;vKt&fQnr?p;XwQq);@U*Yp;!ljq|;M^~d$!%#S!e zL4U(>Nb7h}N7&E%F@LhbeytLPZ;X;*kiBs zuZYtPgnf_8`eWz|=T~v+rvvepu=Nngw{g9G*kb$hdxj-p*NyW}dG(s>ecmt7PwuCT z*l{l|v!C-5SUHdm zHvg6r_WkZh=lgU1em>asashT-SB2GA3t-220Bl^W0c?DAJZ!yBVcV$_oB&&Io)gx- zo(L=V62tAQq#id6*3Nz(Eo?o*+S|8b^Sr%b<?M$C(Xw zuyNMB;n@4dhXeIUv};F3fj!T56KuW5izqxV{?%dchu?S!4#aU`_3LCu472`?Q0dS3khcpL($I>37tF8s9AlTh|l<8?TNEYiGQs9@Y5JwaENE^iOA4e`g(R zUZfnX9$E(LPo1KkR(Z1<_V*Z~!p3P&!OH)#)ax3*@P5j}rqlzcrQZ7yj9iNzs4ph| zdKq?oRG=PNdu0Ob^TxMc8F{w|wqE=V_2Z8BAUUkQynUYD<9m2L{^J1JjT5xD9sSpj zdUfY{CD{B=bJ+Fr1#CRNfK2==E^NMSD)sTkJ>tmJ+a!jqCx{IP_a`~e62r#RFB}V% zhuLB6`K_?yQ3zHKbb*a;7Kinx+t^OM%SW*5a{1BF`I-QBzgiF2`qhN6@z%2c@ch&Z zv`4>!)k7JWKkcbENCB${a=_Zd@ogvn zn-ey#ed2GvANjwsuy*%K*!aLVu=Uozz}nkQVe6v5hm~8cV82Ix*m&6h`{NHphmB`X zfwlKv9%Q}{Pgn>mPj125KdWTq_6gW}_)W0$YZa{iJYYNfNq>f|H@FU~530f1`N>?b z=&g*f`+pO|zGpPp_sHY=MZY{fz~7_ZsVZ#z=`O5)8Uj0Rw_x8V4)RKS_+Q)k`>$c; z=|kA~+B?|&)7N3+=Vg(%`X85J^NCGi<>?hzd!hrZocIe?e%(?&vtFj#PTVRR`oZrs z*mm-9rC{qVM##k5s>8~&;jngaUD!CqudsG(L)d!25ZHR_j$V)b*AjMp^znN1Toc&) z4~Esx)nM!Q#=z?Fvas(v)$1AGVzBdW7VLQ#Ibq{|3t;OzBErTmcEI}k&-c?G`*WAq zvwz|y9Bj8_h_*4Co-%)e#`s%ea7uzys-}sz>af! 
zSoyyW*8fQid;VU3*nW{@^ivPm`m7t6Yc(0GV6XUtbfo8)(`p3_U40{C57F8F~IAIZ|>R5_d>4sh0X8whn;ub zVCUn;O{{O?s2ySF+dNqNu`{gv8xDJ(Sa;a^$NsSOHGN?H{cmOBE8}40OFge={m+1{ z|0@rxZ>Ph?uZqFuv)91Ze-zxv^RsSe!rBQlHZUI8SB{7Il?kx4^jDes zgBY;##dhlwuWrm0WTi9@q-V+?q|FW>wj#A9pA^W-)}yw{))u>H!jo;_IZ-S+7~bXww3#(z}S1)<(uog#Q*KbSMd3D_W*u%A9^WDd6C;yxuHlA<{HnN z6tQ9Tbr#q`RGYYJGon9=^!)dl}vR#?9|2JOb}KJ|L!NCa4WSpUg-AJ31pe$@WP zClAV;kM_B&e1$zfG_TBlwsElbND)|j!1Gt#FZ#&$r(U=YteqXf{={>O!^RIH!p`fg zu=bJfYu#kvyi?-WnPKw}y;lA2@0$-w2b(Xdw$+34fpX{T$f$8wcy343t?f#bL*PB<%ad zhSfKty&eN?>f+Gv>+@>=7l-xF953yayNkFV_O9cp{g)B`fBi_jB{uB&w5{!rKYs5I z?uY;XBkcaflQ3>aF5@-o{r#|VwHvJ5SR1b0_~tBFd&lo<{%RcTc}?C=|7!@Wo(jZ! zkmo~&*C2T&Vy4N!vU&7iGU&89YWU%L%WQX;yqr&=Qt|#k_PA}x|u`ZIp z+H*T$<#a;W=bs{D&&sa%p|J6+c=qT2A+YvFU_E2URe*!*!*%|F*CP)fuzvLSA1>he zu|Mv(o;+_8HotZYcK^aISbN9#hVj~2wi9>tdhN=Iu=Qz=Ve^lp;Gn$G4;wDC5A7xG z$_3lKPJ2Z^U@Yvu@%OOy^hMY@(pbI^=lk4(%{Tad?)SI@8z)H&tN*TeJ#s4z?0&2> zUXS0J0ahRQdjY}mRURA+*RFo>{j9f1=lyxV&9LhxC2TyU9jts#0c&41goA%?3-u#a zVDn+h6a9xWu;ZCqcKyS?e=*qnitWMplJm2>*IPG3yY_ZpSiRc});=5n2j{!-p8l}= z2iwzb{IM6TU)BXyo_B%uyOl@A6I;W^^ISj5kH)ZiPD4i3go&1a7E zdiI0-PP_ipZ?L}?Jsj45p9Et!PWna0pIi!CkJicl=%2f=`tfU6`{6Mh)ED|I&%K`d z2iLoG2+v^cSLcK4@Vy*1U-fgN@_x?y4{5i)uQnXi@8y2~CybuU^WYB{-&C)Zhpn&3YJc)f*bqSi5*DY+U>*Y&~~7*zVEwtdu)i6y$Q=F#V{>;E^{`zC^|Klp40jJ;G3`(1xC7OcNs z4c5MTJe}vG-laTjecoBvd0$9-Uwr_Z&-}pt$~V~OeQ$h)JWDlLd+`peJj-H#&bv4d z8_$enJKy_XSby&5H0CSz{0>-ubjMWw9`zi?bCpxmVedN;*51nlyFMns0ecI!{(LO# zJc~Mo@xh;I3%eexPv(88->76edZQq0{JR`%{O;r=?nk^N2W%b09N6*92%ArA1M4rO zw4L#)534_tdp+MVC2W2w7VO`@G!Z8L{K|M0{_7}MIdd1*FKZ7wPp`s(?+IJ4X#Chb z{i_MgPwKpO!`kPwVg28guPihZa zpEM4(KCTX|9s4tEJxLYVd{i4)`~E9f`>LYtK4G zfA7zDPg>ac#f$OuNB+ph!Oy{tM+#W`c0TOyjm3hk*Yo!Ut#5cuJk!6I3^x9I(fB6! 
z_xBuwY<%JRIGA~QoOb8S->~%)2Vu{@TnOv8?S!q9`2p6ySp^$c^Y<`~JI=R1al**3 z`e+htT;j-B-jDNTN82C0yaBfEXc%mKuL{vI|SQW|#u_;;}Dx-e|qu7&-nU-}p} zpIskTKE{HL|CF+w{VFfV@O_Eb6o5Ui=?-ihIS=go*#^5`CX3f&uTFsd9@${!`Y_wk z-)X#_e0>2}|Lj>L#uNQ9b#&jk!+?f~rnGs4CZ zw!^+pB-s7Zi*3jMyfTXZ*b}2+<1gD_fw69u3K6AJn(zR=6_)8Z>rL+pYw$KyPr1??E1M1JHP$CW%b=@SUdFgh|uq|-*)nn zyI}J*Yh>~VQ(@y%vth?0@O#$m2mB3oe{L_@l~>(m=2=fzf36FxU)|OFA=f&<#-mDm zKjd#cSiipj?Eaj}_9w2DLnc4;C9M2P39Ek#z{=l5u=YhBSbyi{@X+_o;PuRhJ+Ssc zGFW}M687KY!`k29!q!7wjllKD>9SsrA9fTD)cwQCx4p3bcP`ldB%5IKfEn%2d|e1z zPx}#U9Bryh-ANSK{Rcx}=j+4YSg-8&`AH_uSxaW#N0rgf1z_jnqxWe8{$oa1zxxX8 z`aL)-bp20;^;b5*?l&I|8=sp6Yj+QUts@u*n|JIB8xQCMnh4lju!utQi2h$(>Yas0Ut01$ktHRdL<%FHbU&z?2 zDPZSaD%d#r`$3Ek@zVEi(gyaAy@0i+p2GU4Pi5i{cVNHoV_5lc1x`yo^);-0bqqFM zn2h^t5ATJ|v!{WzUw6Xh`x3y~eN$oOX>8d1-dI?9`i%SeT}Q#z+uVb#59|l)?_7cX z{w-khHm70NM{${Wz#CY9CqL{uip>4hzxiP6@9xr0UUhs1+tEk2Wb!(xVEwB{uSg1=$+yF7%A18;!MXFh?|GmBu)Q@alrT zH@?5`@jYzZJ3p)*y!eXyvA?%4Y<_KxOq{SFte)r!>%V*|v+k?ePP`-&to$npt4ICb zo`3K;tX@CH-*2#1^K!plu;ZE?c3$VOKkFwJ?0Si3fAWWq ze}U1DkznIK3t;1JXJ3S_k4muhrfX&F?zFIR_@%J^%Cmu7&wl?!u>RpZ*m%@pSbf$B zHcqn~)?RA}8&6pU8xP9|`(A&+=Hnv3`mg`M%B%SUcplCxiNW((-#Y#&SI@$(|6gF^Z_DgYeE$e+Jb5nc^SzfD?*XuRp`6SYw!KiAWKq2ss0{>;Pku+Oo@{=_ZU*gkXB_~Wqa_ZQgs)l=B~U~bs>a5~n5;~yI~ zpO+g}{v7Jd^Amq)3mgBK3;TY?6z}0(6!z?FZsg~uUe$2FTdN?lXmKXzk=1X+k4QCfA%q~e-a<| z{)u7ZQU7%3eTd`5hxJ>3g026JY=7c~A+Yv(Bv^Tu4>ta6yMA3#*m|$mjF zbyYWE{q29{!hhvH1#8cAhCRP$4y-+%6}J9*fJ}YXji0zb@q%`6;QPVKho-RcuvM`B zTV>ez*EBdNPaWqWu=aa&+u8ru6jlzNV!k*(OT+4w>9F!NDeU=+wPEwM5n$zFDcJnV zx~@DQ`%RxRzl~!LhCSb7x9frN>kT_kx5MTiyTIm|w!-?kZD7BXY&@)!?XGhX>%66$_V)s2!`9<@CAdv}3Q7gv}>xgsu0g3~TQ$3fFFYexmKv z8zrONd~h$=^*XsDOdV}%*xv(c1{=3}_Kfi&zMlhje(i%D_YEER`|Q8!4!d5)!2bP) zuyOMiGI6Mqu=ZYT8M&Vrwyy4v9~lqgDG#59j?Xw)`|YNTJ^2-^-r52CJ<7x8hql4S zx#Gjh!!dErIv`JTvw8&CKi_*vCq<5(+T z^Jj5j;}!j3_0O8`L*Kt9tQ|B!#y=wLD?@7bd)PYj*0Aqi3AWBG1FZd37^F8LT~(12(Ss zC9FKo0_!(^EMu>vgRReq1-suNIjo%6)Pldqdq;#lFRZPMemMD%@uAMSAZ#3DCG2;N 
z3;UcSVDt4CoAZ3cQ@?|?LuSIt%UrPfsV=O&_4)ztgZ-WzcK#fO)u(U2r9bx2B3OUw zHmvlHo<c%*V8^&xq7ux=>50a&iMZft8XX3)|dVOYX^*iwd22q)we@o z>)BF=^{);qw_i7;KXSJ)>^wOGo8L(b8#kN+t7rbb%k$x9wT7)f9s|2yv$XB#l@M6_ zCpm1rZw}Zv(bEQeAMDX+_Gg@~!P*nI@9;gS`?>-v$2P&v=YMR+9$5=J&&R-y=P20x z+7Q@$=U`ZQ+7~vS*T;73sm`!+rxk4cFA8kj`Ot0NA31QcK8*c63fA5p4y&IEz}8cB zgN=7)gr=A8<~MJA6FR?gz}j=)!{&F2!{%RN%hYr>wdG7WaU`@Gr}-C*^r?_<92_Sd`*=ffq0^_zOb z*1_Gm5jvlf$>@bGu=&+UuyXiM*m%*>YCIp`=@;1e|psf_lg-ue`*y>_e;{jnFO z!`f%_Vb6=|W`D-N1*|=k66U%oWnq6G=jOFgd2y;D&r7}5ldGZM`vmOyLdRg^HOFD& znMYvf!8O=8{$N-+8PUJb{LBXHkEDh@pXtFB?uY-G5!RnQ2Ydcl7FauHEv!9~+y3aA z8L)OqGT3^If%Xr(pY~dJ*m|=Dw7dU12do|03U>Z~1nZX!ww?UPrpw$9e|rq9Twez3 z|ICKnZ#)xro=)+4;^z}!*Z*YL{O@Sk{b076w;lqk2Uf$%^KW72!5Y|nbuHUDZ{&p6 zQ$Ldl);>E6JKi6{%7gQ;btX|^*V#4L_~U<J?b~WkG4)hxpj< zu=)P(u<_7=UXMM|7}nnH3Of%Me986H=f<#|dhLF&dC2P*cwWxeZvZ>4$6@8?f2CmL zXB$}m__&Py+tmKlFD-?QqgI8DpAUxBBY9=wM<+}2Jk(d8JJ0(PXW0&GubhFs-#l1( zvK!W383G%3*#vtY%dQfk-?K5SzRnLDUrH~NZ=O<|_rtF`caHCg{HzBnr`O8(2M>$E z#OFJ~zSr%dT#x*@bC&Ogz5O?AJ@84`^}PkgzF2h_c0WuF*!edSHcnp3cFs>33|r5Z z7IqwZ$gHn(MR+JS`*eD?*g~l?L|j=7o{Vua6gI zzLQtYY=7ig5!n5tabWXq2MUJ%z0+qx=TEW%ybtv*t6}}H`1!e>de|ATaiyeR@b@{t zdll?@$d)fue%!X5xLE?&x|rjz`MB44L%+vguzKTX+FkFz!}|NvVB^ulVeO`WVg38Q zUXOfSg(#rvCo=nCt9PJz8{Fl_$#3hZ|r0vmt)vQVhJUTiyd{YF@SW<6}Y;->B7 z%l5*?Pg*(t#FuJ~9 zPCOwd?0B7py?-iLdp!p8%kyM1!Org#&Nu9;Pi<%Xiol*fng{l|JHf^|Yr*C@2f?oU zI&g4)qepHyU$sa7^?LT>50Uxr4y@fjM&|c2;;??yKht6LUMkqQ*g`lh`HT{<@@E(9 zJdMKo)1J-1db7?T3#_~^DO2BE)OO-eWnGWxk6N(x^;KZ)m*%khplZX)&#v|-pV7kp z`1x01=gA~kKRq7uAh_PGN6!fxuh~SqdZ#ZOhy%dNpRus|?mnzOnuEM?zkL_viFWG| zSU-0FZ2T!P@=X8a57>H#5ZL(r-!kho67o|0mhYZh34=Bn-F8*{_T-+Qp<1MP#Yx2ldj*M2<@ zt1lYB?k~!VzVN)7Ua;d71AXK99P4Z+zxXlidCr?*=WhtCyxs*H519k2XAZ;4%UiJW z{a;u=wj}z^c>Q(T@lTt<%FFvQ_WVfL^Mjtl%#*Rz(Vxn#sOUe}<1E;#=zIT87TUGflET{O*t_^fyktD=ewD4T^IUfS@lII(RC`i8=`HN}Rij|*Q4`@$_@0Yk{e=dwe$ZO`Ge0}( zZy*mh!seg*!um^_Wa{>&!1{$JVdcpT*!g-1RzIAC)hn-I&jXH(KVv*h|42KclKu_y zqAaZc(Hz!p9cMe=XFja`GYwY1EP<7`H(=`|4#LW*Z1{`jaqh#)zhbcQ&?m6>X<<9( 
z_s7IvGoIc9*1k-ne}^AD2iAYg0PBb5!e4csG=3=hi+QE+3N?1AH z4R-y0j(;D}!?5)THDT@1B{K1z1F-cr7h%uOj!68#`069C=RAQmu=R#1jSt`--nSil zEHmu*d_w%e{TQFZ+Jp08^>86rdpAxko{!%fE(NQ{O2gJWehC{N>j}I6$aeSl`~mB~ z7lDlnM61o;LqB~2o3AYiD{s=k+CTGP>sO+}+F@^C<7fAHe#f)HH!yzq8QAqY5!T;3 zBC{S&!s^w%u<@;Qbr}C(e1rIPL)dX$>-~_w17Pjg)v)qpS~xaey$SYxjfW^NuE5$a z8@)gNTwdZ&`tyfi?XRY=dAjqk-}eXOQ}}~dyq-F?*|7WR-q?=*T@R~wV(`A^yRN|I zdosYv$7rw{yzDb!o=6~D;vPB&l0fya7$SKxs35R;_ctV z%8yF6vu--T=J&sbl?R<+;uw=#!rJe{VDmWbVDmwLG~oLo*Zv|t=sdmyTVEHGc%$)< z3=L^#UX_6TE(Kug5o^HelfrQCq^JI_1$!P(DcJqO4Pe(}b^EhFGXyqXGaPn2m&wEn zM$71<^|0}iaj@~KL$Lc1*1^idhj2h2HRA74PqT;ktNQ~+!P?uAi0=mS6|nj#EA0DT zfVCrv$jG4IG)-ac(^{~4 ztOu+;)*E(QM!?oTncwhyq-n5qACF=D)E&Ex8|Y;bXAz!#_iKfAnZl@=4DB4Bye8{Wy7G-)9P}eOwVXZoL_H z{?vqx$K@iQraW#7yZ(E_#*4ba)>CeT)la=(*F_ZbbHw+D!1~kQ!TM3tVExsdu=e#z z+nE=|+i*Yj!|jC4uQzDR_0$)=l==G`VCyLok?%8})~+4nN8NH&So>==tUqdgQTyx= zZ2s{lSU>Y7tp4q5JKrVa_o3rC0`|Q3j<9j2<*@5-9&DY{c362fxIN#4d}kK&pFZyv z*m%L0uzKq(?EP!N`n!>T;Cb=S+ri4|BCz$9lVQKp*Rb>W57>DA4%q!PS77afxIfZP z9eYIbyPgMH7uH`&FZ1trVe^HJVdY4n4&0x*o}RFNKvh`#VhC&;p)G7Z#$?$2bJyU; z#PN^A%GV?vY3DqQ7w{I&Pa6hncjP0#t)KBhC*Gg>iqWv~n_{s3+E(-5#HpKywV#5m zbDCm1aj)mF_d5xDzIaOV@#@>RGV$d?u=(?3op~SPn)PArk?gSXo7S-MvIJ~>Zg<%4 z-xW5VGac3sxC`sAKY`sp)~*ZviSIU|zQFv~SlGB<7wZwILsCu)o(c z0yd8Ir|rZur^<}yVc7WL3j0$pcO7m*`PA7NdW^&Q$B zg<<{fez4!i`Yq+cC|G}~Ev$T<0~^&iOn&9HSBlVudu9oYTvn_%Z_nr=J~_0=z7?a^Ye@+i}5o|ih3%C-~7 zDh6wxHI|73Hixy(euA~Py2z}TNw9YK64?CeI#_vp2v&dXlgZazfVJl%bq`(t)-Niz zeu9n1*N5F-v=VkbcZVIn<8WYnVExTxJwoNpEE#{eAnbT7^?K}*F0kKo4Xl0fK~Khq z`0aYwd~pldziYjh>vcEmdOQeguV(w1>-oK?6R>)ABJBFU3L8H-0GkiKVLS1u?7es& z_AB3q)l2nY;}Q>H?eP_`^X;C@dWhHif2<$Z4|e~v_cIQ&1Xf@8_mtC-`|!Nzt>duI zT}lqCS5^Pjm&sdhq+NS*IPCYH=lzIxErhj~Cc)Z&XJGy7p5Bk&^XSl*?~C0T5qAI4 z3E1-$=kvYP&t>}2A3M0Z-;X?Ld)RfB1=jwYVLNfM2h1nqug75XPg|UC#Md6c=I1BC z)}uUw9Y^a!^&b=V=YH(ps1CdC(!ic)lm<55mjU*>K5=~z_s6^^P2x0GrpbK2v*S9qjwAl!=Eu zfc@Sh?T^2ZV*ulae(eq$C;u5%ZZ?P2uQ3PGpS)=~*zw2-TX$U2cI?4#VAoqw*z5|C{jhO}Co=xV30Qgi*8aqAUfQ4EH%R38;U|_J94cRm!OHKtu;W$(w(hhs 
ztX!)LyWYpZ#@~Cw0eKATN6mq?>rUE^J$V@R_W~Zm#wVY{+I#VbaKGcV4y1Oz5dX{y z8!svgJ0Ekx`X85M^j{{}`B1?5NxgMk*!+JF89O5e?6{4Dt=o?Vt3T$z?pKZud!EEP zSbHWOY`#JHrT>r+_Ia!?M6WfC1)C?#|7ydM9rEbRMRk7+$yZCHCPA*}xC0V{{CN7Wu&3hNhG zf9m-bmtgm+G=r_ndju=r+QY^}5)9?<;|~mm{jNn|>x-7d`hSgKzt3LS{c4?I_4a<) zxNUElb$uLGK2C&c2RLlMg88zz@Aw|%jUxWa^RmuU z!_MdQGI^scu=TbjVC~=>u-~HstpA-Kw*IcV?bvNaVeN-5GUvZkgVl%b$8i5$7g+i0 zehvM+ZeC9vyc8AH&vzMSRcwIiD=;Z+s8xQ9gpT z$J@ezb7NrjsrzHhUygy5pE-wffBeV&vho4;`dhGe^%&Ut^VsgU!5(n`kI!8O*4~XZ zf_ChWny~SkRItB?+62}w$OpS0va^g_8wP8a%z@Q2b7AM_BG`R2viqI?gpH5=1sk{8 z3A;YrKjnGPr(oq>^O1}P=YL0cKN9h z9$(lWJ@E>*o~SBpJT=oOo|pJWSvXKn4V#az0vk{M#CGBhEnwF}E19~kzHnf_DD3YU zjE0rR>t*r`lVIi3->~at7OcH{4%YrzWPj?d?!(4Y*Tc?>m-Z*VzZo_@8gDf60Q+>e z?bJu*gv|$@gthOB!{+aH!pf&Iuzt^0IJlqB`Lhu=9?_k4?f?1q$N%aG>yNL7gZlxM zN84c6XDizEYwp0Vx8Gso39oGDeGkB{k67%NGp=$JHeW6K{%>IYmZmtH9-jDpoRNL7fm=Sh77Qmj@Pzm<> zb+GpJdYQQOML4j30X9B&8`ckuIiB(3oPztX^)KmQ>pxz>)(ez?T?diaPpSV_1NQsH zgq_bVyguywLG6~YaPWLU;~{Y;a6ir=s6~JMgRK)8AN-kxu=C;NB>J;%uENf@j+6QR z$d{L}{`@dlJ@DS^*^hewww@*q`(>5)g{L4N@wd{;=%E&{cH?#WJ6{*V&W|NB`x!UE z+GlfM-iq+c?#=c*U|Z@{5|5mhhgUvb0lz{#tZwSPxnmcdGRlD`S8;O_FZ*Ydt^IoJf?yD(f21|?Y%az>p#^@?oS+WDXhO*O2+Q-yg|pO z9_)8N4O^eo19rS0+s=Nz^|0~1PuTzMd|&AG*e&^C>Qdp=Kf*!<;N*!A!gY&^jH*yMJ&OZ2qZ?`}wh7($C?3)T=dutydTgdk*|hUXMR`4K}_!7&c$k zYc9`2JbO0mex4n$>um*WeCWbFzBh4{`>^vl>U_Qr=K(!~^(WH9&Nt7Cb3PP+wWFiS z*xz++XFq-%*!*L6*!lP&tiG54>#wAMwL8~_W9Qj**gUr9;TpG2wt(>nI}bs5m*x+y zC!Y8%AtqX%A3{ZU_7`}PT}9XQH%?6J2p_VHv`dom*DWhn1%!rK2y zVdIdo7Be4t&or=dr#7rU&k6g!-C^qm^1|9B!(sPZ=kt2(nd(bI=XXKa{9zl|{6t}| z$A0StYY!BI^-Cfy<$lD0>cO7xG-Mg?i+}MQtbgfwS^Dd3Ve@3?VfVYXgZ1ZAFX!)J zpV@9aGc)WtIc;Fy|5I4|u0E`NHN$q|g*9R4bJ7)DPkgHgY$pj@@j#`z9~K{+@aX*!@8< zRx&?1r@?mf0jXf+q1Ri-kPp_rEaUa$aq7U@H9o&`xG!uylh3QXn*m!-78q~z@XBzk z{kjL%AMkqLZ}2Lf2me1AY`y9{*!WB$Sbez!R^Pv2J_Y9IYT8-fCt&kCePH#v=dD=( zybd;Qa2ED{$6@8%G1z$ZW!Ue32-Z&j57zEn0qY+;g|)9&dp-7G>@|!J_QFnBdnebQ zd>_xBfYp1kVSg_%AFLd`3|oJ5RQbYuxCd)598#WWC&TVH*d30|7kZwx@tW7PoA2BR zd!9nVwJ`gK2f^CI* 
zU$F6zhp=*FZ28~`eY=m|9TGAPUtOT7YyCV{m>iBVe^d>VEvC3u=Oa@ z!?)v4&4%@FmfBAIbUv&;`~x;#DjScS4QsC^_=|SxAHCz{C(oA#bN!M0&w6wI#_*N6;{5e ze|^61?2o_P7B=2C6!!c+&olM!{{i>rTqDmPHLtT9R^E9&s&>%^*gUf5kGjA88tndC z*N^tkOW1XG26jHQ-og0cZ^dB!8DFn1Q!o4}th}fNo9A_XT3=HSwqC2C{n0a1Vb5#w zyn5$ZKiGBec}LFI#jx?tVYZ_<*V<0~)EL-xa}rkXc>cY1Z^E5?Z{n)QVfA(yIC%c2 zd9)(Cm|yr?>J$CC`mpn*J#4(~J6Qey18iQQH*Ef;v+cwahQP{+Zm{S5Ot!zj2LwCM z57?jjC(qk*ecXbb=bn%3`EBoE?SZ+l^CjNyu=xvnUSLL8|6(Dm-8d6AJ~0zEkFyB& zd#(z*{`0+zH}x^U!#;OD*nJ}-VB_mUVfF6-SbzKi9R9rL5B7!1AN8;GYdK)$UnAK0 z(i+yTtsrA3{{$=NO2eKvxB*tKrIa~;{haO0N6%OF_u`(y%Ih74e)RVW+&>f(c0FZ)jlZqn{=xHu)f+2e_sc$Ke!Aaw zHSB!42Wzi?$n~s?@x5UEzVtHsy0OeYqpYycn;%wQ6|^1y&Go6>TmZJ-E;?*{D;KQ4 z;QH5&zQf-Oo@b>zkL~@)ul$3&@cX@^-Fp_KfMTh{`NLlJNYiGzFDKcL4DPG*m&$>*!WJ=1N3LV^gI~7uqUxhK428AKc5k{ z9!mj@ju=OV8VD;HL`=c-N!1~vb4|D%5^&%vJ^-r3^&Zisr^VSo*m63mc z!ul7V9)Yo+XV}hqD-N6Am<;Pb)`qQT{9Q(#w}u_x@m|k5>;>yj$^`qCuyLX0|1clXSHocS`Xt!+%D}Mem%-*U`oW%`yViECqq1`#&}a- zh8K9_^Mpu=@#pKA-!86TBb(lIKIYzw0yD{GQ{Xe^~{#{?Plo&#FG``2dY!<$Vj- z{eAUe7#Fy9a)M!#LS%8F`iK z4DW}(9>Mv~_jwPi&mzLgui9t1o_NYl`l~;;z@9&}2PQA@#V*)$3I3JI2b_SFrzh-> z-+BYK?&>&f-0%r(y}0o+&#Rnzj{8%Oy4U{ff9!ak=g00{9(H|}3ycr(wK=fy_$9F8 zKNt4Cf5O&ZI^T_F?}4q?bo^{TYCHGu2mANV!^+jZu<@SDGV6W_tiL?&BJa=dJxqhG z`xyf(4;R3G-{G+P6W7DWqYhl6Kl36whxHGN!9o8(`*bAiKB+XeSb3p-H{NH%KKKZ{^~MV|Hj`F(7vAtYnLyE^&(`}dMyZyBFy25Wz$fRz_N!sY{G!OD|(H<^#rufD^6^1P+2@ZpVD^4x*-3o5|c zV~=F=wf_F4aj}M6uN+PcYY%uHtnt$%u)mKn2-aRnV>{MQUJCd zVj~><{ZH57X8V&*YUcIWQyXCCRYzEV_D|S4mqFp~XFjYy?ELrlx6~H{Vg2!MVe1L* z!1@avoR?4jdpB&JGe7OxMU8Ir_x8uX zUK=)^(-Ah`?Rogt&n1SnW75O^e*qkfAL;ikfwfbO{}^BU3)WsTzGEEJ-#^sfhXfwX+t$j&F>+{C(oJLt*ECa@cXzUe`Y?1S>Dv*q{3Gmayxy zF6`g$4IBTe1?zYH2?zaS;;OCo!0OXRw41Lw4F}J+)xUiWJ3q_Pu6)jUkH3$-RMGnp zFZ&ACpR5WSUpW99PjUU4_pWlE`*FUCzbB$RnGLI_=h~nBH-};UljX4fWs3*&NB&-g zmH%^LB2tm%4&|u=-*KY+Udt?Dw1tD}Vm89e-mL z?EcQ8kGMa1mlrT`oHswg`t3QG4_?0))?V>EXyaWG9>e6PPr$xULRfqF9jrXd1nZBa zWxgAK%4IwI14_Wg$11?aVOGM*m#(n(Yh-@^&w7r%u=78nj6XA4M*j?^zy8iZ*f?4t 
zSU;c>Y`ul&ut}kEQneqnF3P=Ji%HKZ3s>qaEB0R*tl!UACcsenZDzr(XM=~3!Cq_hW(^|c?qjO*TJsue2y>iyv4Bf5{+Qj!(>?duAA+w z*I_dA{xGaRQU|vFX%^p4|Ln5%BJutMu-|1LtlYW+o2T0VYY%2(eweQvWq<6etg!ve zN2^DA!P+mYVDG;gjzB%#F59t>55v|6+pZno+v~~i>HoREDGsb253!wbi)(-O3$=i) zpDAU3W599f?e$PIz`QY>9;~kF;ULRI(p#Aa&c3yo=yM9(8)=%-$WpfvS z-S3^v^@aar{e<<^TV>AU`-l1q<;Qc_^E+3-<|Cs&;r{%80c@W3!Bd`(^C=foPh$S8 z;4|hY@ub19@u%vr`x*Md+JSXpze{geeboSVp7->6?8_#wb#Lup&*!ZPn?DPIwY$B) z@w8@MPd+=A*PqC9sEz%Z=Vy5y*GCW7{pSbZfd2p+Ke`4hXRVLX&f5o@f1C*$-(3g? z>t~D)^tGLO^x3rQkIsgT%dUd0?@R?dzE5H2Sp-=B>xS*b#||)F#zWtGJ@)oZ*!7;k z@g(kW1XkXrhK-LbfE}-Ru=dPI+u4_r3RVuRfR%r_VB?EpVC;@V#bD>>cd+rk&tU7% zs>9Ad+qM6?z{;H(uw*T^WOTy%C|etKj!1;aQ&4(6JVeF z4DH4rJRjJ6;zro>8Arg{%^|RLxm#fE@#n4=>L4OHzu6Zw57xdqjLh}sJ1;0d$Xh+8T{-y_HV*rrO#5r)2X^db*zr-mSiid+4)|BF z^?&_f{j(OfhkXy{qilYwB<;$(Z*6DY_`R*)=>!{($^dI;_l5Nboj>|pb71RE62tm= z%V6!_Sg`TB(K7p}JRjM7>oC~5C+opnPeWkk%cXD}s8fTr?*`GXKfKd+?A_Mh4?kiP zY&`4>+lfc4m(lZCVe8D+!Op*!u=~-J7uLfkvp@FfaM=3cda&}p1gw4^4qMOpo_e@| ze+ZlBxCLtuJo9?&-(O_*BmKtrQ~za$J%{45-t2t2^Bi_x?mJj{`x>_XF&fWnJR&aZU;Pr__rc!H2s_`C!hXl> z;aK}V4{SYLV%oKfOTx<%a369yZi^6Z$82Jn2&uFR!_MFHvjM#w(dU!cE80Q*m}syuyN7@vhf~~ELJD&H$_?`9-@tU)+_TP5xANK1mg^impgVmR_V87Q8+ws?j!N%L0!N$kC!`deW zVfB0~*!7$N)?RH2yFW0N*Ry`=!_Mau_%Hf@*2}w(ZYb<|4CP?`r`oW2F!xvZ{wef# zSf^=W<=I^QGvdb=sAu+h+r#=B?%&Y9YXZA|R>Oh$jDM=#?D@+2NB8wt(ZBg&?f9Fp z@!kZmf8XD)*Ir3YeYN`q^go>kZ>;xb9>#<9-=4wR3v<0c@wn@-`vCI8#$~LJw+^8I z?DshUJFn`(=8HPO`q@2U>+3SZ+5t;p;~@*#zY|!mu=~0?!G70^u=PZ_ZAad{_Il3S zc(Q|G#SqwhXh~TAYyoWj${Y9JF+X?Mj{SEEcK^m1*xyr|VmtcjKl_t^83d~@Zo|sU zuCVs&ecQ1wTEfipvCmu2qT^&gBcc>aE8*!7eEb{&5M zYyVb;%~RZFzohxu*0AHe+5MAzhl;Ryo3*g}QJc%`-x&ZK7o833zbAmTmsZ2*`M`co z;}#cS{qvdZ_YAIY^Zbe2|B1a?k@f7qyWdnh<7ZfZ@K0EKZ3FB$G=!aBuWiSUj0Y>9 zq9Koz2XEQG>-fckoi}&k(7(fYvcLQiY#bmF?Vgu;0XF}X6juHmht=b<@_!ZV{j$O4 zmxsdUi}b&=FDk>{uLSbh`o_5ICpQ22Bdk5Mnf=eM%bBq8gZ{AbqdTzksw`~&y$SMP z`|t?+rYgoCI2mPYnPXGtvfv|b4ZS42fpPdDpXPgMDzxDsk zm$ruWZ?yM}L%P3Ty%%47&-afAyMH(Z?0WY6Kkc{=Vg0|eu*a2%NzK0#RiEuEU 
zRB70Hs9dn?CJSudrw;7-M2|UdB=|j*`-eS`BrKk! zd>a94f9T)q*A{|}SL}hczjMMq*HiDu{^X3dV<)9#J{e#C5OzO8cIOxNe@s|=q7KJNfndD|b>9`^hd+ z6TF^$?poM96WIB_6gJP36js0Qhs~F~IK%bCzwg1?m3v^{{|&4?<9Tq(+ZeBS zUi{GZu>03j$vpoTuz9-lu=NZvZO5)n2rDPAoDN+tFS(!dc_(bX^@e|s@tFzhUz~@{ zQ*?o~TQ9)9Xoxs2WK^J|}Hg4JV_WzMg9bc*?f|KAzbPW>15_ui_) z+L2pqC!b#cb{#E+U5`0o?V-7_`vfz4{8)KK}|1=qK3oU;h1< z`AYq+@8|rh4_kls3RXXrgq?S>86VH($p|~Yvccx7&Yq;5{WZlMKjcO~SUW10{mJ)d zhCR0_o&8yl_fPzfjRziw^?x(bZoXtDY`!lyY&`xO*!?^OVeOcbu;;gzlC8slUH^?? z>zkv%&hI|3@?h_AzBh4>QLx{4C9L1O7q(uf18iI{)ob3L=YDYv#$FA9jpu~G)|2#u zm1j9@C$G61HXpI;DDBEO*t}+U*z?dbzM(&QtuU-UtpaPW#+AuO429MAr~hF-aW2GK z*!=Di*!)Q1xAe!)E+5vu1U9}f=Lqe@C62@HE9wGU|4`^1?ZizI!P@BqVb3SnahT^N z&NTxz?=~NHzMO*ncYfJ9V1EsI&)?&_9y!GQu_OM0txp{W8=s93Aw?kGR}VD0|su=Q;{VdbFr(@y9QJOAUt+7E+e;)#i2{nMea z`+ri{j{KGN7t+J}tG~edgIQtY4`X20K`FQ~cF9p#`>zmeJ?=@^^`09xA9xzpo=OW} zT;2c8DOh_rA*{SPVLS2KXt45V2W-Cb%x>;a{BS3%KHUd9zjwjrPj|w`LH5G#kN68V zUwRN$POpR2b2nh+#6sA5fXA@;rWvsM;|Z+1n{0pdmF?D>53;}dPG-LKg|(Mn!^UrV zz|OnqvA93=F&$;%(-cU4&V-2hh4)q|~9sRKJ7n!)a;uK_E6 zzJvWc6=Cg(*0A~MQZn`Pw)=dAVeN}=yq)J zhqX`J$*iNu_D4VUhK(n`+QoR`559!8-@b<(uc#j|{?t2{hV>)k!Pd*=gw>lVVe9FW z!}>GXVg1)gu;WuatpBZ@d~fWx+Aw+8oJV2fpUq+C*Dl!l&i=6aWhv~uzVG$aEwzXB z*OSIh5zq$>VeQE@uz8TWuyK$=uyMbyVdDV3VD(7}*!aM8Sh<};Ca)1a4&RgSloj^; z!ep>{hb*x3t`KbgOZGgGIW`{iwx@uh7*$+QS=DHlP`SazldL(u{ zo`?M&t7YUwYgm736|DTe0ykd1`GEcPKbpjc(N}-K+5rb)^8<5W=TDpjd>`^Z(_!NZ znPv7nO@YmO{RHcujDmf>O|W%L17P*yYgl`*3+%XcOvwHD{i0g1`*33=3T@8?E5FOa z+B@lNXTEiUjngKAJs)U2j9D`N)eib&Up?}A;<;B~<6+$rGaj5LH48Rg@dWnYTfpYQ zDkg!^n|WcMuM%t>Kzi8yKRIn@{1U>}-+u(F4-$Dj^6LTjGf$Zf4&LATfc&uj%wpQr zTTNisT{T$wH3ru1eBkqi3qkFl`z<4Ius)q$}3 zD+(OEzw+S;^G!dwIqlZ>KY-2i)$n@cdII{Z=c~%(HH*R8CzWCCvH7s|eV=%L~4Lk17VdG2*VEy}&yr1>|j~Ks;)Y(6Sm2*#E<7EXIAMNey zu=;lxY(C@^tY3OgW?t@st;aY9yG}OQAOETmZ2s^#to`_!`|GDW-+~2JK` zOWMsp#)Hj&6tq9}4;j6GoGOQM!S0{QD^vIJneF&7MZ&dfe^&5%>hW{XZhb--Sojg)LF(`QS}WJ5T#Q>K)Sm`T0$CWP*OT11e7l6ZfR*j>5%T0ZV&`fKmi|Vi-vct zbMN>bzwle`nK?5%{=N4#4we$uK1xS>NIt{Xx5t2kcK0v7;{Ekc($KDb{iDqOnRmQC 
za{Vh$eu2&J#iPHS#}{Dbc@)Qoy4JJa&pb$Bdt`sPPV2(j2W4%?pJ`@$c0h{lA zKdjyOz+>i<`@bu2e=z^tFH*zbC!Ugn>$Mjv!@>Nu-lZz6ADM@C-@6vR)%2hR)o-+6Z1=SPlT@P6(` zOYi;2lfk@?^-!Dl@q5Ut@vwP+1Vg1dCuzKibSiACK zyY1L-_kAArRz=3s{U5Jj^ZzwqDeA*!sptT+i9@AtCMly@$@Q_HGi`d{;xCAACP@`xEcSU#mmA|GyI)vbSO5H66Sjx!(m= z{+O?0(~Jv@1uy3VWV*)I8XE zH;ndx1Nj?VZ+vDd?ZJH4@0|q)7xb@wRZH)W{Qf|`8h^M% z{{-^Ty!ComzwiqEtsg%f)(+eRyPs|#9P}@7t|Kq^Fg}s;S$pO-9LQ(mq1WKx_tghK z!vDQLe#CiLdtss96UoNw_QBc}7i`DBxF!>?yaF5Fya!vya0^x+CdVEK-p6|XRj~b5 zllR@tzNlQ-57wE?hmAKbfVKagz#;n^Hr`l<{?kq=u06wlgJJFMBCz$EW5YNYAL~O) z(;kd}VBaXGrg6Rct~u?-3&z9dTROt-#~21{?>^)AwZBrs?jJq|>#zLF`*}V|U*8x1 zW{dBO9q^3rbN^8r*!r$*u<^Q5GV7oLth{~$)_*uje`!~3i|p^=u=^nGV$xuuyUm) z>^}APVBfzmZ2qbo?7GhkTQ5)@w%#nc?d1QPz|Qwyckw*pc;#X9o#$ciD-5fbj=)8SMFt z`CX z;TLu=e#numjK6-_YS?*S7S?~828aAl*z?Z@!0M;TuyN6MVC!*@z^;=&x3eBN|Lqs% zpZU!l+vq>`^W28@|E9sNn>(<6Tx;0yh)blJP#|9gA*Z{t_=b3Tt1+hRrA4mst;2H}ihF`Og`K(QwSU=d$(_r(#OJU>DvtaXiqhaf;=D^zBLt*`eC9v|P7pxvX46E-y@p|G- zXJG52TEgnvA7SGW4Q1@^pJDUkwP5YbtFZE~0&Ltc)Q)|V19rV%qTTpW8rXGy4t9RL z>Gjwv%WS6}^zuf=ll@)`W$ID)%6#W+*!^EkVdc>`u=NgSHqf8c5wC&uU#G+V-cDFO z+#Pm){Q+xV=7Ft~P5Bn>_`w&}(_i>im0cYq=U)B_S zKk}vo?7C|UYk!{pj{C{0Z-UjMonYfPk6`1UZ^HU{u~YH>$dQ$6xSl$U_^|b4OJMcc zI@_uDzPy_8Mn9Z_t;1RY8^5{+`@Pd(^9`}zX1)-g=?Hs1Q4UzSQ47`{D+3!JFKavf zvII_-_U~gcVDrPj!p6<7ucE(@3$ao&KZrLRhFxb_VE4zahqXuR!p1j8c|HDR7n%LC z?O^rebl7}%QP_O;G8y|TEo}Y%R#<%;9X8){0`|PI3o9eO{{rmyuYf&2O19p33haKK zbFk;D^@sKUkHOl5&0zDK4`BVzny~Z!F04MN1RGzz20MR?*p7dC8P0dVNbCHt^82{$ zth=nR`Ku+cJ0vXo%dUoay|3mAJ{nNbXd6?Ck^jI{cRW6IBOc%_)2Y9 z`{8}q_*)X#dh({Q{?NrG5%TFX*yk*l>BoMs{XGs=ZVZF9SKGoNc?w%sRs?qad&bJt_{`hoQf9;QNcs}*Y+r1t?x;1S4?*#1o zy=y!9`@dlGBTpCed&KY3rsaO>oHonM`}}ZdUs)KNe;ErKKg&tG^({SR?7%p%<6jjv z-X0a!o+}jDexKhnp7&@`#Q5I!efWDK=fl{1=XO~AeUEnIM&HQT3(viue&`OX@8j`& z?fO!%`}UH#;A&!p3`#$jG~zu;+WM zg*}h7A#6T+2CTo<4EB45z}jD}VeQ)vuzpgAkrN-l&exA=$6iWxY5~u~9;oZ{(95%7 z=Sc}zy*mun9>{4s`Q+ZP{=zHX-~3+-Sbz6EtUUb)*1x)FJNqTe!+zHRSUa?!?Zin| z!qx{SfgR^buyM{`=SRHX2e5TGS;I 
zU33`MuT2D7PtXc>|3M)*9r@(OVeRf`sRZkt-pC=`3eLyGgC%&I; zF8zx=JkWOXyMNDNexM)y9{t7buX zXPU#-%fzDpjc32>{lp8B!rIr_VD0I*VdvA^u+K>cJ3i52*IR0tdd}ZwbAROcSw}b$ zHsAR_es9gni?7~=gYnb;`tvJ(pZbM!v>RVN0$Uey9yZUg4-V$D_E`^Ddm<*kXZ>DE zSbdWS)_;r+yIy@?_cz>|#qSfJE#Uj1XHUZZy_@Q=^&HD=$G@2lE6?h{A^Ql{{x2$% z@BIbVKEFN_#@>1g>#yyG9ls*!`99)uykA|&3m=CMhN5R_nUqoKtG4lROu=R_n zVePjK(^wDK;qhU|X9TQ$85cGm)eg2kKQ?TCHiL~TzT*9ze>Gv(!DZO<6LP`U;q`$% z&uYR{evkcjonh-2`@zcHO0a%IaoBuiQP}g|QhGh{qcpJg(fBEh4{`joGWxw2tp3ji zD{mXY)}L2_^OH z$LAHS{K^P>e~V1?5Bqb^Po%%F|2~72U*E&}ql01n;*BtNd)_^;^(Gx*?dEuy>2Kn* zX<+@QIk5FhlP5&Xzss=ts{yQDdI(!zSr*psc?w&`s2l6^K(65$EPT4{`MBE{K)|Oy)m-${mAVUGUJ{O#vfak2v+`< zkjWFofwh;a!PZMfhxJ<;!_L#^V`wM--yZgR9>UgF_mHvAZotOF`^o%0(o3-U(Jx@< z%|Y1s^Z;1<=pkcf77f7d)G&nF%|A2#3K9rk>HNw9X;5m@`a7;JnbX)b;bdGmAx>%-=+cEI-6(ZLbxHEBVZ z{RI04F`tN6UxAGen= z#~y22g6AV&TEpt&^Ce;8K-c7&=6zPcK5xVOJP*4m9&8>bW4Va_IoXr`K<`e1J-=!a zYu<`WUu=Pt7Vg1!D6(i>RAKe*G>Z#tS z#Cl@C$#GcylN+|aWsmLDBTa_QH!g#nCp%&F-W=HY)@|52!?Cs_kE2#*e6Sw}!|LaB zu=RUgVdX_}*!j@R`-v}n1bg0GRhhW#H?VT-b~k<>eHE)p#P>{wwQK%>wO`+a{my7r zxu1TE4m*E}z~;k$`kem4ek%j(r(J;k`%fRhm@#e6!uprJVdKfiVfU|2mZ=9m2%A4y z;Pupx?t|4UYhmTZHW`0#lZ<}e06V{y!^T0T!TK4iVAst!*t&^zuzG3~Y+P$2?C+28 ze&%)l4W&Mzk8w@*t&%w_B5wLpeDC~S63Tww4g7vRjc|Y=Zv+dMt`hM!I6|nYl z1@FgSnFkxM&j+i&2g3e6=x8#3Z@MIGJne7C4}FsnHZFTl=HF!APk!SL?7WNz8$Xt< z4|>ss`*=4t);slst;kMlJqRm*3)YTUU&CSJIR|0&RVLW| z3#scwtnUXM=^x_6yzr$et+Zix+?6aw`@yd~~cHRM4f4ZjaI2!RjyKJt+#ThO1Z^NcO&FZ502mOP(&`9m!jZ}Nkm!1|r*VD;lD*uU?z2zH*& zZpH7@{{>*}$rG^WS*3)1ugkE0Wnx(Uyt6gW=e&^S%^45Iu~Qq~hxqe3oHf^HFIfF| z7}nqE4{PsihduvvAguqtAgtZ|*G%tco^+<&_nF}Jz7-?)&UlV z^%rLQJmRl8VdK&pVf9-M*m#)hQGYuZ?EEOo_vwe0k&)XKVDmR+W$K9Q!^T@>$FDQ2 zf8zDVj|ai#-+e##(M^P1@5Lh9=fKu~J6}A%b~&uwkR8_FIt=SizF>Y?-+2)>-a?`v z6wkN-Yk%*B)kl|M*SYiGdfMN-A3u2&9E^A9oGRG3R&?fra?$ncd`boT9j;&FoCR!0 zox2{L*L7j{2Y&%;kN1pRkF8<%GfaYw_cVvCi_XdV)=sVl+i#iL(m(WDM%a4D&tdgf zV%v$M^l8WM(f=o#^83U`?!(5H*Te27&+!TUgFmtY~JX7*m!C**!8#$ zwvOh<#yp?=XrV4#&v`X#VfO)?fxT}eti2qgEAyZH!~j@(HwA1yvp4MDOUVIi2Yl}J 
zXEn+;XMueWUoYp*?p)h88Y;t!>J@_f!SDeV2|y>77Qv1OEb-ecIm&-rfyeh+=vpcnl? zKVE=C{SBMf-vhgU;+r;e6^Qmu}@Ab&V2C(t40W$h#G_3#M5%xT>ov`te zhOp}~S09-D=fz;-v&&%TM|{|P+ErLP`Dy(KecH1x-%EaN2WU20#-kFvz>Wz3^sn;0CrrT!Rqr8u;<0b|AO%)o>2fczF!VjUS)&LH;#jq zH#xkY^PhIW%C9Ukaw640uBVPE6|8;G0oGni2&+HG*-ri>4($70gYExEAJYHq$4odV zVm{u3jo*)eT~8Nb<8udL^Fb$J?d|KZ_WvGOx%32f-mQj}H;-WD-F(>ocq}e0&d2Gn>wXGsJ)~*;d$iw+R4}(Op$*D`XBu_TP7b_5;mSW9@g(Y2P+pF!^-Doqj+EJs+@4>?@+?#v7*D)HzgTO{}DI+ zvkv`3KOKX$H`c($x%Q6X`RosG0UJ*lJeKzJw~l0ojTbZ;$NVS%{%38T$GVR>p7%pv z?t^`w!LW5E^I+qJi(&1_>0ZzJePaUs!FP^>jTh&Gtq&aG_2dUS!R7(`!`g-GVAppi z*u2+c7*&+N0jwQbY+}UsmW8diTn(#lvctv=x=rGJShr7W(at>i4K}}g1vWlYX)^bd zU)~No|FV6_`(Xb}hK*;8migWOui9j@*9lm?@auHmk9^<^SbII;46diX zVFc{@E(Ci%O()oOws9uoOFa5^4aNiiDfTRwemDhNr|=!D{9gzwCmz7s*OOu6?SFm6 z`}1D)W#;)0v*|D5dAVT!K5oT1Tu&Gp15V#3z%_gTbzrk-eSwTSuE>l^MT-V$48yjR2e zQ_nu&eb8TrVe7^Ig0;t<$mGwj$>cT5e#>}Zx9*0mH*E+L7Jg@)%={V(`~3Z|cJET# z$rrDY+0XGUY<}l!*z=-x!k$+=1J=LZ0K4C+x7V`{YAlIZ4~b#pLB*HyKEyNoRAs(V zU(yD)ZodI+9A^fszmfn}{$7X8e=Vy*e_^ksS;l%JKHn8q4yT8eqm5weC6dd?;qtKc z2=}?)zbBIe)*svr>-Q#r_2)kF`S_Q=ROa_Nf8uxE*Z9LgSo`b-oG-`Hg8g9Y;h(|g z-@C%v)BnQytBt*%?>`7@r!)#<*JEYadZ1mjJ8z47Kl=L{??=DqfnBe|Vf}_Iu<`T0 zu=($_u=1;mOr9Y*to>XM)-HZrDdKtO_#W-^6BT(M?9SKh59+~|!NzG`(rz4hA?$kE zLA&+j@nG%91Z|0i_F_TUcw$tq=X|bQu)>WA=KHW~|K$6Om-d8>6Z{NgmrpGZo1Z)`v##2(UX^omVC|BeuyN!sy&r#J8|z#9 zy*sRZ{yA)1=X2Qg+X!}@bc4+|SA_L{dcp%&ZER5tR*&|TjZ4DXlVf1>SAQZ;j4w^I z9sl(P9K4U~>muxY{DyYN|0Ha@Vh!wf9fQr+u7}O%d=DE}KLlHk)(y5_FT?7O)ZWke zamQf&k@d(o{~oIMoBzrIYiG}fjrUJLo|?ZO1nc+rf|Xyx;E+6k?Z=6*_E~G#{Dj}* zd~fUZ^<`3J$>ZL`nafKeR`eF%e-SiGvf7ka3V}IyB^^`gP+xeh=L*(TZNre~;0p=FwZg#2@ zW>|aYF6{XR(Y>Dd%VXH{jjm%4c%JG@Sow7THvS(M`L4W;0lR-~h)iBH1?)b^dFlh~ zDDZim&sWntyqPML9;3Y&jeFp zV@9t>er%GdH_qbq%hMM7{`P2q*~Zce7h^-;e1{QTR-}M_p_dHD;%;P?O*Jl-(dA!PuTf>9@f9<1nb{k zf{owq<$Im4+0p0L4}1%oA1I{$=RAmAuyMPxu;Y3Z`%QVe7B=6r4A%ah>;0_rd)SBS zlXloM+QBif4_)UyVCUBy+O>;Uz`oZw?Mdw6MX>Szmaz8ZcG!8|%;zCbx56R&!0XAc zOoR2K;=tkN1TK8m%7Nv+hefxRn1`6_4ly#jU8d--db3HVis(D$JelWd4tzekG;bC 
z`L3g|agAlL@yye(V_}E${p`l?q2qK-fw;wn}_S?_2dl-(SQ0=d13v?5wP)z zy0CszdivY*{ojC{f0tqFs}`^xJkPc+^TB%fi>`0{UrW$f)HuzKcO*!s@# zVQikJmhyo8ufu3pkI#dR4>pB8pCUHv$@sJOhH<{>t~d7A+(Un;4`Z`l{d=_P8^^x{ zJg`)umuq3?M>XXu`G>DOIW%dxP}yUKhrZ`~2LzV>9~d`u2o-}ixxA8-qM*7~~Au%y<9(`vo{yzslKfVDpWIXjdNgk%@oClZoqB!JCx zB%|GWxX0z`Z{&6<*!uYku=eIBu<`mGu<_|Ru>RIJu<^ubys!4c5ZL_Cd$4k;E$sa3 zWjpp{CD=IS9@uplAJ)D&1^e8;%JIJB^P83fTBeHrVsYH^a{BXs~+gBy7H5h2O(| z(Nuhod4^%I_I?@Ib2;k5#@k!N{%(BO`tgOZ^6N4E>3qKc8(;bx)_%I^{rDe0z}EF$ zfz?+RV83q@tX;nZc0QMviIdHT&F80q_504j>XX~PFZ%N!?Eb-aW(Y>w5v~!T9}T*mFgO!^Ust!um56VB-(FVEv2Cu89f7|u*J@LFie)zpxWb)nBXxA>C1v?*P=E2nduah-Fs|j-ll?$ zQ!j*#w-twtU(5=-U;q3|SbMUT?bNe$gN@JBf!%N433h)}ZP<0t)a!|dm4l7{7lV!W zmbIPt%?^kB1z36eb|km;dg4UMVb@D_Sikpw&M)l$oUs4ifsHTZ@P6c#?b;_PVExU@ zuz8}Su;=^jfvp$+n|Q4D`&!t#s875fzqTuEd~phl7|-7U_PnPFu>1d%hwe-14Qmf< zV?DZG>k#bzv8AxKo%0Rq3*gEDju=NGXJL_?N3S;NlwXo}r8{LA{PltUT@zejo%GZ5x z&~AKuH*7xQs?TFQx7%*KN2cCk6>L4<2e9#_>0XaLame{kol#xb@hJg&K6wJz^?8H- za{tE7vM_(oq%~~4*&NvME&#h8`ohl3gN&EwG3EDq;y-I)*X^6I{kQ_wuK0yj>c7k3 zE+6(k@jdK%`xdsIY67f3zSDN%b)#Y91zTX_iFIJ>xqpJ4|0Q7Uv8S-|G9Rows^I$( zkH`w^Po0y|Lz!Uxy(eMq&a3o%zxp62?e6 z1Uo*}Ve@P+VC&C5vK@aZKK-vf*%&rHpgh!mZ|U{e`PE_TwLgRZ%NKAS(h}Huu35-m z?bV9(r+Jp0u>N&t*!ZyagnDM4{f+-|2sWRyA68$igpJ?-2J0tng3S**-sZtB$@mMI zVC&Hmp73 z!TL`NVgLTbDp)&n25i0KT$%GqCc@TxOoW}MgJI|Mo3^u`xTo#J(`uLEd#ERA533&* zycZ!)n%K_#c~+A5#l9a4yWe94Y@N?Q*!+1J*!rUuu1Xiz>hN- z!pfhxe7}BO@8ZlK?CW^2agAQE^Vjui9C3i{rI!;3~dFO%a>ezp$mx>g=~o_SH&{ofzK z?tge2_V1q^f%V@O7hyb!XQyDk`S%X$!GXVGzooRDI>TMeXZ6G1@ACU>2lxfnKfD4v zANsI9j0X>f1N+(anh;k1uf$%~zua4h>yh{E@yC>hwP3$bf7|+wHSf@W_}@cd^K-Lc z<>$As@ysr;_WL%OdD;Xv?(;VOqw+s4Z2ska*n0mP1sPB5$|11xgA-3fe$Xmdd!Q*C z_^-y}?iXM^N5&gHPb)2K{a|nWUB`1)etwU5={(qdfQ?}DXSZPO(jW5izLEa4c7He6 z^Io#xUu&m+3~TSUhSf9KVD0_&u-|npFW-l}I1O9huoL#YmAkO^_(V8}x0=6NpNH$I zSF1xjPyb^KY+j@utY1A8wr;vU?D_lsVC|X_wv(^S0Xu&e!rB)pVB;aidyS)<&&}^q zAN;#aKH)5EKHw+fi{yLXnxBjNiI2a8ji2;^JqNF%@pSTrd133JXT#37(>WO*&NI3K 
zYwwJNwI_-Yzjpsn9$0<00QTR*IT#P(9y?&a=R4Sc?;GzX&pr}%zfGRVczQKBls}h| zmk+ZupUeZ1|8T$lIN13)6;{tQgIy1!$-lT?^m;a~M_;ufU!$Jt4jcbk30vP%1-72# zGHm@&PMPODgq4@6Wc=UP%^wlpi4AL4=75a{-O9@EVXxGO&5Lh>oj2`Z?eQhB@wz^+ z{>3aVmqmwqc4AM#ocGV*@t<(shU-~g<=sIrRt(O)@iXMXIIi3i@x!22;ynO5_C z*xCDG*XtnI^{@zb+z#80-!ux=j)}d7`-y*k2K)B`3c&8?ZwebwPl%yDF@`7j+%qN4_LGY(6P1 zY(3T6u;c$6R-SB2$NlKjI_v3Q-g`RicP)gqKfBn@xFp{|JM{?f!LGM@u;*oGfb|zT z!TSHnVB;5qZO0#p1-qWt*v{WKxRaLtlNmHYj^8nEA+57ys$7gir8w;g@)w%5}y54JLX zo}b|FQ)hY!w!Zm{?}s0A6xN?S3Tu~agMF|4GV^6JY`xev*zu_en_9 z*JGC?mFdT|uQY=5$a{z8upgq>ekVEelstiQY!HeXa4wjO0Dtp3ajD@V%0 z%9WI`^-!JdU*_+lG%)>qo&NW_A7Jf|7O?vXcEI{kR~>Kck=?NIic_%bd5`xaXZAQh zkeB{L7f5iJ5KfC7r*y#gc=fCWJshRU#ZQ88Hg z_5tj;9Bggtn9KsR0{jyTtpe_shcC|A%GjGfTnxYv01^DcShSXxQ~p0oML+ z2Qw}w8^GpUo50Sak74^giPvNAeF9sr|3CUy{n#0HJ$(uLJ3V0KdNbJi!d_m_{-xZo z`Gsz<{`MKL)BeYQo&qb+FT>jV&0zN@tcI;mtPZPB=E~HU6oeh; zZm@bNKJ0lYrM#aw_G_^Acy5{Ry03i3zId9N>#fI89+Qu$0lR)z!|KiGu=&4Lu;<0x zdz%lVe74SrsVnLOY6hh<9%V*Qxn*E*bd&!-xufrn?FbcyB_<&?%(<~MMVEjhh4X; z;ZyexzqST8KhqOVhre?Trf%m&9N75Z1Mg=)#n87HKldj#FU3RZW-d2OHH-AZg=r2_DdhCPKu=$;GaL}L4 zsvc@v*z2i3_=_#fl3FRUMo3mea_3Ok=JC87Vwzs$m3v5uh)9IBgymB+PV>lNz3)*+U<5BPUIVe6}Q$<#UYhkgGAuRf*F$0R#>afF9RF*Pl^4eJ(USouTO{V*NgnV^|4Q3{g|~f z@wCFoSL1d&oB{IKZl-&ztjs3p6~o<3%j4}s@GE=Fa*~A z{T((w_LcWjck;9C&>#EUmTD3{9fikHT4np zObs}&58Z#$KqijX75%3_H4b*(Ho|@}{=3in>Hh_=b=j^b_iO!NJNeV+u=DnB*f`8v ztUvoLmi83=SqQd0A#5E~aoh2~vcu+eE5X*c=MH1_cqv$Wwk++&?W)?&cNT?R$2DN* zSAN*|axL4*mu7>lkFE?`f1TLt(eLlT+SAW?U*%3_*!ar>*!6px_t&1?0jn?T!k!;C z2{s>i%>Kcy9ta2avhuGXtiIku|C+xn0xM4|%lIkDVBGy?W#hs`eGxjUQPyw z>g$~UoHu+oG2f5rMOMPp?NFYCq^Nrtu(2S0+%mlsps5Wo7^-)H~FK;;#2*%q+(C4sM|>K9e{60+};`&qB6 zVg2x=u>CL{R-e8JJKw*6jfW(Mjo-C}jnCWP^S4bqRULLeVrE!-v@&cSF`mr%+EroW z+5cm_+<#IL*55q`>kpKHJx^#KtUY9XvGu$gY-jv(!{!B6!uqTK5U(&FGXvI+_(4XG zPKAxXY=x~Ko+c9)Sp_@J^SvH@z8cnkSq+Eu39MbW8&*!7g_VDYW#T*5qnn?;3}be- zxe2>|e}%0hm6h9n!}?Quyq@=b%l@H0Yd)-e&kkE}9v!xRpge4T_9F2b?S!hZ&skx8 zGyYX&@25`1`eyZt*Xu7WB0i=5mHqx=uyVZ;?0%@b#J_^;jce|PtvhN-yZPLB#>c4t 
zYYm&=~Sbw;g*ApM`dh1q-!1_NvPr3aA@j~mh z3c%)52g2re-u8ah`jr!9;!e&FntQc-J4qhqe26!TQNZVfE{NuSZ{fFXJbkf%TWq!;Z_(aA>~pJo9}IZAY)v z{EG3XULqO#M*lAwtiN4ZeT4ixGK=5GZ{lFdkbId5d;W4`n0*e>%fZG~%E0=c10vgf zUeIoRVLVuWI|J?RKS~AbKV6>5dLf<}4c1Tm9d^C{iN0|?-GlY#Zo=AECt>ULFT>h9 z)`RK4-hzGK*|2)|7ufhyFIah{zPIkEm`vX0F6{hG8+N_+!e2A^J>v5}`8@2=8E{B$ zz@Ep{AJ!h!zA#?jNG3k2eW8C-8g?9hhP4k;z}hwEVB@)|V9!H844dC84_j~lEv$b~ z61GlYHmv;53hTemlF@%Jr*l2=s9mt~;SX53EW6+LBV@X8{@*#+a|+7A+NJkl>wOG3795g)uYK9 z{g2;szieum{O~nc`_}gj-pBgs*I?r$uXDfa`tL*$<6jU~zwL+JUs}}Xlh54<>%SL< z&4(|7jYs-E`X|$1>w|N_)+tf&I=ed|%=xSzzl~+QL5n zEm-@q5v;xbrtOjS^qwb?!28j+QE3m@dZ}ZKr|T)E_ana-!LFA_3F!~~dFySpGmpaR z%VMzSbgzX&`iJ?Ve>TK+^wM_N@vP({jNZ^GK$MPc*k`(gJNE}shHuhfV2PxHgZZy&{_J+eN=c*jy$dus8N z2>Cq@HeWLh)*kNxJ0Cm3%9$3h{$^Cz{);D5h|=Ioo=^PX%Q(y*=Ix=${66vJEUl5CE^|v>`#&dGO?gv`x^^AX1*!k51R<1sn1XBml1a>~$ zfQ^sThmDV4f;~^CF04Ot5LWLsupK?R!uzS)?*xbHhhg`tG=|l$2PSeq`m?szNVKCbIbipP9*4Cf--gXc{{V;T`-c2KE3Z=1KJm9dUu}=PzXELi?;6-VRV|-K zeU|Ir__@-}6gk?3bLd_D_46_^#t?Jfn3OJ73;}1Noyra)Iv+e!n#J)H7k@hl6}R zerjDA|6>xYJ}oNaAI^;A)Ufu>9N7H&?!kON_R|vB`hpEI=kaZZt^3;wdmhbxnf->F z;WfMNygcIl*w^b}?b6e*^Y>d=d*~8uz0+{G8TI%VVCD4y?DmHV%xC zf8${Fa4tCLKmDOUzTkf3>p<^k9ZrRfpAUiEpVk=GUmXRTCoc>8yW?T)i$t(-5&PSH zX6FYmAMp2=!`i=FVe6?}KYr(E*zdUm8xI={8;_2LezRU-46I+NzOz3Tz{Ue=$=G`v zVb^ne7(ej&4%?|$>j^8rzK8X@C&2nIXS|+yya9Gzo`jVvN4%dn;9=N#aZYAFZi3Ah z-GE)6vt{((BiQ}ayNbbMI5a^geS@qXWS?44M!{ze(teODLy@%`jsOTfm* z*Teb|@4)8c*1*c6d@}y>S2Fq_2W-4$60H573HH4{gN^6Jfwk8&$?UKHi~ewa-s>B& zUM|}|*gq>_|DM7M8F?`PHeS{Tw%)BaY`=dDYwuQo^(U*#Ojpc0Qzr^^5X) zKmK_<+p%{u!`5}(>cjKs-?w1L^$_ejP6_)yn_$n+$Ox;)XTt8c&F%fvD|fP;I;#4x z@~WBlGu|Cw=TB|ec%$!ceCB=Fbu$uHUcLiczdZ%EUaTB!+;qRpdaMAu@A?d^{a*?; zesBl2{-_A7fBY2IuFD|fcP5GK|75U!iStYQ>qYMf`Cky$&ixxUj!+8L4`0gl)*m*2 zl_Ps$_0UJK=a)yL|FjRPdp++{2sYkS4c6{xV*k?rjbQudE3c=%wjFHz{V1%y=;i&` z&3}45@wX|k{!wz~hko2#?*ewfsIG!u$?-FOR(}LE9`!>Ut!l*YS{gx|G<8C zQW<~e6>NO>dwx%SmIixVzj89He$K1?PTsyP?0ha0$yH(F4pm^|tC?WWbE*Lwm)lPN zSYJ>b)=wA$8;>myyFMD(PJXz!_mi)U0&6$rgRM9GjsACiWQ2|XAA-$~`hK$Q%B8s8 
zPdsQ2?D~ra>mT%imA`(kc5O4S$G`azc7IMa*!jEz)=w@Cn}^fi@SKu-u>RRh*!X5D z*nS-b8y9=ki{~*;qiiRibqCf?nE)GKJOOLB%IfjOu=&=$u=_tt!PXOg3hV#1hP9KH zz{cYz%hW5y;Ct+cNw9utQrPu526jH@wB7TeVCPXGSpE7b9Q42Ml?gUpK8kkzgG8|Q ztN$%1#G7uzJrb5q=S{` zUa$X^0M<@k30t4|vIoy6-n0z1e)Bo(_{!?R+pzh@Z*8Xz`;v^jS_G>nGfqf zY=s@4*|7HBGFZQ3hV8`rzp@>@FaQ%{Yp*nfm9Neh;}Rdh zj*sh0xmpUg?&WRRd0hbZeUrfY4Ow92z3Xi^^%!Zr9{cbO??=ujhmDU!fz4+mghT$2 z^ZDQDb7IM?ch{r!xY1k6_P}S_|v%KJ|V0zJ;*<)pOe;<-2*SSFrJV<%|A)boxjC|3lcg>FcoP zjcMN~2V%hH1vC0Q^q|*kC#Hq17kUl0E+;jte2nV#=-szr*KZV9`!FS}o%NF6*MCm| z2k&QmE;+0`_5ICfC4-e)j*ofsq~0GnK7oD>#>aTjn_iFK{wLQPSCx&Yd48h%RT9I_ zkGrsOzeKR#cN?~zNcQ`Fhs{4F^nSkQhD`s+)_?p88~;rJEAOwu{(W{?|Mh2?c^n^B z|6Yc*n`QO{W%U+pE}>Q2S>x&1FjeC%i-RS9L@@x?;L77@?Cq~daQx4_DXKANB<14 zoj6-QSbZs5pPwJr|L<=*@q~h~`DodC2K@ui3+e}35ArT-f6LnQMZ(zlP|+|pZdg3* ze&dEE;o$lDqwm4N?|Z&PX*hVl!2b!}-+mAIKiD&0@cYWUid=8rU@&ZbQB^qT5B<&R zJ`aDw{<6NbHtc*H2^*(WKKUH`*Y(~&#y=ek``jine&T%Cxa$B|KW(x16E7MA8}IoB z)}NW=^*m2jzE6XL_2B&ZDvYg@T@Zdf@uP2G$J_h$f0o0+?-{pV11rbBrd@ry5%%v} zxt@#{Zj+f0zOVXCdG7w)xv=?&18}gOtWP)!2kXcE{HNexJp}s2cv)z^V)y+`Am=?g2j|Ah7bLg&?@Zyv$!XX-+`-}fBW z9%>Kk&qYN)YY()7jeEQ<;~#zu^by)u{8SMV8^04+ML`*z*%I z!{#S5!piR~u=TTPVExpru+LB7_0%6^gU#iK!^_$SH)Q5jZrI~+H>jP!&rE{?LGkIX`&%?0uLpJ`j6V~t4U(@ec4+s8~=LIa4sgKjYQtr=% zOaK1kFwWA$?e%uB=a1X}?oVk6tKUQ8gZyp^dtQ*^sT^zs>yJ79>ZK21>u;PN=JRU9o^O!> zc70TX-ACno)1RpVI}g*^PJF989L!h$e>v}`4qE%y{h;r`uFDj#`Kh9?`LyId4}a(# z_&@uY`JWfojz~ni@!_1X@w@o2`QEHD@;wgh^D}sTqyw~0u4m%%Y^~GRVzj78Fiig4a2eNv;k4(IN zCafIp5ysy4IUM-+`uknN+O^L*!rFsA&-#N;Y>#|?GtO^o4F|uk{%8&d@8>vv44d!r zeO*tDVD+Wnr(OOL?0lECm+HdSIoThs<67R2eQbZ}w^fIO{#K9Bt&x#7S+(r?WLhx}sNt=G&6d;ZV{ z*t*K>UQhgD6YToW=Jo7%-2$7>&I%iMQh!@7l?C=Z{vEa>2QtIzyIr0$je^}lh0bTIEx=s2t$n-&iIL-lJK*!<9G z+C%YISov}m){cG~Hh!-^YP=#9Y+gtI)%ubYu=c<9gZ-BX4%V~Z7Yh#T0rhcI+v%TI zTyK2iS=Wf~iHkfje)cbHeQij-Vb?r_wb#|B?*Dt>_0)4J|E)K=2b%{f0-ImI3+wkA z4>bOF2X_BXRoHm_ZP%+>oe_`JaB zTg7(lU*E^Sm-vDA&f@_fc1Yf%lO{| zVb9aZ32QHnl#%BJVdd{6SpTXRY+UFoSbw4z6Eqogbxar@nSEteqgc 
zUYB@1`nEJ2`unC~Y(8pL*!9{GYhmrc(EZHM_3(esCmy@W`-9&{-fe~5kL3HgzibDr zz9<2EPQ`B6`7G;i?t_ExbN+k}yKaiw&iQtN?wR@l0m4Y2lR7O$r+WV81Z|M2uzB&*uzEcvY<%%Y*m}~}VEyTegG>^V#4NY?%B?M=mfC&KG}MdH(}@d zU08km7VJKx+pz1ydPd*>4($3!2M5m!#Rp*btK6quzdbXoe0>P3A7%a0KVj|lEU@~= z_ccD8)%%f;|G>tVWaY=B2(Fe5c3wS(jaSN!&r?|al^s@p*k8&8S^e=MjFmU9!dU$g zh5iq&*FKLH#>R6UpP=3Mc^wXZPkHl37(3q`-{Af1|G03-pXPmn`C)v-_jmjf)2=@v zYhNUVji3Ad>J8_k{VwajIA25Y7Fhk}eEzS0kvr*O?O6L;yVmt$JSP*ZKF9?d@5ulw zC-U1)JSaWv^9#e~4bpl)>!$?lKD;!r{uag0|IG*eiVC(DADzwG%DJz(QB zviYoDGW8`%Vb^6J*!Y2L{;NN1-c8mI900rSlfnA&%1i54WaacQ*tk}5*m}m1u=>UI zWBt?^*!>PEY>$+``rY=AbxM!JMox|uysI-VDl%T^~rmGV|(O!bpQ7f?~k01%8_NTew6D; zJ7EQ^ADI)jK4ul{{*GL*=kczEjYs8$^-I=yJ$f-e?7ZIyyFa)f?B6rl1nYOc3mfO# z0_#5&@qX$pw!y{`io@n%w!@xJQWEz2cEIk}Dh(U2+zF=}c02$3-cLStmrQ*{d078x zxA!CWD|tWqcn_@HtZF;{)?S(VfNHSu(0$&I+^q?#7i8sR9oYWb59{yLh3&8JVfPm` zg!Ky#!`AsVhmAiSg^l~QgY_$9Q|zR&%jLt*Dbs6VhTM!-RTs$WOT8UuJ##%7uMRQ(vFpiv%NCy(Y5yr#W$kRoLw{{6Y(3Owe-ArFwqC{YQ_pRKgZ1@* z`bF8gbmzDJ+%7nnpX$}Uuyq_~Y1dx;9(G(W`+VvtkHFS3Uxz)f^@PlM1^HeD?{gJ^wSTTzCna|G4h`(BfnNBWP-r6+LUFKUN9hmD^nO!f^~kTm=7;p} zwQpm<#>@0CJ>N8r?da{4uyr8uVeM5}dp0p_y{GJc%4A`zo=6dPz5Ze9Fg72S1`eLj zJbjS?{?ET<|3Owb@Q19Q%nAFuzQ6mA^TOIUNnztH1?5Qp+4`S%Ve75*w~cERfrI|@ zJk(XoRl>$n0OjKBGk zO0e_tne&751FP81JbnzDkE;qtv91NyUakhaP9NIN{*mgievb3cd}IypCm!+#Y(02Q z*m~WYuIWNmeIQRg?*Mjxy7q)|nuf6TA01%hVNGH42gP8->GW3K&v~Gr!8&?=6quJlUDOf+{TiAHsAz1mdR3@JIz3uqV%V7QBecn$!#d6!(-@Ds( z>M2&j%BP*M`RrA;vwpY1>X9|J6HnO;TetHatUTTb6UUCW9=6V6J*x{uDO;ei_!T zY7c9#{bW1#cpKRBVz0p3E6ri^H9vbjb%9M_<}%P4dEqcNeqI3f{2kk^AIlxa z=B=}ZUGIL1jIjOg_2%)?+8+6P)~}_6gZJ?~kJzx^lhysH#1YG)_1wTK6w97dlezx%9$dt<5d|p4<0(t zf&B!IpZ2!I58Quu8&>~4;d|YOa|`x&9>B)yewL}<^E?*gKj*wY^1Kf9%yHQMTS2>a z`cYWFeJ-p#Isymd9kAasI_!Gm3NL8*!c5FpO63A!t1fC zPkTM~e*@Tf^;y{Z*BWr}zWO_rVCBjs+RYV5b@*zZBqw6Q%3b@z`lml!Pt+^j z^LqRu*SGao_K*7qT>sA3tFZf7WaC8_VfCQwxe%wq*!@3;!&v)iZ`l2z`fd2X=cA9; z$&tUWJYNC3|JnD^{#_Qv#@D`vtvgWOXm8DiwP%zs#sjCqfjqH3e>`k{=QrA|_a0$8 z`s*gFUL6Q~-sLUW`j9@b@!tQz=1o6`wNGxt)@yWx?Z>;Y{!bg&^Xl$-J^oZvSiSQ= 
z#(!?${rDe$!uqFmyq=v~fumIuElRQl#a2|DM}H8Tp?X zHomvn>xp;dg3VVgx1D{41!VHXb71^{e4aO;{XWs_*^g7k`_bP+WX`K~e%gOMVEv_< zu=a6#8N2f%@8^Bm!LGCVu>RGDa3GJhH_O4w1Ld*#+=8%w&!`WqzRUnSFNeeG!^E(5 z$(OM4tJh%dvw7ZceRhY4{#ax?^5zU2h5FZ-u=?W{*nSufTPJZFcK`KI*!|T{Y^M&S zH|+k67(8G5*!MF(oD9~_XbtP{XNL8^8+kqF&*g=Ux7LHTQ%k{~15y<>u2lop4l4^= zpV!QG?4P3EkG;|r*8j~X^PNLs*N6H{xjhYbzlh^&eeV*lNA4wul|S2I^jt*vHqzY9N0VRjc%}c;Ip&`>(%(xX`hduJ(hOk8z*7y#~HAG-U(PgU@`3c zKL#t$*TUNCM`7*Jov``FBig#J^Y`pspY`n+o{hfcqo@c#O zRP>Yc+usZHmvUBiKb`v8_>gQ|HmUlI_`J`vemxZ&{C=RX%@-b`U45H{cI)G0Zg6M{$f*DIV|g^H-n8w?}Y<<)q1`?u<^B) zUXNY58`eK+1zW$c3wFJ=hCR!>)V%TfgHPth^opYscyjTHiMuHV%H*`^mQr zgN-Zv1v@Wf{rM*{@f_Ljd<8rIhr*sK^M?K^>oLU0*|@OdID~fX<;1Y{eX{y41#G-F z#N?mS!mhKyw0r(QI@tV`9Qf19&3>@@Vj%6#r#`mxo+Dx7>%Cy(F%w|_{zwnl`lRW$ zV~2G2di0w9z47_aVC$h4!|tCq{^0$~VdIUT!p^tvyq|qC9bn_3TVU<)Pi#jo?1tTM z+1B>R_=D%;wT9IfhiTVeZUt*UnLo8&)Od^E72kLX`llM~I!^~{S5|<{ujPP^_m+q4 z?{|!!5QixTYd^jRn}09s{m8pYu=)8iuzpoF*!pT&|LG$*h{q|fn#0P)(%#QF81Heu zc)j_l?y!GPPPU%l3pjYb=L?R8tta*Om0RYo{%dD_&4GjGseg?38(;VLf_S;|zLXr9 zKTzJk2M6(N^#hVG{Z&`e~<@OZx|1EzUA_M z>N$Re%}?fpgZdEbjK4b{(}r;nPY&Yq)=Qm_j6DZF zVHoRo$Ag3CYp2GBgZByI>B0N!M@6SS_&(!t(cqvzTpzE9*K7CMf7*}6?}Pqzei`2n z;TIHTA?U2geaNGUZuEJR90z7#zl4sWs{YV z%E$`oCMi;8WOkFtjIYSbND)HGsF2_D`Sv+}AOCnf?>X=H=RMAO?Q`C+eyjNn_1RhY zM!c8$(9^K_lOJi<4><+vZ~OpTUwQ%#@n7@F$6@`Ct+ZR`Z2ltT_wds@{sM>mA$nlC z`3}d|TKa3x9`ShOiTOR*YD{MWy`7Ouc3$Sv) zd|8NJE1%5Ah57>Zo%z4O%5n39fqm{#ei41t+4Cb`=Et2+`SLlezoERe9%i2H#7|y? 
zjW>J>TL<9vSwCSuGhDxR{T$f+F1yoiz46De_FxZKduTR%<9Sd&V7@fSW9xHf!P<+x zJwJ5}Gv!$Qmhp`b;85S79hMB=cpubVn2&Y;B2)>+%L`XI0-f%GXS>U;$7H!fu6AbwQPR0 zdmL-8bd6)}i7s)+YtNcb59jmww}ZXkDV)!7)CSg1oh;)owz3`n&+E}XXaW2DCdxdo zW-`zB9oXmd3~c?wSlH+B6s(=~w(b1R6ZXfC8x32(ZoPoxVHB)9ZV3B*qt|U`9~0{f z%x}L2TQ^r94)u7U9>MjLuhOnudxUoPzv%Dz*ze2w1>>ZB;AlTo*t&Z4wRYRXuyWk- z6zU(s`1AeLp*_qO?TXs4@!F2`H!o@ZM5xyZ^%SmWYD2qu;+nL(9=Ro~JtMn6TQk`G ze`>(N-q4Pd-6ygM?bbQT&fh0s*Qr^*;eL7AD}keZE#ms$s5gq8Pd}{|?cw(v_toKW zJ;ui#gtbGxKKGxg0vliQ{*0$ogspe*{zE;K`4ZXshWk7ox#ROPE}%W;_XK^up}xxd zlHJdt5be&Rx-fqD!~!z$JlWsR2Z#5ioX!br7f0`t^WO@G`V`NX9yb1KyZKn_*KWN3 zSUp_mAM$7VA^x6v)q1?}``Ve>pJ9BM&pzS#V)vuI`xOrNtKa@JY&~xDJjibzg2VXG zPCsD(*z+|Wve*8x_iwytHyrA7ysn+F`G@LUpZ>}Y*!XWX*!amd*yA3A-7j|wtlw4@ z_WHD^jWa#~2YWldUg%rf@#8Afp2GJR*TUq37gh9pF?-#7ae3JK`8Brlublmfcddf` z-ccD?xn}-Q{Za}J_P_eYdME9(FKIXaTLRX8TLwEWi^0Z;mcp*bE&{vWLk{(L#vi|c zL%o)Eo~&JGJ(uy7C2*+!^1fv4(Y&+=HvW?bj_O!ww+T!3~;zU*Y~8gf9!r7UkR|!$>*W`xJrF$cwWYXtyk5aTS2>Z z=oet^L7#W1Z&ePghTX694DHHk{Vn57Ct=s~_n!fxbwL1H)crQ#jbel4;@iEscX!jPjow!ml*yo%FHjY#T4*5{)X9~l{ zBht}s{LJ+b#>3AMuQe~?dWn$l3hOCCe$M)}+_dZGt)##6AqQ-|(;V3P@+`3OWwPzm zFWmwgZyF9`*ELKJhxoSs`!&{!`2D9S|3kj>B5Z!5EdBML&)7~r*8Gn5|EulT)&CNI zw_d~bEykly!R|M_7moJdgUvT@ww?K9{zdzL4eb7Bt?iH8`2u#nl#rQkvtj2)R@wRv zSpVW?*!KAy zVC|7ku=4y3`?J2YH5|n=VeO6Pu=4$NSb6yjtUvb}to&{Q8;=|Y>)$toU7s)nHV*M9 zto#`SYsWtfYgY_}qx}UUkzaMsA=IazP;}RSUJ&@cK;ss`+m;; z%#VLrj~e;Gu<@~tR7qlE5~=j*6+=c-7ge&J=+_w@#Sl>_2{tm=;@Sq z#Ic9N`q#I>#uJ9ZK^|&94u-9_%SF56^;Ot7ZhqM3IRJLv7lQrXP(Rr5t9&)C(MKl! 
zTn5%3=nWf}ss!r~^n_iP^&sr;zAUpopqBl~zjTK~{@eItH`w*<<7v14@hLdD?}kn@ zJzs2pM%RBWgk8V)2<_IxtcC}ldVESvSh@Ta9Qnf@&-+;G@jQ>}u=_QvgRL)p2-eP7 z4?BO|kIuOMcd&l*{jl{>n_=r;E6cpEtsc+%0r#tO9&CfXuL`hv>hEFWZ|+~GK9il_ z?pG4ne3ko;D93GA54c}SIKO`Ly|91ZLVMWHMSpk`tbBKW7xl$P*nETgk+`3=*YA6h z)i>T>w4MsqKJfm-{xZfDzJb*T?pIt?+3tPKOXaL+2JrAwJ&di-EZ9e z4MP2f^I6vK`W^XTy|!$g!1|BC-lz2;#yxDee)d~fx$bz^p8pEg|B{^_OJU_hCD`$y zJPY%|_dCnuWBYl!|MxWeOhyj`m0m*nF1zQ5x@2pIJ|o4G#8^`tvs1 z88?+_H;?ar7v^KMm)t*5c082xcJ&-4>;cbgoC}H-a7*adr3R=6ddd&_un`HTPK!{^J#be zWIOgm2H1K8_Y<+sHZ^R$sr!$pC$4qo`LRFTCfI!RWmvi5{w1F8BFu9re-+~IhhXEr?!RK(bsy~hdG61mzp@K< zUb=saakK6ACterr?}ERu71n=PLc4lqCG7l~=keIVAH$As_os>WtFk}wFZahWjy>4^ z_*d>1r(e()wjOc`z3f_fg$_q=z*+Jo-L=lj|LTc1`P z_I+)It@Eu4d;X2E@%&1#_Q|)f_IEkhb#ZIquwS9??<-inRGfDGYT5d@VlwfH)o|F4 zTz_pPto$lSyY|rvSpO*>?Ea;)@+3EGzG6A-`k5TCe%_a`_H~#hp(()BU@{?`!{_haHbzujBDFY@BcvY<~DS9PUp$;&2@6?;nJH zA8Y9E{?_gvZvMyTp})2sw*JZU`99rm+~?r$IiA+S=5Kr++LI}8xPRlLt6}pu%RHX= z=t@{W_6v_E&$%4lKKJ{Hi(%tk%V6_Y3t{Ds`{^t9=flQF7sKXD=E3+O9T&pd{c~XJ zL+8WRXU&Gq$A9MWd|&WG`y=1nFWzyK4Ez0%xwiBE-t&0Y)69XlUux2JD(txW1a?2% z$@a(Z_!u@`H38OMoNYV#rE#$N`;TlVfAW_7ng6q3>(AeSoo}-LkAU?LKZLEf9tIn) zo$2w!#|Fd39c2Bc0kHM9vhl9IaqX^K=^e+$+g^bk=bm5tw1*t~J>O$D*!Y(1#u;9O zmE&HY_E;C#{D!Rl?gU#m>-{NDI>6!nU60-l)(-LcsNdVdeqYw-r+wNQR-Z&nJ}AlK zk*7Xy?a`KScpvKb=j@MM{*-p@&8D_f5A6H&exA0S`k&8XzYp>x9Nxe6+E2iY`^gJv zSB^A>!+0@|-U!wXTtvI}+M_UWscB2>kKU;xQ?KTD)PG2X)xRrX{j~>S*RMFfwL7ZV zPMpp0Z#=Q0?fm}@Y&~UJIX0h+zm$TtA2!mieON-KE`1B^`pcrQ&t*I8xGDrE3+0Bj!TcG&fEvd{fy*mYFDz{Xep>qvk6O1~GN zKKK(>jwoNuqy7vVKe=o>`IY@L@wxwC^YlN!#%&XjSJo$PgPI+3#-gZ#A4?0wk!Huu-H-f=RlKavG@eg0TjeHiVR z%eWf>Td$>jHs3iI){e>P?@>Q6P$s{0JM8>!4?7R@!un$kVe=scVD(@nIOq%KZGKq0 zMt$OY%;NF*kN3jbLtl2dKK>qp^-n*6gMM@U&=lDE^%}HmUyp>%)7OH%zu|DukIJjQ zuyNZuv`6(1u=NNJ!|JP+GI2@S_~PRp&-;K2%?3hK=8U3|nt~_IcW4`?EVPx5MV6SJ1A%v;x+?S_A9P zEQH^8@|k*nIgb zaqRwjFT}C&q37k;`K(840sEZ%J?+=0;IO};^}`KdpM& z&-_q4Grr4vJo3`#q5V}H_Wf)BYp)it9eukJHXo1^4)(v-c?+zasr~ObN(JjbErqT3 
zxYU;CM|?7}{~7;(!sc%ldp`0T2Vv#9`!_4EHo@jQK9`YOOJV(Y{RQ>YY}mN(r?B&Q zI;&d={-8atv~s~{>YDyVCT^S*!4dj!rE(}!pfVOaH9Kj!_K=8VD0J;VE5~L zA6EWMhl4++{W8Vl(NFGgYJ6!DY`jcX9*u#Ohwjg6ykn%y_jD)2QTzf9{+svRAGRKH z9PPfZo^X&4>Z6xr^7Nx=*H7vMt6xUIzVGK@>l)RY<#P!{n6XXEBB{* z0(PGy<(c+qBUnGPAFQ8TR^~mwXglk>bHI+fB-r}M%&>9Emaz4kf3;@(6Bl?A)}Q=c zj_C*I%P!dZyLui^ed2d;mdl&B=^tuWuY-d=RL`Zr`VZ<$_oG||``&B9j*n%q^<;9i zA2h5zQv-Ir;e41lSYFvW^N(P^$C(J5kDCtbe^iI{%g4h$ziP1hWfZKRD{D^=g0(l* z_tt~I8pqmo-Qe*3EbY$CJ0z|5pAkhr{>PB7YRtKKFYL z#=#fCt`CwOU!TL~zixrG&*sIkbyIU-*ALmQ{rV9c>{sp4nQ^SWGb66Q^_f%QaDM%) zNwyOg^nAus$HU=zlz(I3aJ|<5j)oIr-=lC`yatE+)$bYzhwo7+$9u!!dm#Ex-C*O@ z+Rx69&an9_;}@>4Y73hWi0o88}SnTr#N;#l#lCgzWzQqoKJhB7;OC8-*-Lb-LT_WR_^43wZpw$=Swa*cKwe3 ze_O%$U$@b29PtRO{B?X7AKeEV&y|gb?u=vg*Os{c+7I8_9{XO3dAP4&?JxZk$K{u> z>)-r6?UJQ%_10> zZg{jX`T-_y=Rd*n~S`oVX=&aa8Eav(2kUGnR;$KqqY*WMma zKCm$D#!EWE!T(WyHkV`aK>Oqg*mVPV1_yoN_^S$=kB;Jv=<6!Bvmb!|i1nNmVdqB^*m{<7uyM?1Jb&-$ZA!te z_i6^Kze~VDfBE}GVD0DDw)1_K!mx6wjm-X;cfvlO_ONm_7p$D=3`hPUY&~XI*!)sP z*!*-4`=gIidpz>87p%YgZ%g=e=2!c{>gSWN`ehib{yhY%-$ugGdI{J#%qUoSyc^bE z)E-d&{ts5Jy$M^tu@yGo@D}X&{|@$hUHTi*epImaF=Jru?=`S>3uA4^eqU~Xp11yp z@#7`1`vtrMn~zuk-}t^0c2iH-{TH;SqIzT4{aEJ7*tcb1pO5yh_SIdm^-spn^;fdn z9*ej8UebAd%-&ZYZfn7O!0*wXR$py`jsF^7S6-}#jsJT7sD2UFepw7h^{DpexJ9-z zk5<60w_j*G_RvyTJs;WMJpYBT@qmcQ7cGE|ugb;^KZRY-KOfdV`WV&@m3>|_Vb^PD zzx%vqz{YE2>p`Z*vGp{Q;`+OP-aD}Ide0ZFzk`)Svi8?oGW(0m#v|T<9gkjL;~nFs zkAPi2;{BXu|hxz}W zaM-`zao!C!zUlk59^^&X`n?sf{%jZchChX0+!0RTdvL30H{RSHR&VHkSvSxO4*sX{ z>ISguwbs+Fzg5HIsms|6YcEuU_1m|@#^FoYpYO--hV>H*!0yMfACBr1VfE(`*mz|| znfU)P+o{L6)|}@_y~yvjvp(=V9Qie{_WKD~yTSaJ@xenf`Jk(?@?;}yyx?Z?ZRY-u=h0`4&VQ4WXT+hSW zKdE8&%WMamA4!0X|Fws;H?BQLfA&xB02`1057wUQ2)j<@Dy-h_WIJ`HS77&>>I|!Y zF2mY8U0~}UWUu1|So`W<`(y9C7{}52miYeYgO}pi^^|frzxIyxCE@q2Tj&ml>$85Y z2dw?&^{P+gaDU1_>sPevynpw*dc}6v!^8TUvg^C9VB^>829ahMs_Kksil`IhXk`9|w^^mFsb#65<>&Zh#f z)k5Bj;Hakbt<)CRr5S8}(|-8I^FjXo z1Uqk@q22t*A=rGm&(}EJ5!mvA&0`gU)ek<8Fy6F-x5CEV 
ziqIaOr@vPWR)24#UAyRB*!nBqkMZ#PVB<+^VaI!ESb4n$);^Z4H(MFk?(aHYl*6TH zH%`734(HQ8{{jy3QUBEO7JlFQphd9yq`1Gw4Ld$vf9Cb-Z!Cbr{TOGT4@dhy*&qMX z@vT2_H*B2hGuZs@U9j?V9vq&h^(Av<@~?SmH(opkR_{BXeg4iT*Ei;d-T(DtnS97? zo-Z~Zeg4V|>t+3Zi18TZvGpTUVDl*#VdJ~+z{b-qz>Y^*|8N(q{p0sdw4Z;lo%b;a zcAo5jtq&Rqn}7Tsw*E(X7tuZ>aL^~l?fre%Q>bs;Uw$iWy@mS8`|^7Ad(~gz zezeQgZ^{|(H|Rg>xp&$>=tt~SpO<;www{l8;cmFw7dfZ3k*P1-1G`^nYuNg!efB5c zo&+0zJYfIW_jc8H2VwI)>SOD655vaYTFR^!{26xswt$Uq{bGN^qukkMsUza!QOBlIR2amkI=5Z z%>}#9(ZilErhlx1s^$4&`o{g|s@smAP@VqPzgK{RzVLk)hpl(+Lc7nufQ&sg820+J z!}>E5Z71HD9@gKQ3+w0qt35~k)>p9gt7o*|sL$K%@#GJGlX;)N!tUpyz36`I|G~=7 zZLs&7M|+a}fWEdJy-`f2Zf_;5zgz_l_OtV?9vu7ypZl}0c7XA29Jc;)8tum0Ccws*j4x|1PJyjgn*^JG-V8fmjAuKJ)8b!+-!raST_&z*f9>g) zVCx89_k3JWe^~u)d|CZ75cWCfuNrR{1uJIhaiZec=@H zZ(qX3k!1DpO4xi@AJ}^F)v)8WH|%>z@%Whk?(<#?yS}FWs&eXL=6yxC^lHKI0#b?`yE{x3TT`BgQ9OZ(%&%_((Q5#AB?V$Ongbv+}PX?DMWf zf92%^u=@|(1zYd(6ddBW>ZRx45WiJ^w1h+a&GkV^aLE7rJ8fX?m`t26%Gbl@ON^K5 zFT4O-f05C4@@(B<>+sXtPCg)tR}rtc36AprwzK{)E$n=J4OV`phMj*SZ6_a|%69bZ zB-r|ptJFUjznuju@BV?cla1%`T-siQ^?!~38Na_^f9B^(Sa~QLw=-TCSbJ=3T>q$k z!glfjp3nKP5f0~fK5T={Guy8HwF7ot%_Z2l)GpX~k?efm0~6Q#UbepXfXsaT8&>`u zhr|6CU-}c)9<=_ud$9S~)Ab;OD!gH|l*Xz|EZ{zW? zdKUFrM>yP{_J;BA!1{B>&%^U_TpE9Op7{QhfA7H9fmf}UF@7)wHXkGFKO65i{vb#7 zXz`4_`bivzd_y=ss*j^R{9edk=;!$RQGFlnVf-kE*2303M&kwf_#N!J1K)R4PYFA2 zeBbVW@;{h7!uPWC>POf*0LQcOq64sTIqQEM-$!8ekk8ZceGCrI+x5q%;P8IUqyGsj zmlB+B_$L=(;{(nwuj2|F=AZGbROExK7j=G`XTAwmUuTfrc)o*%Y9@-jc{{+sn-&T;#wTUoW9j!~^g|I?ef=5i{?tWb|GpA7URMm(em8%r-%=b_{zds! 
z;-@9xkbiak_`R_84qNH(dZCiA<7@|Pp7K7}`Y-dh&bv~u@?js$df8t}!{!fumV0z8 zTMpLWI|*AqQXzKypRoG8vd80J{tfGIJOJw#UW2giJD z1*?C}M?3x=hLyK@VdLtL!2ZtNu;06=4;wEn4&TTZxIPdL`p`Jz%dpQ&eH`-n=2x{> zln;w(*M5Ej_BkwrL;PKPb|vh3#W!i!9!{}8_POzQ_0>1un2?u*KS`TG^{44EI_36K`_Q(gYpJ za%Kwbc+{SAyzYRtceS64A7-Y0%=>L16HluEEB|W3`qOn`<(%VJ`P&T+`EB&b<(2lw zzIXileTq}Ilc#h1Yft5)o-X9yl{-yf?OVsc^~^(UkDXunyBc;sMCYgVQUAd~o*9qq zO#Pw$o%V|J-TKI&Ka9_>fVEFgBEQWyZidyHM`QB-7;HSzdQ;bzT!mf#zglMf>owT; z+$vaknvHr=_rJ0}H~QWzY~CyxwqDHoSLM$%Si8FtY&>8J?0c#TTUR>CcIqQ)!tU2N z0X8mQ*WlQW(V|Mn6b>^=P%&u{)neWPCb2e$sO zBJ8-y%=!)Ot2(g$Krz^O_>-{vPgjABmo$fcADwJxeY^gM@n6@^Sl2aOrY`6v*4MZ{ zl>VXeJ~QljK7_67&jGtX?QA%#Z+89tT-f_}J#eU3vrgFcz9HYHUvZ!3i{hE&j=kynFzvBS`1gSwXSeJBV;|0> z-Rmp_tFKqVA>R_oSC1#YSB&->>sw>%6V)Hjuzt>ZE!TTR^>ePLBYu)o#vb_$R?gi5 zYZv?mYiDPKt=~8o$NJ3|Vf`NatKa{D^`E_d*9TpO^)E8R{?1ic{dk*6SE397d`Dx$Y2CIMXhRtut z#@}3Tr{9_d*4`@&E1%^X>rG=E*0UPV%xZgV{bXRr%YAXjn{O!{cmA-RF#Nvx|MIqz zfAM;iFBRc%KVdy(xL^I^`)N16=JV11s|p*R%?O)seF#>cM9&v}XFN-L!RN0Yt^vC~ zCJk(U#rT){A{A_XX&u;c@*nTpx{tcB`{`YQ!}@0Jc=yLpo;>F9_}`ac<#Gd9|K~jH zdKu$$-q)WVPd>6SY+U9ntiNGA&pI*p*HA7$32UD^o~@5F{^$DPUu4$f86R}K9)k7b zj3-(jw+{~SNaKw^!0H?0nPL62cCqnK<-O~ZtuHX2sC^u*zh=M4cCdcS8rnnr)p)M! 
zh4l|Q(XPF{*yH)$XBXJKr}ObfK9G3c$FSq&CEAT6eCYA9_2t?d9bo19Fxs8(Rbl1r zcvyL!8xG&kR*tz|R=aK+?bhcXgpFtIgN>K(P@b@k<2dYiSqp1_o`cO7x?WbfcHaKP z*w6hz&z4u*zuDcwytM5Y(70VtlVx7hwov#|Cak1 zXb1ZJ>9D_tp)yjt6lAW8IF$+VEvHpu=>RPQ>@o< z{|?uI6oj>3`*?h8zjxnvKiiQjuK#zOx*v%7vS&S>_vwBjVLh|;OmEts`mZ-=w_a(i z?L42Iu>Q&iu=cs~(fI3JSbh2k^GA8L%Kqrf{?0G-`6k%-p5If5_FIS5k3ZVZJU$9* z7oUc`uG6slEo5N-9Odudu=1>+`|pq+zY1%QRDrekQ@j5S_IGXA=aB}so}v-#_(=;p zF5Mqoz2o`#8>PWCs7>KkG0$8NCl((flbK3})pdN^4B z+5Ok7H*voo?J)P>@_ScBVaJ{OXNCQRl&gN<)qJJjKQ?ZZiv977E6LX1+!V)-Kfgy6 z+QavF13SO6(thK73>x6c=q)u4r@pFJuUqU_eXLbCBoLbc>m@%>cFm#C;}Vbe+;%R+x?QvOEt1R z_WZOXpMs5d-c7sy*R!zo@$R3bd~?4>^ILbq#)I0z`Z@Vw^`84bTA!X5R-V5AYmeuF z9S81LYF$n)*z-sGO;TTw6Lx&LAEo;NMEftH@7xd8{C8H*=XxU8c(tJ7d}{BwpR{r)0S^1q8c(~z__nU+W81M?9RK>2^I-jh^UjCx zeNy7Xf5OJ?7SXQ$@q6Otv%iGRcc1cj^6zWl$R4&o{)^xH)UP=Lhy8o4SJ)40w|!5$ z`wQ-djhDLLwDlG{VEw&4uyqGpVB_`fCvCoUJskFv_PYHZyZb>Mp+aj@~ZgZ>_V$y;!^U)RHpf|Xl-KTo^R{hQQh z?x(Cg84QQ#r<@)Dn=kSCYR@UJ^*^@2`WLUj#{V|J+8OS*vf0#eC8iu>OhqCyXEcOZQJQ@4S$9{h`NU^HTHSuwRnn z`!m@2^$6|8gJkn}?vE74tNFr2ILaT*o?h(rFz@e%)qhK2zqeEXj@A=;KIB$j*m~p@u=~;F zhNJiZti6!~*1!DH1uw#3ei*O)6V_fjPP_8sci3@q23CIj2Aiii4?BO3!tS^6FRXoe z5O#cDfnEQ505(2zpWnlzzT0{W^9_~!UM2GDGg!Iy1gw8GS*E_Y6RbTn7*@V_gA2xreJ+J*Nhy1tsvVO4jSo#arg~t8Ct>POF~JSYi=>(P(X-@>naZxHRlzcaov6u#ka;-3tM2UoB7UmE)B&y0fA zr~2Fa4{yQ#-XE~vpBo1|?~dt@bN!QS=eiHTuHTsg8&BK^yPx3<*!=uf8GmyYtiJdf z)<68z_E>$ddSNjf)`MAhxyt_diSNxH(#p&r`!#SgY)M~$?d_~t&? 
z`8f<$-W-PYhoXEna_~4D^2f^8GqCcjm;G7Ce;)RI^?g<|&Jauu; z$;7`R#$Ia;D^JZYYY%pSl_Rq2$hyX{^Zu1MHvZZ_juWZx848Elbj zaUM*9jaPZS`mgW7j`wD;%EMVoh&)0asYS=ib z`E~92b+CD5--qLB6RbXN1DpTY4u|(?esDMJ`to+PyWjf(*zZ62{ znf1MWVf9mH*!r#kGUGVAjD8vfo4?C#f8@CH!+M&0a6$GXdL7nYxmzZ{J_=U96oHkO z=0ALH_rlg&o1buemyu)n2G3goR^LvfJzD<;TW_80`PfhEDLAZm)sJouyZ_0TwiEw* z$^NnRy2b-0!0P!QZ6}}eE^NQOu=4&rIMjPPo<4xJ4}YTFeDrLON8cTSjVF8xTim7E&;rzzi+*l_FOvH@6D*cv|rPE{K(k) z1M(;tJRU!K6Yb8|j2=%s?K{}@39|W{4Ki}=7TA2_I#~NJ6CCQvqIf(U*5_-t-zqb| zSJ7X6Cx`X@fx~)?z}kIF=pT*`@p<1*bbM@mN%%eeuX*%$y;x?>AJ%J|5A*uMego#4 zynff4ePn<9$%wJPXTr+An`sa0sl)wSpE;d&^UXewP|v15_jziEPVjuO=Wl-MZTrXG zpU-~;Y(0hV-}wJv*zxXo3H4s)^&DU3vz3q57ybzw4|oCAk2zy|OkO*$e}|J}`%n10 zvUQ&7SL4GcVddb{uzJAxp*(8@hxuYYw;pVK@Hg5+elfIL?@^WZFrUJD-0*wu$5x7V zpP$#Ko-Ybpx9t21>+{0>Xh+{oyY|+3uZQPwr^n-$U4-?I?tsI53hM{M{84^nquu(a z1fIWnj@w}Mnd3jKhxPg#-(i2NFdjm^hyGv|+O;puS2_-J+MoEa^*idLJ7E0->yOk& z1z_zl>sg#Hg<+qI^)=3iVzx6s&dJzgrC{TbXFZ-cUwPR1aT<2Ni2LCv-T)g9s|G8l zPQb38tL^cz{j}6?kHYHHUuoB0e;f|^Q{#uv!0zXJgm&xoT6jGE)*(1re-DRzpZ-l} zIPA}(ztkO8|L>>2>lFLI+VA^d*HI3Ft&4HLAM2W4gY|Fs*v`7q(Qp)}w4J=dIN0yq z{b)P()fCt|z}>LxX_H~crL2CO4Vzz$+8M{6!C}84<1kC&SpRBeTz~DewQx9}dVC`s ze$V5#%k0t`L18IRtN_SsQ5+@JFQ6l@*5_iw)YJZyZ;=c7M&1-2f? 
z=Vv@EZBP0W4>$ljo^OF&ujl?}`Z+mlkG&t?dtNxaKlj7D8xHSRy;lNOuDgGd@rJS< z9~&RWFDk?OCtM=eu&ZHyqYiYv=WosprT^yY&tOVeOvWu<@`#u<^OPu<_<0aF9Rd z8-~MS{XcaOl}Euro@pPx2}ki@k9WURSpTyutUojk4)qk;?GtQ|?WdrBH3<&&tHyyR z+n?w2Fvokn@50vGKL#t;roz_uH-?Qj$U&Y5)_&683H%59;p;D0w_tzAe;e35v*%Y2 zYTrlu!@|a|wBOCocs<6y^%ukSI`4bK)>n8xp}slXpK?Zf+&cNUJwCP`H`*^2)?b^`2-&+P--*+B% zo_u9HahZQ&$FGOgV^`s5KWEr{Y%1a-#)Wso>WQ0R>k<#b`lUC+<`0g0JbL9;<0-6P z`2*IU$_Bfi;NNiLQrA-54qF$Isu%Zz+&15?J$^In`VsT%)_G)xLw;R*{|?yvP$Al_ z2P*~}@3Njj`C1WH-Z!8|Ce|8&e+}`y# z=1cR!%BL4#>m`c9VSSHsxE!n;H@~aDR1G%Y>3UA%e~EEyy-!y-tUpz5PK3jHSL59u z!me{^!tvI>EwDfJjP+sjpx?={{J-;NKWx16F4~nhdu8g8Qp3vE?QmExtUUY{zTsbC zN6xf=Y(A=AUWLsUNB%MTVE`QBQ|g=HGJae~$LGBxCe4A}Z>&1(ez3p7+P6hu^JjPT zp+A1D^4j{3*0A{j<-Pfyv9NY&X4v@Ap}79Wx6a!h+dsnbU$8G6t2Z+rTLU(~`xW|4 zInWi>9<}~UeLTVb>_gHA*56tH8$T`!YkzKloqxB$%9lT2pWiod z6i|a)stV-u3y~)cKyOq*!Ya+({3FI zD?dN*c)lpUW!R`;R9yZ^Thu^n;;4-ZKdj>Y{ zc^}Wy=Qj{GAJq!>zFvpPdw$;h#SbKjM?7XiGNA)MLb;gBd>XnKPx_-TRVeRv|u=Sh?uz8Dn z2g9s?$PWAcfZ{`Vp2|y^yiN~Tf43a$e9t(P-y`2p0d~AR0ed|qVE=y`j^r_{KfTia z$eBA}<6l{Z@qF;dPGzJ&@8==d`MU{LFV%sSAFE*FfQ@WNugrq2zpV}XUPi!=-!p2)x!^St3XW)9N7gz#o|EzISe+> zXN zw@zi7|ag@)6jT>~P-FopPp1=8~(Xi{`&cf!0 z=f#eH|26K%eO6(wa|djF*Kv=>J}))=djE5<>r3B-8K-5=!}?o0;b`3s?0VzBVDm!Y`01^%`>XsN$Ik!yZ*V^D zD?RP*_t^z@|D$ZM@r9AFc4sD7Il2s1&ZdKnk4_%V^J9JE&9HXqe%SbgY~G{bo7@j_ z=N{XUSFK^kQ6-Nj9y}4Ye`A^X{K#8e5Bm$e3!Cql59@Ev^LYG#?Qhec?~QDPjXw+> z!~Noy{|u}5SHZ?lj>^ctW3cjZ7i@h)(pY{UefIp`BCfqgG&KV-bSU%UO0UwLQqzOc88!mgvQIFJ6!yOOZ)_aoT#Q}@GO zPpZ$ZkKYG8p7+`s4!@^gnfG(fN8GEj{jmdT!Nv_5!sfT0hJBAMVXvn)Z2X`TY(73M z?0UQ*u=OPwY{wp*0xPd8!uq?j?T`IX7q*^Z1#CX70j$1TXMg;Q$6)I(e4px<)-v+x z7;GM60Ia`!7It0G$FT8C-rZMg7w~xMoc{CvQFy6g>jAIG$h(VNZ&?4XUzUmX;Gc(f z?DS3MFT(E`k92*G`Y8|nwQpv_+Hd9Iu%1Wzzcx&r?vfylVpIk54OSD3)b(2_D~7UsmYyI!;vY`$+T{f$>tfc?I~30ObB z9&8-sHt!F4@~Zt~^_R-q<#4zk_fP#?rXH$;_k%q$1CG|)c|3Wq$6)s-+$d9zk^@#= z{Rr!iCLwS9y(6%{e~0pk_wDz3^}p{@KCwQ=?;UGzXN9$6{a&>5{TJmS^4#y4YhNFO zqk1CPdYCIR_PO6nH;z)2i2alH&#cXeF$_FblRW%J12dcbj+u 
zcHVb~wc|Q4zud3GdRz7E>#+9yf4nd27W%=VzSHsenC-;7thY2zp9VJWX}!1hS5eMy z+~0aJ>yyekK6!stVCU~#$5$+$?EMUaLp)S_sU{raC;E%G!sZ*EkCRu9zenI8ufzJh zFkZrXKlS7@Ja6;$+39cID;ummWBgzJy~_8GT>Suc9*%;24g=L^*bNO~?eog8alx&Q z2juNm^r`Q09_;!7;}Oo=F0l0q?P29jA{_in{f@QhcjNm%YEMuXxEj_^-2&_Hu7tJY zQedxRJ?uD{3_C7=@_6jC{;=me2*#DmY zJnY|>z^<2`=<}f7VFau{(TV4!9C!p)Zv5i&!!9c-V-FqS{b)b!NPqo#Uh(~rZ`%mF z9!B}2K3N63{^>DTJAWCRnD11Z*Bu``r$zS1kN5^QPr4NLy0S2yln*Je^&3rK=l!>^ z`SG!iXY}<>*!#We_`{w$2fLo+e#a~Eq1(A$^M~hQ-_O7pXIV;r^uyb*@9$pNdf#N& z^(`G>gU;+YbBt_$A$7{nweW`GfJWdZ8n1 zeD51r`EehtJi7>o_pg3Ww}Sg2F7Q3~XMFKK*!kl7H@;9?CXQVfR$mT;U61e`&sY8X zA*}x~1a|+2HL(6?J)b{yNeL@q@-~NVV!YsI-U*v`UIjbvs>0SE%z~A_kIUH2lVI<+ zlkB{Nm1l!t{l3Al`J7F#`NR&e@s|s*^)XMw#v3xOV*H?w>dWZY60rH`%CL0?m0{y* zC1vuPHDK!i3)r7=@d#{vWcE1LF1VcT`t>)k9ee(892-wt0(-ypY4`g*AHvGrM`7}@ zdB(!pf1cm{?NY%$FWc4k>(cUmiSv5A>lX&X>gPK4$9_GXhUdY01+Uk9{3h7c=!TQJVz{>wyV87Sa12*2`^K;xaf{kzP_j!`f%MCj}_WHcB zgEGVF*#ofh`j1qM2kNrU!LIMv1e>o}CsR+d47QGW5N!USo$c6ZFTm=v+_2B}C0Kv% zuLQ;~dBCS({kUDQ`ru($eboilzc|BqOCe6R=URa%f4&_~$U9?3E7<(jJXpPz6IR}| zfeTjYkoLfT1)_NDHRnf*$G-axHZE6>`NnuDwF5T4mXZ1Hy6NMvahT7MAK`e{!}W#} z@mJ^4ZvOot*u3^#uyw2DVDk+bJf7#357wVKb(Qloj&FgJ-4_kkUYwyktJ-kK+$)@q z_{!t3b)P?6E)eB;a=}UHoeHpZx2a+E`d|NYe;5G!ke|uaWpsdj{=4D9#5L;4>}UT2 zoQRyb71n+`0eiib|KWOx$Nerd@6Y_r?=vrC*Zr)4wZ9H}JaPErmuN@+$nGnb9agSu zj~;ekh>QFl^58$1cvITBuyKwAua|i?23Ahl?mkz;Vb`yng$LuOb%7mkr(o*~l02UL z$Z^=bM19!z^Q-OH4gD_g{)vwq_IU2MC9I!5)&A6ZWrpq74fc9}JzpTwr!8zJFSHvr zFW4APBtNnZwq8|z=KNm_n`b+re#0J~2NxtC8tFgA@od=m)@`&W5-*(d7r&4HJ_&uP z-<=Cq?r1M3@twT`=V)ht=b>;4d8?JM^~u`D+BYxTPCPmboIGdfj(V{3E*TBY0uEcnz{cC>T{%=_Sv<$31?GKy(*rh!{95g$u zKR-x&VMy9^Gyi~N@r8oeC%s_he_Q*Lx9B7@{+h|hJ9oJq)}Lz%6HjZM1$O*D;rXW2 zpO_Ig50%^g#5Jy-<@t~wOAjZq@8>1h_jU>UCXxKgS=jqO3P^ixu zu;a82oVadbsS9VW%a3uyuhc_jVCQpw<5|SnmY?SSiASe}eXi4B>me=?4>Qg_5;iV# z*!URo>N%Nu;IClo7^}n9*DbgG#iy@UhSi(Na1wIr;_uh*|1Eehbs76%c4jbQT4*NVN&Efix$D2>U)W7tH6Nxi@1p8il!G70f zn#?@t1Uo*u!}{0H+n>6RCt>Gh3)px{J=i?|Gcxu{O;~++kIZ-8^1<5IndJ(DE2oFO zzRRC*{tZ(u|M?r&Pd_?6>{zhsvg 
z*YCpW$D3f|X+v!%e)IQ7Fypl;Y<}c8Y+U>S*tp@(wog6wUPah=|4!I>R|)oeI-_i- zp6&QAoR9HS4Q7AK){|iU)#O>U6Ia@Il;?;4GLiVQ`NA)MzCIpnz{$L~ov?Ddd>m`v z6tO>c-d5VpXWb1uKGxeGy>%z-_*ez&59Eiv&V{i4UqM)XGTY=LXo0+$l7Z{>YI6M_`^`=MP}=#t+KqtwdP)RTkF&tYkZKt2}J}r39=W zRTb90DF`PKN2@Di4`#Ffj)WPlVe2mrCNmy$)!W_`R^Kgx9nWoL=FfO|Fm(e_JMoY+ z@6(@p%-M$-kLb@DGq@k@&J72zKhI9@(I5YA%TL@N^7s4c^vAEc9acYlIgS3reYWkt z-apq=?g#yUWH0weJ}48cJ+uZkKL78#w4>iYfURddDKoFe+K#{aB%Feu^y41pBXRRu za6<9s->-r7hsxRBqvMX5GV$ecQ@B3r`kwod=RrMaFWC8<8#Z6j1wNd%!j9Ci`K?ov zIUn-!vE7`XdE5z3#$WBf>-zX_IEnt)nfrGlKk)C1!OGvou=Bj@MA{kWnPKBmO*I0e14V>{Z7k1&aZt|wx4hG- zGn|NAPXp^m-)}qf^vt&F^5ERtJP+z_yTJNo<7Mp6`mpx-y>J3?`7*Hb|D(5fzv%mH zuyvM4-lQGBD26SjWyYdFEWHQ4;>9N2mNCTx77H>|vR5jOtS9v;j(koK_h>~T1Scyue+@0(V! zKXqr%$;_`*a1wF8rm+4&#ZlZpaf*&`RKGQn-{UMwX zmhq>Yyc-^Tc>0c}u=?YjAzUBv!YQ!+TS-{?_YJH+aD{pf{g<6GcGSCYBJc5>%=w>( zjkjcCzPNs%3#@#57&iZ&-u`b*F4Yc>@}ty?Xm9s{z5gw6La)Qqhr{aU<*@R9CagX7 ziT$xp=ELS+hft5AAC{5%OkAz=Bb%BX8>%@-2&bwRS zg7{78kXP#0lf7xDt}+v>-83K8{$1<&WA$bQsbg6K`};#|C+_??Y#rIlaPrDY+vma7 zIXw+qN45kG^?267E`{B%X;UxWA9?icud zU-9!M!sdUHY)4N#-JSmE-CK~?#yz{ryrM^kM?L%0-AZrJHs=mPv-zZb>RUCcfeYJ;;4>X6Be}z51(fkQRVC$WJf05@)o#HI@ z7xvr|89#0l?ELQxA7)+eHQ0ITeksbAn&?OOOWiHwXSPvaGA>uc&Zj}J_Gp(b+~2F0 z|6QoQMsK%*9mi{7?V6`x?ZqF|*Vqq@?N2=BXV~@QjciB0oq)Ts-`A6HLDoUtyYK(| z5B?(O<*KmbqQ1v7-fP3!JL@}hKL@@@yWMu|;*~J*>(0Aj*K^K>9e)R5<^5RL?;IS4 z{XSGrSU+PQY`*qokH>D^3#&ih@_6ju4`AiVD{u<+K<~=0Xwc{ z!^-cA?P22H>9B{)AN&j(pRWaLA7+9Rer?^l8upWVeNH?2vdw+h< zHHGVbS;nqPfR#7hVg2(zp67n3Yqh`o4ef;!Go|j*(*D$ykAMpjcdG?kpV1xG-!B9! 
z_jhtWEADH~_X2`3Y`s>JQ4s3 z>ifTwVB}+WSbgdKxY}DSVCBL0aHNOrkDOi)6W^Rs8g_ga;e5)IHT=HMKMkyXF$K2X zqzL_$Yj4rt`nY>Mo;nO!y`C0Up7e)}yPRpo^-xFL5jHQlA6Cyi0sB0DfW!0hJw)Hf z<9E*ZJn+~2KA!Q2ov`t?iyqH7*x~gdpZp%(;Z;W`6ykjP?WJM$!q>Ei{mR)d`|@41 z`#grX&xCl;R-w)ddKYB84GOYbL1lC@g0b95H0PNp?lASLtV9q}g zR(@x)KiA&|j_R5`ALFH|{Tc6h;1ud1?uV8CIbq{5g={BZJDc-qM_yt)nvWf5^*77N)T5Pzm50Ngz-G>5MO;1PC$<;kF-Av!|JP#WbByZ zP3ga+a+MmecE%=HxsnYwzSLPp9wi}P^>^-tjpxikek-Tz+s?Ru6E+TA6t-@#1Z+Kf z7T7pP5!m&aA3nqVFdr(SkBpy>hds|9=r`xpOxXAPi~0~d-u6WNmZ`Aupkc7_fcs(n zsRpq2=XmtFeojF+0lhgH`^b8(3s1wZZ=BEpj>a>r|5pQh&HC-%WqvmY9MwU>>YdZt zkIb7tZI9WL>eFxGg6;COd3y`jgFQPzCeAtzvh$59+l_!G}MZmbNLZ-%t!r!tSgNE0+tx+9jo7$9)Ew{C8HEdbx9dJPB9nP&>hP ziStb8x)@x(*EhK=79hn3Gi+K#^Z@(F$qyZ^Nmj>oPT z4JWW3rYCIv^>#P~xtf*d7p+5pwHq7C%(I(d$5%(#_)bPxd!-j_KCguRC$#%+7OXz4 z0y{nz!P>9wVC~o4u=hQ~A&qZ@c#GldyLG7q*kf>H+&a2gu}CmfIiuun}w>_b+g?PZ)fdb$MyNxjz3cJkIs8 zo~S5nK5;cn{r!f9u*W?K7sSpU4J+@<%EY^-!^VC8Y0UAtmhCuffBfU!u;TuRi_BXPmY_@ujz5?Y>K} z`N=M@cG7Lg7vlr>!0Pjcu=)Kwa5B%W1+1O$(_=7lbcgbY`mNQlbzrH!<9y^b-?sg? 
zdZq4%^(Q;Q{_beH7%_?C4cvCZQ9YNV_?U{7+AS; z33lDW>oV)gau6@jo*E$Y-H7V2^1BbL{rRZvjQj4e&-p1>d#scF$;&r~tw*T|Yd`dY zt+y|4f8tL4ZD+p!T8rza?sGJ3yksYwOk8FhtUmb$PAK@l((l6N*XG$j?|{~y+m3%S z+y3|`OJU>hqhRIzAF%Q$309v}L_cXSRDlN6<-i#0i(j*rH8 zA8fy-u=;%+?D%L8Ti-GW_WF&Vs26j~*wMRT?dWu{@s54=$Bz7~2Is>*+7BDgKL#5o z+Y37%_rTfxA|2b@&WiYHh{|wf?csZ`!=ko%roL%Voh ziL^5x^23hDqi{0wxVrkC_w@sufL+u}{f?iq8FruP*I?_rm%}OS%P<~xJWaA4xibUS zukHn_4`##GV?QNRcXSEX{;dgXr{psp!}^mVa0+#J4PnR4?CLQ4K%Rj0!`s2yN7;zS zIL;q}jep(=yFRoLoY1dQmn-x)&brM0jQ6)-J`8J*`~d6EJSL+D zzJ~R`8pFn|X2mmlXOjKZceEQX90MD#yTtG5U;BKV?|;I|qyDh^@}%wPoflxA=W^Kk zhT5>>>Jyp#L1|cdKV2?*+vXy$bxz~pMCxkpg7wGh!GpQ(?6BkSK3IDsr^j=ioz<@2 zPet4Dd&j~0-^Q1Hez(CM_YQ14edj}*A3JRhtiGBLYacI!jixT< zO;w(ekw;VPPyDLA$20!&!v*(lt9%@GKBk3@H=Kvf2c4?=|KrVdJ|DvRPw5yB`jg{f z;~zO?)^EH4J3sD$_2))-Jo%-Pu=eg0So_lWV_@@b-C*NWlk87Cqo2p)zr6w*S6T@B z{yW3kY3psLzB~!m{{Ia&AN(w=ysE@_Hm=>+cJg(NVdqIbnLKH8nK)OXjGfUA*8chT z0iFkO%E_?x^BZ9E0SjUEe@|F{W}J+?Zw)IqXLvkz^rQC24*48*zn40&`LZRj^waITi^U!C7u`KzY6hG^KENj z^J=wW=l6%O})J>Npcu4x2E@)AzLFL(;pA5VaN&h72b{N7xV^Am?30BeU7hK;X% z4jX^YB$KaP3F~kDSAqM(Z{1*j?7B0s^Lig_9(gUClJoNB)Iab%SVxh@cI=Jvu+QyG zd48X9UIx|={1rAoP!=}dvKKyFr0jQP*m|E=W$dy>wsT)CVD;r=u=Z`$ zIM&`Q02k!G8`5t6??O3#4>{A!WjhIbiF(dU!nRd>$wZV-FR;KUY4KgO#_% zVc%;>*zsCk#!e`1f9#k0Sq zf%Vfe8UIFarh>h`+hM=IdbP~;_t75K@0t(WZw#zHZx0(k`V2N+)5iXc-<7cW%;vV^ zKmQ18w>E>-2Y%=?itcFSk5`JPd*^I#clzS?%< zVc)>sXCv4+XZHxK$QA8ehUOnrHmVzBem=jS}B63510n!x67r`Vr-^bn87 zzw9bA|5n5P-q6xKU-HHu6K`^VgHEvd%)zkF<3;)_=laIZHvsm1HurqUuc5H{`A1{t zf5ZOd6AH(%^EE$gJgTAn$shj4{rmmqs<834Z9X6Jg%xbaUNqnD{45FU|9u4e-izCx zeBZ2X{qm*I`SmAAa#awVf}#?GXBW>ur zJqru=YY-*xwruhu=4zIuzC~=tsNp zqF3xsoM@%}@t2z0PQA%i*t(V`9#6dBM>y0kSYL7oRu9&oU4Qmx+lj+HAY=a?kQqmJ zz{)TCtFNyxUyXY@UzKO=o!`uZ^sx3pDcE_C6?PqG9#}b+%lVA_$_nc*rcxfo<{(rFbSNFr(ZKq-5HR=cLp$j)wFi1D|H;1;hqd1a*^XW>2|GR~!^ZpWlkw|5w?FHqi@@s36q&f-9kBZF zAZ$HqHrV>~oanc}zK2C{s7JBx?N8gWi>9I<^=~txPt6~`5!0u4z}nMM{*<^(lKK|A zq8sh{v!`Y3hgvf7trGfM`CbloJ;rD1bL8n=u=-$^?c84u`;+fTg*{=t$~Ei>?djau 
zALjQa!`@Fz?Gwi1OEU7In@qmVdKlwv(`EE|5^O$B`)DwAKF`AX%iqDu^(SHTQ#)

&q;$lrd})vYj5O-9Y6Wts159xWxig(o;ALH2OPy= zVaL;La3XRj1$I8m%Io>C_5QhF>m{bcUPpddJNq5j`BWU%Ul<9eu-@b`SpTyRY<^)7 zoIoDsMOgVg4YpqFWmtP_p8d(k^oJck%V6_&ugb&?*TVWI17WXwt4uws{ap{7U4Mc5 zXac)lv>>d!tz$d$sTgeia24B;2jyVpUp`oSqbh8j)K>fzm_V1-(-|rzAJ$nc2JopQa>{rQlh_i^K^tb@t`NAY#o^#C6kkL9>e?T=mE7uL_;3_Gsd!o~*= z*-rlZSvZR46CYN-*MogO)nUg`ML3clGJe!WcYj~5mw&kK zJ!j^bIdjgZ&kT9Hm9{famcr5bNB5^SA|7u1Hy-WUt5ae1+mXGDH+I|+*gE$4u>MWL z^0c#Fe}LU@&F;ga9bH?~S zawEj}K6+&hZ2jOzuA@GImv0 z*gVR0kH>!b683$(b z)}MIW{1$#*E&JndrGT}UOToTBB)iN$icIz=PSg)}{_P~c=J>pVwd=FP=Eq7`XT6Zu zd0;*eduTeWKA8#Y=bwlDozAe&FQi7~{g#CFuP4CDyNl*4v74^K`eVPr#*G5`R^ptK z;OKm-{z5lczcORZ$osG1@vQ$~*!f-zHXrwz^QoUQ!0tah=lPL0&&j`9zrGdL&bkb{ ze`JmQxz9u9XR&)0!pezFwqrNXhWWm4-X&4}r`R8RcOvXMoM=0K(HSGIMa>LpW`C-3* zQw26&^2B^O^Jtdi!TPKUdmT4C9=&mj{JQJtAsl$Vu=`10dOY&l`T^~^ShaXQ=&vte z{nrj~$cw#skCLx9{=5b@zthtEJ$mT{?EXXR1Jq};Ytx@~6odKz<-hd;>emCm@%zNX z_QD?jJFNUX47+b@6s(=H-~Pz`#&583srs<;G|*1`?{nDS@%P++60wo-#Q$yqD`zvn z?l(;fJ5C)o&>y=f2zEY~fPK!_yg&94B!|sQ?(=?#&)r%NTdxD_2X2D($I{pz`F3|5 z&xe5@$@n>5m0)%{M-WZ3e2^;780oFgw0XrX`A-}Yj9<1X2 z@&D$*uG6Eidf+?Q`L`W5E*wvJ=Q@D37vdpLwZFfE9k=VsbL{VLV1IWDtRK?`_Pq(e zz{;h1u<`cH_NQ*49PD*>K|d(BbHRc9RzGo&99*z&`IJu=QEC2kblB@e5Mgj=%NXd@=Qm31IEfm9TOl9vqNguCbrm+Amjc#*w_W@EEm%MBqQ_(JJy)M%Z=8d@zDF|t z`(fC5J{MNMY=fOYd12#u8|<$>Kwq0TKLlHkmRbEy-QN+9XMNp7{|BMR7r>6+4p@7x zr|slPm%!G$)Pke)+m3%SI1ulpUHkm$3a%eNA{pHL_&;A=g0(Ngw0EepT>!g&mtij{ zuRoTt-?nQ%vcBst=XmCKX6#}0)mO{7e&q3C>~G_GjLo?D720VA%a+=V9g0>i>@q zz0qI&{|r{%-=e?kG9%ANyZAfC!}(hK4R*i$0NC@lquuvEEQ7UE>caZN`VY#Ls<7|# zZi2lY-QPTG2km+0Wp9sXee{7{FN0v`%a5@3<8)Ym(|o@E%3|2M$vm+7?KbTDRMW%S zA+dP=<_}WY&iYCYJD-xm?x)G+d?5~A7B;_B-FDW?BipIBU+VG9huO?m(hxM-B%MM$ATHf_fzN5U%eSHs`9}4w&#$$m@ zJhwOObKUy}8()cs{Bb|&c-md(jb-zWu>Si1*nIF3x*m+(@dCa`p3j6)cA7S%eyJg~*6O_lqpZ3GrFSnG(#9^Mm=9}~%jK{o#{0Y>Z z$>_-}uzAx0u<^lcu=BZ`jJ==PcJy5+?0%3}oKJo89jrb17}j2!1RF2E2^$xRgZ@#D z{R`{QB~@QV9q;qWqJAPj6r1yFm*2V`=>j^h!h6 z^}JVoNj|s}Y#m1^`qa4DAlUa^PlENACc?(w7s0NxDISmf+zhLiCc(yw&&v4w{b1uA z1<~)}6nhL9* 
zAHeSaQlD5q@;3H{^5{2M`IkthzUvQI`IjCxUbGw5p3V*HzwNUfzu+TS|6R5|XSDr^ zTU>yRpC5$vCt{<2eg5Zd=XoZQsRN3E{S$^=niMuZm=CsoF(0hH8Y*K)Hh`@+SfjlZ zZLi>eoPdod52oGvhG6{{ChSkJaqk6i7%D>KQAb2ixgZV{RDzYCjRst%jiD`z|7*9$fdT_4uoJ7RzA=Ls@;G@14z`f?NO z{;Ptp@x&t@&-(rmHea6r`_T1!0e0VG9oTy6tlF>4)8VlCrYEetI0JjVBeaLHul~j! zRSr&sjfXvsvQK}7U2h4nUzZa%*l0WYAQ|lZ4A}G3Z={E{OODa*_~*7AeSQHpUQ{0T zePXX*?fn+8->1r02c~Xy9BiIH2do`G$9D3Ng=FN*cH6NpO2Y0pxdfXJ`Uo~&`WV(8 zE@eC8lLq_TczhLDy;21>{?`OnUv!aKccVQXeYaO8o}9TZ#}l7;iv6$rDgYa=ONRfT zJTDDf|5O}ypZF&-^S!tXv-g8y{^7Yfp@Xtrr;pTmLgf#*di- z>vznP$@kBcu~(PD#+B#5)+26&eII9lkr#Vl=gU&t(YqD&m$1_w!C?iy%cDQ${O(Wv zyYV6Gm3{x;->~xflJ&Is16O70r{d%PE1xgG`Wx22nt!|uyWb=^@eA$kn=<3x1or(i z5A07oe56cWTu^=P7rD0uw%)8LY#gIG@fh<6#o-Y4WppuKLq52)?bwsQz{YnzfsG4n zg^fqol8J*pfW5A!9?v|_Nqk6uw1&s?e9Id@LO)f3l}l}4#Mu|~Y)8)AG~Pu$$_&_g zspdu(T&E{DxiU5C}b)lV>f*zv&)IX`yc zb=c=q61JY?yOTUm^j~XO|LPd*`Wp$mzVn}AezMPM3+#9eg4G`fVC@(8t9!nJjp&d6 zmFG{c2fyt@*mz_ySbgy^?EdZ2uzASJu=|@nhK*OZvpp)`uDtsJwmz#4?Z!Qa$gJxY zu=$>`uyK)&u=dVs*!40Wc0Xpi##|5cIN@ook9=k^*mz+wxA z=M3lPeG)rh=TE3i{vvTx`r~JfhV|d;!{$3yz~&m1GIhdxVfDik*!dr~Ip?Q7^f_!kra=qZ@jqqb6q8`D z_Yv%Sb9aBi`Kj~V18cv$hTY#Us~=0W+=NY-c~^ zudwl|(y;OVa;;&;^)ln<^X><$kJrQcmvP$Aj^7m+U*ZGPVfUB6;Q9EzxEHYXk4s^% zzg{TUOPu0s=9BT`PBQXz2<-f?_$7=zwTb!ZKAMle;`fM0Tw*?JZ-Cr=IJ;!6`W}{ts*1QArAG>83>s5V|t|R@411EJoBad=+;(YvTe>8GFx_?*s zI{FCbC+=Ci3;n6HIE*|n4$}n=!LMAYeBilGmzj6wQ_ZJ8f~}YA27A3BUAZ3ORxM!t z)70Hy)^TFk{CDba=uf<6XXkn-u%{N z+O>zigpR&Wf9evfAJGkVf7nG>dE@tL)X!Zw-u)jXzlHI4 zKZpIk-TScX?;X#N-8jy6;zuVr-tpQ9yPq;U?dn^%m$3B-#XKJSGkfpI>&*(AzyBI`JTk!A zf#YS~_hLTDe4gL0Fh6#GKgUOXZ$f+h8tnOV|3EwA^B6XMQyl)k`hW9Cu=xnzk6>K6 zitYFtM`7310QmpjPr>*m>q9%^+Q)Y5fMM;SA+Y(UYOwjP?`7m*V>#;m5&pem7);*k zI{MN1u?*I(ih!*L{PIWIiFeO}{r=NQI3Snoj~u)YtIw;$u5Z8R;5;q>2fhanyMO$p z`6uEgo2d@3%b*yKkkD?Z~|+u<|1t?D~3TJ9gk1_KWyDLt)qP zblCdAgZ>`%PF-Z=mfwTWKF=G~-j(}v-X(b{n_0w0d@%gtr zo_teZ+lljLBVXn`{2A8Y9)x|OKU6cSe{zNEYFMoxNdoF^_xA?sd>oeBL z_<@^b^TDuo;%->E_BU*PC_Uq;y%f*;LmysreDR0N!}`ZZVB?*IVC$JL$ecGmtlbmR zpYxNCN&Q;Kg%FoQO 
z?Wr-CuZZAm?MfrQ!K1@5&E? ziQi>{wOgjb+5t&mpKq%nFzaj<#~Y8j47=XP!P<)jhjKi6brkHrzTjcBlm8iQJL_>a zZ2jYSSiM|uI7}RElI_IBJ|4k%5|^3{E2n;fwMQ4r{QvVv&PN>XFdWEh$=F@pW$J?- z!hB!lMFm*7k$M#Etd}J`U*ka^!^-Ic%sTyYa*=t$;b6O zFMoht-~AaM{l}WH`z2-Pm-$QUnGQRC$f+W*`P@se`riGA%GYbQV;9^cU#Wc@oAI=M zBn|DxOA5l~Rm;jekLHd)=kfQfKl}|g-d+z@f2H*Li63->wf}yHwO{+g!PI%~m5J{R zg0+he3t#h9RyZ>`B&(nBsFIaoFI`hGNYZ=&hUPtDO`JIpCsQIlu zT@g0^=X|qXp#g0D(lz^|ul=5h`L39(5Azq(VE1DN!@h5Et?jIbbg=Q1U$`Ijdkfh5 z_M_e}zyGi2!yeBzCUQQ6a(&97d@}D#$^yGTuokR-IqLky{{I%%KbsFL-v-Lq!9!s4 z3DaQrZFZKCCv#x+?H91|I=_G7euG*vam3TMv(Eo>{8(?nV>usn+4Euj|K_msDXrtp zx*7q85ZBz!^YFZ7#?j7m599f%$4bD~fAxg*(?ch6{pgun`0w7&71;RQ71-}2^_|4; zV{d*9tM~T7=Jh_aKl3rxWUiO^c2WDIw{ybUn<-$wr}s0g{k)ERvibUMQ|OQWNeNqj z_V!O)5BAtzSi3g@=6lr>()xSE1v5|Qd|YTb*f?%}*ymdjcD>|+)$8?P*Y%TWk>h#7 z^KqU1{5|AeHdy`B{tZ@Nl=S+MPpLUxe?5^Lb$;VZ;oiT`hjyPwdzm=yWPhJH#cjtc zD*vya)&*A1zoOkZ*@^MA6Mv|{de<+l#P~&5*9<6KW%hwDMVatEy4 z7S8orKimO($$0a-QTIO+R=-V#)t47w?U64TFYDi$W3L$>jX5Fm_YcGBx2&+w;}mQ@ zuQjYaHw1Qk7Qy;sb79AChyBqfgJAuS7(5^KP+&hV`_0nB=6j2JJpNu?pC5Uuwy^W3 z0IXhH0Bav5vYk4QzhIwB2iSF&0DD$HcRB2O`G+r< zy*lzHnqB`3VdJn{Xje|(l9?ArVC|I`aP)pH?UNkJAN-q?#^2DhAHn*!jbQz}X0UeN za9I0f5bXW0fPJ6wB{+J&f&O!h862bkG4}!lE`=rax2bkj;Ee>g6+)XllJGiY=zB-m7!gKKCkgO{L7B^ zC+-&;cAQ?p+7r`kN3ZRbiR996qwIk7zuUlJ`0+W1U8P9Rc2m4fdldgHXi=kcH*n;VdIk-&olnyLmt4| zC$nMW(6JW4)Ri8A%}>{WwV!fb;CbSQM!?1gmcr({VlSi}`FR~y&eUE+yYHE~#QmXP z@5$`1dI=l1DETYbgMHKJGUJJUY!4f683|j@FcsEcSqUp=cEZl1{jm9hdoud_DIEA- z3he%#B!AP6y%KXV*F%1@ENr|!1Xdn4ftBYqVAtm_u>MjH+gbNJVe85!$k?q{VfDjn zxD0WJ0arL4`?%;5uAh3SEwFy<2-xw0&c;!br*!2_( zHb0UP)}Cks`yA51?k_tJJKu-N*tc)3f~j{r32Qe6!{(u`!(LA&So`O>?ZgKv$h_Y( z{w?koKl)GD`0GU2`SnD`zdHqMPt;z``SD{4-KL%R#TeN9WFOdg(J|O{ya!g^K8LmM z65olGUwPN?JjmBogPo7JVe92X?sC86XQ!;AKli=u9>*hp|A2kI^I`qN9_u-tb@-b8 z+Dkvf;n!rY9QfWJtbeos)-QfxJN2W}VdD|$e&c-PS4YA6i-lq9f5Kq( zTLswoLKj&7?d#2)5BsDnqn-;+QFr4XI-s=^-Dg7m6y9=?I(}d{`?cRzAK0Q zdB5KQ*!Q31<@(H*Y-7EdKYW|}Q$IiE{)`hW4re?V$11KT^j&AhL%U<5^PBp}_h8Sr 
z9#$Uhc07rXF=;IGYQ{9O-4d_L4$Y=&JoDPh;kUfBAVyF6d_6Q6*6|6YX5 zzL+zxa%$8YY<*Kn=1(B+OS}3jq4SA&=Y81uc!T-r`*Hq-U7!19)_*MKyU%MatbLbM z#-5#IJ9<7RtUr0(^}%}o1J=LU=la2JybBwjjIbTM^Px;UEv4t9{@~O5{2uzRGi*F) zHLM=k2&+$HJ>Ytg3o*T3>SpS|%I7IEzjFuHFE|aW|AQVzp8sp^-}&VCMy%%@4(spk zg4Gj)9G@{iyf_E@zKGP0H~UjMKH~R@L+q#B?>Vf7t)CB}-F$4E$D9wl?}^WYxKa_? zjYoxT`rq%FAGpf%b^U$B{kZ>ik@K7UWKW*A`Sjml^JN`jBL(iRzC-p2HVDocpcF~R=xX*mE-tQS~-ZdfZ+JpbW);A`Bt#3}Xo9o3+DK8`U zGRVa3E5km=*0A%o6>R<9FR=DnKiKtg8dl$Kus`wl%O3A{HDRwOz|`v`=K7Tv7ijnQ zb9z6_kCU+bkxRk)v-@nv?yLs8J~!LWI%*1=KdlBkU+2KaUz&3NKJS>wYsa;S<3YSC z8?1eGgYhz6)E3sSodO$w35U&JJD%njgPia93y&Fp^-U=A(R|pAKe#`h`*+SS>e2VJ zo|VsIm>l^!c8qeSLG=t}@zZJp!XwG{{_aZ<2y}3`gfArWsT zOJM6;j>6veHrP082ImX$k+e@aAL}xw=ffV(1RFoiV>|X$7FheEq|836^sw&(`wrF) zzRmfJ*9?M{4|inN-#pm-@(B9-{MNwM2d{;VA8d!MA2{A&h#{jXuKw=1l@9qjYM@A(mSK7{zZ*ncy|cJ^(3By<12!S4U= z0$V?l^54k&3xkb!6_Ba7od7Ez+P%Tn>vxA;FXL#}ejf;HkIUx2hQh|zhr;^1Q(^0e zde|R-da;aL?EssfT5EsomXAH2{f5V2ud{~7lfQln8(&)jn_nINg7HI6_hr6n-}*f) z_b-oztw&u2o2QG7e$vm1fCKOSf*p^Ou=am$*!sC_FPUG=qb{)W_$X|BO?QuHUt5A# z=p*VK#&|r}QwsKZ?~w7|Pr>FZFT(1_60alY<5Spt4quE5JikQBJJ$1N*!s1!u0Q70 z0a(435BB@pv4Z%18UA=q+xdQSG8y|NJACMP-F+Eg_g7?t{eI|su=4R;+lkkfg0+VW z!Nv)()C2Z+71;UL1lFGjg{{LF0PFvBgO$sRVC#}P!G}szEwC9jUidXkoMX~f`y=nW z!=v~13Ec-f|9*h=V-L&3MW?`iZ}~6S`>CV7L0xfz7<``&y}t~uK4Q{=RIuwIDe@_t z@3m*O9sA;IN zY-Z%V=edk9ue=7-HsW#;LSyYbewu=D!5$Lp8C`m<^1Z@g$L ztbWZeqfalu#tSdeKcIh*2d=L!uy%e^Sb1}c>vdj^fCKX2SmgB|vmH5*6E+VMQ+dJt z-r#<%58Mp9AGqOR&WAs6nDwtdX?BS9#`kZlGd{+fLJo2~@g~=!a;qQXX?$v#^PTk? 
zdk^cc@vD|?8E@ae_tNLV_Z(lr=6ln?_$fu4FXpfI@%+rI)qdmoHxDVhqPz10Jurs( zVI9FLSo@_ptX@0_oBwYMyY7k4b{+=ZU8QbunD~3deOUQh2PO{q@*!;guBQF5lODsyxhlb~ z;~>80YJRaH?Dsa`g9G0igxMcGt-OrASJ>nE-sfW2@7G*We_@Ahf?Xe*VDb+IHo*D^ z*KH?H*A>RLwPyV+#Y(9S(`W-W1QaImhHome?{a-)zB)?a$eYO-@(#GHZ zgN>`#_WSWs_JHfj?@b#IOv?Ga&yukEHv{Z=mGyY+tE@2fyR9n1&eJ?Faib!2Jsv-` z1gt*p4f}o0;`V>9c)|X#ae-oR=O1IX9tu03a>2woUQUFSC;xJN&c8*l{{1Dm`ooch z*2C^cy9WDx<3HZuSur-X-U(aBJJaKNo;P6q%$%@xQ+mF)ZTxcw_LBPXGr!kOe61C% zp6CL*zr40Q^Vj`;zukOoeOS3a61G0PJ?!_0XUI|Zn|khN*m>NCcJ<3F*!j@k^EDVa zc{=QV(_yf7TR+%*%@|m@(Gzxm>mJy=>sKC+Ul#2BQ(svDc7Ak%kxvCOz{VHP!1>cm zD149mb$@;;#?!iwL$GoyzkK(D*85@Y8=r^zPS(%+1lFE700-hrFlG4#j>F2+FJSWE ztt1pXs{lqDMg{$An zH?0z^-M-lNEN7-PhRtV&!Nl)gb%)iDp|3hD1nMZFWwLkHax$wOl1wzxp%AsxW@_9YlW`)hCD(|!ly4ue5e+%n} zOn?LN9@zU{0qg%Pms^#1y$jagih!*HeqlTFYmZ#v;?!bESugm@%JYyTonBXm9nYn( z&#xh@9GYqS2W?xmg4O#~Wu8|**ztG|R=$sbtp~d5^(Xv#-#FMf@kQ^KxankA`|zBs z{D58Wu3zQcyf@l;ze|zzuy*Bn+R2w42#57!{)Uaq?zSC&=_VZX^qZiAuz8E8u=3(4 zYjuPko*{VdZ;5nSDKLVee1>&-Jkc)=#osx$_Hbd?qGr zTy&!S@jK$ceh<439PJM(ciO?x{+{!@5$yYf0{$L$M-|w5-TWL+d?Q8?nS4kV*!7kP zj`j~--@$OSKWd$=^GA7=oc`6zB#-eJ);=!=tJfF9&R6{x$D0sD$PXOaGJwEgD)$_syYyY|@6S*NtIo$;-pe}g^$8652oVRyY%+Wz=| z<7w9p$OY?HrNf?99;Je#<8j)b@uT|BvwhR$!f(sOJO6{#x37?Y+IuC4N4nqnC2T&Z zy^LIW1pED*#l|a<n!QV1( zk%{@IeP8q_=WDs|Wi{C6Jsb8qdcy9HTMm02<6z~%N?3n;7VN&I)v)qo2dw>*`kKRDZ;IrcpFC0tSU;pZY<%NAnfPxb*gEn1JP*fjJnXnl zfR%5@VCT#C@M!jT-nKt^;8Za0d2g8`1@}+B{R+=Jc+I>ipTXMg$KcVEIu@)AD__5c zeJeDi?_d5`d_LrR><6!rLyktHp|0luD-xIL$9^2K|5%8=!GxyGfy`TNC*EooG)Jw4YH&((S znI7%C0c)?zhYH`F`~+5>{b4)uKS8ShvHK1)%ESlu(e8Sv2%As-4bI;>?OTmy;-z75 z=S|%TeFLlCYQe70A7JgJ>~NWXZ}07If9e`C!}??0-eA{zVp#c-mG!BX9ZBJN#b{I@t^QSei_S`1e-#uhIdD+#l=esCJjkkW`16Vz< z*!~}`4*d_d{&W#+9mh-9dZHSzayd?#|BV;l>unn!R{kZW-TbX=y+;hshdzo28<)Pz z{b*Nuy!vh;tR4BB^K19@v7P(#dXzJrVEy0ou<_eUF!jpQHo*Fa#bD*eDmV~7e1om0 zO$#e0*Lptus${VHvNyuY@7L%j^M4Vraz=fnJUR|Lk8i`BJKbx27PemU2|T*ejaD~c z@7Lqa^WTT{>&kL|qaPaLOJqO{CM?CR<;`M)A*VEz9VjyL+F3angiX?u7?j25tY##V6tTnh_z 
zg&mK^_D4PqhqZ5A@5Tk^!-4oH9N4D>8&3#=^%oDouER30_Re)!f79#HFMcEw4-Lo* ze&-de{ToVq{&U&d#C?bBBd`6P=VSf81G{gh7i|1E%Ny;=mqxJjV+`%ei(@i&#yrnQ zelABk?jJwT`D&cL1Z;d{KJ2&;g|!=|*`M{d3=YJ%;gCcZ_Mf+Z)c836;-`<4Z%t{Z ze*Hjd8GEay=VLw;hqc=p!1k|hJL9MP*8i>#N6S0qa#!1lA1$Qa`O({U^xrzz_`zV< zc*t&#C%*L)Z2j3D*t(zJp-x`?9d>YdRd5x3KZg z^|1b12iSVjxY!r2kG`;WRykPtGtvIUv1-EBSuKicZwmAN!2@Stzo*p&CO=XjK_<>e zeO_n#GtWa{^PjcgdpERLgkLSK0 z*-jqx39SBo1uOpxX^)K?)H+!R=QqC$JHLZr{f_pqc6tI>Kdg^@?azI&VD0LDa@I=I z?sC3Bei^=3WBsdPu=UM-VdDq={e1@LQ&>IqH}@aN=fLK12EoRgZg73%pI_bdc~Z}{ z4|ZNJgpCXR4tI|CJg7hH{jaorea6;JVC$i$!N&O$z{UqU!RBp-G9Q(a2| z|L`uXzta-d|B2`EyCzSI0hOV}CDSa_V&v*x&mCR^F$81Mw&9dFoi+QvMsyxd$6R8U$Oy$TK=<2<&_> zNd5~u>8&m@{#9+*{6i>g9& z^wXYaT;u)4?-B3s1RG}>2x|}Jg3WK=hK&>4!k=;dXFbpPi9_#)9rq=$d5kdFddW-| z7*FaU^XvaGKZe0!J%8DE2Y*XDXcg>y*$V6b-G)b>8`pLsY&;>$MSdSW_Lcp~Cw>oW zf8>LW+XP+ad6J*X66HUB2CFBZ;*Wa2W&Y;;-g8cA88b9T>6&&IC_3Qtp0fbJ05Fb_1V9$@w4-=@uugn@~^@b zu7`DS&f|%{)`O$tKjs^IUFCj}*W+k6PWu-e9nW!o=Km*hy*8s=d2kNaKg}!i{Oeug z`7)2k0p?H|>u1WLSOB8de|Y zyGc88v@C3W;zQUtR|44i{pqd9`!Qc({bLKbdfwMDuEF|`V_@@DXJP%g>#*@u^F5v~ z)@`0AcKildeP12c-kk*-|NQU{_m6+j64qX5C9{4T$lTvv*nGpsGV=8X>^?p7UCQ;^ zcO%Cqqs((S4eLkUC7@$E-&^KD1>u9T@a{pmjU zL;PbXY+UL(Z2Y{J#}l_K@gQ=3w1M@b#=`mc2DhyXn=dZ-kl!Otnitj|J`cN(B>`-_ zpz0%@FY)mE3Ai8f1L2Qp#~+>xJKkNNM9!C9u=Pu^o<^>Z#;|tG53usRg6-6!eEE#= zM2@D1eZF5j=Xmm`*U2|3@7BQ9SD%52gG`9?FURlNIC(d$|NbYeUYY}2kN4dR#^?82 zulmFK|F!?)`mm?!dp!E_3alP34Le@(U(x??(5qOm@wd#c`91u%erz2tGfk`}`^`16n`oQ(PAYbhJ zzP7^VyRX64H+71|@yM$)=97sxm5rS~Uhi)8~2+5>n~iko%whYj?TBcPLm`|AF$W&(r*5HMIx>r z`IU+BG7py{G51eAC?D+il?x}Ky})<->&VoNOop`!n!x^_EI56@o@fu7Cs_e&KlFx` z`&(e;_fp%@AAi6;cUk#)9S#o7vTq5jzk1u_Yvv3xpKiV-RZ^}OeK8MKPLG1EPxuuM zj32BWxWIPSQK@8+<72z_{LirQjzu!-r*?9l2lmlV9^Yfvdtvg;@C}n;^C`38{NLq? 
zF#$He7e58RPyDb0tUfIO8xL;;JAPlo)~{9ec>J*wu=Nlh!S26rmy+{gzh;Gl?$+P z>`T~qSpP*mzA2*!A?;JN!P+ zF_rzX=Z2)CojlQT>Q#(4W=YTS%&QHs^L;pMd~H7L{jGsVFYH_3C~UpM2H5(FBe3<^ z7hvt*J@zNQwhg9k_}~;+f8=TgexLKUgw3B-%gFKUAFKl#|7s0Kw_8tg1Xi9_qFp<` zcqZlt^$|s3{j6HB@*^v(|9=YBUydEsKQsg$(6G=o>Un$~U&79RS-Wq&?HqRm);_3~ znfXAxU<2$rZ<2-Qhkw`?Htt$1E6*Q)q6Mt|auzn9QOb7Yf7fh`5B6qu*z2ATtN+r& z?iWmzo#W9PacyTmb$6IJdBj8Oo7n&S6t@2f*ylPtM`ZgFSpE7yCf+<6HqVniC)eM# zeBmEt&YMgo&ej>$KI{z}Z!Bj!d7`5u+S2nW9B1G^5dQh#NA(L>n& zTVda)(epic{fIVQVf}(kxwv2A?`dJt`tSUdF42z8@)XM^aCw zeN-(E_ltd<6E-hZG%x3;-lcC0ejmBh7QXlA;;C(5>%EHR<9PkBAdcs`m&woXQ4eK( zoAWzH0q#dRNd24Zbr0-*lzr9%5~mqnkn@u_AY&PrpUn!Y9&G52zgTBA7q* zN$!VnFA3)-pLr41?*EN?dGr4FVB+_Ohr(WW+tM)ea3b~f>cc-i;(Y8g-AFyY`O*eu zIUjj~snqK$58IUEeC)$I#(oRy_g}+7y#Kd7Z2Ygl$DEJ-6-8m!%g30`w z{dJK2u~+-RzE5Sb`&o#;w)h0bPOJ=TmyRmW`N*ptalZlelFwl6=Si^kVt574&+~X6 z)?VFFk@3SX?B{+o^lFOFBJZ~$Y<&I!Y@PNs_B*I2(^lesqV~I4?^hP~`yRX8A3?p@ zyw5p5al=}$b!lBH!^B7bV?T!LsweFJm1pkHpuX}l?DxP9z}m~ht1y1}3j<*5tLs&z ze|SWTY_QL{Uv+*DJM=aC35~Zjt-^Xjr`wOXiUn+AS^g!G1{f z%=QNS9{H}*?4Q&gSluvk{F1@ep;l|e{Udjeuz%Bff57dans%2_SA9`axLVC~3eU+{bQp=a5TYCLLsORfjIZZ>Q_ z?Tc10?}hmhHlHyS_Pq$5Vc+L;4|cyx71;H=wKeyTez|s@>qFjDZo~Dm|7ba^KK&1N z{&a@b>owal9_;g~1RHNj63X?F$GQ6#?bL~#huwcY7dAgJ=}Y>v|D$OXFZ_z%CvSH3 z9M6+@=_}a%JKJIFikG*eKl|5**^d3a19rUn!j8{rnYz0ku=4hD`$)chmixnwdkU*3 z{)T=3X{rvKkNwIgVdKl?I@0cYJ!SOS30S`@7`9$x@YnQ5p5;6fIbVi#i5#z!rx_3Y z>94yoUg*(5GIF!oH{3sd;*~#P;-iVaW&D|UzTZ=QyT3c*j~(OpN5k0ejnB2CdtUREU@Q)0sH)7!N&LQ{6IT);qDV0PyS|T8286} zRvN)R-@jnzZADo9kh(9&BNx-czOU}OjJ$evobf}x-0sKqu%F@@?0(m718B!DI{+Ks z8V>7s%I@#n0IM&~+RnbsG=um(=36q(ZyYfG;K=bT<@FG+eGI$bq9*M83Le7sVrMjf ztsf`{lfT;49@Y=)VLNq(Lv1HcG6l9?%Y`8UDFDP`j|ZAa3dct|;(xAy-c89n}m&mXxPJc{4PuKfl!4zmIF zeN@X~;}4;uIY00BxdOZ1X2a3%t8hQrL)-EDDlq^29>}CIj3?{pDC~2)CX)xs;QZye zCmzfFQU6~U)_$xD`+bN;u;VrZR(^-U`uG3XpK+Z7Yu{BF$M`ZF2VvLUSFm*qr(yNm zY}ntq2CH8akLP~aub7tgrT;q`_I=X>Vc!Sy_5{wy`Z@0ULm#YxmH!P`pXN_eOr#w< zcnGZhQx0~W1m34W9oaiQmVbIRu+;dIVb+ 
z6c_noTrKZpj>m7w00-XJ25S%HfCKOMn8N*`cT?D(`@R7CzMq&ddGbSXr$&y?OU|#~ zkRNuvJo5K=eyd>jtM7-k#|BJ;sSlVT6Q4@`6Tip%xxa;t-w%S#tJHSkK?BW z?0%vmzc3!y&5dlwuRSEQ&!-gZ`)6a%~rb`M}Kcwzh|E9 z#L2&)U4QN|?ECp!!{(9m&HrD!`=P_&==VcAzYf9C@0a#@KZMnnzJJDe!iNiJM^DA| z{9OMaIC}m2zLU8!-)kDr`q$q-1Bc)@w1JgZNfz>Z*oPHi^>5lmj0bry-jy9k@K+t9C*J7?E0C01bN_l0btihPuTduLRkG$95!FS9`^aBg3Gj}zTEz- zhnt7FAMDo?GS6cuti5wt#@;Fo>mT3sc-HlaLtGE@=N9buKG(p;ZSKR-?>APyJ@t6v z&O>MqyzgWM{ds>?N7#7hJFs?4HCX>W-%6Nq8E}yM!LFUVDsp{~KM>hIY&GYjKJWQ{ zt{3@#9(Eq?hF#Ao*U*kV=Jz3uN4A1}UqU-r{m^wSN`|GUBUA39>b6w?N^?k-Ij2C&&^V>N;_RE*B&v_>7K86vn`n@k~J-`{*_s@P7 z)jsogejmBHe;e)StJL8ze#u~1c{Lx_pDPPnueB7`9{UKkjxz2a{66}jG3@;$ms#&y zVC8yg*zr6J8}IqtcGl-9*!tPJu>QowsP?w7e%ODo??G7rn~x5>Pn2;u59=ouH)CE>{ZG#pP{_8_ePU59q#Qa5GtKOJSB=YKNs-iff+lVoS)`h93S zerz7tby0E~jJ&J}8*ix!`~LRIu=ZvX+liA^fsL1Z`$oI*mM|H)U5R%6>w&Q2UC#62 zzYUR*%f(^m&mfugo?T|1q?7T}g5bdVn|omQe@}x0@4tYU@dH2c3N4vv^#P<_HCg* z`KID~_&xHuTVUmOA3D9_v~fc$MHOe4`Kb4E3k2+?67g#qx&Q8KR>L$ef0qC=()TePu%b0 zgS6u(6}3O>W*uz&r?Bn3-|MqOTrYOaC$RCM5wLb@Ihp5i4pwedhMh<04|Bc9uQIUn zvjVJtSOT_wV>%rDd+PsPu)M{r_XWWEI}KpvO~a%7K5>Ksu-B9N z812~ce=$DR8->E!d*QJ5%zD`8KNq$RYWQ*bGhdp+>g%K@XeZ!XNXEWz3>$Y(1A9La zu<@^}n;8${H0@7vfB0QXVEw71uyq6@VaIjlDUL@z7PLS9bDBT7Ui63Gdm(=G-a9aU z;e*$kBG0!N_We}{VCC`t(=c|_8rZyL=QCU%>*yETu}3~T8#(_c!rBvyVdIdKVDkc1 z|Dqjv+8=hDlAMR#cLyua$HK-7CfJ{G-)}qb?eqNBoyEDp^JE{}THBc)RbamlcM#TJ z9t-=vr)x0vj8_lZAN}q9nfFg{k?SQNmJ~MrRS|Z7cV5^$&JVEmaW&Zav)6X&w+6%7 z4ewmy{KWTHc|6ah8tnT&{(+4fje~uk#``=U^BaG{zE7wHZ2ayP?E9uV`8*kyWtU;% zrpICJrF$}U1b@Q%X9fReJn%>M+Ml>YL)g6QZ?O62ez1CEC2TxuC#-$30(RbCwViQV zBeU+}T;cqzJCC=%tehORzMW6wWv=gq{fURfyb9w#Wph4LZ#@y#4s7rICx5vfR$ldj zt%q*)5B>2Um%zqfet^|4YhmN&zrbE^1e~Ay{s-3Hd<1)+MXqr?`tV;^c^3+6AH0C| zpBLHA^(99>8W*_+`~Ibbu=DuZb*_gxyMnOwKa8=uh=Y_() z@3p#29CromJg)*9A4z@-CXSE;RzKx|^&dj)k6teUyG}E~*82>%oq3uMc0HcIz) zu=_GzztOJ$d*L?i*on%2_qnCG!}##t>sqk+s+zF#pYyS=qYrGnaS`nMD0{%h+wa51A3DK~$GZ<0Z}dS+*!{>|;OO7e z?%5{eA3FZV+wRz(`S6VA<9saeko(6!IS%VD_k?}l!8+J@_*K~NF^q=wFQ-3($s;L0 
z)qn3l=J^rlQyyzScZQ?osrJ!i*f>s3+5^w!3H|y0&~xUi@86gyQ@^(pw(flBQ~DD( z=mzV5W_?CG`+>8-?&l~0tB;kZ%BT9U-%C#no97$>t50LV-rspxJK#R+-Tcl=*f_#n zSou-$Ip@cYeF&RJYy&H2|AUQh^njhmaXmlv{F7kEIh*Il|NhN(o?9uI{hWtkzkgRw z#=d`Uf8tFQV4rK+e_`@c6=CCcrDfuPpUC)OHR0&<`M&67uyP;~?aGs6FE~Hn7dyuF znLjE68&BK{>*x29c`ln^|Gx!mxAn9?;}Gw^$nj_mtG_eC#v7}{z7L^*jGbHrwywS@ ztUNCOn~#`gJ9Po+V4ufHIZ7Y#``!M8*-v*=ePkU&k(ab%w_Jj)A8QY52dR&YpU;>1 zo>XI4JN5#sJUa6m$CHoG@rvicb<~8-uN8+~zeQm4gOI}}!*)q}nMrEg^j#B-N!pg;SA4#4_%GhyR1 zcVOe=V`1x!=f|X-@eH<|b+Hrn{i-oMp7+&UgrmP_ZM^3h?DIRip5G%*^HD4qKd%Vv z`76Qtm)YUK^M+ktsbKTFU%}>&g5ePAA^OO~pX0;&znf!ozvNX1uVZ}4?>&XRzV@*3 z*G6$@=lgs`VB^7+;xZoOG1jg{e^9@Ua+m78+Js!V@{WclaUmpPb zKBOVA@sNeKqc3`SJoe2A+pz;$*lvA7e8!u3k-_6xmz81l+od&(Cw6f&*m&4B*!WFP z+p)t(!LFaFa2f2%MzH#QryQe7%=cmU%OyxafAX!#ZW}`ucAL=f|$91AF~fVdJ%7u=1fhZ2i_G`x9TRX*<`y3|8NN0Q-Ks zgo!zx`0~@`+%M}hBWynT8f?8nIoS8t`n_D^MSWrI_x`ZYcQ@?&9Y2Ph2M=W8^4Va& zPt_#}$5VH3Y#HZcU&IvH_j_%Dz20z`ddyvmWbC2h!Hg$ym0qxNx;kuq!q>3+zcs8s z-vKt?)&=%@+QaIJ#~zRV`^Nsnm*OX-9Xq5etUo&fRvuM>jo*jE>a|L+^Xn1p^;U#k zZ_AT$KJ2jiu=?yTSp8QE_Ps*slhcm9@iFZ4`&{O^l!A?$jfH*QviGwE_WeL*VB;~z z-{|jicnG@>w~+mja~V=Fo&)!cPbV|}`CF_H{T3?~_s9FA$H40M;;?$G7p#7IZaZ<%cVOdV4N}w2`n|Y>cJj63VCBhr*yo)% z4ej_#es5cQJ_7bW)7wrxUD34k=Y9IO7Dwu*iLiQdJFGm~2`fKWz{=OxuyTF|tlmud z4(G%E{|Poem>V`fISDquRvNxQ++Y;!{nv(l?qRU!xdiJklz{cG+o$7rHpWRy;T*~?p#E>@wzjx`&hTZ z*4xL+K!5zF2-x-YF6=nU)_WX;ec$0oSpV_uj2zFp_yX46%`Fo@s|c(2K7rL=4;FF# z)CX39eV^k6SUK4U)_;tEo%fw!^QtRh?Tgv4@#vwjUT4e9 z{o^MOg9Gn5fUTccU_17}dRYJUZ&-VOH>^CkVLNf+6R`5)7Oej0mWA^X2Y0-+e;2~) z(|fRf-U`?{ll!pmf7xU^-#@wo+i$1sjOR7j`FaR8A9xOSUEa1G|9Y?e@z2U;g;`gN zVXuE0Y}{!y?C+h2^?wK2PTc3?Y+NsPk8D1)6>Qzo7~6S2KzCUGdxq`U#|v%8zd8Y% zul^l2j^O=!A7^0wf!DT^Z~4dm)W^hTK5C!G%Fgwn&*H(>f7O?X3qGO0@##LW>n9WK zKBpnD-;c`a@x<51*`NH(`>_7f&oXvuKG^u@3fSi=QwJL}LPkD$yngCYSosnnv!2hw z+PA4;pZnW6c%H2PGhB~)G8t_A@-S@tEDNmtbrAM>7J{wsay)#$WocM{^0G|)y}a#Q zr_a;){YY5(-_Gkp?<|A0$0otf^F6j>m#(oLJMfb2ykGSoY<>BtoZJujmG;Un^!IF7 
zz5N|*z0dEk>r44&Tw(LO{62owY*@SLJgj{)6V^U@Afs2Nz|Q|HO}UBAy@?S{LscHBpdhxT9LJTU%pH`qGW zs<3gu?_u>!XIS|&+;-wL{o&~GwjS+HUVa}veG+|WekEN#`ZG_T!q!iG1pEHegz97B zaBX4Z+v#ET`W#q$FPF?byJdgm^C#+O)?d#2F!iX_Vb^~Z*!!;vJAYfq#A_?U%CG*g z-@hsbyWe<*$Fq+so5w5fVdEGtxIX8>V_5rUHf(+Gz=AM(@II{nbs4sPE(XUNKdD-X zcGh=mSUYentlirM*4|nVhY|O!1v?(=;OOf&U(?cd@**W^*Pgr0`PB>i`91xE-mv+N zO|a`fegPQ!u_vs*RUOto$m;b`w^kSSJSkzXzrM`x#Djg#wP5|thde*)Z;Qd^dw+)= z$HMmK_t(JM%LQTcl}ljb2Ss4NujzU-9@)p^`QAk{SUq*d@uY6op@3y*!bS3 zwqrNsgx#-^6jtxVwVi!jdzqiUpL`4RU;R83){h^f1N>TkXU zN9zOa$!pAi>kkhg4_&t>Trc?dpTXKQ=WR!xr-s#Af&R#YwD!l3O2GM?e@Pc|eVi|* zzmFU_wSe(Jj=l7F;yOEF?b(~K{>Mz%{WE7^^OZl^A9;UVMo#pAoiFQQ?Utsn{`VwU zyS*B$ee|L2jO%-_^EN_x!S_(&!#@9&GV=Q6eC8we$y8Xs@+xe+p{2}om;xJ@t7w1f zH@<^?o*%%jv!<})oD0_e`UF5%A5_&xGs83+)+mhKI{r@hkehQqQxJ+!=eE=2gPo8nlFI*4y+6vh3 z{_=S8(_g~ISz>d)>eJWgzY8%7AFc~)pIlZS5X7_2|r1a@6yf{h1uhpiXh$n{!p`J>0PFLAES z{;%OO^~IySKjhjP*zYejgai9gWa^MI!>-p~xL?0-veWTLKK<_a@O{8lu=dPpSpQ@k z>~p*iTmRDtHlO>Jp1Vc2*Fq-rWS7$9T%|)@O`|mFqYBJ=RfY zSbyo1zvsR?`{PIN^7p7$Eeu<)vkrD#LtyI#X2YH@F04PD7q-4?#%#u${Y^U@58^~M zVe@}|Vb7Ny_Wj6RWfW0j*tk|J*!uH-XK_4wFdwWw`5AWpy}({_-$pT5yZ?yx74rJ_ zOzxj};1byHJFJ40ixaf3(A#5R?Tyy3=dTMpAIih#AB(~PeTx029esBOE*EbfMKw4h}AOr)xNOvHDC|2&c5+@{NIH9p7F51ro+5XA-mVd z_aElM#&_O@9jDQ-^^7@TFOwW!A%U(jD-923CiBYMKxODSRV zYhS&|*rlJq);}bpUH|hI{+#PS$m6lc$LjA9&v^J#OR$eGWPrq{bB5}v#|EdOxQg3C0P4n#2c)A z^}WnIy+FJ9sm`!^`y8zQlTK!xZ-%uazrjDZp1Z${ysRu!7xV+{eWruGzdo?_f9Hq~ zlp$|79Cm$dHC{lS&_vk%_0w%c*3XKNiIaW_8y{H=TVLD`cAVzH`uFW&pJy34YJGb>nPmL2jkc4Y zm`D7{`hetIukX+4XgrE|Vm?^?SHXA{`k)l-I2MMr>&n4i=TYNbgk|1?-3PxJw*Dlu zjK2RB_In4pVDoj8Xr)yHRPSAXt?_0NLTAL!#Puy$MySpB&Y)?e!do6lJQD{n^Fjvk%@8*drs z@vP5&9#6bs0<7NcD3jkDYdiVOMzH#Q2&^8e0b5_&7xsNJS>Yh|tJJWaI8$6Wgu0n5 zuzB2##DA?f_|5yr{u~2q$B%@KAKW2+Z2sdp^G|*D3vB#vsq>NeT6dW`qCv3n-Z4=i6qP`TsNQb8ZAXuYQ2F zKdQp|JKw_EA%$Vks{8Je_?Fv!>CXeqY1Rm&UO9g2J%R_wZDH zpLlChk0-C+3AXO?Em-^YbJ+2Jnw)m7;{#a#q-Utz28Z&`vo3N;`tDN9{?NgzXod$ z_O=~=Z>voIULLRg11mqf!^)>|uyv2EY}fvPjqg;3oqzAaUS|#1{K$rhk>gX<{@BG! 
zVD;JOvg-wQzk7)N@nhS`*kAV(z{KO4!Rr0hu>M^=Sifr`?Drb0!GZN*f97LZ*!W*V z*m{nlu|K5T(I$xRj}(brTxh_&$FF4_3JoX4{~7+?EL!&4nq#ihkaknTv&T$y3Dw}3mYH# zcMQyV;@hA2$Yt30X0O=v$A9ht>wk5Cwd+G+>tRA+#z5G*nk%t5ALCR7wtj6Utetm%6!%MhI1COl z?g{(eppLNn0S3eVZU@`>{$E$v_mf$lp`F~;*w%W{2q2@Y8k(9A*>&LWf<2_+<&%=emn;2zxRWU=e3C9wjR&@ zrGoVf8^XrNo(*L@kiRADj~v

o3HCT}R)*+6OmcaQ(>Zs<8Q<2e9)zFKqty9_;;Q zfSpfQWc;^hLl{r&$tg1P=Uv$S>!V@!Po=gUIo=EQzF!RH`C+ejl!+hRh0QyE2D?6v z!p{4fK`_sKyzRv055r#1P*{I;GpxPU3J$!V$adyYGT8YW3J3O84&r&@57oCn^|ps$ zM)mzVLcO3M6Rav6DdegO9qRnO%(bc400=1|Y2y;2u8zuO(wzIf7~=ZAgU%6c#4 z(P7yCo503Lx4`BDs=|KXFAR3yLn+ueYVaHU|Cl<k4ZhSAoqx)`z{{-1aAbpBwf&A#q{XQv%re^p1Kx*VFyM zJTKncV_5(H8f^alckBD8Z`uVbCl|x|H>==4eVEL?+bXd6=(=#IM04h3h0O=lf%WHN z%E;eJu=d)OL0m6-vNmi!W*%&OC!LI4)d6-rM39lk^;3w{+9ShZ z>!n-3#zp$W*5A~F%`dix&6DJ^oqF+_j%VNLzx{Y0$ffeI`r!y{{Y@g+`t2WJ>lt5K zpNgH-64vg!Dr4W~gVh@=;GKRKzAw+4`r47O`>lJ!{@!zKIPl#jSbH)dto}&?`+Ub( zUrT+&=sw&J_DCP=aq&~S!0s>W0Be8Nfqj2XVB^sFVE6k~f_b^0pEKiE!rGH1Xg7}kGpznA0{i=)<7DDNd0^|+zk`jBq_#hCfLe~nZb@K& z?zarA9DYkZvhl1Ouztx+*mau_Hvf76wjTFoZ=OH(fV*Jz&uv(FwhrEj9NYyP_grl| zcE@Vi^{^IJ9*%~M&-?*v*HweHPjr&g0=HZtltb5qHe%@yNGKhRsXdq8?knJ3Q?DorJab5BA`FvQOoR zjNX_8TmO34{>0g9!^+A1GJ3WYtUa?)#-2_Kn{QqMtDoY)%Ig`h#|sC$-{4!=dip!v zIUn!2h3&|TOR#dRGOT~~`{#`PJ|DI|;XB%07ad{qA%i@geg*7%91WZQ$PWkXQ`mTY zM#uBMM!lm-4XwdqU1{+$RpKUP}Y( zS4WU}A30#-Yfrmyzl^I1@5Cq_F-};Qf(5y4IQbgdT20yYcJUu={nY%jnr5u==flP>iZ6`^`_}y_q)`FwbvIqp7rvfBj+c-Ii#(-j8-+GT8WWc<$Hv@VEo>6FvMT ztp2_RTW=E?b{+o->(AJ(zqlSYU-HJ|QE&7;tbHwe-&tVy=Y4{`p75~tVK~MsAFj0j z?0vCL^1~tP-}%wDUC?^%qGx15dWcwhUYx5~p__ph+uZ+PE~@jSQ9u)(R zu=$K1Ve7p9f!)v93HJNIckR#ouMaz~?!dmk(y;Y*S77UZBE!miS-Kj7wyvwu==(WtiRJA*8ge< zdz@6T-*c}I7v(*Lg}pA1?|$P~4Y?ldRQHcr-?kYxKRFUMzBmDP9Zj%5@~0oHy)+Yc zUwKW~e8(KeqhE@_*1vx*vmaqz1Fnbr9BY5>yD#ki;nT44`fYu#A3bmtHotZO_P)=- z+H1Sukp9Y}6|i~6L?rnY?8Q z*!aHokNMVkuyqXsVc*NAx*Q+7V=AoOeHhk$R9_lzT??xxSHaHnn6T?%J?wY7=GWo< zQ9rTH{?ublhC}qX>nS>{fAg2uOT2S(ZQcj@w$-q5Zy;>Ey*+IFsw=z(JFS@Qtm`ti z^Bz|-ALnshF<`%UHyzeq-dO8@>(_PrtR~k>{CTMD)RT0A{od>-Sby%18eAW7{E@Kp z^?Y^4qsM=O)#tmbazE&|r84n_9aVU~$kkQ$XT27N&13xr8*lo(GRLECb3N?*?pZ15 z{VjmC_ij|={M75Tf|Z9uD{wsGSj-QkOd)v6_y&^FF*Ib$W;~#~&ALK_({SSWcK6D|j z2fywwnBNc2`>FufOMG-H{+#o5RX)y7T>crX{>%ZJ&*-MVj6e4wchK{lV>|%A{)b%L z5AVG_O|P9{#HDw`?|!haG$#f6n()CM)gy9|LxMpUlE|>SWGwKhBRWu<_h$ z?hnOZm1AFyML8DNIy969e3F8-;99O&p)$2*8FsO&gXuyo9qWR 
z9yOHf(I42veqrVH99X-qf%}(<>u;6G1Lt=CHF3xX+>idr>B}6C=W;bC^Otx*D_HsV z2)3TDzRbGL&-2h8tPXp;aj^A`MQmq&ulrg zVCzpx@P3pFH!gBK=EKnZ^e6s#?gH0?AC-c3_4%{&v=e{W>H9>#O@Pe@M=r?qQLmK+ zwmzx0j9jU4j`7I*Sj-3gzZ5cZJSOwS_*~7?jK_a{WIJ|K+!H)6?ELG_AN+#cM>!sK zWZPla&osXef&Q8YYbX5#8!uVK`IN_HVE^6_wqEF0zb}Ej{G035PKhAnCsc>cqmJIs zc-CKNSosha)*jo={k#54?4uodUK7^dp0bzo6Mucq^K(DyalVhC+?ofgw|c?GaR%DX z`>FpI_ecCSKCFGwnD23z-#DC?`{lY9@8WpKfyS`@$%Y+_$8WsM`%+&-`Gfwvr=GBJ z%@f-=9^Vg%2^+twwUzUeCuq%jG2V4&Gshzz7)fS6oZrO#V0Rs4K6w698#x|v;2E&L z$MD+*+WB5sM_7GO1vY+IAJ(6FvY!6rf6Kz==PJP7UvkGIpa1%e_eCAVO6H&T_OUfV z_Y(?M-upc!*V_W-uk&R*>^c|^Ywy0|dr&^tP_`pym%+-dM>%+Y)crPtU5__lzf&-M z1>?~#*J0!BNnrhje_*dK>vG!3Z+?W$b7fn`^_{HLJUz!ZPX6Lo`ct2p6L!CQE?E1z zE3ADvcPZy%{?3Gbf6te2J$xVaBy9X>6|6kHVLSS}KkWRx2b*sxWPkQ=K7jSJsx0FE ziH9A4o#)*b2F8S=#~4`opAy!euLSE4z5IpiCm!bY8voh`TQ~61=Yjt? z$#$OWW!U&?8d(2$DQx~ZD(pJ=S*Ffr$$XBFozq4(PYJ8{v%}U$^_|D{kXJd0Jo0(n zoE!AKCcxHzM1ika_ah^p&i@Q!Uu;31g?z70eK1ZYE*SXU9danGOrB=zERK&IyjS^* z-7tM7#}Ag*#=GLp;Ck4%e;E0y{^>##bDzFO>9SQmWB1xW5C)0ogI(x)*y z);|v(4Qeo{Eykn z7p~{eA&f`vmqEU%|9i{Cfu_RB_t=A(pUl%F$Wzx{kwGx_eofeTMYjR;CvI~Ac72WM z8#F)vM1CuOc74bB*gsVnd%)wo>&^8c$Hu}w&*i-sk9>Uqd%x|w(T+b>2zx?*GFn&8 zhn$)Z>rW5q0uzVW4QnS}>CF2iA6m9J*MmOI*%8LSjDdY(KI}mIp#H64>*3mv2H4Xt17f32kOG^cgs}{#xC6J`N{v*C<9XmFk437H!jZovrfzL zJlqf4uK2;`!7}UbG__`8jk!n&y^51j=m8#{ty{9zOoax-s)sNj*tCv820yv z_QQUk_X@0kxE!{g`92)@okZJ-?|q>@A#Ym?Ht$i~^E2-g*q?gzCb0Iy!MyZGzpsSV z536DGD_ddhu92|v&g-}Sq`S;KIt%NMcZ7{cUxlr=Xb!tS<0h=VP}~0au6Ts%5eu4Qt^1LZz?3>N@ z$L`DZ8N05F!2!KUyX&w%?EC8tTX)eA)<5V98xIJyYiD>o@+1{$_kK#k)*+RL%_khn z&3M*fC)oIO7a4#17g)cqrA)kV0c?GO-%~d~H4oMvEF>cbeum8>#DR^kkA=0*?|;Mj zsV|reD<7`I#uI;p)td`p>$(QQuCGb9<1fkP8|uKm*I}@7D)ndmwLd=P;{3?#p|%ru zUo2zi&V}7CISclAFM_ovezYBVyb|{Oy=+I1ov=TCbyM4^Xa5^Ee^dr`-_%T4ef|w> zyl$dQy=G>a{P-l;`JP559_{^Fmy--GI(~W<&u_dqEv*0e9BxWnEEQ~>(mU97n9%;z zIYh)BvtB1AZ2fIiSbJn$POgXbqP?bnI1E-#q_-WtR2$a*Dh{hx6WPwX><+8PqrmPD z?GIZI_#_9%M^BG|_3w|u%I`(6`HuFm_Qh$~^%e=%|Gx|?C$DCwKmO@$*!bLJSbzE^ 
zZ2fI>x!1MX_IG~m&KC6i_rT`;I>P2BcfahNEO8b-V^8WQ_E@TW^e?4I1Eqh?=sAj`n&uUn|b|q~6=_FWt z;dj{mB4h2(K7hlpc6xi*=lj_H#1k{X+SzgSpYU_9Wq>(u1=x6eLYeRK>Ms_hKECnS zFm`@a{4>{AKACva*Rb+A5**^cxgJiWhlxk`qTP7!I#@q$s{N5qlVJVhUt!~pePH8j z$6>#3R~mMI$sO4JuVL-a`_SG|{%%YM6VJ#h`+Wdd{hboleyk+3E;_;HXJW(Jq%93;Q0HI-dP(8DaB&eP!&)%&_|g zYSwL)&X!}zoSeY%`p3;H$yv~>mLDY*WFJO^u7nd>e)T8 z_GTMc`(ich`fdw*y>sD!UWa|(KfwWg0-L`ZW;^xfD`4Z{oor`*?1Te(c33;`5UhVy zL}vZ$hrPcvu=QI9Vc%O6+p)LL!sZtvz`o}Pwo}jhPimMt)>p7`$j37AkhI#L{I1Fj zSUYYE?B9FA?g#uHHa?dJHr_ZDHlC2l@z{IwVZS#M%69C|HL&^A^QmAI)Gb)Qa~-Tb z6Rjxo1OITS?f6@jVf99R*!yZ_JNlp!Y<)vF*!}LLWPj|Z=P4P_d<~6#Z2aj6Z2e3G89!_jtbd*ocK_Xc*!Xk`So^Y_{mHL} zh0Sk8Nx}6qPm6GT=ilArybtt!T3Gw)Y%=bLcw~F-Uwivl(xB^`$@6zV@Pi~U_W6D0 zyZW_iV)_#Y9#4Pw$!$y+G+**_KIdDV1e_1~Uk-cT_*<^{+z)c~Bdi=*ADinX9`X=- z*|?j(r)B=Um-e=PeN5WPi>}pPr+?`fLF;1>_PO>(;piL>Irak99=scscH)cYwBLy< zO^U+#d9N#B>uVQB;`+&twuRL@n<8>P^m9)8<1a3bz<3ObB(U~XKUljrspE;$HG$20 zB!;#3^1#;5#f1I+uXwO=oA9u8VVlC!&U&c|8;@KM?@T(PLv`5rVP;r;w-$DPONp?Y zkLRD3c&+iMW?{H~*5`iXx9Ia1Uoakdn2PwW_nR~nr-`Ne;uE`GTzjcAF2MiCp9$L!W-z|P$Q2W5+X0%EHzYbby_2d-=UW^?7xf?~zx9eLkh^kKDe^?;Bdro>R8o9ac^xhK|83}aq)u15ca*af~}`(39CO!!`k~PV6P{GOup|5zxS!$j}AL; zCcu7=@Oc^LJAO=lSUr3IR{zAXKks8B?D{^@&f;>gZ(-FEaahnPCr;XEi7z3PY;>*In{Ux^`YHh;{zRG{hFq*->V7_ zYY&%(t-qY2d?s$3A2wdz9uCM8e@_+tJrsHF`aA<$&rwW$g8lgytlga&);?b2@2zql zk!>gbIN$!r$q(ok*Y8iT@vT>|b#3io?WtR^^(EzDelI@DaoBp4uWiTg-U2p z+#eU)cKjmkDdVtT$>_gcaG?H`->+3Z6@}IRhhX=6M7BSE%SKrF92(Y-9R>S6pLgg_ z_xXI__i_DRz3uM*x&fPC_^5uxU;5j2;`p!OKz%E${&@&bB(Jx`cH)kgVg0oRaMK}S z!~JA`;zl3&eO}|AHDKcy+hFbTEU@-Q3EQ!MUScns=lKHG9=HZ;Z$0GqfAya)z{bZq z!QQ9#c0gY7`@!08+T-#JnYi|1*mzb`*yp?wwm$C_zelWoTLWv~oPo9b*TcU5&am}G zXJPf_QGVap{aNw3UiWw0_4kvJFO^{9BoScq1pRC$-uaB*Z#F(M0QUdu{=PH!H`I3G z&HG^2N5G!PPniuX-}iVt{D3KrXFu;D`(t0Vu$_5(7j}K+f0#?Lp2Nm%PV#%x+D8vye}A^UjNf+;R*qGNl{f#u`khDpeQW%iU9fTENizCo1#CUV z5Lkb2DXhQu{_6kUx7Sl$roN{)?XKVSFuxa@Wh!ibE*5Nk-c4Bl>EacRkDSn+Q$8+- z1K$_59s4jX?Dr<(FhBJ>(p`p;gB@Y(buM4xdeHl$VCxw>!>;Fzu<^5#7wOM@4a@pc 
z&i99nYczs=|9xTQnD)DQ?5||j;X2nN^U>cocmMk{I8fhmKIrPd1Z@2I=~2#4oTD~uJ$y%4J>Jd! z{C(9Cu9tZ84p@CK?J(`=tCZN=#^)2mzK=4n@$^TBxL@kds>Aw$hhX<()P>D=^@O#9 z>ci$)%E5jwYc0oj-^0s;Fmh)%*RQ;I0XHQMztsChPQHb;FMovf|D!YB@4HsAKl&$j z@bNRi&i7a{c62=0_+b>!=l8>TzQ(V@Io{t3fQ_4d;P}R$PQoGQSO4vY)oTwuKXI^K z_9vcl(f;JkHp9kOyTJNwGhpqXM7Cr1kAU@$4jth9_-Acl_Y2R4jYl?v)$iZJ)>E{! zKmNwc{oFrw<)dKjh8wWz?Go() zyHO^e`b5SaEeGr0IN!~;yxhz6Vy}kfeJNM3!|KV%u=4XL?0k+32fiB#8;?s08!sFU z2Y&wmHa^h-);?y+*>>W+$6?paoIg1pdifq~ zzPT{0fB)M4F+vcht&^VY1f~MZ-4aA2-tk?<3Bha z>wFiiT)qway!XS#AJ)R^$$uQrJg#eh_Weah{%99wfQ=`_h0U)W*v@$D+oZ7bZUU^n z%mwSmbd{+C$uDCcwEK*;Yg5Sh2kC6*d4AZ&@q_b=?t{7o`}fhP9hA!upRt!q(T6gsle{4!bX+ zJZ!$Awe9q44C`Okgslf?E#r3-hn+{V^JHwzF*!g*h>$g6+i|xoK^VP;hn!_Ra zYvVf=V1F-aZE$;O$CIbo=6LM4;;{Pd2<&_+3hSr8gtcdEcYl%Z)4W45neVM+g&361XFK-7DOmq%5v+XO4Z9AO!|J==VCU0H zSbO1D*z1wqPcYa1)T8_g>xcB0hh;rH8dm>Ifb|cvz{-p9u<|>dY&{vQzDWlAy!*k{ zpT&ojlO18>Zn0qF&$Vr5UdZa7uWjdg1LM(;?>BJ&*pGSbPdw=+95R3O54PG)eSAUM z&F>C_lT&Y%2sW=>Mh>2z%HywKQRapBjn`JdXKvGwN4f7jD< zkI%X&E#q%KgpF4hkXd)vV4rVJnLO_f*!@pgW#WXJY-fFBgROg81Bd7v{mMyj$obV5 zqhal=hV(aHKMeMJ=8a+FOC4nVhX%0rZ41XE&nv_FDNSI%Z|V8`UT1ULi9>%t|C;~m z?0Dq-Dp>gw1$KRWufE2=4eNOPH}$*g?(A<|KXro*Ve6N+z{ZCP!1~K;VD)`L8GB$h zZ2e0~*tk=3+o=z--pBZHcsS(ve(&l2T8 zdiWvi`%eh_K6k^$U*4|f`mtYT!unGeVe=h*VC|E`@I2(|cd+^IKW)eE?+cs%+YGC3 zhQPj;HFEI#)<2lzc=9BRXm@?iw4M0nua4(_$HLC{)v)n`PO$oHz3tc=B|mF750c06 z%(E4=Yrp4!t&dy@TMwDR{(SG!@$Sz{4Tt!*et#&1C4{*R9ft9OCkl)rrMzufodcZ!@ zUa=j!N&C=zSS-hL|J7mR^TSnld$y+`(eKakR8?^ zdTE`vRf>&dw9RylaS>wi`u zo^Rb?W!m-kYQpNLQXUV#w>qrcPA(H?PYb(0H70DlGX)&b>&zGBKq}aJ-=Ca6>^Erv ztDlF!KF^M@@xD&5`x83D+8cFX<$>+`d&OY$*{xyY?ORsTpL|^**!{H2VCRX~qu!YZ zn;(k`yT542=Zsz30`@(}qFw#a1UAkc&Hm(L>%hwO*E}EVxvIhH^W(7br!uheZ4<2D zR~Y{P=ZjsK3wHn5JlcJJDQqW>Jry<{_Id^F=!+Gw^#pfd?c&X_dSg3mJz{aH6lVeS2nuy*uMu=BfxOg+g!*!?1PVEyB8u<|1sY`xRYC0s9dU13;%v^qSm z%=Nd^Vc$pT#f&E}P?!9m_T0)vj3*8>1opY7gso@V0J~1#ETlj3e;=&9c?vdfcL6qD zwE?!@6UURsm<9VijWFa(^)pJ_&bl%Gt2_(ic;W<$VB;&lEZ}{T-y8rN&&&iHkIL_O 
z;@jJQ;dtyn2nU-FZ3!FSIKla}yHmi{TdjtjN1e}^J522hOI}Q1Y1uq z2v#3{0lOc&l8pbDe;)4(dn7sR{`OsS>5t$3AJ0#JrXKA2y$l;iP6=DzWByb7`}G{g z2j@qv*FFO)uWr-sdfNnR_dSwXZ!=)u_iI>vnqOx9r}g~!(}`j2`TVfnROv{I?>k-?m~F$Hy*~mD7#jkn1sCUkFye7p2{Gn+Y~vTHO9TuRmtO=*!w3kNE$v z861zeKnvJ>T`$=AVt@5>W0`nELfCj`Y5S}HVdGbMVDpAAd47TUAoJet!Nz-5{6v4^ zw(DW*3);ZO4}O-}zgiWx9%2S;zk;yxWhQLA;OTVQ*^aRi_IS-<WomYm{ci+MKt@UC5o(=Z+9bo5Y>&f&d z-nRg@E+r!D^F0h3-@EoB{h7b#Vc+Lpa3J2{c;xglSowV&How*ic0OE#-Jkhp64%GN zI^}pAnwqe`SNc0_{I1w%?EFd$yPt1|_(@R;;+KYo=^PT%)=X-VAiQ`{!{qtUz{=j(R{VCWlssAwqHXfD(R-UJUJ%0z- z`+PH=_MKXL%9y*^GxPi=y=r0uu(Hr|?4_qAyqvs-5 z<$j3|&4P{R#)qw!ZEidEZYo&&BrELtN-Ohz){mf__(0;|@ePI}ANc-aN?3cS_fY!x zjh!Wp%yYgyg#P5Ia=^alm9X!>5UhPO4fgs=!1|X>W#(O789AB|R{r*ZeScrV&a>gR z6HjRzU(3FzLGfAIUegJkIehZ2dmFx!o~}W!}J|whE{nk7gJM;zjqkUEnR^Psq@t=#r*4I3jnb!$zXTH6Ht$(}Ui}C2UkFfRi zzry~$c_@yr|8l)2%sfvDD?gsY*8gmvzwwRu^beK%MAo&uFa4S5u<`Nfu>Rr*`UmRb zJs;oSNDO;lrC_fwl*c1JQpMwOpSOGPe6a`Xz{an4!LFyOuzKS+SbbU^_Iq}d?N2-{ zxBZbf&0+J!A348y#OaZylk$B(F0Y-br4h3+(>yzhL)QR7HO{f1lXSx*v>wGoJAb*8lrP zeaG{B4qKlT7Iwc#D6ZG=F07W%B;8KN4uK5dWRY z@dCex4O`FF1U6or9yXpD4>q2W*LLi@qg}Wj>@)ob?cY4#a=qMV16Y0btn>fa=UYcc zu2iDk_<3Yl`IsN}{e9@f@$l2Wft7O$VCTYXR^__8H z;~m#v=g*yvTt9Ik>xb3fePGwW^>q4ck!(kA93UQI{NyHHL zEvWbTr!~y?As*1KK0Ge-ef;;Z^)P#3{k$)De&%;(%h+)deZJHwjfGwBF=5yFK-f5} z^&r0QF0g(;0{fHyX$e~|5g%5bSAnhHhyklVi^AsnBf#b@qS#Kp_Z8P;eCcc}&d2&% zB_pra!|MN;u<_}Mu>OkmAlf6X#Zs6>eKwNal;a@erpxm%`?K*hg633 zpJK!MsRdx?h4uQ%hcu3-zW6rJ+xniku=_>sz|QA4ExBLvPrG34%?q&cs(r9}bTzCV znE~s6&WBxZlVJUk{;+oM5ZHKMaoBuhAK3FHcRY4Ve^`J0T?>v+9nLV={M$!ZJE8@w z|75%CvnH&bc_R})tp>Z_VHF(MuLK+aUhH_@Yiq|NhdbEL^B4{5{}h$6L!IC1z3eh^ z{MoSeqgi0rYX{i(5EoWn)q%av+s$bwKHUVie)K%-{+kA{_Uu;J?_rmL-EX+W{#N7y?g!6!z8eNb7NR}8&~Fg3w34elh|;`@x1=TuytJp zY1jWr2stbH28cKnrWu<_Oiu=6P+tiSWI8Ry5Z&HyX#w!+4fvfGaQoh6gsN)NlA zptp>E>kPYpzat#j{{~wh(Bd=JE@^0g^l(?lleep9JNmpMY&^LF>^ky!X>UDg3ghn- zfYncjWbz~{Vdr%hSbbQ&1o9A@@jPa}V_9uRBeQ!Yj!q!LjgI$j| znlK(YGsXTquStz)Ctp(^)=$g}E5B+xp8aGS8gM=6$AmKXvk%rzxkr7W@wxa7IUoA@ 
zI`xZw&p0t`9N_}2e#jzY=efVh`p~Jczb`fyR-evwJl`vu0SEkS+tDBHukw87VdZu$ z*!b`R8Gj^;%ySLv^&%JdQ-5jvATeybVV3oozTM$g4GB2;E?^}`p-9E<8(1;x8D7R&lkCWpZ)62o7*yS^dPJp zxDT7}UJGkKy|JBrKcisf=GUCx`A`T}erET2u>Ui_##eK~`VUDzYY+KelJT-<9Pe7S z&2t9$JpktSdD!_~+V24%ueRBa|ND&Zx0q)g2pj+PdqKvNvcTFMJz@R5l(6*&Ep5j? zf5ZG#K7@k(KFMC^GxapJ_}+`_bF1w{k4YG&;R6P zK2c9p7`8s)AZ&eFUfBHWE?7C6%kkttH^IuG?6zYkEQI}DW-8eIGXw0;dXEI_KflVs z^T0m&$oe*3auar5-jk8Dhhg(0>s%pRR)4-`v3d#3cqeo^@0U_Iqo+ zWa3X%VDnoYVC$1A*q?RO2sU0>8E(2cX}IL}Cl1&S_IzWqbG_6R^@826S4pNmV4h5z z=UBF&=erbkKK%y!K32opll@@d{}EXE@FFYEoBfZMY-fE8g_X-!W%37YVBhyG`STe)5BvVC{;z=|5w}HHO{imYw#de0L-rtUk%-@vtwG z!+y^urQ@*=62Qh|!@~X^ava$GeEsXg$lr*t_S&O*JWu4&MB5i_4}AmnI;O$ur^7PO z-|JBx{{|bESP1((<~SZZVG(RRpb_l;fH|<=3)ohd`)A+QKAGqJBdon~9X5V6(01(7 zd$94umaz82V_18ryzRsXpTNcu(>fkKVSnwOKk9Hl)CKH zmEYxI{gv~u_Wst||2v*?a5k*|xkbBq#wxJ&RGVPyyDrp%kwYV3<4Aj9^FM>_kG`1- z>;LqHjVJbp1Hb14`+IP8VSnFf2yDD6Hf(&TGrW^{^`)8|k9=4!Sb4e^w!Wf2Y+h&+ z?Dw~1_3aWF`P>KAPMQX*AG*OFr=m=KNk7sOwJg9G0eqFsOGR1MD0K8ppg z_TDVm`n}Vz_RP03{_b;Fd;C##`V+tD$NY7F-c?w8WjL%nKLVTI7zb-Vt%7|I6JhsL zEUU)zXFYxEc>KYJuyLG$u=&fvGXCyZ*z?DNjV~{@Kfljx;m8Qy$Eg**}p8cK>mASUsB_Rv$Hzu^+y6eKOCh!sZQZ*DlIuJNh{}>~or0 zh4Zr?YM7~A|IY~PN2h_6b8#JyUdv&B@`n)| z&voR5onNmjaz5mDK{yZxht>B5Y$x6{3s$eCgWX?V8@7Hdm+i>oG`6#^F*B_HbgKg6 z-S;eGCw7ymk0=4_2UdW!M;gNV`QO0C=X$`#qcXzEiQ%yJV`^A`WEyOKClws{ybTrc^h*0%F~)BLdZ zwC6Y9*vxj`Q&iad=_;eYpO@$PBH#T!m-6=%Y&_>LSpVT5tlxJDRzL2tKkMh7jJ>u2 zR$je?m8Xkg?ZnXNGxhy!`(r;ufCG9Sw(c$ltbbJ%c3dXdd_+mxiMM_uv(6I9totUg z?z|#11Me5(MXW}D}=KXH`=j>rG={O*r9 zQx>K!BrR-wZZB;8K?2*!C$5r_2T@?_DSw8o504BRPwQtp_KNN98)yXUH@)KdxZj~N zZ2tdW*zc&MmHGQ!*tkIqIOKVo7YpNf^xGlYop1Zfa6aV9C*HsDZbUFThF zC;spy99W;Q@q^9CE8}f(VfQD_gpDUOD9Z7v6RHJUKUxviUaSmTPf-;1ITnYNZ_kR* zPMq}~`oj5sUS@smft`ORV8185*>;}yG25|sd%(sY_runQ)wLZxyAifucG&gui{r72 z62j_nL@5s$D(tnYnPnB$XII;efZJlY2Om8>vbjV z#xDlL=0~Q$_KO4?AISr2r@ku4_3=Ej!p6f+!0unj2&=F6z@9HVY~F4MY<$!4#(%cK zelMt`T{CAPbs?N5AV18m%IJM8Zd zY=f z`aA33kn!e+Z^G&U$6LSu4AviTym5Q&Q|+a}u<`B}u=%F$vU&)1ewKAS-$ywCtB>-+ 
z#={@L=F@Y?$Y=c#=SLd*BL|+s#uF33A^wT+posRzueeLQ`Q~>8xE|vB*I?th*J0)J zMcDk*a9Dp)|HOFcY}owbsn6JWlm3YL-e$Iw_wfAwo@F`5qn97T>WlPpu)n0tn`wp(HIHS6sk{QkB77sx!ncC_pN`~oXa%faT07Qxzy1w0;c zhLy1EDGhA=@+$1_FZ`X3`(r)5m&s2rf%S7saJ~8;ya9#*hAhm2Z<^>kfZ|_5UZ!>?52BYmdy3saF{c ztH{ulUL<{EscRvu{2htUvR!%>J(a_NTt6 zBK$YMGcn%&)ct0+oqg-eVe4bwq+q_Y4tB%Z`MqHE&0jL{wGy!Q??G638v!;xaspOA z?@Z49Q~$jOZc5y=6|8(%Z#({eIN0^Q%68<~vt&WfX928SJ_li zSOIHqb#XlLqN(<0o>hmfe;)vAufOMcDaX>-AGx~{c3t02%K69#)`YD;T@4%WeB}JZ z@2de@r*+Qxi9K2hHXispZ2svR`v-93qoM#yAq4*w3TG;#RY=7*q)Uf%pnjVk$ zExXRsz+Pu2SUaqr>kGS}2<(2diTOAl_41uy*H2g2{N4aqzpaT(y~cQ0|En6TeK5!I z=)E$qcKjOHcvf*({r)Gc{Zq{T*hyz$>vc-nPTcn1=i`wZ&*7l&vvPgpF)#Bx)%OqW z&$wQ^PyN?Na`5}N-sm40eI3#HLcHV>tbU0Bt0xb^?r%y4Yp*PY-7k_wW?t2?KkKHL z%>2m(YwuNsov#^T-%B-EKkiG%osq`(Eh~F-f@50&$D3d)<>}M=?S(Izd0>4Z$>#j`2L;OwP5YYfsr30c-~&m7TELUg!LCX!@l3QiMbx)vJLFddfEXi|4YNh zLx#f2-wgIgeieqDUr}J6pTzZ?~H;2_8x5fsTr(1+i!p5 zZvn@nkN3dZF^OR9<9#x94KEV%JQ#OKrk>&i?7BS#t4Dr=jaQz7jiXM3mFH(*>$yh2 z?sGW_8>j058wc16Yme52tqQPuyNzau=OArVDl_*5^#N7 z*T-*Q*1;Xv^>GSTK3{>=&zoT7<|W(7zYKxZd)Hy}1+ndq9{mIxk1vsn{^XhKb3W?{ zlf&BI&0*t4VPNA7En)XVJ;=%NkS8r*>&-{&t#t-Vi0eJ1u=+m^Z2iOi z*xWz&{VW^fu?Kd+-p_2PpXX&*`)fIDymAk0oO3Shev*~;C*CmK{>A7=J=_kljM7s2Y4q4wuJ{R&%u^u7J5|5*!roJ}(GeX&fu@&s(Wa+3YYV?2bFTis;*ov_R= z^>z+ee>F91d@a1=IbI3pBXO4JT)*`v^19ts9f&=mwcK@2= z-LLaAtpE2V$Jc({4^Jcxp3LjP-o9fy>m(IyoyZf|eMot{9_-khtRM52`C#q8^00NJ zMIDbHlbrSD_bl?l%E9umay>Kbd}|H6A1@)S9vlH1Zw}>n{L`NukA5oZ^%H+d!ur=< zNhb4LGbtY!9}za*uYBg5Zt_1-yQ_4y_^WW4dAI~h42^4{a?PmO?` z-(SGSH#TN~H^h0@i0d)Vk^pvI7K43%|9(xoaS+(^o`aP~g<#jiAF$su%?(?>@{9dh z#~EeDjfVaHSQ5vhk4oFl`S0^QwFi>J+T$l-=g-ab+#mMbET2F7KhD7B#~Q=#cR3AP zU!CW(cH`4A9MAK-WIOc(_kF+E(e^hFeg;+!orJ9eI0d`Tdcw+&KVj!jY1`S~wHwyY z%j$Ui*B!we3%1^J6RduDosRRNPgcUl6OYN{(Wk-2Pkx7$>!W3^X9*lKfAtrq%G6&p zq20X4DA?;O3;TWLZm_>k8y{A$H-(jtf3m*xf1=6A_kFHE?2U-9d5OcY_QJKa93TDs z-2TLqmcxVD*IG09d_7_9k3q2U-~2Llu`^)TUuf8T`jHsTuhuTgZPaXRccNh)pca?(mC&$X@mz1Be-*Xz9 zn(;iB_OyFn*ZfePjpM?`M{~f=|GUZQPaf}aOwNy8b`o~IorC?J#2>KlZx5{9 
zJKp}(U$20*n|jz@I!xYvu=(#Yj^};EgFSyT*nHiZ7>sA#y-UXZA{BZkYzvPd`@sX4HZRhz+jSAzxy-G?u`*X6v&gYV_ z-;0_P1t#zDI0@IwxQejxmB95%w%Z={5^ORpwoJaX{qS3D2(5A6HD3cKH7 zOClIOwgoo-o&(k&m!9 zu=?O$Y@RRjD;ezfJi5U4zZfw{UR8)if8vKZV4v5~n4F(D^X&+NesJQB1h->5eLj1p7WDW`Y{^kLoS{T$MfSl|BFh0>U_Gv?tg6s`~9NSu+RTt z6vpH4oeB%%Cmj5W^P%6$Cd9<|m$p zg;`f6VCP3QSpD<>_N7do;*E@+e-Vb`@f<_hpSV`l!^Zo! zz}8d!?s%^22yA`+Ci`Q*UW3ht?}YUS!tlJjf5+?Z#)I`AcENs^Ig{;+wujzof8JLO zSUYRJ{i!z@4683>^}z~QdvX=5ovHyieA- z?atpku>SZFSiMpW4!IufxbI-UC;9{Jey?E}Y(BU?>^j-(c=9b-VdZmT=8wORl*Rto z$@yU8%Yk;}VnNvWNPpXjOXq-n4R{s+(_r;Yc;=t=chY=dqo`}tQ(O>z- zcH~KR*tkszSh>?0R(`dX(I4Ak?Ssa!d5I^sqmRZop7_bXu<@#mu>N^y)~C;LoBfHa zWPu=`ai!rD(eZO7keA@lv0JFx3Hj?DLA z?!v}%K5_rfSg`)yA=v$;8DRCR{e5o*VB;wV9Pf8PVb}99$Fpwgz3F_x(AIX=@qe)KkoK_klc~Ia@^}ql?hRKR(y~!SdI7iiNgg zH(#OMc)|jic-!gE`s+_0ftB|^(eC#x_QS?UC&K1W{(_AgjDo$-KVj=FyTiseC&JdV zrgc1a-XPfepV+YWSP#cD@1nrgAJ&9DepuLgr*faM@$lTR^2g_|UPum`|BVT|z9YfD z_mr^l8|A%u!pio??|!dn=Hq8Np7>m1SpRDw93n5Z zpI5^AyZLB04k)`Hvb@J9AGOSO?1W*k^Kk?0_c{XdlQ_X%*!ehvcH@=@9FIKj20MR3 zVXr8Uo5Jo_$^a|ZeuDizP(?UIAE+nl!RAL+(eCdz53-#&i)?-DT-dmzUzv}J0|)G5*uOviBIx&BN$$vpp@u=#-;9v^#UC~O`) z8|->q18dj%JgtLzB3oz1`*htV#y>HRbRG8lXVvvzuumt$){)eKtq1wZcH&cC!OG_@ zGW&%h!p3(i!1{MpVCPeM$0Mf(!hWaxztCJSdb2m|^VbgCBVgkNU%}?v$H4Bt*!3}Cz#pIFc;agVVEvmV zGW8H;W%RK3qrUm@Az>hXa13^z(?eK!b_&*iyaBs^IV%3A^@RIj<0;?3`rEryB>4djvi%i6*2gdV$%EQJH%FwPpy7rpu!~XO6YY(k})o*J(K6>ao z+pTkgl{Zmg=Y1HiU%R#2EBfPiXYqcBPu7Ede>q|8^>nbuPXb#v778~0&5oMeh)4lZ2Wa2Z2s{~@OoVet52rG)=y4%Jo6|kY~56E+ljLuQ(oY&R)>{8 z{goF4AQr!Xv17*}f7H(%VfUw0hOIv?0b4&N2fq9NoOYhujbM5F3D!Pcr939TyAJmF z72&6RS0S-Xe0Ul9!S}!OKb}ASN)_1km0v~O2T*zdKhgT3#!*w5Zq zL;Ld{w7>nn$H%Air*11VY`pO&Sbe`A`(J%gRK{+s1RJlu{Dk>He02%_hxzC6u=2bH ztUfJhJL}~>{+4yl%^q`q*xPv+@B8cbi2l^mw&Q%p4}X8i@rfIb;(E=WzI#A_j{B$f z4|Qj&Vb^DJo|oU%sSYbAcftD8kM7f-c~q6>Z9V=J8GG?09LRItgITW=cwg50PluJ0 z5qaN!|L6K$o_DbR)4nJP8*eJW{Lv5k`ws2+RYzd2`{qA9FY1kMG5_=jCd2yc&6vOX z<2_*G(odZ4*b(XN&$?dYdf|TG-3}Tb6Zv8M;Wq5|G;+b(n@3^mF>Au=qb;y`l=jLS 
z{F2$Q`PX5v{!0^Be`mRjy&n~Joj-(~&xvI0glNR`%(qvCt*0#q`y9v1)H6(ljgMD@ z^*_22PgMRnUOVIh@k{4#bo=9nG$!6@-ElZr`+5uP{C|0i>m%O!4e?X=L#&6b*Y8X` z*8SphVdI&R$R8L78vbcgk48 z{>}edr{2Q;9RCvR`)h80?9&e9|E{7rw@IO`bL{h@neEK`Few?&d);I^zqj-i z?ESoj-QQhICcamQ_2vG9s<8D%^Ctt;??phZ=cc z%01Y;-5~oDr%aZbrzi-lVJ=mXlY^T1YC+z!t#r>N^>ni~p?;Z%7M~#`0=i_%>Y!BWqq}^K8 zcKo9DwqtkveUsx+KX?W%svj()KSE{Vc=)*!V1NHMJ#2n(D6G9x1opXifc^cs`LJ@l z5v;$p)$#a2)nMi50a$rh9@b9(8&-Z6wjICd1gyXH6>L4y8CZSr{07&(tM8&@rk(v`)33wqe{nyL`P=5O^>?pfRY$T9PbG1{;Bb>{>vj+d0EkRth{dqn_t=tTlZ84R(|{o zyPnFz?w3oFgX={;l!Uc6)56*-abfEP+Q7yuLfL<4wF6V&K%M7ht{=a;A8h{QIIR8L z3bvkLC2YN5KG^k94)*tJmU4dMHBTPqE&u2VV+nzs`sK-pX4U|0e%~9X0lU7Az{Y=8$jI~BwqyU+hTShx0M_3x6U@nE z>O%{_#_K}C`WIPX>!XHT;CYf?h$&+S<%Ny!MuM%+ntvW99uWc7KR9}h=Sh6{8S>tI zeQufSd8+Tyq$u_{|<8H&ssnM|Ut;?|alWeg5{W;k9 z&gIh_AGv-IRz7TiUB7!}^n5p1dv`aiUDyhCzniQ*lg{zPS!DHKsZ+EQ-x>kCAL9B+ zu7~?>Y=7QM4A}20goCxuXPw~q#HaS5?~K>AJkI?fH!8sH@5=}~uQJ=tdyXO#A4vgg zpWpeLemk}8#CgADKDaM8h8(;f!Frc>hhgS#1=`i0XJG42#yfxT@7Kd#-wE5P zC!7qspCB>o!F<*r*myz}*mc;(cI>9Pu=&hda46!jZ)Kiu1z3Gtm-Xm;sBJs-3O8Ww z(MGUwl0L3)z5OO}_>BHfG9pZS7zaDm8%!Z9Gy@y>#CuQ@>>UZ`PKY`6# zt52=3eG2>gNYRj2*00}%wg2nE)~C(2KXRua@=$wWr0uNVk+Aan)*-GBKQoT{0zEce z=KB&|VeQv8uucxONDypIX6 z@vO-*`L>;~^6op>`#b=9T}@@K=NPQ~C<2>*KMCtU$Ah)MPT3#59>)GWciVj~{r7Qy z+EuXYHv&AEeDzV-eD;IAyg&T-L$L9y@v!@qH`$II`Rg8@7kQEyu=Z~nnL6U`u+Q_z zUoiGd4aZ}r&4S%er9LyBIS4l1qrMEt7dWv0ndhTDaR1Ms`M6zu$ND)6hv+};`JZh^ z|MjQc_0b2`AE^mjH(U@lUy#^#t}isKKYhUWg`T^m!>Iuh4I)2pb=3#Qp+*zalSeJoO&&4dwAI z@1J?xoOp=WyH7?AzcPM;eYghJURn*Ccj^cmk9$G9#W-0`*zf&~vYr#W;6Cq1KhpS& z-!mBp8|Ph3{Kk3K+xN+Pod&C4^1%Ar4Po_tN?3cf3~YQnqWy^@Wro!|$98aj>H(e- zA2OcsBdmY30XBZo33k5Eg3T|z{e$tu6KWYx!XIt}Yo8>AwZG!pjvw?T?East+c`dR z`(to?svm6r=Lu~5eERQPAMyCzu=ekQt(>3u$s*W!Q3lq3o)7B}#j>6KjMn=p$IotI zJn^$7u=c=4nfSvp*!;%?*!_;b!S3H~3x~wVl#lsf{fxb|`@G}JtQYG$1M_z?=SRLD zg^e#{-Nf;bli_^6tk-KBc^~9wio(vj9k6kB>ot{wQ(^N%#@pOKP!KkrVEwJ%)7ifP zCZ6*UR{ocRjko9GeLEkM!|I)Ku=NK6*K>c^Gm~NM=@aX?KHiV}7u-iP1vWl+9agWl 
zfc<@e2eAIfqu)3l>+==a*l_cR0a0AFNMv2H-XLDjgpCT zw1xGrZ!cp!`fQ%<%%h#K_T*AnIXD;A-`fc5U;hC6y|f*$_GLereA!X^V|O&OKYrC6 z8Mzb%wq7Bv`%8#tUHX;&*zN8&@jm9j#=FjJ)^0!Rc-GY%*zZqA=l!`3HozXg`3m~8K3gD9wI5f*+M^R->x2?B zKa4M*g7u%iVE(widm)dNpEF?hpTCFoV>ZFs6G@Qw#xpP5PF}k>tUVT*`Di}9gRDG- z^#}LD)>kx!o%f;9AKF#DoxkiS&IB7jSPdIrSR}K5^DoB->l^DO&)Cj-bHAW*zJDCg z{O$uApLhd@#9w{CU$9>MJ&Q!@H{t*fJsxqZ*0A}}y^bf(xmkTjd}*cS*`ddYqZ^ql=>K|gqo?J?Q zuKN+}?`zkAwX^c-&mi~X!|o?)2CFY;E#Z9Fc^hEk2jgJl5xZgMM;BQ6asjq}C6evP z%{cfs);nEX%>7W0Ljob7*N4Hz6TgFk|35r6IhYwyIt{<2?Q*!fWsR^DA*5OjZj&&v5V1=dbq1p7Yg$;4B4+Mnxw z@eAX*&rtuz)LDjUUAzsHZlqhfJD2WG>6Y#W0cmLw>FyLzS_A=UY3UFt3F!{$e$RP! z_x;NkuCp`0nz-k#nFoC@Aa7yU-$JnV!+}M-FM8}I^+CS-!9uQQT-L*$((k_sR*npT zwQDyl;ClS^jkK>AZ{NboxskB>G(BPc(X&FzJ6fN3%@Xxh?13OS^t=}Jhz0YhkKDV4 z{;Rzn9o7#M2L0N6sy*|#Kl5UR{w&sm?6Cf=pVi;dlfuHj-%Hr~>*jKQ8s~$V z#|vk2J^ItkpkR5Fa|X;hx-Wn5{D}x_m)wK(`(2z)`?9{qE)d+GWnkmR8(`;ae#_Zs zwjcI<=9IAV^$zSh@NycA9-XTo--n&~8|?m`6|nyNhOlz2ZXxP3@4{Oj`S%ufyj`Ed z`yf}Z7vXv2yXS$`7gH3Y9DO>qj9pP0*3Nu4nfGHq-+ar-|N9R1d8>*u9*9S`g!RuR zD8ct)H`KH~>uhz{^L~rSoHO+k?EaisGW{8?B<+Wu_|GKT7k`WA<(mJ{AJ%^M{BH9T z62X4|Hdy=p)S5;?y;PC+XGf!_f`<%w(@K8Xs*W(y58^SyVre3IreBK*m%ywQM4C%ex+zn_lJ~) z9e(}LVfFSTu=1=jZ2a%;(BSqcDzi?! z8bW>K*`HAD$4VaL+4@F-z~7?Wsl|f=SIPP&q`SP&GUlw zZ_I%mpHX4;`-!mg>p1gOc{EDKe=!4gJP(DHD+OTf(~_`y*(T<@>vRE`@lh2HoT~!c z-*I65$E9K8OE;A#%)hGEXCQ2WJ)fizYCfy6 zyPkK4J?~+P<=8Q$Vb6y-?e*BZ@06F=(Vt-Tja#sOv0?O&>-P}Lncpj5?blWz^xj9_lvOp;Cis&R~AgZ)+j4*oqJu4nx% z0_)!{$9z<7)`r#pGBID>AKDDoZfOmxZ#IUF-(_-sGd{~$pL~RtGI5x~u;XwGth~Sg3ueWe*A|G zVeN-Iu=6_u?0S6|R-ZqM{^vX`V>$k-?dXsCXVSoq({`}qFD~r)964d%+x0|0*M9U< z?U5s}@v1hk>)#g3xnC04erXPe#uwCg>sXF{nN|Io_ICZzp7@ZK_hH`GfYskF!2x{& z_B@$Iu=+s@*zs8g*55wb>)8i6H4W`gd}_L=(JF*tBO1?Af7#p$2W z^I`1QU+q7xYnhVo!%zIka`KKphs~#GVSf|%nwx@hxaR{S9c{~eZ)W0T4wxq zgUu(eZGH5ie6aa#Rbl0M9N6|P3oB>-h);i^cSM8rThES1IqSw1u59@>Ujd@ug`-mv=sE5m+o z57^%i$RQ)2yLmnR8yR+3{qk zSG*p-OMBRSfmXh6NdCO@aW-te)aDpGpLl6Z+QajXTEL<6F${jB1JU_@;4Lgn}!s>q+VEqHPVCD19Xq4lxjmr4(e5Xk=d1hIhf5bOC!}_;^VDpo! 
zz>eSAuzsd@QK`?m)CJc5oeP`K*B18tu@bQP1#MvM`pi)nZ{&wr-@J@1k(rO!LA|Yy zKYKXre)MUu-*+hz^_e$|VbAaR1=jvs3p?*d!?xeakoq+(=lfn;j=q!{Hr_Ve`ER>M zr2X(~t(URq2Eyj&ypxGPG=ts0u+aI+_)ZIJzb@6@LGSuL0^^x@(-GM5YCf*}cV588 z-)h3j!PMAK=5v&i(Su6E#uo~~>T4Te&r6R1E1$Dt&$$kt49|Ec?(hP({p-N?f1;_} zA3dlDtlzsAtp4;k9Q}cupCB{-{)Dy57s9S9yI}9T6}Ek6!TQgyT0dlerTZ1$!uCU6 z%8i$V!5(#;Ne7#M5K$)ID;BIDI;!?6?eH=z?a6u@8CHKd3#)G>v_9)YdsumxRK|`f z2OCFA1e-4y1Zz)R;rYf_uZN)=IhPVv&wI^&TjSlIY!B?_v#{sIUADbK_Uoz-Z-LF< zpHI2#Yk%1Nlzm|HN{Yhz2g<_67dC6((yy=BkL>u04Xgi+h8-XK=zrIrgs}0y_A=xC z@dwKBn`U)<;Lke=>o1z5JSLvk7xw*fIbP}4?eBRW^D(dU5j`Ul96EoE&xVKXzeUV% z^Ox?tqa3@wKkRuB2Vvt2?P2rfHo)pJjg<%3ON(Lc!)#v9ch!Qm*TQ){@r@X8pnsJw z=qGR9rU>Yd50o$T!&zAU<%Z?NOSZzc=MfpdOf1;=_a<2RGVl$IUw5s{dAl`X_lK>p zK7PPVu=eRsu=6i6tiHJ%HhyyJHRb4KYhZt$XDw`du7vfse+|1X9=0609}CvbxCh&R z3tv&6@s`%_V?L#Xo&S^ke%d+WOBnra9_;r$d_n&(zxT=H2QG(0^)u!Bx3F?Iwf)2V z$pI_B>V(M0yU)3vyqdAF-#;1FAN-4q{@4a~fB$*w^V|~F=R5Mz|H{7?&v+m7@}9~k z^w3SP@$9*<_U|-Ud2t*z9@Z2#o_F2r(L1YoJ@!XL#+UYG5UjtbuH%h)70v7Mw>kcd zr(gS*_R(Jm`+Y}Xmye-Kj3@toBbo>KN$nt z-`8N}bz9i|epldt-vriPy8vrXe!9=|@RRJczIpMm^JTisd&Go8tvT`8o*Jo*rX=@m)9XFh0)HHZpe0ediDPjZI|c=T_ML<_%!wV+Wc1-sZ6DVGyi--x5}zizr7)HosX&eeEst zBff-PxAotJKHqVpeWiWahjQ)FO0etXH*jEI8ti$rGrb;tTzRNGUkvLX9RRCG?SNem z&#K?isTK!3sR-);>%AFtyNP(Qs4yMMf){sGpv4z$1e(jWNi^zWUbzXEa`_I$GE zut4(43P2pheoFTC1|J=a;XS+^;wfEP-`sr4}p6^yg{~+?? 
z25eq+EX!Gsoj;C;xB6%BYbS@bPd35ka~F1g**~yyZ4hk!TR}K!bgtf{l|^lgUFa0;|84fqnj9*!dF#yS{C)9QmIEcD=s^ zJKi(PoJSZJd8l7JKJ5Q>keAwF@>TXnK7vE9H~v~2`K-UGu-D_KtgF0cJ*)uxeVt&h z>kDhYEQRgA1-2*p!&j^yj*kfRm-^an*zcVSdwy4C)-%`p)0Q*uN5K0393T4gcf$G$ z+ccf3u=kW>aqkm3< zjb9vv-ES2GeMf)r0eA>;>*}!mxd%3$(p{$i)>xl7;2rfN;!JBTXM99L-*WsegUz34 z0qZYn3G4sJuD-`Q@tI6Ke+I0*d4~2eZ|j!j*sW`2{PYRbFZrIaaNzr4{cCk#_5Xgb z`yVPgf3UCbq2KCne@B0tw^SN^+V$rIZ2o(HSU+Y%%gIN31#6Fe!v7l3AJNAhAIhiF1CnKyr(G7O|UeW)Hy?O(-zoxx=lG&9|PZ{fNByOMf)`mTqBR>aW=kYoCu!DY;PTk#t{e5?(EY#q zi}J#b_p_91$9@hw-cw-zE1x65#+RzX#^0~upLcy91*=c))}N0Zwh>lOSpj>WQ?T>> zdsutwDXhODE37?M9RGvynYV)|N3UzEKLYu`0#;v}t-pf!+gzFPxeQi6_!`!J{KM;! z!&zbT9TMRWasKof$omjy`we!z%?hhuU4fO~G2p;?g|Opc+5qlPUQs&yE%rw}*!fZf z)<2sARzD~M>u-t+yZ*`M|Ge!_ed4sGVC~84u=xXCZ#=0nZ2nAmSo`;BKd$GTll?s3 z^|m{#{oTp;!9SWB)<2mWR`1%}m-_f&&+)ykYyW;tea2Bc+SB=Z8g@THZQGys{1JBk z7l&O(idoM1{6zn%-zI_GpZfq-|4Z4M`!PRz+y8vWf?m`|ZdHXn4pVeOkmu=@HXSUsaW ztUl%Q)bE~l1gsx#H5|y(lhGe{!R}Wo4!d9JDeU}627A9~j4$`|_HRdh*4^)6{ezWY z?U$Xf>q|b%(N8bJp1+aQa@L~=jAz>`icI^Ycl_gr+TE7#Wu5sAHh+5_Y`t{&H_f;D zPR3u`2X_3IhP7KG<1aP7P*A3yv%=b=d13v6m0{aI3+(!DzNgQvZ#nyiHp9kCo5Sk8 zr(pH_uCR9dL(7@}!(j7GV&LyJ|9^(pqnBmYKaBrlyYV@XN)=zixbdN@p1V`|q&&z!li_D-Hg9 z_1t2N=fL@#u=Z&;*nHQMuySA?to@Uk_<{PyS?eR8M#|`8k(n>%8(xPUf8RKN*d`K& zc!v7;1{t}z7*@`ogO!(`Wc<^qnV+HY6XPNIVgLR(743yyTNK{)@t1cQQ}aFeqszkP zOV)$UXZynX_>o4yUSA9L{K3_*aS`jA4_7V?{mnX@Rp$OvVCCCW-=B3MR$A(#m)?T) zXQzR+>sHI;ANYF{j^8n`ek^~V!ueGSHa>9;4#Zhxzvuac{xd(dBWyfupZ&?X$+Gi#uxYa7tYG2><`>+EpF<#9hI0M@b zbzuEH3t;1%rx^d9Z`RWKtm93UFYc>_)$eXOpUD5MOZ%J8U8^wogDer?^ zzJ|Op-|Y^pKg9EtZI8=xNIt&(b{sapZ5-u+`@o)0y9Ift{t*XuJ;)F151-wH`(wu! zLY^8AsSoR4olAU5`?xr){oN1_{(Wlh&-zvbHvYBqOTLFZtjWf!=Gy#JKDAMZ{>?oS@aV9W7O?SLKk zqhQDTRM>r-lVRt116Vmd16IFH2fH4vv_9+6*#?y3uRH~N{?TICeD3G4`(K8`>hUpX z5A~EUEk}RNDRX{T5zCQ(wPE$6bg=q$ci8nQ7VP>v5jMWHr9SVEU6#%MK%X84t8Xra zm3uv5*Sr0eETVD;;aj1Tqibg=stJbzw)Y$6%G+4yE? zeaCZI#+UX$LCVe7802_kepazQ=O65VeV_WU^6@IHe`6Hv`CX+MzwRGcZhidslN`^? 
z`$Mq);{%R=%V&G1c|HSR z^}~@~&$#(Z#=dO{n?L>?@C^Nf1rKzr$s|vdVXFT+Y>wG7ufauWANGmFW&PCO zT^)A+YhKv(|6Lv06Meq{tbe_I+>EhROfgZGFb`R#>@wOy+&2 zz~*)S1sjKIB{OdShSh_s!mbCGVdv-Dnv~<8x&f=7{s4Qv`a@X%L~EIK`6=xE3s@h0 z@rmWgvmMoG59}uI|0L6w$7jR(Q_sWtiJHQW-&3&mJNss@7v0(m)R`G{wNBoFP^OcbFR>P#*g;jTG+V%S=fA|xp1gHr@yf; z?D+J28uRV)dOhR5IIO-L12!+_gX=qT{c3rbcAM+?#UFYU)(_bMw!b#Q>d%>A^}X4a zqd)Cq{41|}c|H3#3p?LS!T$Y-^A&wL8EpQY`6$|B zpUL!7Sy+EiblCCj`9jLSn3iL&zC}KSvij*Su<_S~mh)Xrl^@8zCb0TS8sw4Z+r0FC z_`geaLs?MsGsi?@Rdq%kj4+fR&%tS027%z0e=^7WRDVqp<5;6z=c&w!LBH zQD)fq;5*hM{pST>+vgCh9aj$4pSBow{izFw=C`=tptFqptLOFTzdOC2`ErEy(s;`q zujjp2%gndvj8FaX+N-XgL*P(F$E`mDRFVEsEWWaQgE*!TGuVlQ8W&HsG~ zo8Q`)`J%pf0(Kk@b^f5Y9D&VO_(?X-2^)W}$b2-P?XXOoZv*W4gXdx6wC`a5zhpW4 z+&enoiO=1E%_kWJtJmL!9Y1?v^-0T}|F2>9L*4Lt)}tuM7xR~PT8@9BF|7SJA6B1V zBjXnvYJJAf6UR0hTXKw4iKtGCDgZ`$S_3x>F?SvOHZg1j$(SP^|R=+p~ zo4*hd`Kn&F9oBA7sl3GwTxNaN!4k0NWq6*Y`r=^A)gNKoVS!A0#)Sj@BQu^p;jh#m za7e~qx>x@t^5qt+{@NdQe@%>9F!9VBu=Ye!nfDG4n;)4LHlBFK{2=1N8DR4Te}awQ zTJF5=32T3)fnASd!^Xoh!tP&WLH!@&*Ic7N7X2~3<>)t4Wb~BeuzuuDu=#=UWzs1; zpVjk>-~0Zo(@9|cYR~;X#~c1$<2iR=*Pmms{)V%#@yw;Ldd+^XXJ6|m*!5~YJOn+y z7OWqzH?01gIi&sx`pfvnM*PdJd;RTC{0t-DfP8?>AF2a;{=-bxKlQ!zu>P{Fu<`Ej zu=;v@*Gv4VAIN9&{Do1jkK{pLhaK<5VdE#~VdGB!Fy8csoRHZEaFg-td{_>9o>VVb z`RsYX+Vcr5$FJjg!rId(m@lE{4ZEK!CG7ez)B5OP-#Gtxk3O*S;w1Ce@2CnZ4|BuD zVUoh`uPCMbKtDgLKOVd6x$=$iyA(EGWIwFG)bp0rZ&t#dhcf}zz8MC4K3`v0eIp&L zf2;+pJ-xGRaDS$Q)t7gafti1WVD+XEGIGDN%zJ$Sn-6Awm;Id?*8Xn*yMDedO*#84 z+Q9BNtO=VRy}|nEf%8kjB3OH+AZ$L)zr}ce^sl0@^5Q70{3;78Ztm0th1F+z!p28-!Nzl|Tc7cL7S=AzD|2q(W7vGpoB8=Z*7JPKfBm-? 
zgDA&t&xpKne@iP^fA@G;`>0wz`iFK}jXX2Iqk3MLIN>hjrTJKs^YFgteN#gG%T45v z^J=wEqQmO{wUGCYr$@PY9`W1tt`Dr22Vu|SiNktme0v-mYOkt)*MXH++gXpzkNqbX z_a`sCqV^&CKF-4WOS{454;````)(ra_}T*N2VMiazjGz*`AUaj{e$&nX* zwJ(uB2jEb9(tLpJmZM*sLjUuYzXR{`+A4CF^11hkIb#H9Y#L{)01?qmTXo z+wKP~XI#b9KE(d@`oQ@Luzre@u=xjN(SJkhYftQj^@r}ZKK9|iu>SOQ-Y>+ywEYIb z=6Cc`AICr14Ys{E!tOJz1>2uR(cj%)RRY#t{1*1So1$>2{%^c8r_4BbNx9?bU3Tt= ze&G3$uFuC|?U&)&7ubDsVaMktSbygbSo`!G?EZ=3mgDb@t^LFJzmbh{@>{FH?sJ?B zEC2e!USA7#e}evA{Q!@%@;u^dt1V~U+z)%5`=0~*dSLYr_dDyq%a(=qV!h3Y|I&Ut zn3?;rKfa0nP3-llu`WxmODg^kCofsG?=u|Df@3)uek_nGxC4@yTldDxB9Qy>3o6xe(sf6rNe zWQVkrGu|U)p#O+JB!|@pa>3SrnTB$nTNzfq9)Pc6S9g(#kF0@RkH*5v%ki+|?MKUz zw*z7KS8RaQ1DaT${Lp={>uUPclp}{`WDM?)m#Mg({YvgPu^)bewU=(dj<4@h@_hCQ z^v=Zkikz+{qmK@OwOgO0;C&e{b71w?B(V7d_h9oco+pP{XWx50a&QZ*-RpUe>UjlV z^Ou{!`d8+D&i%;m+y*-z`oY>kXJFf_32c1+M#%N)Vb61a3!DFNIT_5jN}h%GAnw)$ zR=&4~&412pIex2UaG-x+<2A=+0{%BOfNf+OZkD9=&L0cD{#oKAMc&zXzLd`!WgNN4q7QsM9oKi;?nElQy+a} zJ*+-g9uAyu7({}TsmvZ|5H_G)d?Tr)6 zwJ@;poH4Ne^G~rUNB>$0yROBsK5>8-u>G9S`s4#AD9HEG&xK+03mU`9jg~U{ z?<`n-wm+A`d?Uo*nZgkt9faEdVF%1rB%|J+7qo~?zQPnC=FeXRStVEq%zVEv27VD--wC1B*w zU$FT>bztqq7qI8WUxW2e)^t2F&eE1*{9@OC3G3e|1?v|ZZaM8f6W0D;;`rn}FT>_j z{R-=!dk^am-45$7ja!=gaZbX1*nTJqtDhYAdgj|J*!k&vP<|CEL;n%qI%$3Ml`mlJ z?}M;2Nap zOSZ!1gI=J0)HnXLK5?a~uzvMhu>QHMwkPxAj`h*ABf|Fg4a?1Q3K<`ly`J`I2)jOS zv%Yqz3c{{m<6+mc)H3}V1=ipChVQd~eux#!U10UAB(Up79$5Qm_-9;?esI?I z#=Zyx>u(zh8y8(2lkdl0*v)d}U z&-Xo#PC4<2kMwsS&jWV7Ixpi-83UUacLX+mm=e}M`YWt_z7~!9p+~NP)u$4}w!;$G z{F$RssgJx|ZGH6dwf|x5tp%|1aV_Qc!$jEmy&iV{cZZE%?SZuiO2Xy?-nAS*+?y!$ zC;gt7@A3KfVD~3vfc1x-knv-cfqmZ{GJfKwu=@uG$jHrWu>P9%u=aU+`a^xYh26_C_aIIa~lXU%rj?$>$snyPmYOKK`tQ zuPkmT@J*(pl{r?(l z{BbF)KQ2)P?vEV0B4ZEbgY{3wV7&VK&|ko=SIr&2=rtp)kN(@2@vdC0Q<3&0F7O<7 zfBhI(J!m%bMfvav_MDhltT$mezprW~+6TFuP{#jQ9oB#F4Lqbo(EJ6KqaW>u-A}O_ zHlOAK9Oyq-fAUjU`}!zsJif2<8#}g4Wu8ZVQYzOIP|Ou=yH?Wa70wVD+~c zU+})P;{w?DMq}9h7*}BJhuyH_@VeKNr+>wA@-}b7o)280I^RS5?}_F3iO0b1w|EHq zJ@aAp0_z*E{{=QL#`~N9b^`YKTVeenS!&R}jO#hpM-OQSYab1S)puvY#-}=2pL~&@ 
zVde7&+k^bQ0yTsCV-M{8d%@~&Q)TikX2RMPL#@w#g5|LKWL?<(p~qm?pNI6H@-0Fw zuE*X#XaDj((P4j2X{G&(oJtHEKWYxEf2D`@=Wk#xp^hu=yODVf+6M@9X;e!u10=ycE`7a9<{WXECgN zSOx3P8wH!!(#CS)^k2jFM|jwEvw_Swo#T3dUtt05tG@a{dBHsG58MC0!M00RSp8}S z?7Gpx`q=ANlt07^8o-Xv>9G4d8o};IYbf)b&0+KTRv^F37a8F7=rLU)b3K0g9k6n# z9IStPFRb4%3T*uKFsz-CJQDTs)0~shV|GO3eK<$&AK3L`ChT~<2s^&R%lI3&z|Mzh z5qLiGc`>YiJ1y*Z83G$W{yY5t-q-!ilf&^otZU01->fr_!t#9l$Bkg+`Fhy%ZG#-| z=&!wC?ZI%c^Su=8cyT^Cuik|T9&bBfpMMgze;31^C-4hwd}tEv`dt%NziI>P|NQVN zNg%#f4c2eE4b~qT1S`+}`j{jTN8joEXMLOqyB@4nUZ7`w2|M1GSJz_8%lqo5zXdy9OZooxC#-*P1Z+HL5p26|f%T6}kXcWzTF!NSVC}tRw72?0H?PO9 zTou;;-ciP1I|4Q@r@hSh-vFCO*;U3r8jt?hpWGW(f2#^BPY1z)eO0jMD-463?-OA2 zBWA*`=YPYluS>n2_j(7bUv7t8M{+RVJfAln?O}ZO5bS<|ny~Mkk@;@E&TyHya(~$T zwxh7?+!vO*KnDAiVtDu_c~a4yRSa)hhKIztUMXt04BaV2R7gBFIYJ?%-6Tv&Vjci4E&qnDJE-}eXXJRJZ#kMG0g zGsK6Lhwos=Q@R;c$>g#b}IaX zJV-2~w;yJF*-nEV^F8SI5n=O1@;u`C_-WEHUbQDVNHuUiXnn@F=W+(Y_UlFFgX=-- z2fRP?HxBH1=v(enPX5mX=8yTcYwvMC;(Pa*Z_2UpcPVE*tPDHuYTV)e_$z*4zPeuh zc$;$ir8{ii%&c3?KmMKo_Pot-u;*{yV*cBXJ8#h5$dk^n>ryz_-`Oc;Iqz5TI*flk z2W(zkv1>4TOFmfpgl6H~8nf9#~;GIn~e%aoJn^{e$c zpEfn@`28Dpf638HF!@C9VCT!Xu=-gJ-rxPtJO75U$2Y_JZ@aS7wuZ3zTa#e*?XO_-$Cka`= z$zlBiuVM4Q!-rh|JN@VWqa(bZ=F3t`(M4C6_E!F*W%XD(QM zbTO>F?+80yR>7`kKfu=847=YV2IJLyq;s(E+tBe$e)LPrdF~*`GjXFRyr1*)SJ?d8 zEHZY~pRjsHY1r}g0oE>PV>$LtR_6GQHRqnvdmE9~yM?c#E z8&B*4yYJ$p*R$VqHmp3o0_*qM4;yEDWPR+7a4lix#Ru!7AGU$DtHUZ!@GBgJ)ek;V zu3i5ER!@5hJ3hL$qJLPwPs8q4c?uhc*$X?KGqpzE;%`}RIp;Gsgq`2MzxMqI*nF7D zu=emW*ynVB)%PF3<_p)QyTTPqzP%i<4meMRj0*AA1+< zx^#%~q5OORJO2{H?&nC+p7ue{{fY6XJ(&wuZ@9{M_57B?u>DyB-gPp}{Jb*zx|TcM zS#MIp%HPAz5B5pEeeu`--dBHZGgx_2-}l3Ryyy(q z)Bn%-UgbeH*nHRDVAsFBf6`v$f3=5=PnU+>caR-+Ja;d#JfhV{>c2+6Nu}^0l557JJ9O$=W+@JM0llAedr-Ajm zq=L-{etQJQU-6dyQ~zrL>reUw8;4tVIC%Vq^LpCt^&#$$KKz;Y#}79cR$obHIq$RW z584C$z6q>k62`uPLc^W^%#+BKPNAN;nNVe=Q-g>a$+JRkpWPy36w z>dyU?WB)vbjknK(&9}==|C(<%4tC#j4cPV=0GsDL820>!PF_zuc|ELM(^5u{xBxp| zn#ttRJcP}AXbihP)auCiC*PtU?0inznf}FJHwm`g*bo!2^LE3YKYbncJlDp&zw>Z) 
z7wVITIR@5m5~(Zik3Mu3)~~-9HlO>Y--|tc71sY-$o64g)ab_h<8S-I_M_ey*mdJe z%h_+V12&%6(Q>~3IqY|Lu^c;fW_P}i{Z!3i^ZQ=H>Ibc0<)-q_4RiJ#;GUcqjrppYp)oXF2S+Xtj^}$m3jncwhYeE%#EN z{+tOLzn!&*_hY<0fvvY}H_u}p73<6M(f2p}&ii0@de#B+>!rB`@ZKgf&=gfs&@9S^+-}mT$ z+P?9B?0KxWf93s=uisGL^W9g%?jP?0JKwv)`coRij)$8YC}%y$Vtw?7$*}8dEZF`@ zXgPTdduea|Z?o1j9*FOJ2WyWvg!MP{l9{i$f8l!c^c1jp)9cq!PCWe){cHZ#6j=Fv z5%zqnJeCvBnhEPK*!nZ?&pb~CYyTfz%l9+RyE2|^pGL6jSvlDDy|V_!50)Gbw2zE` z<~ie2xjk()&qLq61)HCGY}Nl*|7IuH{IAE9tG^ywNjdpC5xAe<*9kVhoEA2oF=hq* zgFP}$Mt|C{ocBkLrT6=gzn6caKKZIY`aRf{Sz-4hCZ;`HA2R_7d-JC;z69U&Jp}$vx z%||;iDR_SV2HXD2C&I*EiVxy_v5#ZH<~#R=EpIk~_D*#w=WSSj$F%X>kMF4c4ex`$ zYU((ihreng?07#smiqV?Vh#@eo;I-hUP;*fkDtcSKH7b-dS`anyz}?4{(__5bAR%M zN)KVYFs^m*mm9y>p%DiYae|9yB-f2#`}>seQgxXdUzXl zKKuy#`+m8G^S z$MUexPX{aSvss_>Q3}Ayf%vfBT~0<1yZ0^agP*7#Y<}lX89!MwuV;T~8Q6S+wy^%! z(j&P)?NSqVJzqP5@qpe{99Eu`fW6-#-p_oT=EHeE?2WHr_2)#ezh_t5@4;?aF^qE7 z<9M)g`6z5Y_AA=U@t=_T#&eg$+T*2Q^InR>f%Eia;(=*l<@b76e_cecXMNiU>yHQn zJ3n_>pZu-A=`Z7VyJ7b)t%hBn{(#MQ=?`nK9zg>?X%f!7#!LI*zVdI?zVB?pMVEs2+lpo05E3o?7XxQ^C zuENIi!dXsy|4*-HeVmT`a$Fs-9DTPQZ24xd$A6O$4nBXJ@5QfsNqJ42v8dNG-ipHJ z=@;>O{6U3bg3%4{BsNdg@@-EB)0$u=1z@?D`YU`sil|(C5sDyF8fo^ZX?BJNoNq zSpBd)`k(S(F|7Ta19lx9XMOxHeb673HzQ&5gMwh=x4kWAKI})|G#=4L#%}EgyFN95 zJEA@Av-rgBQd8epyr4-y7@=hwAgn+blBjun79S_El2Y^QGd##+T#6`m5hyFSy_9 zO+W5O9@$gaJdJZQ{=LdF`u0j#IW}GUgZVnta{O>(I`RJ4^fX!ds-;Hw4bIA+a?-gLjVOQ&u2XU(_?MED-myG_g4%SZXX?^5k zRoM5*2?x%%l8FPvfz_8Ub>aPp14M@P2X}&78rXRCU|9K=4b~3rXgPLAW$Uw^c81MgJOZ1~BLDyVV;r}E zjlX=L+;*!DtAB@QzPi2@gY_>&g_VzSEl1x-0Gkgtzb((>oY3T!6PKC{E5Bviqq`hp zzx%y++E5>PRE+1T-*<#Nre%F%vvg{C!ko>NAepgnaL> zjVQ<8x)D}i7%5{vJ%EkRSAj$OUwMDAA?3({O0=K;xs5XZ<>s*at5?eCIc;I%K@(-} z*B4fQNCSuVhko!B4HzHfaZN>jo9|MoKJ}3=VOY=f$3Lh?edPZ(*E{kJR>Q`j-oVC( zgDgi+t;+gnzGm~flp{w6!;Ys!u<~RAtUvj99q!M0L-SzgPjlFL`U`A4wnlBP$M5R; ztG!ve7L31Tul2D@>eu9X_)WLKK5uUg%84&5gVifrRj2*23!1>%Ylps|oc;A>VaLbI zYFv+hHU;c{jykY%C?c$X{BdRa8+|l)$a-D{cKvtQYJg&t3X-EB2 z%HK7xeuL%ozx_GGa>n_Ou=}f$!|Dl@kl)6Yx>w|W?EeYs5ImkDP_CW67k`_6l2owz 
z#8%jN$4}mmeSXbh<#{UKmw8i8{~Ug;6R>{l8~6jwM>$x9_b0y84tCt-<$JX^R_Jd< zUX=Cwv4>*ge{_5_g!K;`ZAE?bk0!AC|9}=eAAfFD*mz1I*z$g`{*8Ricz^sNJ7M*q zBaLDFw;f>hy(h5oiEUN6AL~ZxFBzZ2vlhdytF#wlB@;#d6 ztTzj-kAJ{?2FKf3SpQPS+O!Y;z+15E^vYU14?n;=ujhP^<2AV-c1t$DAHR6D8hk&$ zcQ_a}K4rd%_GVMqaT60(pPMK%&U01c`K;S}Ve{Z3SLJ#5myW~wAF5R0`x$3{!s-{R zD${~lKE7O$_a$%RJ?wbCS%LRqU3~$2e$I#T!TMV?=7V`Wq3w^%x{Q&IWKXSeL~ z4JpI>;x`V0wNDz$jEn5B_GmR&d6rJb9}opLe?C3z_}p2V_hmgx0(;(nEI6?L8g@Ju zF2(a`hs>7a7wuS*@q-<>z8vNFJ8Qw(w@YO7nToLb`wZCplVY&`q0zAZ^N%HHZ{)-v znf32Fth|c^n@9B>tiS11S;|@e{Cza{=RAka*XRg4U!TJI!<%|N`LdR4r)7kl->+fy z>c_=l^p$YjU;TIjtX&lrwmnkF=%LXprynyG_>z}?+8pi)$ANIL%D7POP7lF|;BEs55KNW`2Go!)g4}4xIxc`&G=6CNX z2qVw)SRXxL4s8C~uiW1})87j4e&lEOhg}!*!QHVNJNkU&`|bSnFLriYnS7}OuyOXP zUXTA_FYLIeAS379|D&8}4f}gPC9IGB-_GlqH&J2pbJJU&b^ld982PxN6y^A{-0x)l zLa^s?a5CcwAWC$MXf3!upTm!1~ih z!0LMmeP8^qU18_F?C&kqhyC7ou>PVlu=6A;tUoCWtbIAk=c{LD<^5=vp0MW;hJ%&Y z4SgT-E$?Q5d9S3f`Cm0)<7F%PKK1pLnPKv`vctCjtW3NQe*2Hbsn2?u9#+171#92` zmY(~uZdQdIe^Fq^b9z`gP%K?=e;h7GIq{zzX&4{GJr=;`SMN>D`?K$}18h5b9-ia> zbJ%#-_>}Y?c6d0;v8PAF#s@MM;r@)j!_?RQNL-lvpQnA|)iw{$W4xY&wNK*a z;(q!eGtz(9pILHpKlU5!gFTP`diLP{oJ)H-|3<<3drsMYo-YKOuN9g0)_;368~4ZV z$Pa7Zca!m-wT9i#etER)!gtd1J?tYr>-Z*)wjv$n_>%?#NrR|QJSQh?{$d+gJ^gWV z-Uqoo3pOv|pv=0l5O)9AEE#*}to8B7G=bee?e7UXP7}iJU)vA+dpojmnzyj)ZwlCW zM0@0|{g}>j=D}OoICyGU|3ydSwc{-xtbC1%eE0o}SfBV}Ygl`sr1cp;saX#kKgDJD zf5HJf5q4hhf%U5${hanh{+)rff6~C_FC}4pQD115jQ8g~dc(@wjIeQ0e?QapG>+xy z9osC&E_sra`=M`Tas5JX-37ZJak5Om%#g`@SOx1(ZvyL2I1D?^a>9=D)~=WA-^>H+ zxApf!)%$b9>g`)#<5M|c{W|+#+c`133p?~NY&;<@?0s^u9_ycalZ5-Ro{of#Kir4a z)1JfXl_xC6pO%gFUj6D2te;^7?0hZ-JKp_$-hf<6OgZ{%de}ULTr%r&EbF7+e@H-m z{K;Wq^H^5H?zf8w`~GVz$ImbU)_?IBRz6ljzcHWarRB_{QLuKwE7)~t4{ZMHQ&|7a zzp(L-6JAgMR!6^bzK*e+e5N$$U*^{c|CE}Mlyba57uWsq=!TID>@!O#^L#_FIUk|?dKS< z{`FK98Gpp9-o>Vz{%Ht%e%dtH{H1SUg6}Elj!LH|j!hUa~&$u7{nMbhx*7q^F9)Dn*O2PBBFs%Ra3igucAH9vi_Yjv} z0oxxJVcT;o?D@o_Ehm3Dh4snLtOq;(?(w~zHsPRL{6biL^Gg~3_bOO>YEv}oV;?+3e|LRs2df{?fvukdcKmjQ{oV^vc|QKX 
z(y(%Cj*L8t59^O_3ad|_p#R)wln6Gzrwr_TKOcqXVL!QFQTx3LtUvZ6<4OA@25kK8 zHtcvf5t;WvPd^SDf1f4OAO4=Q@r@R+>(MmW_3T(A?$5l)@Aa&EC1B&|TN%%`&!>o7 z&wGxCea~%hKp%#U$4-LHx61;%o(_kNN5qB$`QcvAI`NkI;=D_4Ir`L2IFQ#Bf%~zp zY=iy1=<~4gz#rkC@e|CG@ss>uIr(}6tWVzCI5;q0!*hS?je*S@8!wYjHXSyfp|VVz zeG(klj~0&h$6quH)_*b=R{l=#dg6e!Wv-hHyMJkKSiTQ^?-$tpUiDztyNG@t{cbsi{c!F#*G}$Yee|X5u=!YRET`T$IvzHD zzk>DI^AYF3?mwIdo6j{{re7T~M* zU186; z{GRNv?H>bnzwSHjE&NLl-f(~NrpCkSd+%ZQXT^Zk7q7|eb9;pTYWY=I|6vVS`}8fW z{=NWxSbZn~_jkMwM_)F+QXF>tS3uu(zeMVg=f_e1#xIcwc0Ep#iTdO-M1}o5wPqQ) zKl|>Ncz@#CZLt^hPj!d2uYOO5JR%RJ5xj8p&nNWXb<42w^u6@|yHHf@(+4u{%H%9m1Hub%laCGrq^V*;!{Bogd;UfcJ_UpzE9*E8NT!k%wg zA2uJd8|?XWk3Q#mj|OfE{0zp1{bZ&$yrZLWxH_k9n{pG2_Smg#t;$ot?ws(z39Omat9+ODL2v5=yFYr@9qQv(3J<%kjlE5M z{M$kCX%E)p-mvE-WP)8EQr&{_tIm(hcx4?}awC}E#%4UQjy?N__9PDUCMNyQdbac` z-xqQ|qxr|fuE4~za!2QS{JJGB1($ywh3B*0hW(rVL_ca7iSdFzW%&i#oB8og1j_NF z<(Kj6Tn*3nli!=-9N$C!Y5%Z{fAZhIK1(_Mwml5rN4{+IBb1}pT<7*~|CPj|2R@CG6S0dnhNbDwmAC(qjkT z&v}kv`28p6``T?hkNDs%eqT!e!QAI2Y*$4KD z-^WsaOSge??EX3aUKeqc?mu%s{LdpOSAI8H6Ffen^7~o(GuN(wk#qjun0nE!AGsfS zQOEgxHTCBmOSv9BEr{P^bA1{OyMC60)knI)>chEZ;=y0Q&Wnt&@wZGee(-d#@wJGs ze!*g}{-h5}VD|Ucl+mB=!#=kq?D&}hn=kk+?D|r{`kaR`3f2$!;s=;>F4n;2jckXV z_ZwjQuN&+*xg?Wcks0=UuLS=7AMz)>*W+)_1S@ZjFXnpuA3?DGv!Sr@i;C7qzwZL; zKdJ(&?>2+=ch!L1=T#HdA2Acw|Ch&d^1ByVAHQ^buV=n4fNih$i}+sl|4x?qo>{Q+ zYNXd=SA8S1Uw;N{ytxRhJp2RJ-`8^C|JZm+Vc2oBnsVn)TG;sDLfAO=#|1F!<6PK& zIsm(leh=%9iwnCRZ-&h;c{TrkZ2s0wIQ0IG&(m<|{q&xiI?ZL7DeFDC7TM0oxzz zVDkf7!RC{Wh4rIXfb~OWmf45W33eW>n#21duLoF;-X0#-fA8-P>K_Orldmxm)*o?y zHupzvZGqLF&%o}F+UWJf3HQP3&vRh=YkebL|A?CBCNgC7WO^vTaLZ*Ijmpj39LT8ZYK2^ z7h!3C=l>K~zyCAqV|R6j&5Qff`aHKGtp9BcY`$YH*!cZOSo^Kme^`Aaqs%(Kh;s9q z6a0tO&*IDMpXo=r_IpGb{kNm_v4>C1;CbXHw18cQXT!?V1h9F{Q(@yzVPW-|v9P}v z^OWC@G(OoC_WSO_+N%{{$LDL<{M{^;)8CI__1$o=`AUCSpK*0;I`y%i9>KXd*JHIz zzN`0leD{D|pZCJ<$4L*{Uo$Nyu6}tM%z4*CVb}c~uvmb0|4;mfjc+W19gpXy@V(eE z-C^a`M%ebL25Xlrw4C-xDWkWIh1J_)TAw&e7uYy$GFbbzvdnq)31H*zrLE6A$ql=% zWPx3e^1zU(%X8!d2 
z538?LhqXsec|G&6_jSXN8X>2(RVJ%#8=M2+NZl<&zJlQc0K$B)*p2Y_WfI09`e5Wl?ub2 zU(~_-5#LE;eb%pRu;VcStb9)o2j;ieBM+0nj)P0%VDy-ju=e^XIP`kw#X8vYvFcE+ ze>=NO|5fpN;+^NmG9Iuas>6==wXpVFb69_H9oYDEBkSXDD+lYJX$YIon-_NfRa01b z8_xQybDd!CH*^f_en+oI4m6TO?x)|l8ti&hnR5LX(JP&cD(!^WNVN);^j3UGR7akNo!cL^8tW4dhT?z%I!I8;7e2t8ZqPdA~NW z^C>Uv{)zsu_E0g)iEECv9Dm6wSovFw-|saa>nN<9+Y8oje%ku@<43{To3i@Cr2o`6 z-`d{;cD^5?T=}&c)?d@o`{9R5&hPgcPi#4g`ynU4MSs@6oF3L*xdW>QEdQ4J_~{O# zj~kB}4LcqZppUChMuycx`l63(uP+%%ed5)b(C?MsHDTrBYFNGD)Nrn6-o)qkWIg|4 zGi*FCFRcE#(sKMB#bEsj-&#(7Vr^J|Xoq2Z4|>-w*!A`KP{s%Q((l0LFYXx3^|VuT z>=FH2t-qmtm?uxXALF&)Aeess8+Lx(8o>LZkL-ZWpXvk~k5~lj$K2JQ`s6e8fc3BZ z0&9%sT1_Tk2U)MuZl z-=jaIFKl~d<9n2IJz&RIG3_nGPu>Up_#v!6Y;bp&xMd!G&(`(un{M0>KUyN} zSMB#VT`9+JIZCE~Pr%Cem9XcWZL*yEZj>3X3p&%E_^D@O59_~}(JA=*Vu!~+_f#4TXt#AaB3+wNx6r(M2`9PICE)0FnbAN*1#FLc9~+#k8N2m9aG%ioyy z#}0lE>pwo%5av9aB2n=N(5}rJ(4OR1_k!(*lJ&Vi<9aTv+;~=(@4?PK2b+&Nt2Wo8 z4;GHb^N6GTQH%GZof^aL?=Mu7_hJ4`gOy*)YH)wzg{xrKlWx^1$L_jneeN@}8u#OT zf;{*y?5_q@xgYavBCNk`dll+)-r{c9adEj4--jOBB1W)3i&BDe#}J?0l=K|BUNqz{cbL#2==AV)~PT>t;o{9{KI>`>7w-D$ModRlMZ>%A4e{exc5=alqdT@qOqi zFL*!4aUEE_Z-$JV?op8YqnF;|d(7uJ8U$m9ZqQ%Hd}*E!My{NN-KTIkH}6Nk`uk(% z_ukJ%{}KNv1-qWi%*prT2kQsxFB+Jg=dqr2)8B_&Tbh;k#h-qd_IAEahTZ=+lKwEh z^e_weBR@V1?0$~bGVdK5)(6U)(yZ^N!vMPc>jQ?Tuk9@cK(W&abG zjR~8tvO;Dd`ZqJ*gIzNgHa>g?*8Xn^t1lOVU9VrogVDPVW}-g+x4`cUBCmeT$o=uN z{=s+)obR83_d^e90IMHYfE|BR7@ztxwx{EM*z->q&-z0Tq~-lte-kp^wafd!=35SB zerWHXOatTh%nz#%=1#@^$#?#Y`R6=3n}X}nm&P)G^+&Et4x@ieX8t=qc6?5K@-?;~ zUtHg>B;$R_`-m))ulQF|?$0`&8Tn=ZT}@1V&b#>sd8wSMl8E;uFFqNpUt?tg?$5a% zsgbYl|B4o$a`Up0zvk~9jY~QC79Wt;-oNT++>idcihNgpe;0%2BYzhw@3Bv&M5i3N z*9msLNEnUwB5%m`A^3bg>Z1>Jg0&yUMB)DQcV^cQ=GFYj^cUyf-ef(}|1&5O*E8%G4Z zllBSa^u}!f0(}o9Jk3T97?^{DZ3#>n+5Uk#w57s`f3Ol~T zTFyLZ?DsLw|GEzo2Wk!beWPIQvaViF{Gl3fCT>sv;U_QSWh17n|umC^SL+@k%kANTS-o=<%G8sCdP*9rD~ldb=7 zf9#D~u=!TEuEN;EmEgerVf_RZVb8y5_#alTUARJhzEAzl^BkAJ+KpXc=W{p9L;6#F zuN3V5;RTi>w===6XFtK(v$3sD`>uwSr_V0)y_^^IGwlAdi?I1&Yhd$=_QLAH%VEzu 
zTP`zyyub3QhxOS{ISMwuPy$w8>JA&PN^g0HzUg{$^Ah75eX9`VzE?fiIN!MteKvuN z-S>-3eBjF8!Si7rY#!^+GJ4W@%h6v7!shdIfz`7wT;%)t*9i8!nclGaWfj={i&Zc1 z{lsr0!{&8$KhOP$C*DKV%0FY|mB9j6?BN=jJy7!7vZe1rXFd~xeB-Vb{{2W*~mI#_*j z3HF}yao16p_NxoK4poDVCq*?rL%ihJ5w1s090-ZuY=Mn0tbz4+%!T!LPlff*x3fOu zq#A5KaXMK2KOSuSJBiox+$gYmS6F%59oC=y9jyLz^APRH zIL;5dj^u{bYd>NCy1u7?wMUXzt{(hH@b~?}^UUwuu%G(&53GK=VK484T)7CF4>1c? z&pHX~U+xQsK@Qx6)z>fVftg?3XrI7&OuMd?VxOr$ zwdH$^hZVv;)BfFifcqh*60kn0Kfi>{7h4CL|B>q;??XFWV|~+K-4WJb@fYi%=R@6t z)r$(TK09BU(tft%6xMUso4c_3Petq<*V&A=H{-6b`T_QCE?B#(32c0Al;s86Mc9iz zp*$}N^I3u4hthuR%>DGAZ9;!BZ>t1sTwxIUj{do=ypQ8=0s6e_#X#8eD)*o-dHze_ z`|#VoL;q4AOwISYel1i#M<3o~f0K_r27S}@I67?HBqjQ>{{Duvr}Hd`__=<%e{65$ zVEWX&FL8{A^tXP$RK)j{XHnrmK13$ompqjEGV=esEa=DNYwm*eZ(qyC{qdjW=6?R3 zN_X-foR2+VpDM!r$hTPm>)(u5jQ1tpaMAB&JVYh`#q-RQ(mwjrx{x0eI8PB)4z()D z_oL60hRugC-${M1pyk*TrAr0tua~)>_NMt*#%sdE?w6`gewz7|iEU5%xoKsXaan=y zH9zY2YCMm2c~AQ|zH(RR`;f~`VEg@2P1=j`{MzwPe%6-Sj6WJEA*>voP@n73lYU^n zXs6X{$opc?2g&3Sw)-u<|<+tQ!pLG*+5^L1`V`}3Z4x5MP` zKkYy{{>ZjF_#XUBJ37PI&D~)A@_)kqURV#<>(0aKKkv9+d%^v(j;qPA`^&C&r#}9y z0y6FM*H?^Z;^OOhzW$~hJ$XKIaNlO$m*3av(L1>RKW(HwdisE`xt=_V7{5}EJ-DI| z?MXbi=z98#d3?Ap)`s=c-pZEhnu0Fmw>lNB^2Nm-_gRn}0)l;5R!r2SzU$I+*uCKkN&;-zDx4 z-XH&1+S$AhawOU??vMZE%}m~x=WHL&^J%AIGkCxMW9q!)eX9Qle#uCPkUg$V*1c9% zC|d|c$Q~D&Wh*m#Z`W4#>}IdH_PksqDs3S(cjoJl+9z`mHk z^Kc%v#cbXWyJhUynE5L`m-i>`a(y(6JzRYr_s5?}Fp~b{n^G*Go%)eJgZVw;gXI^o zUZ}5MFo^!xttVm6^Hm!ND7NGkpQ``^%TctpCw{c^~va-R1Php1#lshKlhdC5$gjqmd0+I- zoYvfrdWe17W5#P5Y<>Oju=es;*t*g*J6O-ifvA?;AG=`hPWn^#{a5pt_c^^4>q12Zh!ok!A+S@{N}a$*&o;^oniA2 zYhdmEX0ZOnYFK?=tqJ!dFaFwLo(FsAW@Gx34_z)Jmvc9uop^0lV(ulwdW z_h(@H)#ZNp3!|eMAN1SZ+B`3Ew&f|V$F5#fi|4cM>oo6&{`tHn<3}9o?;m3H#l#w1 zj~_Jq4A)~1m#)rup;w0g$bP|In^lGW*nb_*@%+RkQdQ#l$WNEQzl_Z@)x;;$Hd6z>L%Ut1$lJ+ofX0BgHk=19o319I*dj>#=gc>W8eb@tw@D zdE}h1aj*3DC(c+A*52FyHt)xMOW&YB@wObW`RkUy^Zx9sc_sNh^AUgYyvXTdGI6)T zcetK$UR;9r#ZN7HpZ8^*w1kz9Z~o2m5wE>poc_d5etg9Jh#y4B_{+Nfw&k0*ElI8{P 
zi(fe?l>T9l+rAesI8ZN9FeF_d?z1p{aKIkBQ#f7VJ)XQ09H_6jT!`zDGwBl2AA7Jf zjGW9c3DzI`;H`9lxXI721_$<4o&5A~u;A#^B*B68b~`WQ-y!Hkx1?N;9knYD_wSJ7 z*xF>lfqIw|xws$l?#S!G0sHD$PVQGfZ_v#YJP&sLK{(`6uCTkX@xO)iSI*>0$?vlt zZo>DT{SaObR?Za5&3JA7uYGIS{_Eh7ecv5l1Y2K~l;6jHne!{`ddmSD7y1`=9ruQb z7tbmB224HN9+-OF&`)6F59jRPY4*&vu>1cmtlaMcM=;MVcwgfn(_rH}(_!+vM^?e= zuPt!*jc?A0g!OYT!q$VGf{i=Q%t!wpi=U{FDmV}yISw0VZw&i8i5TBWj}pD;0te!6 zu=}B(9EcxsfBb=OVD)?k=Epe7GFbbjJnVR`gpJqNgpKp8wmzj%d zB(^LqHgslfD3wO`aIs7=Rzup+84t(ZMu4#J$@9fn{ZLlnQ-7dCmdb;Zg@ECx=&jqX1wRZ z)~B_Atz%mV>rW1ak>BCpz>|=>S=mpH(=s^l_i|zFtnXm$oXNIhuWf+U&nw}Ob{XPt zwH-SIPz$A2@X{|@$V(f#c zW&W`*>ciTP@4%ePdo~!hUaB*!o>~bTFYXCPwEU>(df0sDVpzR;9uAyeMxIbdGB;&f z+R3AIgq1TjV82&C8TQ;#7uy++<+hVI7zHbb*TUA(&WXdsAxg}LjVFFfdq}zOi|&VA zfBRtae@D*2)?b`~-KT%RuB+Ry`sh#Cd}v1HC*%Ae4(liW2WwwurQJG&MCo{c)=6$S zB6W1zjIiTZ04DF>HW%!Bm4MxM`C#+rRbcB^irAkz-l1_=e|2;m*3Os!D}O@hubnUp z)~+fFn}1mer~KgC;w9n0{(=mfOF7n&Sgu(2P;P= z!phGdVDt4e?2r9^8CG7bus`$J>#pYnR*SO3DwgT&}d{lV-Rm~ZU+im-O_yRiOI zTiAM)4`KJ&K>L$tZ3Y{+++sU&yPNHtBTt2X)=n4&2jZr%d5rO}{^Y0XbLMR_Y`l94 zY#shoSpTd5`aimColx0&=$`{>ymSby;xOg{5SIBcFURcYFj#Xsu( zt%Iut2cPQ3XW*DG{LJyN^CJN9H<*!xAnzF#L9J$Vgwe|3Y=J12gDtrr{tD_;|5 z(571m!({toYhJy00d-pUMnpSNMxOAZ*jt<5{I`=$u&JlC^-Ab-mHASXJ&?vswR zr#!eVyeq7|K1^nR_k@l2%<+2kT)}Buds~-|&ra$_$3TzxU2dsST0EdizHz?G0>IEly{isvVE5V+J-vB$#^Io9c@Q`W`YBC1~75|_*HC2o@{{i0~*8HciUm-qXn!yJOg8A zo)`h^AK&(R{KxsQ@$(|@@q7o~pLqy&|JH*iow^)!9X4J+3P#+8CeFt1k#AiBlRuf0 z0oHHX3=dATwtY@G=6f4FFZrw*u=NJD@u&E{LHqWw^8E`~d!a9^owpLcdOusy(Xe$f z*(%4_8!KS#uhKI5^IO<;xE0oZ+ztEhw_)ot552s;HvU}7O!J?gg0+vX!1}G1VB@OU zsxn{v`HWv+{q34C`Q+n&!1_0RVeOmSu-^-K3`eItIQtRoc(<;`^N!0G6)%|I$3B_{ zYp*1M)yu14?YGxq;|C95<#{mdc$UQfM6a~T2YX(y3+#LrhTTtB;OJ)WcPI)Q-wVZm z4M{(}cxl-B{0Q2Odwd4F9{R)1`xIEYTmx4Bt%ChNQy#eDv`1mlu<`vj;ecNTD{m9Q z#(AE=>hs@tUhV#Z*}0y&xQnp*?;TkC^GA5nz<+1gg^e$tm02&1VC}a@uyS^&*JE!b z@Og+AOoEm7N#WqmeP_>u-8YHhd*@frz6yID$LBF_d(Z3n{m0zTI_x4jm{0C=2exj$ zK^!)2Gz`|BxlX(KbtiH5ar7r@rjPJ$h`MX>AbGuXP-FCt;%H?sEbVVQF*4dCuW614jhw%)OV++lK5 
z>Rd7V>23S>I9oI)Z2qY*tbTqA)~>qGdh~nnO=0glAEr(^{wP?xqb#g_7Vh=vr{ZwS zl`V_UhFzz>u^)mKO_@FS<@Md=hOb90g#+KOgZ)0pM%a3@Y_R8YqG99pX<+LJZojk`$8JP*6+V(|07?wI|iE%s0TC8@qdT)Ut8Ob-IXv8^GhCRAng7u z2)ho>!qn4-E`yEpUzU+42Vnh}i!$=>&Ak8X?>ZeGHhM&W%P}4kMVFIJ_oy= z7sA$c6@;nJf4&Abp8QaK`BwT8k+A!6r}~xeW}lW{>SKTZG3%Z57wYy(n4;bH&^>21)KkR)kss(Jmz8|c8*9G=`d3Wp;*Y(#jzn2(}Saa^h8rc2T z9sA4Y-flZ~?I!F)>%7ju&PQGCMdD`HVE6w+?9qs1pB;PT_2e1X!qkh0zFwH|BF?`b z``hR51N%LV`LNGD*>>#0QLyp9#jyFgKCt7v71sZ34}0BlSpC>cM*d!d-Nzrp%Iin4 z@umiFaJIjXB`U)F5La&q8<%_?cE2=+eZTau_D~q?K6?u`elc4{Pc^gM?{CS(V;92i zpFF-V&+`i$s7ry#UmPhP0^@&mwViljBRJOnSH5-XSU>yPn7fo_xUb2PeN8bsct} zUx6L>-@Kl<^Lp5Iaohgn0UXbk-|aefAJ(2v2J5ff z&n^KQPah3ypJj&4hqZ&rKetT|TeqJb)*iUY^Jxzp#lMcGe*QSD+;|t(f7}Oazcn!4 zfq%6N){gy%_=|NSN8!NtsbJSf6zur@W&Eed#uCxCzl`t3_P3s(9PRq6UT^+qG4Z9? z`+I)bc$I$9Mz6=7UPU}hJ+KD0{-D3{upQTnPq*FpDy&`E8+P6b!p0+e$mGMa$}_tb z{S0^P5rwfAbn=0&r@%DXZ$_R41Bf9~_bu=1?C@j?8mqOkg)Agug* z54OJPxba2ex6N%w-?f4D8wS2?A4a^=c(d>4d5U7N@jAy>P6unhIR5Ij)N$B#KbQEb za(IvTLq49%$9h4IZ<5K6uYlD%?g!(m8DagP88Y^0^1O^Ed9JRo@x?oNcwW}or?7R0 z+ho?6`%%3$8jkM0^F>Ekd!#xXGHGGB~ z{%8r<_+%;Ada~@W_SXQ|zaLltcKytNou?4k@mc_@Z{D##e!+T~zaL-McH(2Z;XwQu zcHED{e&=xjoO0vRSr=gA@$d1x){T@Y#_v&AGzwN8bcBsxhQr$bLtvkO0j#|{KMqe? zF*I}^tehK4yY_ps;@l7U_9bkcY(^QmUIDJVC2PByu=dy;pNDvSAK1FES7G~Ihs}3Q z4Q70>=QEUG{K;#UfVDTPz{cnIW#j$FZ9UrDcKm@luzKSY*tqDptUM3>t+YAXs^RArtTKxk}i-$6Fch-tAty zxv=`}Vg}v^IdD|Q|Ct9HcRlO%#1~q?%Jn<2{@<$f%olNyDkZre^>7Vg;~8CGe>V~A z`kx6K-(HuF_r*>*1DlT@0UJkr0$V>)(RS>*yl*3K(Q`@Rl>JK{tpi&J(>yJ|kA1x! 
z)(#AYgPL_|cM5iW{qQF3#2p{P?xzU2`;MN)5|?5;$WqZg~dzHb&- zJFk`P*u#IPfssR>*-l(wmCXN#!TJ*)!up%@VeQ;qsd--Fw})W!fMGE8I&B}o#>?x# z+K=%|$INdC?0(B2^L)u+<@39+a_?HInDx;cR(@`P)wdgA_x%Lh8Lw-w@wb++ainZz zcwXWS@5q=ZZDIZ04DeO*L*cOXEb(B+XAf*#_0AitSNw-xVCDA@uzDa}S;hl7zYI1m zG6;6QrpVY6$8E?O!5lPJ(hV&sPoBE=~$-50r(eH;Eygm%0{!kf~}|74(qRsu$}py z3oGA8!LI8eu<@+Xu=ZXpnSUP+dpcnL3t3$r%3<&1d(L(OVzG<}W{mtq(~G zyZ*bv#*2PV%I`gG8r2t89xjDB9~M3xwqC3s>^!fvKl{A{?6^h3#=Wb<%B4H7_GNL{ z^_sIh&riKhdN^q1Uqu_h>XpA=<9^itPKC9X_Q2+Uw!_9Nhr(A2O$z$a>p5rf4y=Fm zGi<(pW)hy4c-ABPQ$N!TRvsj*!1ExNLSW+~A+Yv)b{T)DA8dV323Yz0dPVvZFUSb% zHx+=Dr}<>|S1a3J-tTIK#V>~2&i*S$yYIUKwhpWhJox-Ov)({@CiX%vA)w*X1u5aI|A3eRb*TGJvI{7{~Hb)*IEYOgxPHdC-3tAJ@T0u=&fMVf~Q4uytSCVC|$?u<`GK zu-^gR2)mxbVB=bmuy#Qy*!}(ktbLnE#;?BtYkwR_7$YCSiK*xwrmo4-k2 z114TI4A#HS0&5@iw;j7OKWu!u18m%^0&E<##>@U~?9Vx;Fj#we80j}ofzGpS&OMNj0w!Wp6Onhbntlc;rRv&)@NAi8d zO|W&X-+MiJ@CVqs+DowV@D^-dAaP9?`ThuYzvPB}o+K~VYi(FNCOfQu+e5~mt_fFs zdi&@SSbJjxto@(xebxi@o|9qMXE#8;9%M{4J=-IzMOAVXn7zrD%%nUmZ z2Vv^j;^l$$ckaN}r5A(EA2vsy)h+xYUI+UVe;y0F|GLBK%jq(9X&2afkR`DB?T)bV zg*CAI{}b4_$#=FBU#$lFeSk<<`CSe+{(TeH?kNdtr&L0J8!spWTSq-oeLttciz=}7 zC^KO5+_horqPN1@pUq+IqwTQy+m^8Q-#%Esz7?#zxB^>W)fU$NdJ3CgXa<`fua3Q; zT~z~CKF@~D2bF~@QjdEGHXimi?C-yV{o{NWf<2dzSNjP2Iv=dwX$>o1bHT>l}rR z7kv*~FK`RiFN~ClJ0`^5Y}xFmusyK;d`Z~%H~>fEU)O`9hx732Jja_2N5V?bjr*{@fzC zd!g(vGQ#?o3t|1G%&_^4`7m~INGR;O4gCIFD~i>DW1mMm>H}DL6lH(%eNAoW_s)5L z?6p>~{_qW0d#0_7-;f=9-gVkj#$GA}n_n9YyUvTl#z#lOj%P7AV%M$b%WcQL&ToIe zD+inJ%Vqy;ZH^y-{X0yy4?dWG_Bq(`ZexGqQz_~&pVXNyg6l4QEvx{n|G3BNYu;>I z2KIgS!s^9xu;)b%!|w0O@WLr&;zh#B&#z$P58uPa3n#&@rxS2+=&Q$e*iOFlD9k>H ze->8$9D&U{UV)W+2VmpT|H0NX?}yEYWcnaxJ@13HU$ero*LQC?;CLR``8-K`#UJ-a zg}~0wE!g*a57wUj1b@zWV`JF3$S4_k)>Xz%nFeb=_KU;%3$x;|@sfG4^W1^{A$Lo) zI}a-#+QN?471(^T*5#?{4_BuCq$8;}$McpVA z&z%Enk43`PAMJ&$<6aNDpDx43RVI6Vg5sh7*`M!;*Mxnq;Ciriys&z`IPCcS*cU=ZIu0~ulWM@c^AUU*p56&2v@B3`K%4F^SP4o zzZX&W=mA(gQQ!F>Zjq=y`G2r=IJIcEp7oWFXva=yZGY;!gMe0IS2!cWqdIdK?^_ zXh?}V_Qx(-3VWVc_WOY+VB@b1Ve3m@W&OK8yV*`2tsJZ$`X#KNIv6%j(+_rEt$-ch 
zUa<8Cn_=xb*?Pr&_9wsJ8`e*_3G2uAg|#Dsh<|mzIydyIIBfmq6xj8Vo_6!83*o@u z8-QJ3%VG6fT3GvNHEf~{^VbJ z!ILVDJ@y}LzM!xDsSAzQfc-%|&>-0S^y{$x_E%mXUaLe7IQDw&`~0x$zZvcNTP0!R zBROE>#SLx8pGoHR$lcbk_Rw?Y!*$gI_Iq@%)9yGAg{_}U<^8D79Uh0Z1HxhL>?E{X zpRx?ruS)>y*F?bTE8~OerEReBp>%M-?|}8Us=?M*-GE*1Rbb`WbC`8B{XP`o4eNi7_j>9(;62~ z+xOtXoQD|Z{mI8ygSBtJfUh2ZT&x!CdFsIY;qSZ;Yo~omyLDvtkA2@sm!`I_3cHWr zp}p>cRZ%rz>qjctA9>gW_WJ^rVdLFR;n?vpo;l3xk)I{Kp7`K2SpU2Lto^tEw$AV^ zSUYnotUvi49NBhd(0N!tQ8u1b@MFf0^OUV%{gukFj;HyNZuXBIZ~V}q zu<@9dUXL7|YJcL{t!a`P8_%?96SET z&xhKdyum`+Tkbu5bPBA!I~rDwEQhZWcOPRrcF#IEc0SG9?v|-z`iyq<`BB)vkLh~% zyIsd@C;yY%`%y1(3XYX0>XTn!;~`mTSMNNuKkK3htUs5uG3_tcmv&=8*zW|@us`wT zcj4ImU_PM=?6~Hq-F4FnHr`;q!uV}BICekjU-p6j|M@w;FxczacT?%_e2swBuiwDV z+XOgvywuxEVB^8dXy?3g+pVzqtL^??wV!97wEgA$>2Li68>hNRyZh`4>^}NU=6hDx zVC%*DsBfq**2Dc4qu=lfdkkPS9YbOKiX zhr;Tk3$XrtNm%>z60E*7pB2mcV{Kvg=ULj_*XF;B|DN%F*44rMJ)z^?Uw`2d?0SfT zJ@@()*55j2J9c(f@@?81M`80Zm0;rn1;} zoNf)f4hzEO*H*}FjyJfJpS>T4wTGX>);-v6y+@``_6L()Py=z6=}hDi0gKyKX!Fc|}-x`U`A)t0o*fKKda| zVC{uHUXQ=t6pnpA^-~*Id&c)sUbTgdKb?`;_hGQ-VIIY~-n_B-b>)ZmSAM01)whjc z<3u@J57?Ws`s*#&czk16eO=7!iF^4x)-S#btEald%BS~W{kG3x<69MB<0HLb{pQjz z-*@jY0=6Ex60H3@&g);+1H`iOqj{Wm&(XGqwG)QY-}rYY*!aU|uy*cXSi7_nZ2jpN z*!|Z5)}NRmlh3hT`*{u=_#Q$WcD}p7em`#>?Vg|T_tcjWu=333*T3HnJHNwVzh4^- z8^4jYZ_85u!1wE)cZQYw@4~Uqr@X8M$BviwSWVmU_j}qO`CkfFKM#Sm!z;tqr;UQG z_i7KD{~jHev4^L^#&gDzd*lcli08qc_qZyPFMbNEzwW^L52-UUU*tvpft3fjVeR6eX1ot}>)Wt)eFE70 zW-HjdX?oatpINZ_DHj~5?}Dw@DF7?4qisjt6|z6)t^bDgPeWn-|2)*I7>{lNtJmhi z#tp+^>vvbc+AoV>S?^bJ#4;lIILf}0@fe@RYs2o3Z|o`)=9eGh(XKUn+l zBCNgf53Jw)3H3qR!7VY0{a)`9SbJbGY<>F)>xq6pe{6&8#MRSMZ)Du+1nhmAQ=g^nmr7SJ{3c zQ|2%2kG--THvc*Vc3*x8t2gGt`rCc%Puy)DtUs2`{@7C+?fXXi+yncR&H+L{jF<0V1MF$OJ(Gb_p?sH`ZVQE&NhrUc6$~X zc~=BhF8|GV8V@WD>rXqrKHs~r=O>!O%A?A#{z~RJ?E58&!_Lon=F|L3J^H&|_QUGu zwy^T)6WH@zyKN^gTN>8Rz6F~fNC|6CWNI7p{0~__A;ja$!pfg0*!as7SiL59Px4Pt zB&?l&9@gKl){f^#ZeM`SL-m2RlWn)oVJd9B<5`%xl_R@g>xmD+=Fg7XpM3r_Si3$b zjQ1yw+!}V@WP;5LENA`KZ9F1;5Ny52NZ2^zWY~S*MkemG30A)~hLtC~VeQlUUQa#U 
zA=vY1g<$trw)VWQehO@!G6Xh`Qvxd||^%Hu7t^|*s!&&OVdBlDLjI^F)%lkb4lceiXeKL}gj8rFgFK;KV=jYs!_ zjd#z3jq8npU4I8(ax|=bPUCo@&&zb=_lXykhh1-@WX5Nt%=wTNu=dLY*nGuNSb03d z>xuv0g#-CO*!)A{PkA2ddJ3_A%s=FS)z_(5pXOg$!|LZy*!?*I)<3QZ8!!6CcE*1W z?D@x3op^ud@uBU+0mjJ4sgvv<^T1z%nj`I1$z=YQ(L zvGU1t1KVKj)nY#K0u=&KlVDs*mVb^C?u6I7G zcIAGYV?GFLUyg-+-WRa@d=VUo$0L9A_x^)je?iJC?1zfocptvU8{zX1-#Y;-$E_!2 zoabim&iY2KG=v@Bj5qy>U%rD?*{L&lxkd`jFWX1>S4#-|Iy#vvEL&QDQTJ-Y?= zeYe5t(fzRB137Ac@)6haz~ss2_l@cQE3CX-18eX74(oqjhSdkZz{>NI{pe5q&kcBR zsUO?bhP8K+1iQW?VEw@q{ds@n-hEho)CblME0d4s zDLyz|B4P@wYONx0sKDx%r1YQ zc~1v>&TTX7_rkKm?jN7W^9}F8?yotp@z$C4M^5&$9sLpxYj@Q3dg|zB!_IF#*mXA_ z_IEnM`scS{?a!XD@y#>?VdQ&P*gRWq8UOxm`?Jrw!P-+5VEv_`u=R?qVe>D0Y{#w{ z1uI|T4dVCN?}K6Evk77C;byS@L0VXQu?B3tO)#v!SjGHlcZ9%>hxPc%pN6pVXgsX{ zvKcl%>Gy2(mw&MRWj%jTk})sN!PbZNrCm9lYB1wXe0vOR{4p=AoV^Mwe|o{5f6eLh z;}2hljnBBA)Yo-~{I9?D0gYhSQyJd}{c{U;zkdX~UNQ}ZssFAIYsY;98&7dQwBI(t z`Z>;*aiXiR{<`hvaUQ|e`Syi9-&^1-#*gn!o`GHO(_!mZgIHh2(@)t>-0BUud#2<; zH(~9)j<#d}7Z}F+V84!WJ#s#zEUZ1c5_TWfgw=1ZXZ2ur*m~^Z`MI8PzG#2qs=wLJ z`g#IaBtLz_>yM^v{}0SQD)KJ<)i()-$E>HdGUwOdfXVB%j)sj#r-fbTb>8Cs$j47% z>qGv6)t4t>?X9u}cwgjsyb(M>cal}bXYx8)BcQahLOx4epmWJ{2p<*kuv^jb=Y+| z2@d!_uy*`v*t*D4MVMdY-k?#85Bc+@A-oUv^>EmD$6DBW$V;&Gw!2{M`d{GK^F;jp zxuXR}GhWoWo(kpt@VDx~#?P}B<@c!{>jWETnhR^^PJ}&=`dTrVdYC0%&%FKzt4~hD z#t{=1=Y5D{p0PiAsWz;*w@_l|Kn#^DwE$#jM|^J`eSR+hFsIm3V&l{S8?CksS8DACIRU z|0@A(9{+<0T#vl{hw)I(N5DS6{e$`b$4{{518TtT@1hgwkKQ>?fBl8yaP0W1pXyAC z+0V6THy_m3AU08p5gzfk@8DaO^pRo4L3)bV{;Z2IQoK8FM)sJ@1NB#*LM|VA%AIv=?MxOp| zJL{ztY#uwIGn4V+`vI@X#7o|VtzS#-crou?VC~Oju={fk?D^~i+|T-p*TNZJ zzQ;BJHgEDi?Ee1>R<5>yt#27DBd@x|VXvP72lTi1qYmI0tX(`E4xE#?S7*mrKkVm9aNzF& zI3CFRR&eb4v~H;f?07GuU4LwDoa^<+7Qxo5C-!>2XK@b%09I};h0S-TpUZsV4-8Wt6OXF|`&@aI*X)NHFz?m+ocf^m2hZP! 
z{odqk*ym^h>qqrL{}_L~`||pE=r7}jJ?F8$+1Gtx;|C*P^?4Yq{V)#Je}91fHO{;i zw*EAY`k6SvSvZid!d_7C-uHU)6$7*%h!2*ZPdj$_RM>pbHCQ>Y(*Dd-?*%dMw@>>S zdw0Y_`eO%u1M3fMf<3qRmG(1v-J7s-vJY(hyzp%Sp2lO>;ydd3To)10y z3atOr8MeN12mXup!8w`ry$Lq|{wr+#q5h@y1b@TUvn+%4C(|zBeeh?dcs=p@0kHPV z5Lo+ZkL}3MfiU~($c;Gc^JZAe?~zX%NxSl~1gw8B4z^yaJZ!z}4A}i#)Becum9X#k z32Z$}0^Y}UcNmWK7u6qkWcKYk@5g$+2kVdIgY_rAf%W%p;h%f`SFmxSRj~g57TEpO z0=Ayu7OcPV!uSH`B{tYjyzBvNygC9_{!NCh51a-oAKJm{^Rc!Q*SSl)!#r3o*!cZ$ z*mzfGSi2(&?D>^2*xy@3yu|!#BUt-vsPPo)uN%VJdA(ul^FM~IXIt<2N$js-uy#pm zSiLqAR?Y_#Klb}HDVD{I-&5k%=IgV=`YVTF{iG7GaNn z@(r%zM9Xf3s7;O~^!PQGd-?0(G(`+b|Ma3tqlha~3rk)Q3>AYV8?`gIR;dJm82YJTLK*L9qU3IavGoYgoCp z2=+cnHX{E#UjrKtXaj2xyboVpGN2|fpA)d>X^X+;FQ3Ee+w`z{B-vJekNW#wuy#O4*ymYc zf8X3;@%MqS{=-z* zc<(6K_1y_p4^Dxt?>P4W=KS(HuSecDht*Gq;lOur?$geGy9_Hoy1?p>pzSg1`Pg4P z4{?+tuz9pJu=$9Nu+L3yKTt2T(f-J>Kksrs&f~9v)z9l;<@0W@r@p5ptbKaU>&b_u zgUw6*Zae;Fw>!KK^<1fTu>Qz{EW6EmL5|dejT0A!ttSYFozF?PSl{GPr^12pg{`AJ z2RlEn!^*+mVB-si{$xGyy~VWOb3Ojbnwv4>Q6AR*>S!b z(+1?#GTX6hZvI9)_Q46*`DzOLeWVAl@snJz^Y=GwytwkOw4*N*?d1LZ`%^b~e(dEC z*!432_V>!l$iq)$@;Oyt{ftbo`>G?XJ{WbK_hH@cx1IQ0OW1hg4cPI@3|qe&vWw>> zPFMLF*ONydDI*tK{mlD3uQa=FWQ@G~?K0!feXHza{g7X+bSb8N*8$q`Ypz|4dA_TM zcz)^)cggrCc@8uFobQ?;BcEEs)>CwW9q+}k`TgXu-y>>y1STH-=LPPMy*nQ^KlsIY zo}WBM=O~_^`jB7GFQm;!)<0G_5wqU2!{(#=!ODm7XL)}7t#dMULGPbo z{bHX+MDsq@L7b*P@#t13c^~4FzntXvvAc?#V*Zc^>!aD9`1zY*{~lzu6EOM7hNt;G z{L6F4`F-r-QLyzei{L;#Fl?M@xXiw61pD_Nlfn8iW4s^rM1Mua^dA6gzfFM6pC9LW zv`fNZm{c-^|zLgHv zua1U|tEcpO{Fy_r`FGj#B|E(y`|`zMo)^rfq_FGl71(-3+4cPIF`kdQ zhxA@g{_JvVdLS&Ve>&#VdE>=VCC@wn7?-to&+{tIK_7S*B3{*KY4?pu=*y5_SoNZ zzGdx=?zHP)Kc~O?uf2@_@h_}gYXe(H^8|K3w1BNYz7M-UTfxrf71;QD2iSP+Y1nwB z_tXD528;yW{@sAnS|z z|1Gfbl3OzV^*R}Q>;`Q9Yz=IE@M_rWd&}f!w!qdkd;)9t?}m*he+Vo8_S%kpR|ht3 zz6bU^QFYk*mYuNvP9@lSJ@4l@mWRFH64>*TCGAf=)d1Lfy?n6yxfksI$?5gf!E~`d z@zu<*_1o=X0ydvN0`{C?3)uKfZ&*8_4s5+$2iW+^&Hbz&>K#+U+KE@>wkv}Y!RF7-!rDjw zqMv-O)3E2o@4)(*r(osjby&OVJZyeyAFTfW5mruZf?ZE1Vb^a2>^@l!YcI`)l~0pk 
zzYo#?R?qc=jpvlNKlVvoSpAY8)<1Y#W?!U(_2+|OAr$s~w!zBP$9wpF?3($o`*{PbUoio; zUL-qgyto^zeGs*q^}%yDfwea~!sf%u!piMnSpA>b{^*H2kumZhBkX$q8ut5MFR*9K zKR2`;fBiD7eg6uqo;VM?-WKd)yr~yi4=exU!}>3CY-hb5-pTJ_uYLv_Pn`uD_h|_m zcj^tB&ub!+&utBxmy?Z8)PeP5Wc5vA*mc(mHvSm-J^eW!)Y$9!9??Qr`CJFqK4=Q7 z$AV$yZyMP1wfFbO$is^}cwfFRaz-YfUk`Sl?}7auSy9-0_!iiD{G71<+qbamFB7cX zTL~*~vcuXdBVhH<8@5wFF$6XqT6H_u=h+h$CR49;WgF{}?VF<3cpXe;Xl{aggrPVE54wi|yb4I4iwOuPE<%oh4%XJqqw?EH2z>mV7dfAczQ z{mUKfS@-Fk%`o!&0&G6w5UlO%t!D-fZwMuyLU7uyNj%u>L?R zucsbkf&Ka2r+b($^3Jni=i@Kf^Bogm^9g^z#-|3@PQLOQ?DJ)Xo!?8a^N|Afc`w4Q z*B9$}KJ1yZu=?r*?7H1=JMyoy?bK7xfwd=V!^Q(9!RG%8!p2X=!LGa8-^JX2x{SSa z5mw&Lg4JtBVdX>Me&mz3!PY^__FDzV{+{1Ym|=hP-b~t+KjUEQx2AbNzCSh)HjX|T z)?WG4>(LLLVAoe|*mzh8*!u8Nu-~7`3TyWjv_JAKEv#J40DE3Fh5gB2{2RgV5ifiV z_W8HM`pb!7&l9z?ojgwhSUvO+?05vh&j0(capr$^b3J~2d6_uQFR=DgF`4@5J+S^q zA=vXYn_!&dX^eRsgtqql~Qhxoq60~*2VtyQq+3#!AO)0hOS&nw!W zdaO~fdOJ6a+%C}<*01<+4e!f1e+(P9+zea)^**e==?we-;;{CAC7JI97PK8XadkDn zN1ay=SU;}|Y`h?~?buJ1VdK-OY{%ZuY=7dxawPGHgR5Zb71F@!k?b&G^3Xi6>nSO$ zy^{+zzj}Ek<4Jr!i|yp67sJLEUJ!5aJjZlcyZJHf`*nbo@6oV%in_3RVJ~d{qB3l} zVkfL$o_+=Ei+I=|*me74Iq%DR_J&>88({0etHb8|y20+(Jh1Yh2}~TRLk5}pk*cuo zksMZk6t$iAeMY>*`OXgO&;0>wpWa>;^LvY7$MYnN-kUiJHlOqztp2M7s}HB!j=l(m zjdy+y`+b8nuyKP?qA|N%JME9>D6C=CJsQZ ziT&9>U%;-rQHyzg_StyY|KD50^J8yLggu|K1lE4&0$ayj754jejlG`u!oF|VujHHZ zdp-8_MA&_m5;mXG6xJVjK|IaBk5dNrdydaxS@*Coc-?f9!^>FG~+w zSGO7Vd&jTZpMY9~?c^68^ZSm^_qO9tE`_Z(+y|SV9Rr&e+Y4(44}g8&Nc$5H=nh+t zumr|0oLLoi9o2`e>rQ7o`NiV4^Zh8-zy8C{IqW~=mFru(e*x_G9&WGdCg+5_QF8e^_&;Bo@fxP-<1m1|DEIY#2+uRK0R-B09L+)!`it=Ve^k8VAs)c z*zf;$w?F>XG1&dv1de@PzbEqntpB=~cH)44p_UQ0_^#(?_l#KWnkmA4`%Ux zjMEe2<>>QoVeNw}u=Yc1*m}exuzD;%?Dw#@z~(^`*v@)f2J25R3+MjG;f}C+sUvJW z>pfWi{GXY;KkN7+@qNF)c?h*#GB)wQmN%+S^%S2|*_^|fQAp7IrpP$0@#7BDDAHCfh zc7HXM(Z3a8zwcTTHV>B#_I!9**nECGnS4M&SUdUG$uavalkI#Ta35?vOg!89y@Rm( z;x$;gy&pE7lg$3;!=14Hs_pt88)47?CxrD|C&Bu^e~=F}AKMqU{`92zLY})1?D{?q zJ0G23*YOcpeO>}~|F46!+bhG$=}lfwzW&7|o{xHna9F?oC)oPe$*}puNLV{=EUbQ8 
z4*UI?uVCkW670P6hs}$91{>e%1nZwPfQ@f`0;~Tj!rG6uWbz?}VeR8`uzxQjJM8(m z!mxIFR#-igA68$=#tX8*=Cd-w=Kqqx?x)nSajp2U`|im^)(3XkP4cbQL*9Yamseo( z8^6N(cc)?3&lTAEzwgb@l8?CxyPg)q0ecYEei;t?_Zu$4+JytWp8A~Awo`xkx$Ww2 zuSdRjg0a5Cnf;jw_I-Ytz<6S3z97G>9rz{edC?oN?-k#6 z?EO8k`KT-7*?+`uw!zAYld%5x2H1GP0a&|qE$sfL%3hS@uwLkh}A8Z~VrHq`~ z30vRu=Qw_kyvAm)=RC>?Sh=!VCXUh=)_++HyT6OV<_+e+#zB(8%Ja#v`PzG9xj*** zC|EhT7d9T;%XZ=+OJTpy*%r3`e-P~Xhhnhx-ECmcXQYPJ2k*mvui@T$o`?KaF4*Tj z467%fjN$k2pO?YLIj+FQH%Hk{{^l_3`NiI_`NYkz`N~ePdHt_p=cBbuUFHP);~#ti z8$bHk{(PUN4s2Yt46Gin0_zV3+fKb>Sy=t?Vl>yYjtas0r$58mky&J(FACN_d;?bA zt+Jgu!-wm*AN#r`?0Jn{u=aH$*!8smHeXiE; z_0444k#}#v&c`^|_qjJJX8nzTjqhE8jfW0{t+N{g8%LIvXYFC_jlr;WopoU21^r;- zlkdQee|OmD4ThBmO<K=0Xx5!Mj}73SFU^)v%XKkey?;7Y<25u<@2OuyNplu=SKlV81t43O1hi zJc8#(?xcdv|NIRr&uT6WBXeH*H`sVZefuMSugl~Q zOToU^1=w}_(-6j!e8hgQr_Lq{_WVdBti8Sqb{}km&F3zMjaRRRt?%dwyPgKZ%FQ0I z`>QK#9ZR>D{ln~!z1jlS-~I@8-PD4WH#K1G-tw^ex-zWXE(B|rR`7b_Ln&e9dTH46 zw`T^^P97>VtUosqR=)3lvy8}VCBumLF_;5ouRPy zZ5G)1C$g2yLp$H`nFJf>{0r9q>;&u2T!Zzu z-|>33i}U?hUwnUk9qjtr21h)2 zR^0kd^9sdb^HEb_*KcN6f86>|<<#r2^EM9legEyt`{LgZgPo_lu=4K<*!;?L*!dh{ zfBf-Lar$fj^o3owqiENk{S@~5^AlmeKhXjecauSdRbhqV(n z!`5jof&F`GKA+>&0al*xfVIzR!Nvy;z^<29VfW{cu=4If55@!g?Fwu?^RKXVbk|_* zxC^lU-3{1$)p}SvNsj&=Xv`ZpfEK<2!H`&a+8Kdd~KeV>l7_N($j{nH%Q{~r$< zJ2r^?FjYu$K1*43A=?{^y35B?n1k39%mKi$Rt*h^bs&mVMz-9Ia3)@K;3-U)-v zZ?%H0Urh_EFB{sQc+B>$+#mVg1or#wlVRvc11e7=^~v)`7>%!l{0K4m^^yzm`Ze|7?_KF9`p zzGfJ#zD*D7H}r;yGxJ4$?#W8l1NwS0Y<%V> zY<+Gw*m=GM>+dy#)iZZt&jZzktzVb@9%g0x6VH>i4?}DxfAt4!elH_z{NfVqKL5K@ zj6D?%yUzZAo&O`SamycJpKCvCJ<@8}=idiw@3e=V-vcuBb_H!GPq-V_9u9&%7q!*v z@mnu_%I}jm`4-lm+y@)4Tnzi&mszm;eHQF@kfy-;BQs>`-$ujk*9oxp`be3$_+YO` z&V2y`80!{%>m%dD5VaP04y5BdVOUZoZ7>Y>J7|MGd2A5~%P>!!3D_bP5X_Os9H z`{t0TAFB@=-%bg8-n0g+f0GD~eSY=Z(RTbk=L6HzZaiiu?D(aI-KXEd=I`Ht-Iq&X z^|fqWaBur_-sVkMf2A>O{Yyqz`??BjeOe~liC4W2J0Jd@>-%C`)&qY}Iy0<1J_cKN zCaZ6E!PeVmfIaWG0d}AGd&cL#hW+~qNnp(?N#9?6bs2X5CWozGIs)rA`TNEvR>9hJk5=&f z$f;?t^7RUAzI7;UJoljOoDb;^YuD|B_47M;J@(fQ*!@x$M$ewuYJdF2O0e}J+w6}& 
zS<-g=H^=H#y>t(jX z+PMQ|{F!yI`m83bKfVOkP7j5(vle+h@spf!+Ku1cX~ldJUx}dIdY<24>sh~pm0uTO zzuzmHe>@2rzxfvSeAIqe|7Rua`>nAZJ9CED6DJRc&Hs;u)!XA>^T8u!;uIgl?&JQj z>n|&8{GkWzcVch1WPbQQN;li7SDO!e9;>z2vknHy*b7Zz?aVH)_Cga_f2SF2eX;Di zECKuXQta>f{5)Qdon*W5>nyPQx&iEdN)J0<^g%BO;`dh;*X z`OgDuKRcu5 z`TZ!`{rz8I_tPQRJjzvAdvBNR14%zFtrKvJ9+U&>c2@l?zr6c)nJ9CnapXKxzA{-(cl&G1%*#G~@mFUPED6 z|Lr!c{4ePBe4pezY(BmatY5haR-Wd!Kl^bEtiF?dzm~9meNNc;NF!KznhSQH)r9?? zMm|{kqCD(=^Y`5^A#vF8&I}vR^8ByzCN=E$L5st_-}9!dPwb!auzKOX%>H-}R{s17 zo8PDeTQBuvoOaI>&4xWs_qP36&y(Tk92aLfKI+HO_QyW)`MmD{*m^Gk@<yEGM;~;FEZ&~~6U%;-v zim>MnmcYt^3b5;R9<2O%2ljmPWLST>6s-K|CpUt%S7hU3 zwc*(F-p2P!!uku&xAHq7Y<|t(_dN9dCiExH?eiJ0I|I88ZPy$cL@AG_ESc$r#{g27|^${ z^2+<0*SgmDfA6QgF%34J9c(-G3iV;n&834~Zv|lO&o^PmCpm1KP_};cOr!s;H}~~8 z*!+;^^UYVaf@Al)@x%u9fBF8(!RoN`wVeIYFQx4Na(?wMg5lWlwVositpDNr>Nh?6 znE4@o>H9h!4`JoJlF^Q@`WyYk%BnNIQOs z`&obHJRJMI1MT@pI97fcPYj3Eo7w5_-(Q#r>)*-xqa$GLb?@i?YX^It-TkUxS3`ce zAKlkkVB@JJydU<&o(B9L3cNaOo!(a1>uP&H@}nzZ)X&WNuzuN0SpD&l*HgdS2R1Ki zyWb=31beWdG^)T4$kr%sR*PZ&#c;#l;{PDb(JO}pq7QyQ8@vwQZ zrLgq~1!4WwL$G#9RvGz!1UA2y99CW(gUvra|B&%Q4jqO4-o$O#@3S9<^%oDq+Peo~ z^Y`QIPdYaeSL;ijd ztbLyZd&u=a)9cB{C6}3(uCRHWH(}$pRb>2+La^(t2pq)sh_wG)|G8kl&sYsspQVBI z6KcZN?*+m7LA7A#<7qv{pYv>WV837T5Z2GG@AcT@vhj#Uuyqc`xr{jQxpc zwSl$Acfi&YwDo%Q)h5`y&ZqWgpDdG!2M&PsKZe2X*CDX_zPar@?<80~TOW4br^4#d zEU@Pt!eR4NNnz#EOxQTki@K~Y{7&0F-|;u>KAhwA$k9JxnTl$VVAZsRBD6ZDHs6Sq=JQXAXt^o>;bOybu0JE%IyX%hHv(KlW`J@_W{w z9;?XnQqS@n4!mD^<^y>%l>Dmkst0BGJ@Q49%@>nDx&Rwr{Tf!Ee-4v3pLq>d556eP zdgA-@dC8}{|3|~xzn`0L=Q-z=;(Gjm*QhrzAGqdim~mQS{Q+{VBy2wW0qp!0gpH5? 
zCo>Phu<_>>pMN{{u%)L{fBq0-$2h+upR%pHLSdO1@^up zVCB%U67;A3Z4Im(ZUb9C7DRoF{_3UT$Opdf)DAX&vH><5B7F?>Wh@$Jz(!!57sU@SCs3KtLtIo<9$N;J-#P7k$NiQ zNeN`+LuKmIT(_M=cpv=8G}P~zFCSN!@nu3!QSazJ=u{|XJpU+k@1Rzu=701^H%(Qpy9dc&-YIP=iP|I<$$eA%ES3O^Ib)9(f;y0qV+_t z$msimhgpBf$HF=3kDoF32;&j>-aG4&I<<|k{_*cQ7(eRVf{yZh#OL3qzx(qLtlcKAOFpTwSN{dzUqq#Cs|+QSDG>Y+Cht9<<%V8Jzq8Q z6wgoncnEBK>JhB}e1Q8|@6-J>zlXgz7WVr{-@?`xe*oXBx8?Z*SU=+?&vUhZn&VA> z;C+x2m0<11erIC(XNR>vSHSue$9Nz0RnZ@LKH_y>!OG3AVdLaYVb{eku<@x7*t+dE z&(co)+hf*)->+x@JD*!&<=bS~dXF2d2lefUbF>o&Dhpe0{`UEp@rnog`+Z^Obra+5 z{yhNe54MD@=PP)D@hAWJ6Z2y|xcD{?Qs8GW9KQ!o~-` zf!%KF&N|(7Fd%J&D+WFqzZdiZeY8KWr`k=@aexEq$2v~X3MaJLy0M;Ix4Qo$j zka@nRGJaUw%rWDg?`OsfeOUwc@2S>@^;0v$uFpj>e{XSJCfbP`*S$u6;(F6y?d5T> z`e?H4T>mX>{~54#?1y2`v(JUKGj728uWMlUXU6N?5Bq2{?0ss(`Y%gh>ra}%{(HR) zjHlnvzd?WWVQ$#J*V`4=KZ;IIf9e?K!s>$&|Hsr>$7xmk4V3Qg?nXM6?oerwlmgZZf_I^~C#op^C2SoyKV>zNN%5^z7(%`I5Fcpr zKOX)4yCiqu4plm>g{`})1{*K`1vcJQUuIqY2wSh$8n&KlE^NJDH(2{_4y+y0&;G36 zdA4)@#0XgVwiq^_Jq0%3Qkm!Ryr+hD!`90;uy)5V*!6n|_C3eJo^LwL_hsJCgpDKo z!Fae%uEW|Jw;ey?gO6e7)l1kq^QW-S8=vv^@1Z_})ngf9{GMX6^6EXw<9S#&8JR!YXSraXw-&5@Rsz-!TW33Q(2rp4<2cMu>sz{eJ@MzPu=`EE zu$}iP?|f#Se+B#Z2AaaY|1jA4v%X$WK63qURg zcJp2RVeRz^uzKVStbMZ>cK`D$SUs>3cHQM-J^J?s|AdWCHG!S)TVdz%W7jY8;sC5X zdTxLG*^998ij1s({fpPI`KU&)@*(wo?niuV0qp!P0vrFm1MByG0PEjJL!Ov#Y6u$- zNCE4AG=tqwp9?mg&=OW2l(awdvbFuGx9n&;@sC!p_V^UI74_9^VdvilSiRB-ww~e& z?6~xWweMcS`nLmN-y^Z|j(ovj*mzTR*uVcc1a_W}hV^T0*B@L02i6~Kyl1Wb$wTyj zwFh^?+E2Y<{kH3{`+guO~lP688DF!R~KuC1aoOfwg6l$mp+CuyXtwtbMcw*57>qs}J|X)=9mw9e?2ptpAhj0l!cEPfqlM@x!vP`lBoC zeuL()e%L7W2lhi}SbJ^}Y}{p-?VJyD2)4fU8`yoL&td27H2bsuOQ+#})Mw0qjaOHJ z9hV8P{zM&E`Szvl)OihsjZcr3nUC|mp7$OO>tF1H^$+^P+D}o@f7%sa!`5T}=lwWu zb{=d!(PdcqzDOoseaq{S2S39B`-SVRi`fh#=GwP`m4~}w=SLsd`Ev|5zCHwY9zTv_UAeBz=88_u@@Zwys+OdGpw8_4Qn@)fz|glVC}&7 z?a%qxU0~}QK85v*zkwaU&tdJ#Ik5KbZ2My`{RjunSH_+(es>bqk7)$^eIq^M`G_}- zfSo_dVeQ+CuyLN^up3VD(X=$IK7nneSmg z8b7TGD^DxJ=0h98#*1pf`ajKK<9lsk^9kKx?dqfnA@|Ve27Q!P-YlVDniUVDmqJ!qzQqk;&U1@p|@`?STDm4`B00yJ6>7 
z1?*$vV<%zjcfNqN2SQ=x!*K0q?4Kx4!rtdQnfin{u=%%nu)mWCcHJ+BUH8dgzef!1 zbMm3tVDm1OVe1zw!1|9>Vg27aaL~Uq{@uiO?56s(yFa!ytbg7CHV@tnR({NcwF5`P z&hN*t`g#j&{JbLmgXg!rfUOg43G3&@e9H5ZfB6>nJciV;^L(TISr^%1?aY_3@;w`D z9xcB9E&06MuyK%#u;&frhaLZ_uzuNxuyU&}?0)i(VDqlaVeRMUu=&I5uyUX;Y`(52 z{!(E6!p3uFz~=d;!pec;u-|bl>^{&?{VVnp9<)FC*Sz>=<}pv(A3f3@R_=$w?msvs zvwt@EGx`%xPK3YbIG2IVCw~F!5BG%K_p}i<9yw8F-lH%B>tvaXK0gARCs+*||GWYl zZ`lPaXF~1IdAY}6?U5wupH;yB4gCEY*mc@N zf0+HQ>tN-`+fziz19yl-nIb_ zzQ6IHtup@FaN4!w55d+~jf0&J$7RlQa{R5gKLhLkEQPH%zXB`ww!*&0Yul*@Jp}7N zCw9;p>Ib6~#LgSET2!1_a7VdIZE zqGb-~&Bd_$uXe%4&$htEN25fiopp8)Rv#6HwWI!njmxxvt-rVkd;aMcu_dqN3cN`}`Gy_az>j7FHirj+r?iC$qxl-M@g9N7-QY z!vfg%e-Ac)uod=u6@;z-xM6?pZ@cm-Ni6Qqx+?%Hce}&JlZ(ODN3DhR6ARnU`Y0Bg z@gqK#6EV>@~2Z{Xnl^b1$Q+QXG;*WP{&Yfp5Ct#9uYhw;PC{2Vslx)9c% z?`1oFu7FHj=gSC$$m4JzVWs1xD;17&|%@0(RiN`#I zt)FTDyRI{rMSD&h~<}BTtnI zd%r%gdD5h%xgYXkoYzx7SQIv|wHQ{uHHD4e?~~E*%V57(#*ypPVE7xbi>gE2ha%N;%<`4GX&#?QN+RONTn_=(Q0#=?M zgah;2cE;%lY+n8u_cPv+BtG98dnzNWzgHP{e@%25`8rT0Zhy?@r|$cx{W%|b3v69d zyaaqd>MBF0`17@F0k@twoLuiC$RaBVzQ6Dy55~(8di~3^O_fG-)eeLh><%M0pOy62Ff>w*b~IZwfnqf0j9~Z8oeOeh~KW1s;Ia z&kteaKhI$Gd-6m)A933Zm3d$0VIEk2>jPMMnh!P~-abOR`!eRh>aQZSyAEJ_R=EQ`h*j(RF97&Tw07;K*9kc@p)F}(dUtiN3!Hvdxh1J*zG@eJ6w zL|0h-dk{9BG6oL#=}8$c&RhBxHqKKGRxbC4tq17`yC1B({h5cOVC|lEu=dm>Si7-~ z?bIPIfc3|Vz~(3Zf}M9MVCCn(u=6!KtY32lR=&LA`Q2Z74>q3o$oJ#>z43bV-A&l@ zgOVqM(FgZr>b*>vadLeCQ54AD6(&`F^nLp$2SRU;^yF(O}o@5?J}Y zitlYb@CexYfF7{^QcKwQRc_m{&x(0{czpr#`KuhZ6KB4|{PcW>=&<`-cEQ2-Q~r*H zt=B%w`%;kU9X77DLdKrC1?z`Rhy9LsVD(=|*zbK6 z*8ZsqYgasn&AY^b{r}N?KJ4Vpu1E6MQDO7I{b1$jTl!nC(h*jUg~H~?YQoCF!1GXV zR1P+O?ELfjl`B19>tcq&##5c2`U4ZZ9yznfcILrCn7HZ8^|15p2m2F;I{_O< z7!4b5ehO;`^@WvZX_GVF)pabaP#S)Ufk^sr`|!F}xmobu}EQk3-*RPalQNbIe!&V0VVX>YEVQ z`=?I9{m|1d(0A4g7KWV%*I?Isd06}DN85>`_ks1Nn!&EGX|Voo6WDX@7s1Y}>|T#N z-s<)27rmsucmB%wb3enJf08CrO8TRJ7Qy;|abTZ&7Oel42-ZFsYk%fhYS{C6`oXTF zjIecH4eZbPJXvA$zyE2E@OxQc%KIbrhwRbcgOT-bG59X7xD2z$x=hira&o%R&*wwkbh=``5=gPmd5+XL(`*X>uZ 
z@0|#H&hzdk!Ro`F*nd99bXa@kH|$I6P-esCXOH7gIKRJ6#rI-f>u=~ET!r*K$MUEk;Quc$lRn3m@!&Nm3QUaN6Bo`?A!Qzm{D zFFoVWf2Z;9jMr6xohSO^%CC{I^_10M|6bq{SpVuc{=DZ=49LL!_<-&7_sMs)%*gNa z_gME3e+cXs%f@&SPm4qR!93L|*nM+t;6S}(c7BgI4Z9aer>FQXjT?DedNH@T1> z_^tO~=jTzFIC8ODu=O?_V9$q)Tp;XwH_XX+U@v|M`I^Wd z19hB}GV?yf6MmCfAN67N&Q4hQn;tfv{x7Wkb~O{fkKJ?u*6-L1E8lOx<}+r(KL1_V z`lgz&^Y;ntx=03_Z+Qk=m-##+&qtp9DXd*`2G)LgY=7b-3uWq|?!eZEmV=ECUV`1f z-YrAe`0av~UzgJ}-}%?)bNuGR&hMMB`G?o(Sg*+87qD^UC}~+Q*fA*zh5g=|l#DlZ z6SZOG#`Y9p@+w|o-XD9ct4zM3F|7QX2s^LV%J?x)$=^C3r@_WY*2Av)9t6y-3*i{1?_9 ziw9eubKL8Z8%bd2k?rPd(#XVT_ruDs+_3iE0oZj=1U7zj6gD1J7B)V67RC;LQziWR z8?gIfYrxL$r?B=-9s47fQZru8&j$8KFXe@m*DYk~bBe&m#oNK|4=H9ldafI+eO=Uc z?4h3akC4Cr%Kn^}oXmFo_7Sl8q9m|+#?kiY{wZPQ$q%spKqgpwc0O!SNi9HMNn^7b&S|B_dx{%Z&9col$+>#l*-2Zdng^AcG9pfGHFWejZG-2U2K z)-#!}%x{16cU#!~UA9}d(*V}r^m^Z`25i2pxJ>+~GOT_qC38MrSy(+<5%xJt+s=8z z)nVf=0Y?AVg3bSyqg_9`K5YE23asDV7&hPjA?$g#&0yciISwuIGV&Ap!UuUdOO z`J%4&XTG(8UBAAs`GM~65cW$?g&m*1GV5(7?E4IaoriMZ@0;09ymKh*-_!UNHh$aB zcH#$HVB@>JVe1(V*-rem7wq>u2|LgF!rFV0iv6#@@-i-L{I)youCFAp@y&Lyc2GK4 zf64JzpJjvHw^v?fedmLXPsX!9&r=gtj-N})`%ur*8+P5yfXx@JgY_qd!urqKVe5T+ z*p8ie$ad=^ZAbo}gO$gPVEws=5!m|aXR!6U&1tt@{Wa`!Sg+&#V;6_<7e;wK^B@_l z+?WWfcXLKy>(}zb>bvo@Yd;r(wU0-_?w2SfldtP+JM}!3Vg2fsGWqEmuy%a|*!kQX z_WRa@^-l)C+N1Bo)?>`Joq1LgR{pPmouBb-cOMF@|NlA(-xK?DA8h>QIgA}t@GxwC z<_@f!zYZ&(PQm7(qK42Pdw)J`J$-H1c+iiq@vvU7@w_##e$to-?b;R7VAoeh+O0R* z5BvOS;2y+3Z^Q0Kdq%yMe{V8R3DyVpZ%*3PpYOxQ?_0q7nLonj8+*Whk3;sSzIv|x zk?VCzGCzrv{0<>$w~q>2f1MqB!MKF? 
z*RHDst8Wv-#?zWdVB;lyVeR}x_TTeyhP2uV=vKr)2G+S+MgsK5QIz9_)Hb z=JlK>Fc%Jvhw|8ZF!OJYpYdYZcx_%-dHMsad@mT`e#-A6u<`Asw3{a@8s5GU*8VIe zNCdB)>S50ziF~e+|>DHKGpg&?bBQ^>+bbKICwwp z?EXxmh<d7djB=-J`TT!busT?&r_}ln?H$}hTp>utO+Ym;=`_wy0H5KQo^pE2C)87 zE?9Ze#CG1VBCOy1iS5)YHHY<^o5AJ-*4rO@uQ}{|OpbkSeQIOa`I`}ToqIpOPhQx3 zoA=ZHC=RPfTEN;LS+M7>c>p@q+%7X{6 zdAi*Z`djB83LBq{ivQyJi(Z=hp}+I#&tR`6huv>d1rGc@4A}MA5H^364_1G5h26he z9`?D1+Mn}~>cRniYJckO+rs+qGhp*xpTog=Irra>gY_2{*dIAL16DsQh1I_cZ6{9o z2kd!Ht8FKq^&hPLbO6?VzYIGM&%)ZnPhj^0rYu8${InSOZ+_=$u=(G_uyOS!u<{`* z?DyymTVGig_WYJ9u<||^Y}|S!ti2uycH9ozpY?Nu-?PprN?Gnt9Bv0}JSZjXd39T0 z&)dlgYY+bgt8eqc)?aRgjeC}ajYsZ+t+%NT>(~EdfBeHwVEv;Duyw`VWaQmluO}Zh z5H>&c1~%?93bqb1CeQ1B=y9<5-HfntfXT4yZ6s`dc@eB%asoDZ4C#=f{1o=e!yH??4{G{^UJJ z!s_V+<#|5V*&Nt-Q+incb0usXDHp6=@)*|s_z*T87(;)b{Crc`d_+9h_*&cW{t02@ zo1JCq730Is+X1k4#69ni{96edSH1%~9_wNK>iaVFq<_QOQ~tj3$^)=@&{wwO7aR+3 z_xG%yz5=@rs=(Gqyp@svmtgE7xopEGrqxh+yT2Ey%TKw^D*pr8y_+Lo2?@K zv72_no3sVdMQ%VC{f|uO4)inbH) z`2r63pRjd2gJ9#~jqHz|FcQ|D?*N-$9tV3~#YotA)FfDcc)RWRDc{TF{r1Aa_f=jT zfg^EV##GwP?_YqmzvsfrpLei+-&$CIJtOfu?aHn8XP-oN*!afZu={HA!`5>jvK@U= z2sTc19M*oeUHj`4?0m>+f9eU(!p2jQ!P-lg>`(n%eAwr?YJbKhHf+4|28`aA9n*IF zfcvoaM=aPnqGzyq*@Uoq{0;2^w{h2kN0^#ybP7+*>4LZ_3uSZikJx%Ifo1u<_fZuzw%O`cUPNzo))R0|$R! z`>a0fc}3}IS3kI)(0#}`VCPp0SpAyc>!}NBZaejTA-1!An!x6V%fZ3>IbSQo+6{SV zcYS{Xn=i}+>rb|X)i3G19{Jc7*4|4Cn}6yE8}GK=I-4#Lt~XxW9kyO4H|@rMKDV9q zQxvuyvM;ROs{k7>=?5E+stxO}^oNy4U(4j(W%HQJVB>*b!NK}i-){u$e2=HY&@_ati8VmHl9!()=u9HD;I0R+6xD~p7q}Z z_Iq8iKXqS2VB<1xVdKr~VCyB*R_6ZbwSQpsPYyUBKVkPZ7lifCa#&wW{H!vpzHbGa zfB6J8AH&*L0oIOyjR)`ce#n=uu=O(Ayq@Qo z0~>GK3~Mj_1)J|Z23v1@5Y}G*57zHJ4jVVV3~OJWhyA|yVC$wX+0J@;4SU@M*z?S? 
zRpIxs2d=`#XG+1^vv*+Qoz>w$JrV5tG=R-RM4|rF^N(A>?lVXTE1x^T`mt$Y>u-9( z>g~L+{%Sv1|GO-#e>E7^uC4`ZhYyF%_q4Q~^XW#y#;<$8+G(dDj zB(Qof4)wd%ePn>`x@3BuLf)1?Smc1*|6tb9I!v*`x9*4qV47v z*1+1Ef5XJYfc`f7%t+KA#U8f2a=!{vNlCpIp-Zm6#5T#y(i-LJP#t{hyR;j%Iiw7eoKGajTf}Ho$)>4^HJ}%UB(}L z0qZBmuEF=vzawAjdsm0em!yRCds@oG&(fPuWq;@Qu<|q=Y(8r#={6)TEt# zQaNDjfwRD#TazEQz9s~AJWIgVQGE#OpKO5Dzwv7^f2pte8}_?pgVi7Z!p3__!rCp@ zVC~sj_GdrRbFW7~HJ9);&hyf1G%^ErKC-iLf>W7z!~^_oSp-{O z@BRbN$6f`S|4LGX-)H@AgN?gqg{@~i3R_=Q5ccnTU52%nO2UEjFkt*z-MB!>;p-uzyc?mHpB0ckNGnU<0h36a6E8552k%R^MfW z-OqN)>#-m5!OD4`$9P96So@_s&+q-a!k)+dJ*++X8*ClN8{e1x-A`fd|D;8^o_J)+ z`f&LE3H3-mSUI1O_JE%!Babt~+MgdsXm|WO!v1~DG_*V42f*rc?{B@&XxRA?4>n%> zos7NW^Z55S=fa8budl$4-zr%9=>V+%wgXmPt%r>-AM$#}X$Gu6b{;nV&;?d6KZ1>) zRe_buQ9kDW#P1$4UfR)VVeRu1u>L}3I8d+d{O9kHw1BPm83AkWwt=nZr~z9a+YUD0 zk}5*G^{sD^AIg=Mw3`>c1pB<7z{cIyC||Hg>eA@8eP{2KP}Bj14i-mPK%D#yq30}8;t&v96PCpxS= zJ7Rz0KR3}I!SOX-c_acWAJ?m2_>T5B|2hsfZgT=QUfK$_4)B8Q$e(!jCm!PSX~#cR z-|_dIpTpMQsXx7MB;H4VayP7eOX&L{U(~0LcLvz=_k7b*_s_8Pr4?Y~hO1!f z^Zj1leR4jcb{>-EHEk~1HybBxmvdB(iU?))JCo&+|or2ICnl1sK8683zbF0gUTd9eGB zhQaz{i@cusGyyh#vIKU&|4i6C#bQ`_y&Mk8Z{wEhVeRedv}=DIfXzz}hL!vGVeNs> zVV~!v?dbRRu<@{{jTnFYuiCKkB0lW<6@sm!NDc@5f0_A^O2%(a5P>~^B$V+n-)MjB z(d}?1&Yy@QQ!n%lY~0e{*FPEr>n}frwHLQIf3d4)!-0AT*nH+l8TnWdw*H_$Y&_@| z-`99_Kd)!~?{NNcKF9!AIkFVi-*J5XezRch?oqJ*&v@8+sBdf!x3{z(rpf59_Oxq< z{|GBDwCAlSSScfqYs3D1;tjC-c*@K8Lx;ormx0aKor9HcrDe_!y9awtjqT3Y*RcLz zMOb?-F5_jK@dMcXgXtV^&j0urHhz&0){kfdTMtkcR(|({tv9G?J9)?%u>N-^So`Gx zY`$!U%>6%MJl*fP+v|R%{diomX~=g1@N#YV99+%cIrQ(xF~eSU-; z*PmePw;vFfPazEr|aajHR4eWY)4|YH999a306ZZSB zkeNR@ydFKV#s1hIxnS$V55dMQ^TDqF^RV+H1lFH<09$Wb1$Mtyq)(U+*!2x-u%sV9%4>1gpO~+KzwVd3NUO-i5c{cD&J3&6@Fk#3fzd=0DfV*gsuh|DNMH zSo`c7SiknE?bNsa3Y%|;-JJKKzGok-y_8C(j`j|$y__93zx*EaEfID{HCX*r3=XVg z`2YP0+c~iL zhKIbb@@Flq96SW;5ATOvPXYg&@%<+vlfSqCtN%_pe%N;}Wc15*ne`H*CGv;(btHb@ z^HS5m*2ARn`G|w$m8l0Q2)n*Zz{Zu{ht(IAVb_oLgZastwlhy$PsSM=z^<3Ouz&CD zb6CGL3*(`l90fZ*6=3D|WH^uygFUZhKCHf*09#M78Fu|FhV^H6!O9=?t@3&=9Mm7q 
z^L?=L;Jp3Om$n96jAt?wn17npBHVB-(Tu@Cj%L z8%Oc?{d*fZWbCd7uzphs?Pu)zmvA7S39F}~wBq^6zgM+Ceqb!veH_(b=R;!HdVJf> zYh;A=Z_2^OvqNC*_2RH{sj}_i`;+wp>e)^lGoAg3_ceq4UKf1d+x4aogVj$*VdK5u z!hYYqu=%-JwqtMYw4J|CvJ^I6{x_`rUke+T*avIB?1DW%=NPR1J^_1v)@9iBaaG2D ze+X;eJ%F`;vFXD#qJourVVb^^L*!`uwVCChfuy){=u=R1H zVD|?Pgxyc_6KuR}knPI55-|HkM%d2yd%mjn);L%>e-!q7;YqOZlLxT!_Sz`|2f(I#4VP<&bwK#@0GK3*!*8?fAnk@ zSbt+V?0i1~`~NrCPJA|V8JKyv6V^U#3VS}mA=vov0NDJ^aacd(TiE>+XJO;T3t;`Z zOK{-)0QkbAjD?@V##3XJ<@vB@VzuS@(A()??Yj6fag#!@cZx!O2Ni8yTH!>TCn+};qrp4Gg`sM1t;4dzq$vkov|C%zFrMGzb?Sqhx=gjiteZO zJx;>Pop=@bzUXh~i~1>-%=dl;Ye$!pi62LIzL6ho2pg|T09)VI1-4%@Sou2;HvXRy zc0JGadi<3%UeEsAt8hTx!;V|pN<1I(G9zsK;RqbO-uH>cem1{XYTDK3A+Yh03~+G2 zo%Zc}UQfNS`=7N>dcfA--hr(b*yi=~`$NFSn^FY{ov$Qh% zHry}m^B01Bu7TCKo_f??aBx4W`e{C_y^`yF#)JCtP+0x_C#*l4=~KoReH`lp-jDjY zBCzYXgiL;^66}6M_YXU-8{1AkY$0qt#}}~c@ef$LVI*vQ^<`MQZ82=UQPB^>=EG~) z_b3VLFUM#fHowcj+9Qc!;|H?in+Db%D*@{_W`)&91!3!I^26H4Sz-N$(y;mJWU%wM zI;>ysdEmxX8p6tfE3k3N&tT)5|G@f3!(h+H++#cO$H}nsbqB1xSP1LSZG+AC{}!QL z`EVe@_3FP%u>SJD^mm-z!RoI|u;ZPs1NX-+eFB@`DGqDT#^m|k-%|tD&v_5lpXdr3 zUuy;HXH4>X=6^5P^Ks|E>hEc`^ZTn{RP`u<^&Nb;IX}?dX^6aA1DG+IIzI*3DMfeKi$e>!%OG%H>+H^YmZXd{+mUb=mPe zY~FpO*JDrKgdM+`uz8>R_Gf;rgtccL!^Q;(_sM4r}+NtQYotp|JV) zGO*7TxSr?#;3M7-`LvyO?fYx6@u(kR>w#+2=l<+JpY8S7rxQPBeG~8b4A$NY{Cy1a z4{c%X+Nlkg56F|Iu=bjN-$VT+TOWB6)?WPtHs713A)e<%z(-XFyXe6xtF&H*(@fYm+nFXsqkJ^s@T?#7~E_glXX{?6LFW-dq|Nesg zeh*;%i32kGJ)gna<8NW(Pl*~aUdXABI`jMJ!;G+c^iSA$Qf}C~kb|)GYK3LK;|ZB~ zL^as^Uxk&=onXiJ6|6iM4ZF`fUKf5J|8@@S{*Dx|_QD34{A?!JdXZDG@zLC{@ytYx z8Bg-ng<Ae=d7A+CyyG3P``%-~#!Fwp+HKBP^Am|b;rX%0 zpE&;PFUbxo#~;Dw??PbXdku-#L-LJ3!*8dy~TZgs}Ha;>0b{?LDjZ+PWjYnLCwYSH@e$U6S`fa-HoM#`UE91pJ zfaS1p;S{j>&)u-~)p=n3@q4iKE)`(wL~8MV%Fiya_V-CxzbYSWU1gdUJU{x(`)jY} zgtfz;`h3WfLa=)60jyr}@68#nzY8mGYr@(Cx8PRnCuwdw^IvwqS0C8(u=!e!=W~9qgVk3ZWcs*aug<~B zm5RO(e%XT7^v8eB0Gkhc37el!0=tg0wc&fYEfc3`? 
z!N%tHO!O;~x*1=b#Y1Urw1!2bIV zw!UnF*Q0k5bmMx?FPQ=xe@hQ*Crq`S`)7l#SNa~de(pWk{N=Z>-?=#K?~R7lixp)2 z>z=UoU<25ATSM5mW_#H9cPiL8-4NT+hi`fRA<33^_zpInxeeAII|3WOdIc*tF4)ev z)#Q5PFaN==yJDSqUg8QnVdd9QSbt`N?f4nHV9&e!8TS9bgw6BLhV|nTcBUQwbPB9J znG@EZ9t)fAc^_6E_Jdt-+hFaxGO+o#W3c^-!tM`=+=c$=@f@)BL|WK9S0-3_RSkCj zq=wxO)&$nBOJzHD%>XzcpJDxrMX>etX=UV%f4|%F60^hFZ_z)a9sAJlqkonJwjQ;x z{rP?wVC&sV!mgv7u;;;5g8k0^{qW%T(w`~{8^7*NyY_uaSp7X5HopC#*HaHM3--Hr zhOH;~1NM9V0J}fqBy8Q#X4w4IP5UEnF2Tk_p2*Z^_U;N3pNrR>`N4jw&tdK9T(Eii zZm{-wN!!UQbb!tOeHh;U5$rnXCgWe1ggw9O_lS%hJLmQIm*r?z4%~;0cT|O4e{bwh z9zD3^ zjhE+!-H&!E0vm5n*p25WpPH3+^+kHvxJC}x{8bLvct~E@`Ii&cA1h#g;^5g~-mzx){m8gl^=KfJ?xe`u{3Tq$bgpFr@<@E(;>~(zg zr+%=1`1etsu7b5sPWt_b&yI(cpBrH9$`Jcg|1rsS^l>TJcw%4J_-+;0`sPM5{(Cjp zbx<2Nzb(6erZ}vh;QjPxUowBSzl(T1e#jlz?~)kS-rEW*|5C{0zc$0c_tn2&2y17p zrd@x$Cu}_GuJ>cTG>83uAwBrM=*RbA?d`6x>#8j5d>;oZS4zX4SFr%L{@3;H`rZ$l z-wc7(Q#pEujaM<)cv^p0dnrF`e1ATyU7j0e-nKam8y`&%8xMRfQ%@Hk*5Aq1i}^_Y z@df(D`8pf6PT)`V4dcHR)*ks4w!Y^)Y`xt=SUvyF{_M}13A_I&>*qW_e)nWpeOMLt zxyQr$NzGuNdn~Mf&;$0or>|k<=SZ)|zmv^Z&W_MueZ3Y|{s!*H-zWK3X1|2L=lDH= z{kxGqkNP`lZ|;X5vjo;YDhwN^SmXUU54i?xeeyQg@6ZOe9!34_xb%jdFPC8BV?$x% zSr5D(JLYTH{oik4^Qz-v_pired|h7?VCPpr{!@=U*8bEl6rkPuxZ$wtt}^U6^@FuH zn!w7N&tU64y21K8tzh-)Kv;kLeXqxVm;`%XO$izQLHX|fn@q6!X9;ZF|iSoqTWo=gY8m^yjd0;x+8|%@6zcYXj>I ze=52Cv0s|8{>*p9gdN}BuyO7O%tz~zm$+W3r?~_B{HyGbzP|yxZvT$J&d0O%r#}5Q z?bbg&fR!r`W$HbmB5%}h&Uf`qI@mhe*!;eJtnyX6Gdpa4t)cyiib#G+j;JRu=`KX!1{~7+KzvH5w;$zP#>NLf9^i4zJ4q7?;}|K z+4KwMBl!;5_uCH}pR~VqpH2HRKd~3D+aJBT1~$HM1$G`M=@+)%{XPBVhp_cIXT2W# zt@)QcAO7h-wj+lR!^(;6uUt#^gsH4K<>jqeVv=FSl_P70c-!`!Er$ey$kn6DZQ`e%SAJ!SSJfIaDTZ9Mkb2-t%=|-WNa1 z@pk{s@38gmo&7$@$IGzs+H-tA^C#*1aXoc&<6!kwMVWez*0A}rI^KHUa8AIib*$JhreZ;Qf_65X8k z53F6{eE098o|LKo&JUZvx&<2#D(LmdzlX5#^$^(idj*?stpaOj#rl$V>ZQlS%E6+r zah1KW`l~f;{5b*7Yd!5u*!7tKc0T+kqI|6X*m{$b;J7q-4Bhm8CXIR)guVU@@=8B*6l^@XD{Nll zR~S1wB)#p_->io{@3su`&bVLXfs7yes1x$g^Zm-f%CGdW`{kO!){~q;>BNNYm3wvI7E7<3+#dzvZ4TX&ZM`yg9PupSb 
zsRFR^o?~8*-`ovWu04ab4@TKe{a3WXTu+>45^OzE0@(P(Y#IHQ7S_L71G^vNec1ZZ zwXo~4F{~V34O>s%9M&FQ0aG{qrai3RJJ)vBV_(=fpTB4RWhiWX+4*DqYBFp*yB@4v zwis3qRDiXs*1^upvat5zX4vzM%D~=l7i|4_pq)J55m&+hO0{^eoifbZjaxXt%B9=c4%ejNiFk8=F9&tk*cH$`Fn zv8K#N{fvu#Z~Uvs&QIz|kHh+-7qMU5&nKJDT?hNUPQ%90zlXJ7F3aR+hrrek-G&4D z9oBDn0UOt?47*-p`+Tg=yk5`uO9LD4ObBbY7WVmAPodaT#9w;C#~WzXn@B z+m3ek=Uufw&(j??zY=KY{GcyvNB>=?UH$tNZ2a*yZ2V~qtUvJ>cAYrC0`WJW7khI) zY#uWe?0Bt$jr-(*^?gbk>-Uv~w@_al3cFvmJnVDD<9n(<%h-+`n*nxz zObD#~p9c>3hcf!BPC_i?rZO2??f8P`%>RD8TK56oUrR;IP5v6rEJIl`~X%? z)Pvm*Q5klgH-XjXWnlB?U10sj+^})2VfHt_0GmIW3oADlVUPRob=bex{Th4S^Vko- z0sB|`9Xsep*uT@U4*NgTKi%IAg3Zt5fZZ>&1~$L_Q2znB?e*FZ`(fq(Ihk>r3%h

=}OjL)i7v#p{U^#u(1~VOO?;wO>MD>jm1{pSW);SozTbcK_QD z*z@qc-oIaW$ad=Xhr*tJQFH|3i$5|E*59fNo9~(hTldh%cKqXou=OzuVC~iUaNzIP zz}iPM?N7dXhyBCv=XW^*`@BQEp8ClPu=(%qu@Y<%JctbJ1zR^LP)$@3uJ z62r!yGs4R2H~5d9rF%R6?p9shYh zti3lF*3LNyn}?VHTL`vHx(Noy`*-_Z*V*32}4;SHpewVR7 zZp+A@oUrowfy{g=3>!~;3}VCUIO*!b%?SbHlI^V##zPQdzG72x3enBQ**dw$Ci+RbnEhRwUl z#?Pm~#)<#&dd^q)1$KYszp(cBVOV*38aAGE&Fk_193SITuVDS<8?bS(4Epc*v#((N zw@xy0E%zwyM;+iG*zaEvRz6OJov$t6fc+rj-}HpFW9Gw-_dwYA(>z%DIRfqnlGUQhgR1+0Gm0@nXp537IL!`hWw;XphVR<7=cjSuYL{e18H zwxb`$z}n|8VfWK?f|Yx3VBfd2?VP9a7WR9kfR%Tz;efs8_r?DXwLN@&2Jde@mGi|s z#3kC*J13Z*`b`^Q>$JB!U-`Xtuy*G{*!%qkn-@(Q=qmSl(oEJG+X5J-+Jx6Ue ztiMnYwqAKLtUNCV`@5@P?^_pkJa@vzJKDg`v$MAI`+Z>N!*kgD_GsAekkavET&KaV zkGin=_?58#|1(%Wc|Gj+odoM2?2w7`uY_H{`{00I02{|X<@MC>rExy+To+~HRE=Q2 z>r+^NyAy0Z%NsagAIjK+WyiqAGnilIAG^cu*Inj(qrPGkY<&4BY(2zeIJjQ5SLfPJ zJwrC;yYYpku<^ROt_SAXO8aB)EOb5i_o86m^FC~Tb_eWtoQMAQ{Jo>F^SvYb-g1$Ld@^m^j$D`D#ipTO2%ZGw$oyz+YN;v=y8N8@}Q_P*y~{gbq? z`u#fWcYF_4p2?mkSKfB=A;;`boTMGB{N4^L&-=p4qs{i`yvE^PPdrn$9%UlzdH-9y zp7H(xc0TNc%}36MwI2`KpLps|u=40EtlnP@o3FeF8z0*U8^3%DYmaY*mG_CTmyGA` zfCKSv*f`#9*!f-`ww`gX*R#HQ!}=ln?9aF@llfk|VEysg*n{egO|bdI8Q7c3)tz2X zeEu=)`8Wr>p6{pstRHX`R{qq~e_&is!`dx9VfEl8SpQ%WY(2ysI1o>QeXhu3Y3J|L zWWYZ$o|YH({)P2l82{q1_IVxH^P6kH0sBaQ27A7v{rP^wVeOppa6muG)YmV9t#_FL zt9LiS`swpv{n3A6?UN<2{#Md)+>dD`yCCd7r2lwd&!LGNauzu32@c#8|=e){2u<@3hGV?X=_^|OzYJa|G zC0PGG6>NNKIBdKzE9`t*2Ya4#QCNHLCT!h88QZbj<4@pz#IHVpwWsq(Wc;F0IG~?l^ODiO zMShT%aebNZP7J%>u@UTk)wHnj$HK7RExYa1$DVNgvR^&~*50_udbiG|BdnY~0qZ|? zma*IB%fzL-!RpI#GV{8J*Avg~1RJmJ2fJT+5%R?S$bZ7_S1G0Z!M=O|``ykUzkIIL z6T`+wd0&vfhsNXy#3`Sm53HAI0~=TB1shNO-u}b^GQ;Yb^|1R34ysS6$GZ)CzV8b3 zm2!XNcQF3d64?0mI9Pcx!*=|vsj&X-IN0a?5qAC!hF$Ng;GqBKcyF6WnitO3vO_t*|Q4~F?ZX7&-eKR{vI!kv~Zo59N0Q*!`E~Ve>m(W#YVzVbA~Q3#-4rg!Q8a!P;vRVe2S{ z$@uL*+RnTi4jZ312sZ#aMVDj0uVeQ8}u>O5#Sh?H-_WcHXJ?B?! 
zg_Y-Dz|Nm2Q+Yq~W}m~_lSN?bxx2vHpS5BAXW2YJ8(4kV4K_a33wC~cz5e-EuyK)r zu<@>OuyrWk+D@EyE*y-HIsXpB*8ewWeOvDlYZ}*MzkLcTf8)dYNfTi0u_Ca)KTCN) zoV2p-+S}p%>%;2h!?1E}4y>FwZ#(gg6>z}bg*|U@8|*xatNbCKwjZ`0FeB`E9EX+5 zg<$jHXJO-X#>1V5@u!E)&&tS4eTv1>#5UfN{?Vb7<#2OIyK59`05 zhm|9%VddW$8Ts@Vti62-Hb0n|e2nw-IILbj2pfmHVt?vE%abp1|8FXOUwbqX`8NIG zEU0QP=eVdE{OVB>kyVV|!9JcRwX^I-E^J?&5Z z#bVevz+hOv zgSL~;>js-2J0@dSRkEFU#T{6^kQi2OM4iF-Sy#vSSbtv@*1oty{!9P0#plfp+ZD z?_~VuWwdLLF0ns(vn8KW6ZH z^85ASpuF-uJHqA(hVeaIzhA({-z&n(kD0LYISZ`+ywrBSTN2pom&4AFt;kF5wLf6x z&}vxuxE}VqPSSqoyRUuwsUYG{sTK7F2k<>rLgnq7Hk~Oe3<)zZ^Q0a9xM|Nya6khyTaOQm*8N2 z%zkHJ{rS4IoB#Y7R-aXu`S)u$m_M>Ux4rG;TZ_?dydfj3JWFGL_FF`QwfhfaFL?e^ z23UJ>2&{il4AzcpjeX>}_pm>Cia)UL)R$vm?YD{A=lIE!Ve_Z0VEvy(uy%TC*z?PF zc|HEeW9>!s))iPec1Zh@xKGsC+>iLjG}w4lbyz)95!Syv1qbb0&l}7&hx=Qt-n?USjn&szajUk-y^-&JA#_l|In3$qHSLG6_?O?>9-eOrd>>dlsDbVH zwbNmLr)YS*P!7y*;**K`7vDDtcK)xF@i)Jb$(PN7wfDxruKO`?@b}d}U0~}i`_b-q zjscsmtPblR-=+S~^%Vj;|5n4+A4Y|}|6<#TA3R~ajW3OcwMYJj)i2dy;}L!@?eh$< z=OGV+&9h&oKGE}7KD9r3e+wLmzn9S%Ha{~3)?ce_JNDNgSo!iHY`m!{Z2q7cY`!a< z?dbQ?u=O{g)Mt9WWpS@ZzMX`vHz^8R?>!aP4lM@z_r^+F&xyRL0$ZP(#r}jb{XO@W zCW0S~+*7o!%>3MBeKqGD_J*xzXbT%JS_HekYFlqj9QjunIa0=U^wICI_20Q*<@y>} zyCwmwo?8bSPk2ndx9c~+%$NPJb%ypgZnYfNuU`rOf4x2V{Nb?vQ%maY&BuKL>z}$G zKs&c2?D@sZsrPq3Zd_QoHU+Mm|6Szhu=eu+I1~FjBEi-8?PewhGL9hWBd zr`{k3Y~HRWY@FsjuV?+1g8h4Dg<B>y?)`mH&MIo3Q7L9)*MZ(Tr=TZ_VFd zu${Wkm9`UKz3A^#N2ES6|M3hqp4$cXdDHWI%GVd@f9HE9SpQ=>tlYQVdaVht{q_&fn{RLK@y=l*UAdkHWwx0evY`$;4?Tpth*m~Ppu=;4e{n01WVeQNDu;*}0 zgN?UWgpChw=rN9?LY^j97oXvgy-U(dq&8)IPg#1WbOzL(qbyu9aM zu=Yq>*nILj*!Wp}*!+ha*w1A<`O*!r_HJ@m{l5`b566U!uX{h&)zdb-U${N&dbkW* zm%5L3<;gzSIMNB&=UD?AH@Etq(m5yB|C`?0M09VDmd(Z@hS({jsBB!v6jkSUvTI=QZxu8rI&v>igjj16PiGM9{FH6)4I5vxzx8pIVC#Vo+m5}M z2R1)*8dl$@gRS$q2wN|a5ca+A!1^Umn{$88r-{M)D(4=<)~UvUU0yz4ez2|@Ud*t^f%fb6$4=;v|lU(3>{r8nJf^$Et-@hI#wARwNsqW<}dm*V?JUh9fq~r+}|HukIJ9nuzv7)+RcwlZW=b8=e!^F zM?GNW))`p)sx_=0I|=I#RD<>F4#KX3Jh1cK_2)W@2AiLq3j23v&Nborsc-EFtB2OY 
z)I3#$(&_`RspjRITG+YQ#gc-xrgJsf}R?vk+bd@ij0a;hHBN1R>xV7#tBY<}@AtUXl=R!+rbeyBI{z}D*|bv_x# zgk5K;Ve@TU>oOmadl_NBQ+HT@B!lhf**38Db2>P19vST4k4X+Y9(U`&*zYg-ef9As z*z*%^!0O+AaPWD}m)5l%`*$Dh`iuEt<;4zIc@a{Z{?rdHv>o}GMyCF34%`Dn{B149 zmw7wgcIsJ%z}DmThV_F3=LNA&o5AXvD>doQxHj^7?E8bT`61bS+-lp|U)uzBKFox* zCtAq(r=xA>JlA%x^QaGOyr(m)Tx$M2X(a65^T_S>d_UKx z@rJWC!p3WX*Q3v8z?s&+?(hq2eN+qByxecFaxM>SK4~TFx{eaDKXUQ$hx|VA0^@Ji zU7dif519{7OcibFU$Fk=H1Egw{sOCa$HUs^b7A+#`@F_|r`wJ{O|4}qOe zo+o8JXde3$f2#w#A0#cT-&xN7;qt|JX#rR{7=w20%5<>jZ@puExWD%G2mf2o<_!wK z+Wi+?U*tEJywCGsum9_MqW)nVY<%vp?ZnM0!$JAt^Cp7buegGC?a`3x^e5h15?23a zg!O+5z@CQ{4R-xxftAm5sM<@Ilm4Lz90Y2To3CXROJ1%pSHk`M}64&wjEZ#e-4|EJ|JV)&4jft&e@;3kUg;L zCKPtQABX+DH?aCP6!v*zGJeXt_>8~)LOj@bPDYtLKtkC3ZUNYO=0uJ+er5TH?X35@ zu>OMA&m+#(#CH7g*j|r6+YZ)0edY5pZUtfI^N+CmS+l?a`x~}yB^hj9(foz>-#eaH zxiJb>-k*Y9KmB3j-~(jpS3APN@l>8ShJCL`jz9I*_2A(5G9F(}#vd-?_u#t}gUz>Q zf?XelVe=)4VeP3xu=${eRhduBBUyjrBJB4n3LD=z0tf6T*zw#BdmdF8uc!WKEv$a2 z1Y0jT%J8_WY3xu;+!l zzKuH_vz`4q(V1WF|Jw@(=da&m6YTm(O1t^yIk0t#8DRB!Vc7X$zS8)+=RF!fEDEc4 z4_6AC4>fHkzq|}qj<$w<&*iZ1^95|){WxsiV<@a1j?4Y1*b%`w23` z&J*=f;Ji!6GhBXa9~XnIk3LMh_KfV`*VzU8K80c9ftz9d%RI2_W0g$&AU*7RErC5B zApxwtFbh^6z4m$W-(0Wm$GZjx>N{ZP%^}$L8w9(bZaQrJS2wT6-YNzg|5UyPzpwe# zMqZD<5SMo2+qP@pzwmotk2aNw1D}VDqql<9N1|5 zuU!LHJ~oGq|5SvPJ6#=bjemXv8{dcw`@Ltv#xd@d z```Mse(4PCI&ghjmv9)ij^j7j^G|lc#ufkYdhDewaBw|ZFZ(BKp8SCA#39$i&WrzG zzsE+~$&ZIdXg41j8T-pP(kj}GKg5+;cR#|e$CR-8VhF4}DFkcRHiOM)Rfdi8)Pl8V z&9@srs|xEsm_OGJD(m&+2h4x_+##_09U8;hqs3tT_*Ss_xDqn!wJYrT^p#-!xlyoj z2Jh#3_!TzJQ4|j5kFAG!54N6rGwu3^kIV8r<_BTre@@u_5r4t@{kO{SeAu_!?N5Eg zAvru=q8wfUd%n$nuV?-bm8l;(1uO4+!G5<3u=(D0UXMR<)%!8u>%iJ)=Bu^WOTgAc z-S&F^9z!lzzvLEd{2?Rk{IS3KFC}b#;+of^KN7*lvoFEs7vjOe_>pmiF`g_#NKZDJ`u7!=?-mx8fLVrVjcL-K58ea(LfBWMn&xZAS5hyF2wEr9m2` zK}wPC?(XjHkW>Uwx&;L3ltw~2MMAo}<6G+-_+I$)T#K36F>%-2vyXoJx3K3sQozQO z8^Y?NFJSdmS2$pgz<#f146Hmcf5G!Cvu#iQSr+T#e{TMy+;v@m`QzU&DObMKg_RT5 zcfX|r{PFtAn?BAr^0U0Z=Mjg)o-auWD>oLw#^-$>_p|oGAK%w~{%f%L_^;^??W&lp 
zSM#atAN`dyFmZw(W%Xk**Y}5bvgZi~SWbS0*Edcy%l2H?_^x*82KZwS6F!&0am{EhTRXk z;rQZzkA~e>I|l3j{0!@N9D$9`%z<5B$6@oc%qP+Rv%d0lJFK5{5Z2z<4{I0ig!PY3 zSdO3h2kiNi3$XUyP}q6&5OzI%>+|t@zKyViEMEm{$1Q->@5#{*u2=Ivv`16J+LO=W zkNU!Okdgf{<<&0QtM|&o&Z}x(k9^E3uStkkMejpH&}r{fCCN zuPejuH@xP4=5rUfobLfWftB|;VD0Dou=6_}tUfS5#r(uDuzq%6zmE9d1J+$3zMd3T-XDWM zu6Om*AvoYq+yCUx?0~hG3&6$)*TCj)IX<2joeR4jn!&Ea>9G6tePMqebpmW&$~xHm zjb5<&;E46vFY5}s-}q2wf31`C(T6daFC94V(AoOfE6HHT!}{87+Bfb?^|3wnNd?$= z?g&^tR3Fxkn+zL=*bMvr$=Toa{QDd@AV1yjC11hkcRyeNj9)UPDy)3zE8}ncPJbFd zsV*bWX27mT^CLW;I0e@3GT*^?zc2B8 z<}0^>ecxg-@v0@T^6?J(+Wn8y)@MIrBCK8a8rI(a3if+hac5G__fnSUWBibdU&HFv zZZOO5%jU50kn*tOwG>u>djfmzW*+Q*>RVWUd7I_zw@2Xm?$4co)o0#cy?@L4AD++r_ouM?myIcR zKOpUFo)7uh9`?LOPFVl5BkX=_L0I|S0rvc1xlii5-&z6IkFQ~So5EwqYrn%%DI`a`~9H*zw+`JTZH*m-nJW}kaD zY~ILku>Ra~SUp(*Hec)_Y=35jjc>h%^-D9r=39lGLw)?Al(6}d$zaDL73{cWh4n|Q zuf8n?>yP>ST-*Lx-J}_8e4>s_ocm|k_)2ToJkwLwXWy}z?K#JJ3pW1M+;Zf^b6C5$ zmF<~d;pbA0d>sHApG+?kf1M2b{zYKr(gy3ZZ`%-dU-uxaf8GvO9vp?0mtA4wyC-1f zNq_ib|7nMhg3S*+WI6k-lVRn?X4w79*|7T(ZQ)qtf$xRY1Ce3vlV_H*Z#y_gaJ_sp zkM}1YP+dk3*Mt4OXhGPxUUL||_vUqWo}cyB2UfqGhShsRVf%d^?6{1FwfB3&erIkf zY(8~lSUs``HeOxpOYTR$%N|&NI)RKFJqmmO!0-Dq4^F`D_nglby#7hpeX-53``xmB zoWE~pT;iDJ#FJ`TpM9u(u)*$PjmP~C8&3)goA165Hm~eT zRv7&-N#?nh!~R}JcUb?h2kd@PP0R7e9%l)zuLQ9EpT8$>eDI#*f&G;-b5LK8g&n`Y zGclf=XK4XzM-7D4Pvv3rnM=aPuS#0Z{$XlZeNr6O-g=%fXx|otU60q`kLCK+CuQ<1 zzoy*&*a;h-C=GkQeI;!EO*vS9WhQL=tTOERl98}+<+_&BU)^Ez^_#z}VfFAh*!{G|wa!H$1x+cQ4SXU|u3ft9a|VD)2v*#24u zJ6^xS?k8@9T`y~4@3#-uk2z?2+FkfiehhZK+<={TXKjyudjXsGavoMcMqvEhxB18V z$it|x_TVYFNa%7+V#4Z^zhTEeE^NGE2kid6~p8d_cu=?(r z?TNdG<$0Z-YvEYPqZqL3v#<5pPmB$luUS-PU&8)1{`f2%{e!-V0=vIES|*+tT}Hm9 zg^eethLz{{zF@x4PbFaetNpOos|c&l*1-CAwPDZORfPTh)QOgp_jW%m*JJ-?By9fC zSXle6E3Dm+1`gz%!TJ~L(r`cGQdMQ1cdpF$ePruRggu{C4OY(#hwbMYu=ZLnSbbO* zc0HH2KI^g@?0pNs=G*jzjicp;{k_Z~wkIB)(fZgAKfvbEriJy_=EBCeGuWQ*qpYw# z`wux_^Rm~$=C|#J&999$A11%~A*{U}0_)#r<9)RU(!j=1%EIoGWQ3hp)nWIevccwO z)q|a1xnR#1HHM9Ye`S00MJxEhkLPogf;~Uh5qAHeCT#vkSJ-`u7N4-+t>^+9zim&s 
z`gx+|=qa!7`Q;_B_HGr}c;Yr#zb3oowa`f6sDg8m#@?8TS73V8^oqY&>!W{PF#jQyV`i*ACeVYaflHzWa8MVEv&5UXT5S z{0q50_Rk{Ne7kb+$8z;*b(qI(Hrx8_|1^O;uk@4GV}G|JZ2WjUtX$~@8$TWa>wgTe zJ@L7=)<^!1mGw8^zDFf{H2ExJqeoDblSb10y)=#&+@vs`O^7j?&`P>Gu`uDN(gZ0$H za>nTztXl3nFp)ybHeJA#jyRF zUS|L0cUb@VbJ%fN5364i!p0}I!{#x@h4sJxwm$aXqtr0xn_j@iS+2sqXV}H8PyE$` zuzEihY`lJq^~sk@2P-Bk9fA|MjeR~u({xci) zyy#xb@e}65#)&q-#!;5Q=6x)HU1!T+_YWq^*e9}n<{;QO^MJ^O(=S1J=*jY