# ---- Setup ------------------------------------------------------------------
# NOTE(review): maptools, rgdal and rgeos were retired from CRAN (2023);
# consider migrating this script to sf/terra.  They are kept here to
# preserve current behaviour.
library(dismo)
library(maptools)
library(rgdal)
library(rgeos)
library(spThin)

data(wrld_simpl)

setwd("C:/Users/jezkovt/Dropbox/Mammals_glaciation")

# Ice-sheet polygon (18 kyr BP); assign it the same CRS as wrld_simpl so
# the over() test below is valid.
SHEET <- readShapePoly("Sheet/ice018000_main.shp")
crs(SHEET) <- crs(wrld_simpl)

# Each row of Pairs.csv holds the two occurrence-file names of one species
# pair.  Make sure you are not missing the csv extension in the species names.
PAIRS <- read.csv("C:/Users/jezkovt/Dropbox/Mammals_glaciation/Pairs.csv",
                  header = FALSE)
PAIRS <- as.matrix(PAIRS)

# Hoisted out of the loop: the climate stack is identical for every pair,
# so build it once instead of re-stacking it on every iteration.
VARIABLES <- list.files(path = "C:/Users/jezkovt/Dropbox/wordclim/bio_30s",
                        pattern = "bil", full.names = TRUE)
CLIMATE <- stack(VARIABLES)

# One iteration per species pair (was a hard-coded 1:71, which silently
# desynchronises if Pairs.csv changes length).
for (i in seq_len(nrow(PAIRS))) {
  # Occurrence files live under ./spThin/.  Read them via a relative path
  # instead of the setwd("./spThin/") / setwd("../") dance, which leaves the
  # working directory wrong if a read fails mid-way.
  SPECIES1 <- read.csv(file.path("spThin", PAIRS[i, 1]))
  SPECIES2 <- read.csv(file.path("spThin", PAIRS[i, 2]))
  NAME1 <- as.vector(SPECIES1$specificepithet[1])
  NAME2 <- as.vector(SPECIES2$specificepithet[1])

  # Long/lat of every record; promote each to a Spatial object.
  # (The original subset(SPECIES1) with no condition was a no-op.)
  LONGLAT1 <- SPECIES1[, c("decimallongitude", "decimallatitude")]
  LONGLAT2 <- SPECIES2[, c("decimallongitude", "decimallatitude")]
  coordinates(LONGLAT1) <- ~ decimallongitude + decimallatitude
  coordinates(LONGLAT2) <- ~ decimallongitude + decimallatitude

  # Visual check: ice sheet + both species' records + world outline.
  plot(SHEET, border = "blue", lwd = 2)
  points(LONGLAT1, cex = 1, col = "red")
  points(LONGLAT2, cex = 1, col = "blue")
  plot(wrld_simpl, add = TRUE)
  box()

  POINTS1 <- SpatialPoints(LONGLAT1)
  crs(POINTS1) <- crs(SHEET)
  POINTS2 <- SpatialPoints(LONGLAT2)
  crs(POINTS2) <- crs(SHEET)

  # TRUE where a record falls inside the ice-sheet polygon, i.e. in range
  # that was glaciated ("expanded" post-glacially); FALSE = persistent range.
  OVERLAP1 <- !is.na(over(POINTS1, as(SHEET, "SpatialPolygons")))
  OVERLAP2 <- !is.na(over(POINTS2, as(SHEET, "SpatialPolygons")))
  PERSISTENT1 <- POINTS1[!OVERLAP1, ]
  EXPANDED1 <- POINTS1[OVERLAP1, ]
  PERSISTENT2 <- POINTS2[!OVERLAP2, ]
  EXPANDED2 <- POINTS2[OVERLAP2, ]

  EXPANDED_COOR1 <- EXPANDED1@coords
  EXPANDED_COOR2 <- EXPANDED2@coords
  DIM_E1 <- nrow(EXPANDED_COOR1)
  DIM_E2 <- nrow(EXPANDED_COOR2)
  DIM_P1 <- nrow(PERSISTENT1@coords)
  DIM_P2 <- nrow(PERSISTENT2@coords)

  # Overlay the classified points on the current plot.
  points(PERSISTENT1, cex = 1, col = "green")
  points(EXPANDED_COOR1, cex = 1, col = "red")
  points(PERSISTENT2, cex = 1, col = "blue")
  points(EXPANDED_COOR2, cex = 1, col = "grey")
# ---- Niche quantification (continuation of the per-pair loop body) ----------
  # Climate values at every occurrence point; drop records with any missing
  # layer.  drop = FALSE keeps a one-row result as a matrix (the original
  # collapsed it to a vector, which broke the downstream rbind/nrow logic).
  PERSISTENT_NICHE1 <- extract(CLIMATE, PERSISTENT1)
  PERSISTENT_NICHE2 <- extract(CLIMATE, PERSISTENT2)
  EXPANDED_NICHE1 <- extract(CLIMATE, EXPANDED1)
  EXPANDED_NICHE2 <- extract(CLIMATE, EXPANDED2)
  PERSISTENT_NICHE_SP1 <-
    PERSISTENT_NICHE1[complete.cases(PERSISTENT_NICHE1), , drop = FALSE]
  PERSISTENT_NICHE_SP2 <-
    PERSISTENT_NICHE2[complete.cases(PERSISTENT_NICHE2), , drop = FALSE]
  EXPANDED_NICHE_SP1 <-
    EXPANDED_NICHE1[complete.cases(EXPANDED_NICHE1), , drop = FALSE]
  EXPANDED_NICHE_SP2 <-
    EXPANDED_NICHE2[complete.cases(EXPANDED_NICHE2), , drop = FALSE]

  # Group labels aligned with the row order of ALL_CLIMATE below.  This
  # replaces the transform()/rbind() construction plus the hard-coded
  # ALL_ID[, 20], which silently picked the wrong column whenever the
  # climate stack did not have exactly 19 layers.
  ALL_ID <- c(rep("E1", nrow(EXPANDED_NICHE_SP1)),
              rep("E2", nrow(EXPANDED_NICHE_SP2)),
              rep("P1", nrow(PERSISTENT_NICHE_SP1)),
              rep("P2", nrow(PERSISTENT_NICHE_SP2)))
  ALL_CLIMATE <- rbind(EXPANDED_NICHE_SP1, EXPANDED_NICHE_SP2,
                       PERSISTENT_NICHE_SP1, PERSISTENT_NICHE_SP2)

  # PCA on the pooled climate data; keep PC1-PC3 for each group.  Index
  # PCA$x directly rather than cbind()-ing the character labels onto the
  # score matrix: under R >= 4 that cbind coerces the whole matrix to
  # character and hypervolume_gaussian() then fails on non-numeric input.
  PCA <- prcomp(ALL_CLIMATE, center = TRUE, scale. = TRUE)
  SCORES1 <- PCA$x[ALL_ID == "E1", 1:3]
  SCORES2 <- PCA$x[ALL_ID == "E2", 1:3]
  SCORES3 <- PCA$x[ALL_ID == "P1", 1:3]
  SCORES4 <- PCA$x[ALL_ID == "P2", 1:3]

  # Compute Gaussian KDE hypervolumes (95% probability quantile) with
  # auto-bandwidth for all four point classes.
  library(hypervolume)
  # Helper: one hypervolume with sample density scaled to data size; the
  # four calls were previously copy-pasted with only the data/name varying.
  make_hv <- function(scores, nm) {
    hypervolume_gaussian(
      scores,
      samples.per.point =
        ceiling((10^(3 + sqrt(ncol(scores)))) / nrow(scores)),
      kde.bandwidth = estimate_bandwidth(scores),
      sd.count = 3,
      quantile.requested = 0.95,
      quantile.requested.type = "probability",
      chunk.size = 1000,
      verbose = TRUE,
      name = nm
    )
  }
  hv1 <- make_hv(SCORES1, "e1")
  hv2 <- make_hv(SCORES2, "e2")
  hv3 <- make_hv(SCORES3, "s1")
  hv4 <- make_hv(SCORES4, "s2")

  # Determine intersection and unique components of each pairwise overlap.
  hv_set12 <- hypervolume_set(hv1, hv2, check.memory = FALSE)
  hv_set13 <- hypervolume_set(hv1, hv3, check.memory = FALSE)
  hv_set34 <- hypervolume_set(hv3, hv4, check.memory = FALSE)
  hv_set24 <- hypervolume_set(hv2, hv4, check.memory = FALSE)
  hv_set14 <- hypervolume_set(hv1, hv4, check.memory = FALSE)
  hv_set23 <- hypervolume_set(hv2, hv3, check.memory = FALSE)

  # Centroid distances between expanded and persistent niches.
  # check.memory = FALSE for consistency with the set operations above; the
  # original passed TRUE (for type = "centroid" the flag is not expected to
  # alter the result — NOTE(review): confirm against the package version).
  hv_dis13 <- hypervolume_distance(hv1, hv3, type = "centroid",
                                   num.points.max = 1000,
                                   check.memory = FALSE)
  hv_dis14 <- hypervolume_distance(hv1, hv4, type = "centroid",
                                   num.points.max = 1000,
                                   check.memory = FALSE)
  hv_dis23 <- hypervolume_distance(hv2, hv3, type = "centroid",
                                   num.points.max = 1000,
                                   check.memory = FALSE)
  hv_dis24 <- hypervolume_distance(hv2, hv4, type = "centroid",
                                   num.points.max = 1000,
                                   check.memory = FALSE)

  # Put all the output volumes in one convenient place.
  volumes12 <- get_volume(hv_set12)
  volumes13 <- get_volume(hv_set13)
  volumes34 <- get_volume(hv_set34)
  volumes24 <- get_volume(hv_set24)
  volumes14 <- get_volume(hv_set14)
  volumes23 <- get_volume(hv_set23)

  # One output row per species pair: names, volumes, centroid distances.
  INFO <- cbind(NAME1, NAME2, t(volumes12), t(volumes34), t(volumes13),
                t(volumes24), t(volumes14), t(volumes23),
                hv_dis13, hv_dis14, hv_dis23, hv_dis24)
  # Explicit print(): a bare symbol inside a for loop is a no-op (no
  # auto-printing), so the original line printed nothing.
  print(INFO)
  # Append this pair's row to the running results file.
  write.table(INFO, file = "hypevolumes4.csv", append = TRUE, sep = ",",
              row.names = FALSE, col.names = FALSE)
}