############################## Estimate a detection probability and a mean abundance for each locality
# Code to estimate the abundance of fifteen species along the Magdalena Valley

library(rjags)
library(dclone)
library(abind)

# JAGS model
MRV.model <- function(){

  # The sampling process
  # Loop over species
  for (i in 1:nspp){
    # Loop over sites (i.e. localities along the gradient)
    for (j in 1:nsites){

      p[i,j]      ~ dbeta(p.bar[j]*tau, (1-p.bar[j])*tau)
      loglam[i,j] ~ dnorm(mu.h[j], tau.h)
      lambda[i,j] <- exp(loglam[i,j])
      w[i,j]      ~ dbern(psi[j])
      lambda.occ[i,j] <- lambda[i,j]*w[i,j]

      # Sample the site*species unobserved abundance at each point count
      for(s in start.ind[j]:end.ind[j]){

        N[i,s] ~ dpois(lambda.occ[i,j])

        for(t in 1:nvisits){
          # Observed counts are binomial draws from the site*species unobserved
          # abundance, thinned by the site*species detection probability
          counts[s,t,i] ~ dbin(p[i,j], N[i,s])
        }# end visits loop

      }# end point counts loop

    }# end sites loop
  }# end spp loop
}

# Load the counts data and the data-cloning fit of the hyperparameters
load('~/Documents/PhD Thesis/Chapter2/Turnover2/RData/Abundance_RawData.RData')
load('~/OneDrive - University of Florida/Abundance/RData/Gomez/mod4/Abundance_Est_DC_20K.RData')

# Obtain the MLEs of the hyperparameters and their Wald confidence limits;
# the data-cloning standard errors are rescaled by sqrt(K), with K = 20 clones
hats     <- extractdctable(out20)[,1]
upper.ci <- hats + 1.96*extractdctable(out20)[,2]*sqrt(20)
lower.ci <- hats - 1.96*extractdctable(out20)[,2]*sqrt(20)

######### Multispecies estimation
# The data-cloning step (run elsewhere, with the number of clones set to 20)
# obtained the MLEs of the hyperparameters defining the distributions of
# abundance and detection probability in each site.

######### Multispecies sampling from the posterior to get the Kalman estimates
# of N and Lambda, conditioning on the hyperparameter MLEs.

# Drop the Boqueron and La Perla localities; keep all remaining point counts
to.drop      <- which(locality=="Boqueron" | locality=="La Perla")
counts2      <- counts[-to.drop,,]
locality2    <- locality[-to.drop]
unique.locs2 <- unique(locality2)
nspp         <- dim(counts2)[3]

# Initial values for abundance: the maximum count per species and point count
max.count <- apply(counts2, c(3,1), max, na.rm=TRUE)

# Starting and ending indices for looping over the point counts in each locality
start.ind <- end.ind <- numeric(length(unique.locs2))
for(i in 1:length(unique.locs2)){
  ith.loc      <- unique.locs2[i]
  loc.pos      <- which(locality2==ith.loc)
  start.ind[i] <- min(loc.pos)
  end.ind[i]   <- max(loc.pos)
}

# Initial values for occupancy: 0 if a species was never counted in a locality
w.init <- matrix(1, nrow=nspp, ncol=length(unique.locs2))
for(i in 1:nspp){
  ith.max <- max.count[i,]
  for(j in 1:length(unique.locs2)){
    jth.loc     <- unique.locs2[j]
    jth.loc.occ <- sum(ith.max[locality2==jth.loc], na.rm=TRUE)
    if(jth.loc.occ==0){w.init[i,j] <- 0}
  }
}

# Hyperparameters are passed as data at their MLEs; note that dnorm() in the
# model is parameterized by a precision (tau.h)
data_list <- list(counts=counts2, nspp=dim(counts2)[3], nsites=length(unique.locs2)
                  ,nvisits=dim(counts2)[2], start.ind=start.ind, end.ind=end.ind
                  ,p.bar=hats[grep("p.bar",names(hats))], tau=hats["tau"]
                  ,mu.h=hats[grep("mu.h",names(hats))], tau.h=1/hats["sigma.h"]
                  ,psi=hats[grep("psi",names(hats))])

params.sample <- c("lambda.occ","N","w")

cl <- makePSOCKcluster(3)
clusterEvalQ(cl, library(dclone))
out.sample <- jags.parfit(cl, data_list, params=params.sample, model=MRV.model
                          ,n.chains=3, n.adapt=3000, n.update=10000, n.iter=50000
                          ,thin=100, inits=list(N=max.count, w=w.init))
stopCluster(cl)

# Organizing the samples into species-by-locality and species-by-point matrices
lambda.pos <- grep("lambda", colnames(out.sample[[1]]))
N.pos      <- grep("N", colnames(out.sample[[1]]))

lam.mat <- matrix(apply(out.sample[[1]][,lambda.pos], 2, median)
                  ,ncol=length(unique.locs2), nrow=dim(counts2)[3]
                  ,dimnames=list(dimnames(counts2)[[3]], unique.locs2))

N.mat <- matrix(apply(out.sample[[1]][,N.pos], 2, mean)
                ,ncol=dim(counts2)[1], nrow=dim(counts2)[3]
                ,dimnames=dimnames(counts2)[c(3,1)])
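# --- Optional check (a sketch added here, not part of the original script) ---
# The summaries above use only the first chain (out.sample[[1]]). jags.parfit()
# returns an mcmc.list, so the three chains can also be checked for convergence
# and pooled before summarizing. gelman.diag() and as.matrix() are from coda
# (loaded with rjags); the names rhat.lambda, pooled and lam.mat.pooled are
# illustrative only.
rhat.lambda <- gelman.diag(out.sample, multivariate=FALSE)$psrf[lambda.pos,]
# Nodes that are constant across iterations (e.g. a species fixed at zero
# abundance in a locality) can return NaN and may be ignored.

# Pool the three chains and recompute the species-by-locality medians
pooled <- as.matrix(out.sample)
lam.mat.pooled <- matrix(apply(pooled[,lambda.pos],2,median)
                         ,ncol=length(unique.locs2),nrow=dim(counts2)[3]
                         ,dimnames=list(dimnames(counts2)[[3]],unique.locs2))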
# Save call that appears to belong to the data-cloning estimation step: the
# objects "out40", "tot.time40K" and "finish.time40K" are produced by that run,
# not by this script (note the "_20K" file name alongside the 40K object names)
save(list=c("out40","tot.time40K","finish.time40K"),file="C:/Users/jugomez/OneDrive - University of Florida/Abundance/RData/Abundance_Est_DC_20K.RData")
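# --- Suggested save of this script's own output (a sketch; the file name
# "Abundance_Kalman_Estimates.RData" is hypothetical, not from the original) ---
# The posterior samples and the species-by-locality / species-by-point summary
# matrices produced above are not written to disk anywhere else in this script.
save(list=c("out.sample","lam.mat","N.mat")
     ,file="~/OneDrive - University of Florida/Abundance/RData/Gomez/mod4/Abundance_Kalman_Estimates.RData")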