| column | type | stats |
|---|---|---|
| content | large_string | lengths 0 to 6.46M |
| path | large_string | lengths 3 to 331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5 to 125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4 to 6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0 to 6.46M |
# bergmS: Bayesian model selection for exponential random graph models (ERGMs),
# moving between the competing models listed in 'formulae' with exchange-type updates.
bergmS <- function(formulae,
iters=10000,
m.priors=NULL,
sigma.priors=NULL,
gammas=NULL,
nchains=NULL,
sigma.epsilons=NULL,
aux.iters=1000,
main.iters=NULL,
burn.ins=NULL,
...){
ptm = proc.time()
nmodels <- length(formulae)
control <- control.simulate.formula(MCMC.burnin = aux.iters,
MCMC.interval = 0)
control$MCMC.samplesize <- 1
dims <- rep(0, nmodels)
y <- ergm.getnetwork(formulae[[1]])
if (is.null(main.iters))
main.iters <- rep(1000, nmodels)
if (is.null(burn.ins))
burn.ins <- rep(100, nmodels)
if (is.null(gammas))
gammas <- rep(0.5, nmodels)
models <- vector("list", nmodels)
specs <- vector("list", nmodels)
theta <- vector("list", nmodels)
Sigma <- vector("list", nmodels)
Mu <- vector("list", nmodels)
post <- vector("list", nmodels)
stats <- vector("list", nmodels)
for (i in 1L:nmodels) {
post[[i]] <- bergm(formula = formulae[[i]], burn.in = burn.ins[i],
main.iters = main.iters[i], aux.iters = aux.iters,
m.prior = m.priors[[i]], sigma.prior = sigma.priors[[i]],
gamma = gammas[i], nchains = nchains[i], sigma.epsilon = sigma.epsilons[[i]])
if (post[[i]]$nchains > 1) {
Sigma[[i]] <- cov(apply(post[[i]]$Theta, 2, cbind))
diag(Sigma[[i]]) = 8/7 * diag(Sigma[[i]])
}
else {
Sigma[[i]] <- var(post[[i]]$Theta)
diag(Sigma[[i]]) = Sigma[[i]]
}
Mu[[i]] <- apply(post[[i]]$Theta, 2, mean)
theta[[i]] <- runif(post[[i]]$dim, min = -0.1, max = 0.1)
dims[i] <- post[[i]]$dim
specs[[i]] <- post[[i]]$specs
#bergm.output(post[[i]])
}
# Main sampling loop: between-model and within-model exchange moves
Theta <- matrix(0L, iters, max(dims)) # posterior draws, padded to the width of the largest model
M <- rep.int(0, iters) # model indicator at each iteration
m <- sample(1:nmodels, 1) # random starting model
Wrate <- matrix(0L, 2, nmodels) # within-model acceptances / proposals
Brate <- matrix(0L, 2, 1) # between-model acceptances / proposals
for (i in 1L:iters) {
m1 <- sample(1:nmodels, 1)
if (m1 != m) {
Brate[2, ] <- Brate[2, ] + 1
}
else {
Wrate[2, m] <- Wrate[2, m] + 1
}
theta1 <- rmvnorm(1, mean = Mu[[m1]], sigma = Sigma[[m1]])[1,]
ww1 <- dmvnorm(theta1, mean = Mu[[m1]],
sigma = Sigma[[m1]],log=TRUE)
pr1 <- dmvnorm(theta1, mean = post[[m1]]$m.prior,
sigma = post[[m1]]$sigma.prior,log=TRUE)
ww <- dmvnorm(theta[[m]], mean = Mu[[m]],
sigma = Sigma[[m]],log=TRUE)
pr <- dmvnorm(theta[[m]], mean = post[[m]]$m.prior,
sigma = post[[m]]$sigma.prior,log=TRUE)
z <- ergm.mcmcslave(Clist = post[[m1]]$Clist, MHproposal = post[[m1]]$MHproposal,
eta0 = theta1, control = post[[m1]]$control, verbose = FALSE)
y1 <- newnw.extract(y, z)
smy1 <- ergm.getglobalstats(y1, post[[m]]$model)
delta <- smy1 - post[[m]]$stats
# log acceptance ratio of the exchange move: auxiliary-network statistic terms
# plus prior and proposal log-density corrections
beta <- (theta[[m]] %*% delta) +
(theta1 %*% as.numeric(-z$s)) +
pr1 - pr + ww - ww1
if (beta >= log(runif(1))) {
theta[[m1]] <- theta1
if (m1 != m) {
Brate[1, ] <- Brate[1, ] + 1
m <- m1
}
else {
Wrate[1, m] <- Wrate[1, m] + 1
}
}
Theta[i, 1:dims[m]] <- theta[[m]]
M[i] <- m
}
time = proc.time() - ptm
out = list(M = M, iters = iters, Theta = Theta, post = post, formulae = formulae,
models = models, specs = specs, dims = dims, nmodels = nmodels,
Baccept = Brate[1, ]/Brate[2, ], Waccept = Wrate[1, ]/Wrate[2,
], time = time)
out
}
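# --- Hypothetical usage sketch (added for illustration; not part of the original file).
# Assumes the Bergm and ergm packages are loaded and uses the 'flomarriage' network
# from ergm's florentine data; adjust formulas and iteration counts to taste.
# library(Bergm); library(ergm)
# data(florentine)
# fits <- bergmS(formulae = list(flomarriage ~ edges,
#                                flomarriage ~ edges + triangle),
#                iters = 5000, aux.iters = 500)
# fits$Baccept   # between-model acceptance rate
# fits$Waccept   # within-model acceptance rates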
path: /Bergm/R/bergmS.R | license_type: no_license | repo_name: ingted/R-Examples | language: R | is_vendor: false | is_generated: false | length_bytes: 4,222 | extension: r
#Section 2.1
# 1
# this gives histogram of probability of the d-neighbourhood being a tree for different c and n, fixing d
library(igraph)
check.tree <- function(n, c, d){
p = c / n
vector = NULL
for(i in 1:100){
con = NULL
for(j in 1:20){
g = sample_gnp(n,p)
v = sample(1:n,1)
sub = induced_subgraph(g, which(distances(g, v, as.vector(V(g))) <= d))
con = c(con,as.numeric(length(V(sub)) == length(E(sub)) + 1))
}
vector = c(vector,mean(con))
}
return(vector)
}
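# Usage note (added): check.tree(n, c, d) returns 100 estimated tree probabilities,
# each the proportion of 20 simulated G(n, c/n) graphs whose d-neighbourhood of a
# random vertex is a tree (checked via |V| == |E| + 1), e.g.
# p_hat <- check.tree(3000, 1.5, 4)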
main <- function(d = 4){
nseq = c(1000,3000,6000,10000,30000,60000)
cseq = c(0.5,1,1.5,2)
for (c in cseq){
quartz(width = 10, height = 7)
layout(matrix(1:6, nrow = 2, byrow = T))
for (n in nseq){
vector = check.tree(n, c, d)
print(range(vector))
# one-sided t-statistic for H0: the mean tree probability equals 1 (first 20 estimates)
x = vector[1:20]
t = (mean(x)-1)/sqrt(var(x)/length(x))
p = pt(t,length(x)-1,lower.tail=T)
print(p)
l = sort(vector)[3]
u = sort(vector)[97]
hist(vector, breaks=seq(0,1,by=.05), prob=TRUE, xlab="p_tree", col=rgb(0,0,1,1/4), main=paste("For n = ",n))
legend("topleft", c(paste("CI = (",l,",",u,")"),paste("s.e. = ",se(vector))))
}
}
}
main()
#Section 2.2
# 2
# this gives the expected no. of cycles in d-neighbourhood
library(igraph)
myplot <- function(matrix){
r = length(matrix[,1])
c = length(matrix[1,])
x = row.names(matrix)
plot(NULL, xlim = c(0,10000), ylim = c(min(matrix),max(matrix)), xlab = "n", ylab = "count")
for (i in 1:c){
lines(x, matrix[,i], col = i, lty = 1, lwd = 2)
}
}
find_cycle <- function(n, c, d){
p = c / n
g = sample_gnp(n,p)
v = sample(1:n,1)
sub = induced_subgraph(g, which(distances(g, v, as.vector(V(g))) <= d))
Cycles = NULL
for(v1 in V(sub)){
for(v2 in neighbors(sub, v1, mode="out")){
Cycles = c(Cycles, lapply(all_simple_paths(sub, v2,v1, mode="out"), function(p) c(v1,p)))
}
}
# each k-cycle is enumerated 2k times (k starting vertices x 2 directions),
# so counts for cycle lengths 3, 4 and 5 are divided by 6, 8 and 10 respectively
u = length(Cycles[which(sapply(Cycles, length) == 4)])/6
v = length(Cycles[which(sapply(Cycles, length) == 5)])/8
w = length(Cycles[which(sapply(Cycles, length) == 6)])/10
return(sum(u,v,w))
}
main <- function(d=4){
nseq = c(100,300,600,1000,3000,6000)
cseq = c(0.5,1,1.5,2)
tmatrix = data.frame()
for (n in nseq){
tvector = NULL
for (c in cseq){
tval = 0
for(rept in 1:500){
v = find_cycle(n, c, d)
tval = tval + v
}
tvector = c(tvector, tval/500)
}
tmatrix = rbind(tmatrix, tvector)
}
rownames(tmatrix) = nseq
colnames(tmatrix) = cseq
dev.new()
myplot(tmatrix)
}
main()
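# Reference point (added as a rough sanity check, not part of the original script):
# in the full graph G(n, c/n) the expected number of k-cycles converges to c^k / (2k),
# so a whole-graph benchmark for cycle lengths 3-5 is
# sum(sapply(3:5, function(k) c^k / (2 * k)))
# the d-neighbourhood averages computed above should be smaller, since they only see
# the part of the graph within distance d of one vertex.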
# Section 2.3
# 3
# this gives histogram of probability of the d-neighbourhood being a tree for different d and n, fixing c
library(igraph)
check.tree <- function(n, c, d){
p = c / n
vector = NULL
for(i in 1:100){
con = NULL
for(j in 1:20){
g = sample_gnp(n,p)
v = sample(1:n,1)
sub = induced_subgraph(g, which(distances(g, v, as.vector(V(g))) <= d))
con = c(con,as.numeric(length(V(sub)) == length(E(sub)) + 1))
}
vector = c(vector,mean(con))
}
return(vector)
}
main <- function(){
nseq = c(2000,5000,10000,50000,100000,200000)
dseq = seq(3,6,by=1)
c = 3
for (d in dseq){
quartz(width = 10, height = 7)
layout(matrix(1:6, nrow = 2, byrow = T))
for (n in nseq){
vector = check.tree(n, c, d)
print(range(vector))
print(mean(vector))
l = sort(vector)[3]
u = sort(vector)[97]
hist(vector, breaks=seq(-0.05,1.05,by=0.1), prob=TRUE, xlab="p_tree", col=rgb(0,0,1,1/4), main=paste("For n = ",n))
legend("topleft", c(paste("CI = (",l,",",u,")"),paste("mean = ",mean(vector))))
legend("topright", c(paste("CI = (",l,",",u,")"),paste("mean = ",mean(vector))))
}
}
}
main()
#Section 2.4
# 4
# this gives histogram of degrees of randomly chosen vertices for different c and n fixing d
library(igraph)
deg <-function(n, c, d){
vector = NULL
p = c / n
for (i in 1:1000){
g = sample_gnp(n,p)
v = sample(1:n,1)
dg = degree(g, v)
vector = c(vector, dg)
}
return(vector)
}
main <- function(d = 4){
nseq = c(100,500,1000,10000)
cseq = c(0.5,1,1.5,2,3,4)
for (c in cseq){
quartz(width = 10, height = 7)
layout(matrix(1:6, nrow = 2, byrow = T))
for (n in nseq){
vector = deg(n,c,d)
poi = round(dpois(c(1:10),c)*1000)
poi = c(1000 - sum(poi), poi)
poi = rep(c(0:10),poi)
chi = 0
for(i in 1:10){
chi = chi + (length(which(vector == i))/1000 - dpois(i,c))^2/dpois(i,c)
}
p1 = round(pchisq(chi,9, lower.tail=F),2)
x = NULL
y = NULL
for(i in 0:10){
x = c(x, length(which(vector <= i))/1000)
y = c(y, length(which(poi <= i))/1000)  # empirical CDF of the Poisson reference sample
}
p2 = round(ks.test(x,y)$p.value,2)
p3 = round(t.test(vector,poi, alternative = "two.sided", var.equal = FALSE)$p.value,2)
hist(vector, breaks=seq(-.5,15.5,by=1), ylim=c(0,0.1+max(dpois(1:10,c))), prob=T, xlab="degree", ylab="probability", col=rgb(0,0,1,1/4), main=paste("For n = ",n)) ## range of x (-.5,15.5) for c = 4 and (-.5,12.5) for c = 3
hist(poi, breaks=seq(-.5,15.5,by=1), ylim=c(0,max(0.1+dpois(1:10,c))), prob=T, xlab="degree", ylab="probability", col=rgb(1,0,0,1/4), add=T)
legend("topright", c("p-value",paste("chisq.test = ",p1),paste("ks.test = ",p2),paste("t.test = ",p3)))
}
}
}
main()
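# Side check (added sketch, not in the original): the exact degree distribution in
# G(n, c/n) is Binomial(n - 1, c/n), which the Poisson(c) reference above approximates
# for large n; the largest pointwise gap can be inspected with, e.g.,
# n <- 1000; c <- 2
# max(abs(dbinom(0:15, n - 1, c / n) - dpois(0:15, c)))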
#Section 2.5
# 5
# this gives histogram of degrees of randomly chosen vertices at t-depth in d-neighbourhood for different c and n fixing d
library(igraph)
nbdeg <- function(n, c, d, t){
p = c / n
vector = NULL
for (i in 1:1000){
g = sample_gnp(n,p)
v = sample(1:n,1)
ngb = which(distances(g,v,V(g)) == t)
if(length(ngb) == 1){
vector = c(vector, degree(g,ngb))
}
if(length(ngb) > 1){
dg = degree(g, sample(ngb,1))
vector = c(vector, dg)
}
}
return(vector)
}
main <- function(d = 5){
nseq = c(100,500,1000,10000)
cseq = c(0.5,1,1.5,2,3,4)
t = 1 #fix t here
for (c in cseq){
quartz(width = 10, height = 7)
layout(matrix(1:4, nrow = 2, byrow = T))
for (n in nseq){
vector = nbdeg(n, c, d, t)
poi = round(dpois(c(1:10),c)*1000)
poi = c(1000 - sum(poi), poi)
poi = rep(c(0:10),poi)
# shift by 1: a vertex reached along an edge has degree ~ 1 + Poisson(c) in the sparse limit
poi = poi + 1
chi = 0
for(i in 1:10){
chi = chi + (length(which(vector == i))/1000 - dpois(i,c))^2/dpois(i,c)
}
p1 = round(pchisq(chi,9, lower.tail=F),2)
x = NULL
y = NULL
for(i in 0:10){
x = c(x, length(which(vector <= i))/1000)
y = c(y, length(which(poi <= i))/1000)  # empirical CDF of the Poisson reference sample
}
p2 = round(ks.test(x,y)$p.value,2)
p3 = round(t.test(vector,poi, alternative = "two.sided", var.equal = FALSE)$p.value,2)
hist(vector, breaks=seq(-.5,15.5,by=1), ylim=c(0,0.1+max(dpois(1:10,c))), prob=T, xlab="degree", col=rgb(0,0,1,1/4), main=paste("For n = ",n)) ## range of x (-.5,15.5) for c = 4 and (-.5,12.5) for c = 3
hist(poi, breaks=seq(-.5,15.5,by=1), ylim=c(0,max(0.1+dpois(1:10,c))), prob=T, xlab="degree", ylab="probability", col=rgb(1,0,0,1/4), add=T)
legend("topright", c("p-value",paste("chisq.test = ",p1),paste("ks.test = ",p2),paste("t.test = ",p3)))
}
}
}
main()
#Section 2.6
# 6
# this gives different tests for independence of the degrees of two randomly selected vertices in the d-neighbourhood
library(igraph)
ngbhdeg <-function(n, c, d){
vec1 = NULL
vec2 = NULL
p = c / n
i = 0
while(i < 100){
g = sample_gnp(n,p)
v = sample(1:n,1)
dis = distances(g,v,V(g))
if(length(which(dis <= d)) >= 5){
vertex = sample(intersect(which(dis>0),which(dis<d)), 2, replace = FALSE)
vec1 = c(vec1, degree(g,vertex[1]))
vec2 = c(vec2, degree(g,vertex[2]))
i = i + 1
}
}
return(list(vec1, vec2))
}
test.kendall <- function(x,y){
n = length(x)
con = 0
for(i in 1:n){
for(j in 1:n){
if(isTRUE((x[i]-x[j])*(y[i]-y[j]) > 0)){
con = con + 1
}
}
}
r = con/(2*choose(n,2))
return(r)
}
test.spearman <- function(x,y){
n = length(x)
d = rank(x)-rank(y)
s = sum(d*d)
r = 1-6*s/(n*(n^2-1))
return(r)
}
test.chisq <- function(x,y){
k = 5
table = matrix(0,k,k)
n = length(x)
for(i in 1:n){
if(x[i] < k && y[i] < k && x[i]*y[i] > 0){
table[x[i],y[i]] = table[x[i],y[i]] + 1
}
}
for(i in 1:(k-1)){
table[k,i] = sum(table[,i])
table[i,k] = sum(table[i,])
}
table[k,k] = sum(table[k,])
n = table[k,k]
chi = 0
for(i in 1:(k-1)){
for(j in 1:(k-1)){
e = table[i,k]*table[k,j]/n
chi = chi + (table[i,j] - e)^2/e
}
}
p = round(pchisq(chi,(k-2)^2,lower.tail=FALSE),2)
return(p)
}
main <- function(d = 4){
nseq = c(seq(1000,9000,by=4000),seq(30000,90000,by=20000))
cseq = c(1,1.5,2,2.5)
chi = NULL
quartz(width = 10, height = 7)
layout(matrix(1:4, nrow = 2, byrow = T))
for (c in cseq){
r.k = NULL
r.s = NULL
p.chi = NULL
for (n in nseq){
out = ngbhdeg(n, c, d)
vec1 = unlist(out[1])
vec2 = unlist(out[2])
r.k = c(r.k,test.kendall(vec1,vec2))
r.s = c(r.s,test.spearman(vec1,vec2))
p.chi = c(p.chi,test.chisq(vec1,vec2))
}
chi = rbind(chi,p.chi)
print(chi)
plot(nseq, abs(r.k), ylim = c(0,1), xlab="n", ylab="correlation", col=2, pch=20, main=paste("For c = ",c))
points(nseq, abs(r.s), col=3, pch = 20)
legend("topleft",legend=c("Kendall corr","Pearson rank corr"),col=c(2,3),pch=20)
legend("topright",legend=c("p-value of chisq test"),col=c(4),pch=20)
}
print(chi)
quartz(width = 10, height = 7)
layout(matrix(1:4, nrow = 2, byrow = T))
for(i in 1:length(chi[,1])){
plot(nseq, chi[i,], ylim = c(0,1), xlab="n", ylab="p-value", col=4, pch=20, main=paste("For c = ",cseq[i]))
}
}
main()
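# Cross-check sketch (added; not in the original): base R provides standard versions of
# these rank statistics and a chi-squared independence test, useful for validating the
# hand-rolled implementations above, e.g.
# out <- ngbhdeg(5000, 2, 4)
# cor(unlist(out[1]), unlist(out[2]), method = "kendall")
# cor(unlist(out[1]), unlist(out[2]), method = "spearman")
# chisq.test(table(unlist(out[1]), unlist(out[2])))$p.value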
# Section 2.6
#7
# this gives the histogram of sum of degrees of two randomly selected vertices in d-neighbourhood
library(igraph)
ngbhdeg <-function(n, c, d){
vec1 = NULL
vec2 = NULL
p = c / n
i = 0
while(i < 1000){
g = sample_gnp(n,p)
v = sample(1:n,1)
dis = distances(g,v,V(g))
if(length(which(dis <= d)) >= 5){
vertex = sample(intersect(which(dis>0),which(dis<d)), 2, replace = FALSE)
vec1 = c(vec1, degree(g,vertex[1]))
vec2 = c(vec2, degree(g,vertex[2]))
i = i + 1
}
}
return(list(vec1, vec2))
}
main <- function(d = 4){
nseq = c(500,1000,5000,10000)
cseq = c(1,1.5,2,3)
for (c in cseq){
quartz(width = 10, height = 7)
layout(matrix(1:4, nrow = 2, byrow = T))
r.k = NULL
r.s = NULL
p.chi = NULL
for (n in nseq){
out = ngbhdeg(n, c, d)
vec1 = unlist(out[1])
vec2 = unlist(out[2])
poi = round(dpois(c(1:13),2*c)*1000)
poi = c(1000 - sum(poi), poi)
poi = rep(c(0:13),poi)
poi = poi + 2
vector = vec1 + vec2
chi = 0
for(i in 2:15){
chi = chi + (length(which(vector == i))/1000 - dpois(i-2,2*c))^2/dpois(i-2,2*c)
}
p1 = round(pchisq(chi,13, lower.tail=F),2)
x = NULL
y = NULL
for(i in 0:13){
x = c(x, length(which(vector <= i))/1000)
y = c(y, length(which(poi <= i))/1000)
}
p2 = round(ks.test(x,y)$p.value,2)
p3 = round(t.test(vector,poi, alternative = "two.sided", var.equal = FALSE)$p.value,2)
hist(vector, breaks=seq(-.5,20.5,by=1), ylim=c(0,0.1+max(dpois(1:10,c))), prob=T, xlab="degree", col=rgb(0,0,1,1/4), main=paste("For n = ",n)) ## range of x (-.5,15.5) for c = 4 and (-.5,12.5) for c = 3
hist(poi, breaks=seq(-.5,20.5,by=1), ylim=c(0,max(0.1+dpois(1:10,c))), prob=T, xlab="degree", col=rgb(1,0,0,1/4), add=T)
legend("topright", c("p-value",paste("chisq.test = ",p1),paste("ks.test = ",p2),paste("t.test = ",p3)))
}
}
}
main()
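# Note (added for clarity): the reference sample built in main() assumes the two sampled
# degrees are independent draws of 1 + Poisson(c); since the sum of two independent
# Poisson(c) variables is Poisson(2c), the comparison target is 2 + Poisson(2c), which is
# why 'poi' is shifted by 2 and dpois(i - 2, 2*c) is used in the chi-squared statistic.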
path: /R-code.R | license_type: no_license | repo_name: Sohamdas-stat/Random-graphs | language: R | is_vendor: false | is_generated: false | length_bytes: 11,106 | extension: r
library(ggplot2)
library(stringr)
theme_set(theme_classic())
library(sm)
library(gridExtra)
library(gtable)
options(error=traceback)
library(ggisoband)
library(viridis)
library(scales)
theme0 <- function(...) theme( legend.position = "none",
panel.background = element_blank(),
#panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.margin = unit(0,"null"),
axis.ticks = element_blank(),
#axis.text.x = element_blank(),
#axis.text.y = element_blank(),
#axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.length = unit(0,"null"),
axis.ticks.margin = unit(0,"null"),
axis.line = element_blank(),
panel.border=element_rect(color=NA),
...)
theme_left_dens <- function(...) theme( legend.position = "none",
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.spacing = unit(1,"null"),
axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.length = unit(0,"null"),
axis.ticks.margin = unit(0,"null"),
axis.line = element_blank(),
panel.border=element_rect(color=NA),
# plot.margin = unit(c(-0.25, 0.4, -0.25, 0.4),"lines"),
...)
theme_top_dens <- function(...) theme( legend.position = "none",
panel.background = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.margin = unit(0,"null"),
# axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.length = unit(0,"null"),
axis.ticks.margin = unit(0,"null"),
# axis.line = element_blank(),
panel.border=element_rect(color=NA),
# plot.margin = unit(c(0, 0.4, 0.4, 0.4),"lines"),
legend.background = element_rect(fill = "transparent"),
legend.box.background = element_rect(fill = "transparent"), # get rid of legend panel bg
...)
theme_1D_plot <- function(...) theme( legend.position = "none",
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.margin = unit(0,"null"),
axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
# axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.length = unit(0,"null"),
axis.ticks.margin = unit(0,"null"),
axis.line = element_blank(),
panel.border=element_rect(color=NA),
plot.margin = unit(c(0.5, 1.2, 0.5, 1.2),"lines"),
...)
theme_contour <- function(...) theme( legend.position = "none",
panel.background = element_rect(fill = "transparent",colour = NA),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.spacing = unit(0,"null"),
axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.length = unit(0,"null"),
axis.ticks.margin = unit(0,"null"),
axis.line = element_blank(),
plot.background = element_rect(fill = "transparent", color = NA),
legend.background = element_rect(fill = "transparent"),
legend.box.background = element_rect(fill = "transparent"), # get rid of legend panel bg
# get rid of legend bg
#panel.border=element_rect(color=NA),
...)
theme_empty <- function(...) theme(plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.line = element_blank(),
)
fancy_scientific <- function(l) {
# turn in to character string in scientific notation
l <- format(l, scientific = TRUE)
# quote the part before the exponent to keep all the digits
l <- gsub("^(.*)e", "'\\1'e", l)
# turn the 'e+' into plotmath format
l <- gsub("e", "%*%10^", l)
# return this as an expression
parse(text=l)
}
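# Usage sketch (added; an assumption about intended use, not from the original file):
# the formatter is typically passed to a ggplot2 scale, e.g.
# ggplot(df, aes(x, y)) + geom_point() + scale_y_continuous(labels = fancy_scientific)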
get_name_idx <- function(string_ref=NULL, param_idx=NULL) {
# If string ref supplied, name index is returned
# If param index is supplied, expression of param is returned
names = list(
"D", "g", "g_trp", "K_A_B", "K_A_I", "K_A_T", "K_A_V",
"k_I", "K_mu", "K_mu_trp", "k_omega_B", "k_omega_T", "k_TV_ann",
"kA_1", "kB_max", "kI_max", "kT_max", "kV_max", "mu_max_1", "mu_max_2", "nI", "n_A_B",
"n_A_I", "n_A_T", "n_A_V", "p", "S0", "S0_trp", "omega_max_1", "n_omega_B", "n_omega_T",
"N_1", "N_2", "S_glu", "S_trp", "B", "A", "V", "I", "T"
)
# "kA_1", "K_omega", "n_omega", "S0", "gX",
# "gC", "C0L", "KDL","nL","K1L",
# "K2L", "ymaxL", "K1T", "K2T", "ymaxT",
# "C0B", "LB", "NB", "KDB", "K1B",
# "K2B", "K3B", "ymaxB", "cgt", "k_alpha_max",
# "k_beta_max", "X", "C", "S",
# "B", "A"
expression_names_list <- list(
expression(D), expression(mu[max[1]]), expression(mu[max[2]]), expression(K[B[1]]), expression(K[B[mccV]]),
expression(kA[1]), expression(kA[2]), expression(K[omega[1]]), expression(K[omega[2]]), expression(kBmax[1]),
expression(kBmax[2]), expression(nB[1]), expression(nB[2]), expression(n[omega[1]]), expression(n[omega[2]]),
expression(omega[max[1]]), expression(omega[max[2]]), expression(N[1]), expression(N[2])
)
if(!is.null(string_ref)) {
for (i in seq_along(names)) {
if (string_ref == names[[i]]) {
# print(names[[i]])
return(names[[i]])
# return(expression_names_list[[i]])
}
}
}
if (!is.null(param_idx)) {
return(names[[param_idx]])
# return(expression_names_list[[param_idx]])
}
}
convert_to_grid <- function(x, y) {
nrows = length(x)
ncolmns = length(y)
new_y = c()
new_x = c()
count = 1
for (y_coor in y){
for (x_coor in x){
new_x[count] <- x_coor
new_y[count] <- y_coor
count = count + 1
}
}
new_df <- data.frame(new_x, new_y)
colnames(new_df) <- c('x', 'y')
return(new_df)
}
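# Note (added): convert_to_grid(x, y) produces the same grid of (x, y) pairs, rows in the
# same order, as the base R one-liner expand.grid(x = x, y = y), since expand.grid also
# varies its first argument fastest.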
make_contour_plot <-function(x_data, y_data, x_lims, y_lims, weights_data, true_val_x, true_val_y) {
y_trans_scale <- "identity"
x_trans_scale <- "identity"
if ((x_lims[1] < 1e-4 && x_lims[1] != 0) || (x_lims[1] > 1e4)){
x_trans_scale <- "log10"
}
if ((y_lims[1] < 1e-4 && y_lims[1] != 0) || (y_lims[1] > 1e4)){
y_trans_scale <- "log10"
}
if (identical("log10", x_trans_scale)) {
x_lims <- log(x_lims)
}
if (identical("log10", y_trans_scale)) {
y_lims <- log(y_lims)
}
# Force expansion of the density grid by appending the axis limits as two extra
# points carrying zero weight (so they do not distort the estimate)
x_data <- c(x_data, x_lims[1])
x_data <- c(x_data, x_lims[2])
y_data <- c(y_data, y_lims[1])
y_data <- c(y_data, y_lims[2])
weights_data <- c(weights_data, 0, 0)
dens <- sm.density( cbind(x_data, y_data), weights=weights_data, display="none", nbins=0)
x1 = dens$eval.points[,1]
y1 = dens$eval.points[,2]
# z1 = dens$estimate
# Generate coordinates corresponding to z grid
dens_df <- convert_to_grid(x1, y1)
colnames(dens_df) <- cbind("x1", "y1")
dens_df$z1 <- c(dens$estimate)
dens_df$z1 <- rescale(c(dens_df$z1), to=c(0, 10000))
# dens_df$z1 <- sapply(dens_df$z1, function(x) x*1000)
# Filling a column with same values so they can be passed in the same df
dens_df$true_val_x <- c(rep(true_val_x, length(x1)))
dens_df$true_val_y <- c(rep(true_val_y, length(y1)))
# "white","#aae7e6", "#507dbc", "#e87554", "#d14141"
pCont_geom <- ggplot(data=dens_df, aes(x=x1, y=y1, z=z1)) +
scale_fill_gradientn(colors=c("white", "#058989", "#ff9914", "#d14141"), values=scales::rescale(c(0, 10000)))
pCont_geom <- pCont_geom +
geom_isobands(aes(z=z1, fill=stat((zmax^2+zmin^2)/2)), color = NA) +
scale_x_continuous(name="x", expand = c(0,0)) +
scale_y_continuous(position="right", expand = c(0,0)) +
coord_cartesian(expand = FALSE) +
theme_contour()
return(pCont_geom)
}
make_dual_contour_plot <-function(x1_data, y1_data, x2_data, y2_data, x_lims, y_lims, weights_data_1, weights_data_2) {
dens_1 <- sm.density( cbind(x1_data, y1_data), weights=weights_data_1, display="none", nbins=0 )
dens_2 <- sm.density( cbind(x2_data, y2_data), weights=weights_data_2, display="none", nbins=0 )
# Extract data from sm.density functions
x1 = dens_1$eval.points[,1]
y1 = dens_1$eval.points[,2]
z1 = dens_1$estimate
x2 = dens_2$eval.points[,1]
y2 = dens_2$eval.points[,2]
z2 = dens_2$estimate
# Generate grid coordinates and df for each parameter set
dens_1_df <- convert_to_grid(x1, y1)
colnames(dens_1_df) <- cbind("x1", "y1")
dens_2_df <- convert_to_grid(x2, y2)
colnames(dens_2_df) <- cbind("x2", "y2")
# Add z data to each parameter set
dens_1_df$z1 <- c(z1)
dens_2_df$z2 <- c(z2)
# Combine dataframes
dens_df_combined <- cbind(dens_1_df, dens_2_df)
pCont_geom <- ggplot(data=dens_df_combined) +
geom_contour(aes(x=x1, y=y1, z=z1, colour="red"), bins=10) +
geom_contour(aes(x=x2, y=y2, z=z2, colour="blue"), bins=10) +
scale_x_continuous(name="x", limits = x_lims, expand = c(0,0)) +
scale_y_continuous(position="right", limits=y_lims, expand = c(0,0)) +
theme_bw() +
theme_contour()
return(pCont_geom)
}
make_annotation_plot <- function(annot_text) {
# plot_str <- paste(annot_text)
print(annot_text)
pAnnot <- ggplot() +
annotate("text", x = 4, y = 25, size=1.5, label = paste(annot_text), parse=TRUE) +
theme_bw() +
theme_empty()
return(pAnnot)
}
make_top_plot <- function(x_data, x_lims, weights_data) {
trans_scale <- "identity"
if ((x_lims[1] < 1e-4 && x_lims[1] != 0) || (x_lims[1] > 1e4)){
x_lims <- log(x_lims)
}
plot_df <- data.frame(x_data, weights_data)
colnames(plot_df) <- c("x", "w")
pTop <- ggplot(data=plot_df) +
geom_density(aes(x= x, weight=w, colour = '#058989')) +
scale_x_continuous(name = 'log10(GFP)', limits=x_lims, expand = c(0,0)) +
scale_y_continuous(position="right", expand = c(0,0), limits= c(0, NA)) +
theme_bw() + theme_top_dens()
return(pTop)
}
make_1d_param_plot <- function(x_data, x_lims, weights_data, param_name) {
trans_scale <- "identity"
if ((x_lims[1] < 1e-4 && x_lims[1] != 0) || (x_lims[1] > 1e4)){
trans_scale <- "log10"
x_lims <- log(x_lims)
print(typeof(x_data))
}
plot_df <- data.frame(x_data, weights_data)
colnames(plot_df) <- c("x", "w")
pTop <- ggplot(data=plot_df) +
geom_density(aes(x= x, weight=w, colour = 'red')) +
scale_x_continuous(name = param_name, limits=x_lims, expand = c(0,0)) +
scale_y_continuous(position="right", expand = c(0,0)) +
theme_bw() + theme_1D_plot()
print(x_lims)
return(pTop)
}
make_dual_top_plot <- function(x_data_1, x_data_2, x_lims, weights_data_1, weights_data_2) {
plot_df <- data.frame(x_data_1, x_data_2, weights_data_1, weights_data_2)
colnames(plot_df) <- c("x1", "x2", "w1", "w2")
print(x_lims)
if ((x_lims[1] < 1e-4 && x_lims[1] != 0) || (x_lims[1] > 1e4)){
trans_scale <- "log10"
x_lims <- log(x_lims)
}
pTop <- ggplot(data=plot_df) +
geom_density(aes(x= x1, weight=w1, colour = 'red')) +
geom_density(aes(x= x2, weight=w2, colour = 'blue')) +
scale_x_continuous(name = 'log10(GFP)', limits=x_lims, expand = c(0,0), trans='log10') +
scale_y_continuous(position="right", expand = c(0,0)) +
theme_bw() + theme_top_dens()
return(pTop)
}
make_left_plot <- function(x_data, x_lims, weights_data) {
trans_scale <- "identity"
if ((x_lims[1] < 1e-4 && x_lims[1] != 0) || (x_lims[1] > 1e4)){
trans_scale <- "log10"
x_lims <- log(x_lims)
}
plot_df <- data.frame(x_data, weights_data)
colnames(plot_df) <- c("x", "w")
pLeft <- ggplot(data=plot_df) +
geom_density(aes(x=x, weight=w, colour = '#058989')) +
scale_x_continuous(name = 'log10(GFP)', limits=x_lims, expand = c(0,0)) +
scale_y_continuous(position="right", expand = c(0,0)) +
coord_flip() +
scale_y_reverse() +
theme_bw() + theme_left_dens()
return(pLeft)
}
make_dual_left_plot <- function(x_data_1, x_data_2, x_lims, weights_data_1, weights_data_2) {
plot_df <- data.frame(x_data_1, x_data_2, weights_data_1, weights_data_2)
colnames(plot_df) <- c("x1", "x2", "w1", "w2")
pLeft <- ggplot(data=plot_df) +
geom_density(aes(x= x1, weight=w1, colour = 'red')) +
geom_density(aes(x= x2, weight=w2, colour = 'blue')) +
scale_x_continuous(name = 'log10(GFP)', position="top", limits = x_lims, expand = c(0,0)) +
coord_flip() +
scale_y_reverse() +
theme_bw() + theme_left_dens()
return(pLeft)
}
make_empty_plot <- function() {
pEmpty <- ggplot() + geom_point(aes(1,1), colour = 'white') + theme_empty()
return(pEmpty)
}
plot_dens_2d_one_pop <- function(param_data, weights_data, cut, param_limits, output_name, true_values_vector) {
# Plots densities and contours for one population on a grid with each par
# vs another
nptot <- dim(param_data)[2]
pars <- c(0:nptot)
# remove the cut parameters
pars <- pars[ !(pars %in% cut) ]
param_names <- names(param_data)  # use the column names of the data actually passed in
nParams <- length(pars)
nCols <- length(pars)
nRows <- length(pars)
# Generate top plots
top_plots <- list()
left_plots <- list()
# Initiate empty plot list
plot_list <- list()
plot_list_index <- 1
row_idx <- 1
for (row in pars) {
col_idx <- 1
for (col in pars) {
# Set top left tile to empty
if ((row_idx == 1) & (col_idx ==1)) {
plot_list[[plot_list_index]] <- make_empty_plot()
}
# Set top row to top_dens plots
else if (row_idx == 1) {
plot_list[[plot_list_index]] <- make_top_plot(param_data[,col], param_limits[,col], weights_data)
}
else if (col_idx == 1) {
# Col == 1 we plot a left density
plot_list[[plot_list_index]] <- make_left_plot(param_data[,row], param_limits[,row], weights_data)
}
# Set middle row to param name
else if (col_idx == row_idx) {
# par_string <- get_name_idx(string_ref = names(param_data)[col])
plot_list[[plot_list_index]] <- make_annotation_plot(param_names[[col]])
}
# Plot contours for all other grid spaces
else {
plot_list[[plot_list_index]] <- make_contour_plot(param_data[,col], param_data[,row], param_limits[,col], param_limits[,row], weights_data, true_values_vector[col_idx -1], true_values_vector[row_idx - 1])
}
plot_list_index = plot_list_index + 1
col_idx = col_idx + 1
}
row_idx = row_idx + 1
col_idx <- 1
}
print(nCols)
# Set size of grid widths and heights
width_list <- as.list(rep(4, nCols))
height_list <- as.list(rep(4, nCols))
print(width_list)
print(height_list)
print("starting grid arrange")
pMar <- grid.arrange(grobs=plot_list, ncol=nCols, nrow=nRows, widths = width_list, heights = height_list)
#pMar <- do.call("grid.arrange", c(plot_list, ncol=nCols+1, nrow=nRows+1))
print(output_name)
ggsave(output_name, pMar, bg="transparent")
}
plot_dens_2d_two_pop <- function(param_data_1, param_data_2, weights_data_1, weights_data_2, cut, param_limits, output_name) {
# Plots densities and contours of two parameters
# against each other
if ( dim(param_data_1)[2] != dim(param_data_2)[2] ) {
print("bad params")
print(param_data_1)
print("param_data_1 and param_data_2 are not the same dimensions")
quit()
}
# Set total number of parameters
nptot <- dim(param_data_1)[2]
pars <- c(0:nptot)
# remove the cut parameters
pars <- pars[ !(pars %in% cut) ]
nCols = length(pars)
nRows = length(pars)
# Generate top plots
top_plots <- list()
left_plots <- list()
# Initiate empty plot list
plot_list <- list()
plot_list_index <- 1
row_idx <- 1
for (row in pars) {
col_idx <- 1
for (col in pars) {
# Set top left tile to empty
if ((row_idx == 1) & (col_idx ==1)) {
plot_list[[plot_list_index]] <- make_empty_plot()
}
# Set top row to top_dens plots
else if (row_idx == 1) {
plot_list[[plot_list_index]] <- make_dual_top_plot(param_data_1[,col], param_data_2[,col], param_limits[,col], weights_data_1, weights_data_2)
}
else if (col_idx == 1) {
# Plot a left density in column 1
plot_list[[plot_list_index]] <- make_dual_left_plot(param_data_1[,row], param_data_2[,row], param_limits[,row], weights_data_1, weights_data_2)
}
# Set middle row to param name
else if (col_idx == row_idx) {
par_string <- get_name_idx(param_idx = col)
plot_list[[plot_list_index]] <- make_annotation_plot(par_string)
}
# Plot contours
else {
plot_list[[plot_list_index]] <- make_dual_contour_plot(param_data_1[,col], param_data_1[,row],
param_data_2[,col], param_data_2[,row],
param_limits[,col], param_limits[,row],
weights_data_1, weights_data_2)
}
plot_list_index = plot_list_index + 1
col_idx = col_idx + 1
}
row_idx = row_idx + 1
col_idx <- 1
}
print("starting grid arrange")
print(length(plot_list))
width_list <- as.list(rep(4, nCols))
height_list <- as.list(rep(4, nCols))
pMar <- grid.arrange(grobs=plot_list, ncol=nCols, nrow=nRows, widths = width_list, heights = height_list)
ggsave(output_name, pMar, bg="transparent")
}
plot_1d_one_pop <- function(param_data, weights_data, cut, param_limits, output_name, true_values_vector)
{
param_names <- names(param_data)  # use the column names of the data actually passed in
# Plots densities and contours for one population on a grid with each par
# vs another
nptot <- dim(param_data)[2]
pars <- c(1:nptot)
# remove the cut parameters
pars <- pars[ !(pars %in% cut) ]
nParams <- length(pars)
nCols <- length(pars)
nRows <- length(pars)
# Initiate empty plot list
plot_list <- list()
plot_list_index <- 1
for (p in pars) {
plot_list[[plot_list_index]] <- make_1d_param_plot(param_data[,p], param_limits[,p], weights_data, param_names[[p]])
plot_list_index <- plot_list_index + 1
}
# Set size of grid widths and heights
width_list <- as.list(rep(3, nCols))
height_list <- as.list(rep(3, nCols))
pMar <- grid.arrange(grobs=plot_list, ncol=4)
ggsave(output_name, pMar)
}
get_fixed_parameter_columns <- function(param_lims) {
fixed_param_list = c()
for(i in seq(from=1, to=dim(param_lims)[2], by=1)) {
diff = param_lims[2, i] - param_lims[1, i]
if (diff == 0 ) {
fixed_param_list <- c(fixed_param_list, i)
}
}
return(fixed_param_list)
}
make_param_lims_from_input <- function(output_params_df, input_params_file_path, input_species_file_path) {
param_lims_list <- c()
input_params_df <- read.table(input_params_file_path, sep=",")
# Iterate parameter names
for(i in names(output_params_df)){
idx = 1
for(j in input_params_df[[1]]) {
if(i == j) {
min_x = (input_params_df[[2]][idx])
max_x = (input_params_df[[3]][idx])
param_lims_list <- cbind(param_lims_list, c(min_x, max_x))
}
idx = idx + 1
}
}
input_species_df <- read.table(input_species_file_path, sep=",")
# Iterate parameter names
for(i in names(output_params_df)){
idx = 1
for(j in input_species_df[[1]]) {
if(i == j) {
min_x = (input_species_df[[2]][idx])
max_x = (input_species_df[[3]][idx])
param_lims_list <- cbind(param_lims_list, c(min_x, max_x))
}
idx = idx + 1
}
}
return(param_lims_list)
}
make_correlation_csv <- function(accepted_df, weights, to_cut, output_path) {
nptot <- dim(accepted_df)[2]
pars <- c(0:nptot)
# remove the cut parameters
pars <- pars[ !(pars %in% to_cut) ]
param_names <- names(accepted_df)
param_names <- c()
for (i in pars) {
param_names <- c(param_names, names(accepted_df)[i])
}
nParams <- length(param_names)
nCols <- length(param_names)
nRows <- length(param_names)
idx_i <- 1
idx_j <- 1
coeff_mat <- matrix(0L, nrow = nParams, ncol = nParams)
rownames(coeff_mat) <- param_names
colnames(coeff_mat) <- param_names
for (i in param_names) {
for (j in param_names){
x <- accepted_df[i]
y <- accepted_df[j]
corr_vals <- cor(x, y)
coeff_mat[[i, j]] <- corr_vals[1]
}
}
write.table(coeff_mat, file=output_path, sep=',')
}
make_param_lims <- function(params_data_df) {
param_lims_list <- c()
idx = 1
for(i in names(params_data_df)){
min_x = min(params_data_df[i])
max_x = max(params_data_df[i])
param_lims_list <- cbind(param_lims_list, c(min_x, max_x))
idx = idx + 1
}
return(param_lims_list)
}
# ---- Command-line entry point ----
args <- commandArgs(trailingOnly = TRUE)
params_posterior_path <- args[1]
param_priors_inputs_path <- args[2]
species_inputs_path <- args[3]
model_idx <- args[4]
output_dir <- args[5]
make_1d_plot <- args[6]
make_2d_plot <- args[7]
data_df <- read.csv(params_posterior_path)
param_lims <- make_param_lims_from_input(data_df[, 7:ncol(data_df)], param_priors_inputs_path, species_inputs_path)
weights <- data_df$particle_weight
if(all(is.na(weights))) {
weights <- rep(1, length(weights))
}
# Remove unnecessary columns
accepted_df <- data_df[, 7:ncol(data_df)]
fixed_params = get_fixed_parameter_columns(param_lims)
# List of columns to be dropped
to_cut <- fixed_params
# Column heading to plot, or keep remove columns as empty
# keep_columns <- c("D", "mu_max_1", "mu_max_2", "mu_max_3", "kB_max_1", "kB_max_2", "kB_max_3")
# keep_columns <- c("D")
# remove_columns <- setdiff(names(accepted_df), keep_columns)
remove_columns <- c()
idx <- 1
for (name in names(accepted_df)) {
if (name %in% remove_columns) {
to_cut <- cbind(to_cut, idx)
}
idx <- idx + 1
}
dummy_true_val_vector <- rep(0.8, dim(accepted_df)[2])
name_prefix <- paste("model_", toString(model_idx), sep="")
output_name <- paste(name_prefix, "_2D_corr_coeff.csv", sep="")
output_path <- paste(output_dir, output_name, sep="")
make_correlation_csv(accepted_df, weights, to_cut, output_path)
if (make_1d_plot) {
output_name <- paste(name_prefix, "_1D_dens.pdf", sep="")
output_path <- paste(output_dir, output_name, sep="")
plot_1d_one_pop(accepted_df, weights, to_cut, param_lims, output_path, dummy_true_val_vector)
}
if (make_2d_plot){
output_name <- paste(name_prefix, "_2D_dens.pdf", sep="")
output_path <- paste(output_dir, output_name, sep="")
plot_dens_2d_one_pop(accepted_df, weights, to_cut, param_lims, output_path, dummy_true_val_vector)
}
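# Example invocation (added sketch; file names are placeholders, not from the original repo):
# Rscript dens_plot_2D.R model_1_posterior.csv params_priors.csv species_priors.csv \
#         1 ./output/ TRUE TRUE
# Positional arguments, in order: posterior parameter CSV, parameter prior input file,
# species prior input file, model index, output directory, and TRUE/FALSE flags for
# producing the 1D density plots and the 2D density grid respectively.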
path: /data_analysis/dens_plot_2D.R | license_type: no_license | repo_name: behzadk/AutoCD | language: R | is_vendor: false | is_generated: false | length_bytes: 24,628 | extension: r
# vs another
nptot <- dim(param_data)[2]
pars <- c(0:nptot)
# remove the cut parameters
pars <- pars[ !(pars %in% cut) ]
param_names <- names(accepted_df)
nParams <- length(pars)
nCols <- length(pars)
nRows <- length(pars)
# Generate top plots
top_plots <- list()
left_plots <- list()
# Initiate empty plot list
plot_list <- list()
plot_list_index <- 1
row_idx <- 1
for (row in pars) {
col_idx <- 1
for (col in pars) {
# Set top left tile to empty
if ((row_idx == 1) & (col_idx ==1)) {
plot_list[[plot_list_index]] <- make_empty_plot()
}
# Set top row to top_dens plots
else if (row_idx == 1) {
plot_list[[plot_list_index]] <- make_top_plot(param_data[,col], param_limits[,col], weights_data)
}
else if (col_idx == 1) {
# Col == 1 we plot a left density
plot_list[[plot_list_index]] <- make_left_plot(param_data[,row], param_limits[,row], weights_data)
}
# Set middle row to param name
else if (col_idx == row_idx) {
# par_string <- get_name_idx(string_ref = names(param_data)[col])
plot_list[[plot_list_index]] <- make_annotation_plot(param_names[[col]])
}
# Plot contours for all other grid spaces
else {
plot_list[[plot_list_index]] <- make_contour_plot(param_data[,col], param_data[,row], param_limits[,col], param_limits[,row], weights_data, true_values_vector[col_idx -1], true_values_vector[row_idx - 1])
}
plot_list_index = plot_list_index + 1
col_idx = col_idx + 1
}
row_idx = row_idx + 1
col_idx <- 1
}
print(nCols)
# Set size of grid widths and heights
width_list <- as.list(rep(4, nCols))
height_list <- as.list(rep(4, nCols))
print(width_list)
print(height_list)
print("starting grid arrange")
pMar <- grid.arrange(grobs=plot_list, ncol=nCols, nrow=nRows, widths = width_list, heights = height_list)
#pMar <- do.call("grid.arrange", c(plot_list, ncol=nCols+1, nrow=nRows+1))
print(output_name)
ggsave(output_name, pMar, bg="transparent")
}
plot_dens_2d_two_pop <- function(param_data_1, param_data_2, weights_data_1, weights_data_2, cut, param_limits, output_name) {
# Plots densities and contours of two parameters
# against each other
if ( dim(param_data_1)[2] != dim(param_data_2)[2] ) {
print("bad params")
print(param_data_1)
print("param_data_1 and param_data_2 are not the same dimensions")
quit()
}
# Set total number of parameters
nptot <- dim(param_data_1)[2]
pars <- c(0:nptot)
# remove the cut parameters
pars <- pars[ !(pars %in% cut) ]
nCols = length(pars)
nRows = length(pars)
# Generate top plots
top_plots <- list()
left_plots <- list()
# Initiate empty plot list
plot_list <- list()
plot_list_index <- 1
row_idx <- 1
for (row in pars) {
col_idx <- 1
for (col in pars) {
# Set top left tile to empty
if ((row_idx == 1) & (col_idx ==1)) {
plot_list[[plot_list_index]] <- make_empty_plot()
}
# Set top row to top_dens plots
else if (row_idx == 1) {
plot_list[[plot_list_index]] <- make_dual_top_plot(param_data_1[,col], param_data_2[,col], param_limits[,col], weights_data_1, weights_data_2)
}
else if (col_idx == 1) {
# Plot a left density in column 1
plot_list[[plot_list_index]] <- make_dual_left_plot(param_data_1[,row], param_data_2[,row], param_limits[,row], weights_data_1, weights_data_2)
}
# Set middle row to param name
else if (col_idx == row_idx) {
par_string <- get_name_idx(param_idx = col)
plot_list[[plot_list_index]] <- make_annotation_plot(par_string)
}
# Plot contours
else {
plot_list[[plot_list_index]] <- make_dual_contour_plot(param_data_1[,col], param_data_1[,row],
param_data_2[,col], param_data_2[,row],
param_limits[,col], param_limits[,row],
weights_data_1, weights_data_2)
}
plot_list_index = plot_list_index + 1
col_idx = col_idx + 1
}
row_idx = row_idx + 1
col_idx <- 1
}
print("starting grid arrange")
print(length(plot_list))
width_list <- as.list(rep(4, nCols))
height_list <- as.list(rep(4, nCols))
pMar <- grid.arrange(grobs=plot_list, ncol=nCols, nrow=nRows, widths = width_list, heights = height_list)
ggsave(output_name, pMar, bg="transparent")
}
plot_1d_one_pop <- function(param_data, weights_data, cut, param_limits, output_name, true_values_vector)
{
param_names <- names(accepted_df)
# Plots densities and contours for one population on a grid with each par
# vs another
nptot <- dim(param_data)[2]
pars <- c(1:nptot)
# remove the cut parameters
pars <- pars[ !(pars %in% cut) ]
nParams <- length(pars)
nCols <- length(pars)
nRows <- length(pars)
# Initiate empty plot list
plot_list <- list()
plot_list_index <- 1
for (p in pars) {
plot_list[[plot_list_index]] <- make_1d_param_plot(param_data[,p], param_limits[,p], weights_data, param_names[[p]])
plot_list_index <- plot_list_index + 1
}
# Set size of grid widths and heights
width_list <- as.list(rep(3, nCols))
height_list <- as.list(rep(3, nCols))
pMar <- grid.arrange(grobs=plot_list, ncol=4)
ggsave(output_name, pMar)
}
get_fixed_parameter_columns <- function(param_lims) {
fixed_param_list = c()
for(i in seq(from=1, to=dim(param_lims)[2], by=1)) {
diff = param_lims[2, i] - param_lims[1, i]
if (diff == 0 ) {
fixed_param_list <- c(fixed_param_list, i)
}
}
return(fixed_param_list)
}
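# Example for clarity: with param_lims <- rbind(c(0, 2, 5), c(1, 2, 7)) only
# column 2 has equal lower and upper limits, so this function returns c(2).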
make_param_lims_from_input <- function(output_params_df, input_params_file_path, input_species_file_path) {
param_lims_list <- c()
input_params_df <- read.table(input_params_file_path, sep=",")
# Iterate parameter names
for(i in names(output_params_df)){
idx = 1
for(j in input_params_df[[1]]) {
if(i == j) {
min_x = (input_params_df[[2]][idx])
max_x = (input_params_df[[3]][idx])
param_lims_list <- cbind(param_lims_list, c(min_x, max_x))
}
idx = idx + 1
}
}
input_species_df <- read.table(input_species_file_path, sep=",")
# Iterate parameter names
for(i in names(output_params_df)){
idx = 1
for(j in input_species_df[[1]]) {
if(i == j) {
min_x = (input_species_df[[2]][idx])
max_x = (input_species_df[[3]][idx])
param_lims_list <- cbind(param_lims_list, c(min_x, max_x))
}
idx = idx + 1
}
}
return(param_lims_list)
}
make_correlation_csv <- function(accepted_df, weights, to_cut, output_path) {
nptot <- dim(accepted_df)[2]
pars <- c(0:nptot)
# remove the cut parameters
pars <- pars[ !(pars %in% to_cut) ]
param_names <- names(accepted_df)
param_names <- c()
for (i in pars) {
param_names <- c(param_names, names(accepted_df)[i])
}
nParams <- length(param_names)
nCols <- length(param_names)
nRows <- length(param_names)
idx_i <- 1
idx_j <- 1
coeff_mat <- matrix(0L, nrow = nParams, ncol = nParams)
rownames(coeff_mat) <- param_names
colnames(coeff_mat) <- param_names
for (i in param_names) {
for (j in param_names){
x <- accepted_df[i]
y <- accepted_df[j]
corr_vals <- cor(x, y)
coeff_mat[[i, j]] <- corr_vals[1]
}
}
write.table(coeff_mat, file=output_path, sep=',')
}
make_param_lims <- function(params_data_df) {
param_lims_list <- c()
idx = 1
for(i in names(params_data_df)){
    min_x = min(params_data_df[[i]])
    max_x = max(params_data_df[[i]])
param_lims_list <- cbind(param_lims_list, c(min_x, max_x))
idx = idx + 1
}
return(param_lims_list)
}
args <- commandArgs(trailingOnly = TRUE)
params_posterior_path <- args[1]
param_priors_inputs_path <- args[2]
species_inputs_path <- args[3]
model_idx <- args[4]
output_dir <- args[5]
make_1d_plot <- as.logical(args[6])
make_2d_plot <- as.logical(args[7])
data_df <- read.csv(params_posterior_path)
param_lims <- make_param_lims_from_input(data_df[, 7:ncol(data_df)], param_priors_inputs_path, species_inputs_path)
weights <- data_df$particle_weight
if(all(is.na(weights))) {
weights <- rep(1, length(weights))
}
# Remove unnecessary columns
accepted_df <- data_df[, 7:ncol(data_df)]
fixed_params = get_fixed_parameter_columns(param_lims)
# List of columns to be dropped
to_cut <- fixed_params
# Column heading to plot, or keep remove columns as empty
# keep_columns <- c("D", "mu_max_1", "mu_max_2", "mu_max_3", "kB_max_1", "kB_max_2", "kB_max_3")
# keep_columns <- c("D")
# remove_columns <- setdiff(names(accepted_df), keep_columns)
remove_columns <- c()
idx <- 1
for (name in names(accepted_df)) {
if (name %in% remove_columns) {
to_cut <- cbind(to_cut, idx)
}
idx <- idx + 1
}
dummy_true_val_vector <- rep(0.8, dim(accepted_df)[2])
name_prefix <- paste("model_", toString(model_idx), sep="")
output_name <- paste(name_prefix, "_2D_corr_coeff.csv", sep="")
output_path <- paste(output_dir, output_name, sep="")
make_correlation_csv(accepted_df, weights, to_cut, output_path)
if (make_1d_plot) {
output_name <- paste(name_prefix, "_1D_dens.pdf", sep="")
output_path <- paste(output_dir, output_name, sep="")
plot_1d_one_pop(accepted_df, weights, to_cut, param_lims, output_path, dummy_true_val_vector)
}
if (make_2d_plot){
output_name <- paste(name_prefix, "_2D_dens.pdf", sep="")
output_path <- paste(output_dir, output_name, sep="")
plot_dens_2d_one_pop(accepted_df, weights, to_cut, param_lims, output_path, dummy_true_val_vector)
}
|
# ---------- helper_pkgname_rawlink() Tests ---------- #
test_that("helper_pkgname_rawlink gets correct link and name from various input formats", {
inputs <- c("https://raw.githubusercontent.com/falo0/dstr/master/DESCRIPTION",
"https://github.com/falo0/dstr/blob/master/DESCRIPTION",
"https://github.com/falo0/dstr/blob/master",
"https://github.com/falo0/dstr",
"falo0/dstr"
)
res <- lapply(inputs, helper_pkgname_rawlink)
for(i in 1:length(res)){
expect_equal(unname(res[[i]][1]), "dstr")
expect_equal(unname(res[[i]][2]), "https://raw.githubusercontent.com/falo0/dstr/master/DESCRIPTION")
}
})
|
/tests/testthat/test_helper_pkgname_rawlink.R
|
no_license
|
albers93/dstr
|
R
| false | false | 692 |
r
|
library(GJRM)
### Name: VuongClarke
### Title: Vuong and Clarke tests
### Aliases: VuongClarke
### Keywords: Vuong test Clarke test likelihood ratio test
### ** Examples
## see examples for gjrm
|
/data/genthat_extracted_code/GJRM/examples/VuongClarke.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 202 |
r
|
library(tidyverse)
discursos <- list.files(path ="data/sesiones_ordinarias/",pattern=".*txt") %>% as_tibble()
f <- function(x, output) {
file <- read_file(paste0("data/sesiones_ordinarias/",x))
file_wo_space <- str_replace_all(file,"\r", " ")
  file_wo_space <- str_replace_all(file_wo_space,"\n", " ")
file_wo_dash <- str_replace_all(file_wo_space,"- ","")
write_lines(file_wo_dash, x)
}
a<-apply(discursos, 1,f)
file<-a[[1]]
length(discursos$value)
for(i in seq_along(discursos$value)){
a[[i]] %>% writeLines(discursos$value[i])
}
|
/juntar_palabras_funcion.R
|
no_license
|
lucariel/dpresidenciales
|
R
| false | false | 532 |
r
|
#' getallNames Function for getting all names associated with a gid
#'
#' @import RPostgreSQL
#' @param gid A gid
#' @param dbenv the database connection environment
#' @param type a numeric name type (ntype), or "all" for names of every type
#' @return a vector of names
#' @export
#'
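#' @examples
#' \dontrun{
#' ## Illustrative sketch only: assumes 'dbenv' already holds an open
#' ## PostgreSQL connection named 'con' (e.g. created with dbConnect()).
#' getallNames(gid = 12345, dbenv = dbenv, type = 6)
#' getallNames(gid = 12345, dbenv = dbenv, type = "all")
#' }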
#Get all names associated with a gid
getallNames<- function(gid, dbenv, type=6){
assign('gid', gid, envir=dbenv)
assign('type', type, envir=dbenv)
if(!is.na(gid)){
if(type=='all'){
nms<- with(dbenv, dbGetQuery(con,
sprintf("SELECT nval FROM names WHERE nstat!=9 AND gid=%s", gid)))
}else{
nms<- with(dbenv, dbGetQuery(con,
sprintf("SELECT nval FROM names WHERE nstat!=9 AND gid=%s AND ntype=%s", gid, type)))
}
if(nrow(nms)>0){
nms<- nms[,1]
}else{
nms<- gid
}
}else{
nms<- NA
}
return(nms)
}
|
/R/getallNames.R
|
no_license
|
InternationalRiceResearchInstitute/pedX
|
R
| false | false | 834 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudresourcemanager_functions.R
\name{organizations.setIamPolicy}
\alias{organizations.setIamPolicy}
\title{Sets the access control policy on an Organization resource. Replaces any existing policy. The `resource` field should be the organization's resource name, e.g. 'organizations/123'.}
\usage{
organizations.setIamPolicy(SetIamPolicyRequest, resource)
}
\arguments{
\item{SetIamPolicyRequest}{The \link{SetIamPolicyRequest} object to pass to this method}
\item{resource}{REQUIRED: The resource for which the policy is being specified}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
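\examples{
\dontrun{
## Illustrative sketch only (not part of the autogenerated documentation);
## the exact fields of SetIamPolicyRequest are assumptions, see ?SetIamPolicyRequest.
options(googleAuthR.scopes.selected =
  c("https://www.googleapis.com/auth/cloud-platform"))
googleAuthR::gar_auth()
req <- SetIamPolicyRequest()
organizations.setIamPolicy(req, resource = "organizations/123")
}
}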
\seealso{
\href{https://cloud.google.com/resource-manager}{Google Documentation}
Other SetIamPolicyRequest functions: \code{\link{SetIamPolicyRequest}},
\code{\link{projects.setIamPolicy}}
}
|
/googlecloudresourcemanagerv1.auto/man/organizations.setIamPolicy.Rd
|
permissive
|
GVersteeg/autoGoogleAPI
|
R
| false | true | 1,236 |
rd
|
extractSingleBetaroll = function(Fm.roll, name){
require(xts)
result = matrix(NA, nrow = length(Fm.roll), ncol = ncol(coef(Fm.roll[[1]])))
colnames(result) = colnames(coef(Fm.roll[[1]]))
for (i in 1:length(Fm.roll)){
if(name %in% row.names(coef(Fm.roll[[i]]))){
result[i,] = t(as.vector(coef(Fm.roll[[i]])[c(name),])[1,])
}
}
row.names(result) = names(Fm.roll)
return(result)
}
name = "CITADEL"
|
/R/extractSingleBetaroll.R
|
no_license
|
bplloyd/Core
|
R
| false | false | 423 |
r
|
library(ggplot2)
theme_bari <- function() {
theme_minimal() +
theme(
panel.grid.major.y = element_line(
size = .75
),
panel.grid = element_line(),
panel.border = element_rect(size = .5, fill = NA, color = "#524d4d"),
#text = element_text(family = "HelveticaNeue-CondensedBold"),
plot.title = element_text(color = "#241c1c", size = 22),
plot.subtitle = element_text(color = "#2e2828", size = 18),
plot.caption = element_text(
color = "#524d4d", size = 8,
hjust = 1,
        margin = margin(t = 10)
# family = "Avenir Next Condensed Medium"
)
)
}
bari_red <- "#8d0909"
theme_set(theme_bari())
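# Example usage (illustrative only; assumes a dataset such as mtcars is available):
# ggplot(mtcars, aes(wt, mpg)) +
#   geom_point(colour = bari_red) +
#   labs(title = "Fuel economy", subtitle = "by weight", caption = "source: mtcars")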
|
/practice_probs/.Rprofile
|
no_license
|
JosiahParry/da5030
|
R
| false | false | 693 |
rprofile
|
merge = function(method, X, batch, covariate=NULL) {
if (method == "combat")
combat(X, batch, covariate)
else if (method == "dwd")
dwd(X, batch)
else if (method == "none")
none(X)
else
stop("invalid method")
}
combat = function(X, batch, covariate=NULL) {
if (is.matrix(covariate) || is.null(covariate))
mat = covariate
else
mat = model.matrix(~as.factor(batch), data=covariate)
sva::ComBat(dat=.list2mat(X), batch=batch, mod=mat, par.prior=TRUE)
}
dwd = function(X, batch) {
stop("inSilicoMerging package no longer available on Bioconductor")
}
none = function(X, batch=NA) {
.list2mat(X)
}
.list2mat = function(ll) {
if (is.list(ll))
narray::stack(ll, along=2)
else
ll
}
.mat2list = function(X, subsets) {
if (is.matrix(X))
narray::split(X, along=2, subsets=subsets)
else
X
}
|
/stats/batch.r
|
permissive
|
mschubert/ebits
|
R
| false | false | 915 |
r
|
# setwd and load packages -------------------------------------------------
rm(list=ls())
library(rstudioapi)
#sets working directory to file directory
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
setwd("..")
# Load Packages
if (!require("pacman")) install.packages("pacman") ; require("pacman")
p_load(httr,rtweet,tidyverse,textclean, textstem, sentimentr, lexicon, maps, dplyr)
# create tibble -----------------------------------------------------------
data <- read.csv("./sources/raw/dataset.csv", stringsAsFactors = F)
colnames(data) <- c('user_id', 'text', 'timestamp', 'screenname', 'location', 'timeline')
data$timeline <- NULL
data <- as_tibble(data) %>% rowid_to_column()
# Debug Mode --------------------------------------------------------------
# Keep the following line commented out to run on the entire dataset;
# uncomment it to work with only the first 200 entries (debug mode)
#data <- data %>% slice(., 1:200)
# impute missing data ---------------------------------------------------------
# adjust part of dataframe from rows 9362 until 23213 (due to a bug in the scrape function)
indices <- c(9362:23213)
data[indices,'text'] <- data[indices, 'user_id']
data[indices, c('user_id', 'screenname', 'location')] <- NA
data[indices, c('timestamp')] <- '2020-03-13'
# Preprocess data ---------------------------------------------------------
# Extract text from tibble
text <- data$text
# Preprocess steps
# 1. convert encoding
text <- iconv(text, from = "latin1", to = "ascii", sub = "byte")
# 2. function to clean text
cleanText <- function(text) {
clean_texts <- text %>%
gsub("&", "", .) %>% # remove &
gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", .) %>% # remove retweet entities
gsub("@\\w+", "", .) %>% # remove at people replace_at() also works
#gsub("[[:punct:]]", "", .) %>% # remove punctuation
gsub("[[:digit:]]", "", .) %>% # remove digits
gsub("[ \t]{2,}", " ", .) %>% # remove unnecessary spaces
gsub("^\\s+|\\s+$", "", .) %>% # remove unnecessary spaces
gsub('#', "", .) %>% #remove only hashtag
#gsub("<.*>", "", .) %>% # remove remainig emojis
#gsub("(?:\\s*#\\w+)+\\s*$", "", .) %>% #remove hashtags in total
#gsub("http\\w+", "", .) %>% # remove html links replace_html() also works
tolower
return(clean_texts)
}
remove_topics <- function(text) {
clean_texts <- text %>%
gsub('covid', '', .) %>%
gsub('corona', '', .) %>%
gsub('covid19', '', .) %>%
gsub('corona19', '', .) %>%
gsub('COVID', '', .) %>%
gsub('CORONA', '', .) %>%
gsub('COVID19', '', .) %>%
gsub('virus', '', .) %>%
gsub('CORONA19', '',.) %>%
gsub('pandemia', '',.) %>%
gsub('pandemic', '', .)
return(clean_texts)
}
# apply the cleaning functions and save the text in text_clean
text_clean <- cleanText(text) %>% replace_contraction() %>%
replace_internet_slang() %>% replace_kern() %>% replace_word_elongation()
# 3. Perform lemmatization
lemma_dictionary_hs <- make_lemma_dictionary(text_clean,
engine = 'hunspell')
text_clean <- lemmatize_strings(text_clean, dictionary = lemma_dictionary_hs)
# 4. Also remove the topics for topicmodeling
topic_text <- remove_topics(text_clean)
# 5. store the cleaned text in the dataset and write to csv
data$text <- text_clean
data <- data[!duplicated(data$text),]
#Save clean data
write_csv(data, './sources/cleaned/dataset_cleaned.csv')
# 6. store the cleaned and topic-removed text in the dataset and write to csv
# note that we have altered 'data' in the previous step so we perform some steps again
data <- read.csv("./sources/raw/dataset.csv", stringsAsFactors = F)
colnames(data) <- c('user_id', 'text', 'timestamp', 'screenname', 'location', 'timeline')
data$timeline <- NULL
data <- as_tibble(data)%>% rowid_to_column()
indices <- c(9362:23213)
data[indices,'text'] <- data[indices, 'user_id']
data[indices, c('user_id', 'screenname', 'location')] <- NA
data[indices, c('timestamp')] <- '2020-03-13'
data$text <- topic_text
data <- data[!duplicated(data$text),]
write_csv(data, './sources/cleaned/dataset_topics_removed.csv')
|
/scripts/SMWA_preprocess.R
|
no_license
|
HenriArno/Project_SMWA
|
R
| false | false | 4,168 |
r
|
\name{kidney}
\alias{kidney}
\docType{data}
\title{
Kidney Renal Clear Cell Carcinoma [KIRC] RNA-Seq data
}
\description{
The KIRC RNA-seq dataset from The Cancer Genome Atlas containing 20531 genes and 72 paired columns of data with rows corresponding to genes and columns corresponding to replicates; replic vector specifies replicates and treatment vector specifies non-tumor and tumor group samples respectively within replicate.
The maximum possible sample size that can be simulated with this dataset is 36 replicates in each of two treatment groups. However, it is recommended to use a sample size of 20 or lower for simulation studies to ensure each simulated dataset is sufficiently different from the other simulated datasets.
Disclaimer:
The version of the KIRC dataset provided is:
unc.edu_KIRC.IlluminaHiSeq_RNASeqV2.Level_3.1.5.0.
The Cancer Genome Atlas updates its datasets periodically. The latest version of the KIRC dataset can always be downloaded from:
https://tcga-data.nci.nih.gov/tcga/.
Please appropriately reference the source below when using this dataset. The source code used to assemble this dataset is provided below.
}
\usage{data(kidney)}
\format{
List containing:
\itemize{
\item counts: matrix of RNA-seq data for 20531 sampled genes and 72 paired columns from individuals with Kidney Renal Clear Cell Carcinoma.
\item replic: vector detailing which column in counts matrix belongs to each individual.
\item treatment: vector detailing whether each column in counts matrix is a non-tumor or tumor sample.
}
}
\source{
https://tcga-data.nci.nih.gov/tcga/
The Cancer Genome Atlas Research Network (2013). Comprehensive molecular characterization of clear cell renal cell carcinoma. Nature, 499(7456), 43-49.
}
\examples{
data(kidney)
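## Quick look at the documented structure (illustrative only; assumes the
## list components described in the Format section above).
dim(kidney$counts)
table(kidney$treatment)
head(levels(kidney$replic))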
\dontrun{
### Source code used to assemble KIRC dataset
### load in SimSeq package for sorting counts matrix
library(SimSeq)
### htmlToText function used to scrape barcode data from uuid
htmlToText <- function(input, ...) {
###---PACKAGES ---###
library(RCurl)
library(XML)
###--- LOCAL FUNCTIONS ---###
# Determine how to grab html for a single input element
evaluate_input <- function(input) {
# if input is a .html file
if(file.exists(input)) {
char.vec <- readLines(input, warn = FALSE)
return(paste(char.vec, collapse = ""))
}
# if input is html text
if(grepl("</html>", input, fixed = TRUE)) return(input)
# if input is a URL, probably should use a regex here instead?
if(!grepl(" ", input)) {
      # download SSL certificate in case of https problems
if(!file.exists("cacert.perm")) {
download.file(url = "http://curl.haxx.se/ca/cacert.pem", destfile = "cacert.perm")
}
return(getURL(input, followlocation = TRUE, cainfo = "cacert.perm"))
}
# return NULL if none of the conditions above apply
return(NULL)
}
# convert HTML to plain text
convert_html_to_text <- function(html) {
doc <- htmlParse(html, asText = TRUE)
text <- xpathSApply(doc, paste0("//text()",
"[not(ancestor::script)][not(ancestor::style)]",
"[not(ancestor::noscript)][not(ancestor::form)]"), xmlValue)
return(text)
}
# format text vector into one character string
collapse_text <- function(txt) {
return(paste(txt, collapse = " "))
}
###--- MAIN ---###
# STEP 1: Evaluate input
html.list <- lapply(input, evaluate_input)
# STEP 2: Extract text from HTML
text.list <- lapply(html.list, convert_html_to_text)
# STEP 3: Return text
text.vector <- sapply(text.list, collapse_text)
return(text.vector)
}
### Specify path name for folder containing raw counts for each sample
mainDir <- getwd()
folder.path <- "unc.edu_KIRC.IlluminaHiSeq_RNASeqV2.Level_3.1.5.0"
### Determine list of files containing summarized raw counts
file.list <- dir(file.path(mainDir, folder.path))
keep <- grepl("genes.results", file.list)
file.list <- file.list[keep]
### Create summarized count matrix.
### Get n.row and n.col for summarized count matrix number of genes in first
### sample and number of total samples from file.list
file.temp <- file.path(mainDir, folder.path, file.list[1])
n.row <- nrow(read.table(file = file.temp, header = TRUE))
n.col <- length(file.list)
### initialize counts matrix
counts <- matrix(NA, nrow = n.row, ncol = n.col)
### get gene id's
gene.id <- read.table(file.temp, header = TRUE, stringsAsFactors = FALSE)$gene_id
### read in raw read counts from file.list
for(i in 1:n.col){
file.temp <- file.path(mainDir, folder.path, file.list[i])
counts[, i] <- read.table(file.temp, header = TRUE)$raw_count
}
### Data was summarized using RSEM software which produces non-integer
### counts for ambiguous reads. Counts are rounded as a preprocessing
### step.
counts <- round(counts)
### Cast counts matrix as integer type
counts <- matrix(as.integer(counts), nrow = nrow(counts), ncol = ncol(counts))
### Get uuid's for each sample
uuid <- substr(file.list, start = 9, stop = 44)
### Create urls from uuid list
urls <- paste(rep("https://tcga-data.nci.nih.gov/uuid/uuidws/mapping/xml/uuid/",
length(uuid)), uuid, sep = "")
### Scrape barcodes from urls
l <- length(urls)
barcodes <- vector("character", l)
for(i in 1:l){
barcodes[i] <- htmlToText(urls[i])
}
barcodes <- substr(barcodes, start = 1, stop = 28)
### Get metadata on which samples were taken from each individual,
### tumor type of sample, etc. from barcodes for each sample
metadata <- data.frame(barcodes, stringsAsFactors = FALSE)
### Study Participant
metadata$participant <- substr(barcodes, start = 9, stop = 12)
### Sample type code. See:
### https://tcga-data.nci.nih.gov/datareports/codeTablesReport.htm?codeTable=Sample\%20type
### for full list of codes and details on TCGA barcodes.
### 01: Primary Solid Tumor
### 02: Recurrent Solid Tumor
### 05: Additional New Primary
### 06: Metastatic Tumor
### 11: Solid Tissue Normal
metadata$type <- substr(barcodes, start = 14, stop = 15)
### Only keep Primary Solid Tumor and Solid Tissue Normal
keep.metadata <- metadata$type == "01" | metadata$type == "11"
metadata <- metadata[keep.metadata, ]
counts <- counts[, keep.metadata]
### Code from 01 to Tumor and 11 to Non-Tumor for easy identifiability
metadata$tumor <- "Non-Tumor"
metadata$tumor[metadata$type == "01"] <- "Tumor"
### tag participant, type, and tumor as factors
metadata$participant <- as.factor(metadata$participant)
metadata$type <-as.factor(metadata$type)
metadata$tumor <- as.factor(metadata$tumor)
### Sort and subset down to paired data
sorting <-
SortData(counts, treatment = metadata$tumor,
replic = metadata$participant, sort.method = "paired")$sorting
counts <- counts[, sorting]
metadata <- metadata[sorting, ]
metadata$participant <- factor(metadata$participant)
### Add in attributes of counts matrix
dimnames(counts) <- list(gene.id, metadata$barcodes)
attr(counts, "uuid") <- uuid
kidney <- vector("list", 3)
kidney[[1]] <- counts
kidney[[2]] <- metadata$participant
kidney[[3]] <- metadata$tumor
names(kidney) <- c("counts", "replic", "treatment")
###Save file
save(kidney, file = "kidney.rda")
}
}
\keyword{datasets}
|
/man/kidney.Rd
|
no_license
|
cran/SimSeq
|
R
| false | false | 7,464 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/adaptiveSchemes.R
\name{initialiseAMCMC.constanth}
\alias{initialiseAMCMC.constanth}
\title{initialiseAMCMC.constanth function}
\usage{
\method{initialiseAMCMC}{constanth}(obj, ...)
}
\arguments{
\item{obj}{an object}
\item{...}{additional arguments}
}
\value{
initial h for scheme
}
\description{
Initialises the \link{constanth} adaptive scheme.
}
\seealso{
\link{constanth}
}
|
/man/initialiseAMCMC.constanth.Rd
|
no_license
|
bentaylor1/lgcp
|
R
| false | false | 466 |
rd
|
context('Checking that upper and lower case are detected properly.')
# create text to check
toCheck <- c('BIG', 'little', 'Mixed', 'BIG WITH SPACE', 'little with space', 'Mixed With Space',
'UPPER 17', 'UPPER17', 'UPP17ER', 'lower 17', 'lower17', 'low17er',
'Mixed 17', 'Mixed17', 'Mix17ed',
'19')
test_that('The functions return logicals', {
expect_is(find.case(toCheck, 'upper'), 'logical')
expect_is(find.case(toCheck, 'lower'), 'logical')
expect_is(find.case(toCheck, 'mixed'), 'logical')
expect_is(find.case(toCheck, 'numeric'), 'logical')
expect_is(upper.case(toCheck), 'logical')
expect_is(lower.case(toCheck), 'logical')
expect_is(mixed.case(toCheck), 'logical')
})
test_that('The correct number of elements are returned', {
expect_equal(length(find.case(toCheck, 'upper')), length(toCheck))
expect_equal(length(find.case(toCheck, 'lower')), length(toCheck))
expect_equal(length(find.case(toCheck, 'mixed')), length(toCheck))
expect_equal(length(find.case(toCheck, 'numeric')), length(toCheck))
expect_equal(length(upper.case(toCheck)), length(toCheck))
expect_equal(length(lower.case(toCheck)), length(toCheck))
expect_equal(length(mixed.case(toCheck)), length(toCheck))
expect_equal(length(find.case(toCheck, 'upper')), length(upper.case(toCheck)))
expect_equal(length(find.case(toCheck, 'lower')), length(lower.case(toCheck)))
expect_equal(length(find.case(toCheck, 'mixed')), length(mixed.case(toCheck)))
})
test_that('find.case returns the same results as upper.case and lower.case', {
expect_identical(find.case(toCheck, 'upper'), upper.case(toCheck))
expect_identical(find.case(toCheck, 'lower'), lower.case(toCheck))
expect_identical(find.case(toCheck, 'mixed'), mixed.case(toCheck))
})
test_that('find.case, upper.case and lower.case work on simple data', {
expect_identical(find.case(toCheck, 'upper'), c(TRUE, FALSE, FALSE, TRUE, FALSE, FALSE,
TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE,
TRUE))
expect_identical(upper.case(toCheck), c(TRUE, FALSE, FALSE, TRUE, FALSE, FALSE,
TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE,
TRUE))
expect_identical(find.case(toCheck, 'lower'), c(FALSE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE,
TRUE))
expect_identical(lower.case(toCheck), c(FALSE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE,
TRUE))
expect_identical(find.case(toCheck, 'mixed'), c(FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, TRUE, TRUE,
FALSE))
expect_identical(mixed.case(toCheck), c(FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, TRUE, TRUE,
FALSE))
expect_identical(find.case(toCheck, 'numeric'), c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE,
TRUE))
})
|
/tests/testthat/test-case-checker.r
|
no_license
|
cran/useful
|
R
| false | false | 4,128 |
r
|
source("main_work/Code/01_generalFunctions.R")
source("main_work/Code/02_simulationFunctions.R")
source("main_work/Code/03_estimationFunctions2.R")
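# Note: real.cov2 appears to implement the standard large-sample expression for
# the covariance between two sample correlations r_ij and r_kl as a function of
# the population correlations (up to the 1/n factor); the
# vector_var_matrix_calc_COR* variants below compute the same quantity element-wise.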
real.cov2 <- function(i, j, k, l, MATR) {
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
p <- 10
MATR <- build_parameters(p, 0.5, c(0,1))$Corr.mat
# MATR <- matrix(1:9, ncol = 3)
# MATR <- MATR + t(MATR) + diag(3)*9
vector_var_matrix_calc_COR <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i)))
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p))
pelet <- matrix(0, nrow = m, ncol = m)
for(i1 in 1:m){
for(j1 in i1:m){
i <- order_vecti[i1]
j <- order_vectj[i1]
k <- order_vecti[j1]
l <- order_vectj[j1]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
pelet[i1,j1] <-
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
}
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
cppFunction(
'NumericMatrix corcalc_c(NumericMatrix MATR, int p, int m, NumericVector order_vecti, NumericVector order_vectj) {
NumericMatrix pelet(m, m);
for (int i1 = 0; i1 < m; i1++) {
    for (int j1 = i1; j1 < m; j1++) {  // upper triangle only; the R wrapper symmetrises
      int i = order_vecti[i1];
      int j = order_vectj[i1];
      int k = order_vecti[j1];
      int l = order_vectj[j1];
      double MATRij = MATR(i,j);  // store as double: int would truncate correlations to 0
      double MATRkl = MATR(k,l);
      double MATRik = MATR(i,k);
      double MATRil = MATR(i,l);
      double MATRjk = MATR(j,k);
      double MATRjl = MATR(j,l);
      // note: ^ is XOR in C++, so squares are written as explicit products
      pelet(i1,j1) =
        (MATRij*MATRkl/2.0) * (MATRik*MATRik + MATRil*MATRil + MATRjk*MATRjk + MATRjl*MATRjl) -
        MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
        MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
        (MATRik*MATRjl + MATRil*MATRjk);
}
}
return pelet;
}')
corcalc_R <- function(MATR, p, m, order_vecti, order_vectj){
pelet <- matrix(0, nrow = m, ncol = m)
for(i1 in 1:m){
for(j1 in i1:m){
i <- order_vecti[i1]
j <- order_vectj[i1]
k <- order_vecti[j1]
l <- order_vectj[j1]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
pelet[i1,j1] <-
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
}
return(pelet)
}
vector_var_matrix_calc_COR_CR <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i)))
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p))
pelet <- corcalc_R(MATR, p, m, order_vecti, order_vectj)
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
vector_var_matrix_calc_COR_C <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i))) - 1
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p)) - 1
pelet <- corcalc_c(MATR, p, m, order_vecti, order_vectj)
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
vector_var_matrix_calc_COR_par <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- dim(MATR)[1]
m <- p*(p-1)/2
tocomp <- unlist(sapply(1:m, function(i) (i - 1)*m + i:m))
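  # 'tocomp' enumerates the linear indices of the upper triangle (including the
  # diagonal) of the m x m covariance matrix. real.cov2() below decodes such an
  # index q into the matrix position (t1, t2) and then, via 'cumsum', into the two
  # correlation index pairs (i,j) and (k,l). The returned expression appears to be
  # the usual asymptotic (Pearson-Filon type) covariance between the sample
  # correlations r_ij and r_kl, up to the 1/n factor.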
real.cov2 <- function(q, MATR, p, m, cumsum) {
t1 <- ceiling(q/m)
t2 <- q %% m
t2 <- m*(t2 == 0) + t2*(t2 != 0)
i <- sum(cumsum < t1)
j <- i + t1 - cumsum[i]
k <- sum(cumsum < t2)
l <- k + t2 - cumsum[k]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
cumsum <- c(0, cumsum((p - 1):1))
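  # Note: 'ncores' is not defined in this file and is assumed to be set by one of
  # the sourced helper scripts (e.g. via parallel::detectCores()); the ifelse()
  # below forces a single core on Windows, where mclapply() cannot fork.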
pelet <- mclapply(tocomp, real.cov2, MATR = MATR,
p = p, m = m, cumsum = cumsum,
mc.cores = ifelse(.Platform$OS.type == "windows", 1, ncores))
pelet <- vector2triangle(unlist(pelet), diag = T)
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
# profvis({
tt1 <- Sys.time()
pelet1 <- vector_var_matrix_calc_COR(MATR)
tt1 <- Sys.time() - tt1
tt2 <- Sys.time()
pelet2 <- vector_var_matrix_calc_COR_C(MATR)
tt2 <- Sys.time() - tt2
tt3 <- Sys.time()
#pelet3 <- vector_var_matrix_calc_COR_par(MATR)
tt3 <- Sys.time() - tt3
# })
identical(round(pelet1, 2), round(pelet2 ,2))
#identical(round(pelet2, 2), round(pelet3 ,2))
tt1
tt2
tt3
#tt4
# rm(pelet1, pelet2, pelet3, pelet4)
gc()
|
/archive/temp_vector_var.R
|
no_license
|
itamarfaran/correlation_glm
|
R
| false | false | 7,316 |
r
|
source("main_work/Code/01_generalFunctions.R")
source("main_work/Code/02_simulationFunctions.R")
source("main_work/Code/03_estimationFunctions2.R")
real.cov2 <- function(i, j, k, l, MATR) {
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
p <- 10
MATR <- build_parameters(p, 0.5, c(0,1))$Corr.mat
# MATR <- matrix(1:9, ncol = 3)
# MATR <- MATR + t(MATR) + diag(3)*9
vector_var_matrix_calc_COR <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i)))
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p))
pelet <- matrix(0, nrow = m, ncol = m)
for(i1 in 1:m){
for(j1 in i1:m){
i <- order_vecti[i1]
j <- order_vectj[i1]
k <- order_vecti[j1]
l <- order_vectj[j1]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
pelet[i1,j1] <-
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
}
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
cppFunction(
'NumericMatrix corcalc_c(NumericMatrix MATR, int p, int m, NumericVector order_vecti, NumericVector order_vectj) {
  NumericMatrix pelet(m, m);
  for (int i1 = 0; i1 < m; i1++) {
    for (int j1 = i1; j1 < m; j1++) {
      // upper triangle only; the R wrapper symmetrises the result afterwards
      int i = order_vecti[i1];
      int j = order_vectj[i1];
      int k = order_vecti[j1];
      int l = order_vectj[j1];
      // correlations are doubles (the original int declarations truncated them)
      // and "^" is bitwise XOR in C++, so squares are written as products
      double MATRij = MATR(i,j);
      double MATRkl = MATR(k,l);
      double MATRik = MATR(i,k);
      double MATRil = MATR(i,l);
      double MATRjk = MATR(j,k);
      double MATRjl = MATR(j,l);
      pelet(i1,j1) =
        (MATRij*MATRkl/2) * (MATRik*MATRik + MATRil*MATRil + MATRjk*MATRjk + MATRjl*MATRjl) -
        MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
        MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
        (MATRik*MATRjl + MATRil*MATRjk);
    }
  }
  return pelet;
}')
corcalc_R <- function(MATR, p, m, order_vecti, order_vectj){
pelet <- matrix(0, nrow = m, ncol = m)
for(i1 in 1:m){
for(j1 in i1:m){
i <- order_vecti[i1]
j <- order_vectj[i1]
k <- order_vecti[j1]
l <- order_vectj[j1]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
pelet[i1,j1] <-
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
}
return(pelet)
}
vector_var_matrix_calc_COR_CR <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i)))
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p))
pelet <- corcalc_R(MATR, p, m, order_vecti, order_vectj)
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
vector_var_matrix_calc_COR_C <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i))) - 1
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p)) - 1
pelet <- corcalc_c(MATR, p, m, order_vecti, order_vectj)
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
vector_var_matrix_calc_COR_par <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- dim(MATR)[1]
m <- p*(p-1)/2
tocomp <- unlist(sapply(1:m, function(i) (i - 1)*m + i:m))
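  # 'tocomp' enumerates the linear indices of the upper triangle (including the
  # diagonal) of the m x m covariance matrix. real.cov2() below decodes such an
  # index q into the matrix position (t1, t2) and then, via 'cumsum', into the two
  # correlation index pairs (i,j) and (k,l). The returned expression appears to be
  # the usual asymptotic (Pearson-Filon type) covariance between the sample
  # correlations r_ij and r_kl, up to the 1/n factor.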
real.cov2 <- function(q, MATR, p, m, cumsum) {
t1 <- ceiling(q/m)
t2 <- q %% m
t2 <- m*(t2 == 0) + t2*(t2 != 0)
i <- sum(cumsum < t1)
j <- i + t1 - cumsum[i]
k <- sum(cumsum < t2)
l <- k + t2 - cumsum[k]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
cumsum <- c(0, cumsum((p - 1):1))
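  # Note: 'ncores' is not defined in this file and is assumed to be set by one of
  # the sourced helper scripts (e.g. via parallel::detectCores()); the ifelse()
  # below forces a single core on Windows, where mclapply() cannot fork.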
pelet <- mclapply(tocomp, real.cov2, MATR = MATR,
p = p, m = m, cumsum = cumsum,
mc.cores = ifelse(.Platform$OS.type == "windows", 1, ncores))
pelet <- vector2triangle(unlist(pelet), diag = T)
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
# profvis({
tt1 <- Sys.time()
pelet1 <- vector_var_matrix_calc_COR(MATR)
tt1 <- Sys.time() - tt1
tt2 <- Sys.time()
pelet2 <- vector_var_matrix_calc_COR_C(MATR)
tt2 <- Sys.time() - tt2
tt3 <- Sys.time()
#pelet3 <- vector_var_matrix_calc_COR_par(MATR)
tt3 <- Sys.time() - tt3
# })
identical(round(pelet1, 2), round(pelet2 ,2))
#identical(round(pelet2, 2), round(pelet3 ,2))
tt1
tt2
tt3
#tt4
# rm(pelet1, pelet2, pelet3, pelet4)
gc()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knit_slides.R
\name{knit_slides}
\alias{knit_slides}
\title{Knit RMarkdown Slides}
\usage{
knit_slides(name, homepage = FALSE)
}
\arguments{
\item{name}{a character of length 1. The course sub-folder name
(e.g. \code{"template"}).}
\item{homepage}{a boolean. If \code{TRUE}, only the Home page will be knit.}
}
\description{
This function knits Rmd slides to HTML output (stored in the same folder).
All Rmd files need to be named \strong{index.Rmd}.
}
\examples{
\dontrun{
## Knit course ----
knit_slides(name = "template")
}
}
|
/man/knit_slides.Rd
|
permissive
|
FRBCesab/theodatasci
|
R
| false | true | 608 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knit_slides.R
\name{knit_slides}
\alias{knit_slides}
\title{Knit RMarkdown Slides}
\usage{
knit_slides(name, homepage = FALSE)
}
\arguments{
\item{name}{a character of length 1. The course sub-folder name
(e.g. \code{"template"}).}
\item{homepage}{a boolean. If \code{TRUE}, only the Home page will be knit.}
}
\description{
This function knits Rmd slides to HTML output (stored in the same folder).
All Rmd files need to be named \strong{index.Rmd}.
}
\examples{
\dontrun{
## Knit course ----
knit_slides(name = "template")
}
}
|
#' Change the observations of a fitted Dirichlet process.
#'
#' Using a fitted Dirichlet process object, include new data. The new data will be assigned to the best-fitting cluster for each point.
#'@param dpobj The Dirichlet process object.
#'@param newData New data to be included
#'@return Changed Dirichlet process object
#'@examples
#'
#' y <- rnorm(10)
#' dp <- DirichletProcessGaussian(y)
#' dp <- ChangeObservations(dp, rnorm(10))
#'
#'@export
ChangeObservations <- function(dpobj, newData) UseMethod("ChangeObservations", dpobj)
#' @export
ChangeObservations.default <- function(dpobj, newData) {
if (!is.matrix(newData)){
newData <- matrix(newData, ncol = 1)
}
predicted_data <- ClusterLabelPredict(dpobj, newData)
predicted_data$pointsPerCluster[1:dpobj$numberClusters] <- predicted_data$pointsPerCluster[1:dpobj$numberClusters] -
dpobj$pointsPerCluster #removes the old data from the clusters
emptyClusters <- which(predicted_data$pointsPerCluster == 0)
if (length(emptyClusters) > 0) {
predicted_data$pointsPerCluster <- predicted_data$pointsPerCluster[-emptyClusters]
# predicted_data$clusterParams = predicted_data$clusterParams[-emptyClusters, ,
# drop=FALSE]
predicted_data$clusterParams <- lapply(predicted_data$clusterParams, function(x) x[,
, -emptyClusters, drop = FALSE])
predicted_data$numLabels <- predicted_data$numLabels - length(emptyClusters)
for (i in length(emptyClusters):1) {
# go through backwards to reindex correctly
predicted_data$componentIndexes[predicted_data$componentIndexes > emptyClusters[i]] <- predicted_data$componentIndexes[predicted_data$componentIndexes >
emptyClusters[i]] - 1
}
}
dpobj$data <- newData
dpobj$n <- nrow(newData)
dpobj$clusterLabels <- predicted_data$componentIndexes
dpobj$pointsPerCluster <- predicted_data$pointsPerCluster
dpobj$numberClusters <- predicted_data$numLabels
dpobj$clusterParameters <- predicted_data$clusterParams
dpobj <- InitialisePredictive(dpobj)
return(dpobj)
}
#'@export
ChangeObservations.hierarchical <- function(dpobj, newData){
for(i in seq_along(dpobj$indDP)){
dpobj$indDP[[i]] <- ChangeObservations(dpobj$indDP[[i]], newData[[i]])
}
return(dpobj)
}
|
/R/change_observations.R
|
no_license
|
cran/dirichletprocess
|
R
| false | false | 2,256 |
r
|
#' Change the observations of a fitted Dirichlet process.
#'
#' Using a fitted Dirichlet process object, include new data. The new data will be assigned to the best-fitting cluster for each point.
#'@param dpobj The Dirichlet process object.
#'@param newData New data to be included
#'@return Changed Dirichlet process object
#'@examples
#'
#' y <- rnorm(10)
#' dp <- DirichletProcessGaussian(y)
#' dp <- ChangeObservations(dp, rnorm(10))
#'
#'@export
ChangeObservations <- function(dpobj, newData) UseMethod("ChangeObservations", dpobj)
#' @export
ChangeObservations.default <- function(dpobj, newData) {
if (!is.matrix(newData)){
newData <- matrix(newData, ncol = 1)
}
predicted_data <- ClusterLabelPredict(dpobj, newData)
predicted_data$pointsPerCluster[1:dpobj$numberClusters] <- predicted_data$pointsPerCluster[1:dpobj$numberClusters] -
dpobj$pointsPerCluster #removes the old data from the clusters
emptyClusters <- which(predicted_data$pointsPerCluster == 0)
if (length(emptyClusters) > 0) {
predicted_data$pointsPerCluster <- predicted_data$pointsPerCluster[-emptyClusters]
# predicted_data$clusterParams = predicted_data$clusterParams[-emptyClusters, ,
# drop=FALSE]
predicted_data$clusterParams <- lapply(predicted_data$clusterParams, function(x) x[,
, -emptyClusters, drop = FALSE])
predicted_data$numLabels <- predicted_data$numLabels - length(emptyClusters)
for (i in length(emptyClusters):1) {
# go through backwards to reindex correctly
predicted_data$componentIndexes[predicted_data$componentIndexes > emptyClusters[i]] <- predicted_data$componentIndexes[predicted_data$componentIndexes >
emptyClusters[i]] - 1
}
}
dpobj$data <- newData
dpobj$n <- nrow(newData)
dpobj$clusterLabels <- predicted_data$componentIndexes
dpobj$pointsPerCluster <- predicted_data$pointsPerCluster
dpobj$numberClusters <- predicted_data$numLabels
dpobj$clusterParameters <- predicted_data$clusterParams
dpobj <- InitialisePredictive(dpobj)
return(dpobj)
}
#'@export
ChangeObservations.hierarchical <- function(dpobj, newData){
for(i in seq_along(dpobj$indDP)){
dpobj$indDP[[i]] <- ChangeObservations(dpobj$indDP[[i]], newData[[i]])
}
return(dpobj)
}
|
rm(list = ls())
graphics.off()
my_data <- read.csv("IBM-HR-Emplyee-NoAttrition.csv")
cols <- colnames(my_data)
rownames(my_data) <- my_data$Subj
data1 <- my_data[,11:32]
data1$Age <- my_data$Age
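# Discretize the continuous variables into a small number of ordinal bins
# (coded 1, 2, 3) so that they can be treated as nominal variables in the MCA
# below; the cut points appear to be ad hoc splits of the observed ranges.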
data1$MonthlyIncome <- cut(data1$MonthlyIncome,breaks = c(min(data1$MonthlyIncome)-1,5000 ,10000, max(data1$MonthlyIncome)+1),labels = c(1,2,3))
data1$DailyRate <- cut(data1$DailyRate,breaks = c(min(data1$DailyRate)-1,600 ,1100, max(data1$DailyRate)+1),labels = c(1,2,3))
data1$HourlyRate <- cut(data1$HourlyRate,breaks = c(min(data1$HourlyRate)-1,55 ,80, max(data1$HourlyRate)+1),labels = c(1,2,3))
data1$MonthlyRate <- cut(data1$MonthlyRate,breaks = c(min(data1$MonthlyRate)-1,10000 ,17500, max(data1$MonthlyRate)+1),labels = c(1,2,3))
data1$DistanceFromHome <- cut(data1$DistanceFromHome,breaks = c(min(data1$DistanceFromHome)-1,6 , max(data1$DistanceFromHome)+1),labels = c(1,2))
data1$Education <- cut(data1$Education,breaks = c(min(data1$Education)-1, 2,3 , max(data1$Education)+1),labels = c(1,2,3))
data1$JobInvolvement <- cut(data1$JobInvolvement,breaks = c(min(data1$JobInvolvement)-1,2.5,3.5, max(data1$JobInvolvement)+1),labels = c(1,2,3))
data1$JobLevel <- cut(data1$JobLevel,breaks = c(min(data1$JobLevel)-1,2,4, max(data1$JobLevel)+1),labels = c(1,2,3))
data1$NumCompaniesWorked <- cut(data1$NumCompaniesWorked,breaks = c(min(data1$NumCompaniesWorked)-1, 2,6, max(data1$NumCompaniesWorked)+1),labels = c(1,2,3))
data1$TotalWorkingYears <- cut(data1$TotalWorkingYears,breaks = c(min(data1$TotalWorkingYears)-1, 10, max(data1$TotalWorkingYears)+1),labels = c(1,2))
data1$PercentSalaryHike <- cut(data1$PercentSalaryHike,breaks = c(min(data1$PercentSalaryHike)-1, 13,19, max(data1$PercentSalaryHike)+1),labels = c(1,2,3))
data1$WorkLifeBalance <- cut(data1$WorkLifeBalance,breaks = c(min(data1$WorkLifeBalance)-1, 2, max(data1$WorkLifeBalance)+1),labels = c(1,2))
data1$Age <- cut(data1$Age,breaks = c(min(data1$Age)-1, 30,40, max(data1$Age)+1),labels = c(1,2,3))
data1$TrainingTimesLastYear <- cut(data1$TrainingTimesLastYear,breaks = c(min(data1$TrainingTimesLastYear)-1, 2,4, max(data1$TrainingTimesLastYear)+1),labels = c(1,2,3))
data1$YearsAtCompany <- cut(data1$YearsAtCompany,breaks = c(min(data1$YearsAtCompany)-1, 5,15, max(data1$YearsAtCompany)+1),labels = c(1,2,3))
data1$YearsInCurrentRole <- cut(data1$YearsInCurrentRole,breaks = c(min(data1$YearsInCurrentRole)-1, 4,10, max(data1$YearsInCurrentRole)+1),labels = c(1,2,3))
data1$YearsSinceLastPromotion <- cut(data1$YearsSinceLastPromotion,breaks = c(min(data1$YearsSinceLastPromotion)-1, 2,7, max(data1$YearsSinceLastPromotion)+1),labels = c(1,2,3))
data1$YearsWithCurrManager <- cut(data1$YearsWithCurrManager,breaks = c(min(data1$YearsWithCurrManager)-1, 4,10, max(data1$YearsWithCurrManager)+1),labels = c(1,2,3))
library(corrplot)
# Assumed dependencies for the MCA analysis below (only corrplot was loaded in the
# original script): ExPosition/InPosition for epMCA() and epMCA.inference.battery(),
# prettyGraphs for the color selection helpers, and PTCA4CATA for the factor maps,
# scree plot and bootstrap graphics; recode() is called with the dplyr:: prefix.
library(ExPosition)
library(InPosition)
library(prettyGraphs)
library(PTCA4CATA)
data2 <- my_data[,11:32]
data2$Age <- my_data$Age
cor.my_data <- cor(data2)
corrplot(cor.my_data, method = "ellipse")
data1$PerformanceRating <- as.factor(data1$PerformanceRating)
data1$JobInvolvement <- as.factor(data1$JobInvolvement)
data1$StockOptionLevel <- as.factor(data1$StockOptionLevel)
data1$EnvironmentSatisfaction <- as.factor(data1$EnvironmentSatisfaction)
data1$JobSatisfaction <- as.factor(data1$JobSatisfaction)
data1$RelationshipSatisfaction <- as.factor(data1$RelationshipSatisfaction)
resMCA.sym <- epMCA(data1 ,make_data_nominal = TRUE ,DESIGN = my_data$Department ,make_design_nominal = TRUE,graphs = FALSE, symmetric = TRUE)
resMCA.asym <- epMCA(data1 ,make_data_nominal = TRUE ,DESIGN = my_data$Department ,make_design_nominal = TRUE,graphs = FALSE, symmetric = FALSE)
resMCA.inf <- epMCA.inference.battery(data1, make_data_nominal = TRUE, DESIGN = my_data$Department ,make_design_nominal =TRUE,graphs = FALSE)
DESIGN <- list()
DESIGN$rows$Region$labels <- unique(my_data$Department)
DESIGN$rows$Region$vec <- my_data$Department
#Convert the vector to a matrix
DESIGN$rows$Region$mat <- makeNominalData(as.matrix(DESIGN$rows$Region$vec))
automatic_colors <- createColorVectorsByDesign(DESIGN$rows$Region$mat)
DESIGN$rows$Region$color_groups <- c("red","blue","yellow")
DESIGN$rows$Region$color_observ <- as.matrix(DESIGN$rows$Region$vec)
DESIGN$rows$Region$color_observ[which(DESIGN$rows$Region$vec=="Research & Development")] <- DESIGN$rows$Region$color_groups[1]
DESIGN$rows$Region$color_observ[which(DESIGN$rows$Region$vec=="Human Resources")] <- DESIGN$rows$Region$color_groups[2]
DESIGN$rows$Region$color_observ[which(DESIGN$rows$Region$vec=="Sales")] <- DESIGN$rows$Region$color_groups[3]
PlotScree(ev = resMCA.sym$ExPosition.Data$eigs,
p.ev = resMCA.inf$Inference.Data$components$p.vals,
          title = 'IBM-No-Attrition data Set. Eigenvalues Inference',
plotKaiser = TRUE
)
MonthlyIncome1 <- "red"
DailyRate1 <- "pink"
HourlyRate1 <- "blue"
MonthlyRate1 <- "skyblue"
DistanceFromHome1 <- "green"
PerformanceRating1 <- "navyblue"
Education1 <- "darkolivegreen4"
JobInvolvement1 <- "darkgoldenrod3"
JobLevel1 <- "brown"
StockOptionLevel1 <- "orange"
NumCompaniesWorked1 <- "cornflowerblue"
PercentSalaryHike1 <- "chartreuse3"
TotalWorkingYears1 <- "peachpuff3"
TrainingTimesLastYear1 <- "mediumorchid2"
WorkLifeBalance1 <- "turquoise3"
YearsAtCompany1 <- "wheat4"
YearsInCurrentRole1 <-"slategray2"
YearsSinceLastPromotion1 <- "purple"
YearsWithCurrManager1 <- "magenta"
EnvironmentSatisfaction1 <- "cyan4"     # "teal" is not a valid R color name
JobSatisfaction1 <- "goldenrod1"        # "chromeyellow" is not a valid R color name
RelationshipSatisfaction1 <- "yellow"
Age1 <- "black"
col4J <- dplyr::recode(resMCA.sym$Plotting.Data$fj.col,
'JobLevel1' = JobLevel1 , 'MonthlyIncome1' = MonthlyIncome1, 'DailyRate1' = DailyRate1, 'HourlyRate'=HourlyRate1, 'MonthlyRate' =MonthlyRate1, 'DistanceFromHome'= DistanceFromHome1,
'PerformanceRating'= PerformanceRating1, 'Education' =Education1, 'JobInvolvement'=JobInvolvement1, 'StockOptionLevel'=StockOptionLevel1,
'NumCompaniesWorked'= NumCompaniesWorked1 , 'PercentSalaryHike'= PercentSalaryHike1 ,'TotalWorkingYears'= TotalWorkingYears1, 'TrainingTimesLastYear'=TrainingTimesLastYear1,
'WorkLifeBalance'=WorkLifeBalance1, 'YearsAtCompany'= YearsAtCompany1, 'YearsInCurrentRole'= YearsInCurrentRole1,
'YearsSinceLastPromotion'= YearsSinceLastPromotion1, 'YearsWithCurrManager'=YearsWithCurrManager1 ,'EnvironmentSatisfaction'= EnvironmentSatisfaction1,
'JobSatisfaction'=JobSatisfaction1 ,'RelationshipSatisfaction'=RelationshipSatisfaction1,'Age'= Age1 )
## Biplot for symmetrical plot for component 1 and 2 for Department datatype
col4I <- prettyGraphsColorSelection(NCOL(resMCA.sym$Plotting.Data$fj.col))
symMap1 <- createFactorMapIJ(resMCA.sym$ExPosition.Data$fi,resMCA.sym$ExPosition.Data$fj,
col.points.i = DESIGN$rows$Region$color_observ,
col.points.j = col4I,
col.labels.i = DESIGN$rows$Region$color_observ ,
col.labels.j = col4I ,
cex.i = 2.5, pch.i = 20,
pch.j = 21, cex.j = 2.5,text.cex.j =2, axis1 = 1,axis2 = 2, title = "Symmetrical plot for components 1 and 2 for Department Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA1 <- createxyLabels(resCA = resMCA.sym, x_axis = 1,y_axis = 2)
map.IJ.sym1 <- symMap1$baseMap + symMap1$I_points
map.IJ.sym2 <- symMap1$baseMap + symMap1$J_labels + symMap1$J_points + labels4MCA1
map.IJ.sym3 <- symMap1$baseMap + symMap1$I_points +
symMap1$J_labels + symMap1$J_points + labels4MCA1
print(map.IJ.sym1)
print(map.IJ.sym2)
print(map.IJ.sym3)
## Biplot for symmetrical plot for component 2 and 3 for Department datatype
symMap2 <- createFactorMapIJ(resMCA.sym$ExPosition.Data$fi,resMCA.sym$ExPosition.Data$fj,
col.points.i = DESIGN$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2, axis1 = 2,axis2 = 3, title = "Symmetrical plot for components 2 and 3 for Department Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA2 <- createxyLabels(resCA = resMCA.sym, x_axis = 2, y_axis = 3)
map.IJ.sym11 <- symMap2$baseMap + symMap2$I_points
map.IJ.sym21 <- symMap2$baseMap + symMap2$J_labels + symMap2$J_points + labels4MCA2
map.IJ.sym31 <- symMap2$baseMap + symMap2$I_points +
symMap2$J_labels + symMap2$J_points + labels4MCA2
print(map.IJ.sym11)
print(map.IJ.sym21)
print(map.IJ.sym31)
## Biplot for symmetrical plot for component 1 and 3 for Department datatype
symMap3 <- createFactorMapIJ(resMCA.sym$ExPosition.Data$fi,resMCA.sym$ExPosition.Data$fj,
col.points.i = DESIGN$rows$Region$color_observ ,
col.points.j = "black",
col.labels.i = DESIGN$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2, axis1 = 1,axis2 = 3, title = "Symmetrical plot for components 1 and 3 for Department Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA3 <- createxyLabels(resCA = resMCA.sym, x_axis = 1,y_axis = 3)
map.IJ.sym31 <- symMap3$baseMap + symMap3$I_points
map.IJ.sym32 <- symMap3$baseMap + symMap3$J_labels + symMap3$J_points + labels4MCA3
map.IJ.sym33 <- symMap3$baseMap + symMap3$I_points +
symMap3$J_labels + symMap3$J_points + labels4MCA3
print(map.IJ.sym31)
print(map.IJ.sym32)
print(map.IJ.sym33)
# color for gender
DESIGN1 <- list()
DESIGN1$rows$Region$labels <- unique(my_data$Gender)
DESIGN1$rows$Region$vec <- my_data$Gender
#Convert the vector to a matrix
DESIGN1$rows$Region$mat <- makeNominalData(as.matrix(DESIGN1$rows$Region$vec))
automatic_colors <- createColorVectorsByDesign(DESIGN1$rows$Region$mat)
DESIGN1$rows$Region$color_groups <- c("skyblue","pink")
DESIGN1$rows$Region$color_observ <- as.matrix(DESIGN1$rows$Region$vec)
DESIGN1$rows$Region$color_observ[which(DESIGN1$rows$Region$vec=="Male")] <- DESIGN1$rows$Region$color_groups[1]
DESIGN1$rows$Region$color_observ[which(DESIGN1$rows$Region$vec=="Female")] <- DESIGN1$rows$Region$color_groups[2]
# inference
resMCA.sym1 <- epMCA(data1 ,make_data_nominal = TRUE ,DESIGN = my_data$Gender ,make_design_nominal = TRUE,graphs = FALSE, symmetric = TRUE)
resMCA.asym1 <- epMCA(data1 ,make_data_nominal = TRUE ,DESIGN = my_data$Gender ,make_design_nominal = TRUE,graphs = FALSE, symmetric = FALSE)
resMCA.inf1 <- epMCA.inference.battery(data1, make_data_nominal = TRUE, DESIGN = my_data$Gender ,make_design_nominal =TRUE,graphs = FALSE)
## Biplot for symmetrical plot for component 1 and 2 for Gender datatype
symMa <- createFactorMapIJ(resMCA.sym1$ExPosition.Data$fi,resMCA.sym1$ExPosition.Data$fj,
col.points.i = DESIGN1$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN1$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 2.5, pch.i = 20,
pch.j = 21, cex.j = 2.5,text.cex.j =2, axis1 = 1,axis2 = 2, title = "Symmetrical plot for components 1 and 2 for Gender Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4 <- createxyLabels(resCA = resMCA.sym1, x_axis = 1,y_axis = 2)
map.IJ.sym111 <- symMa$baseMap + symMa$I_points
map.IJ.sym222 <- symMa$baseMap + symMa$J_labels + symMa$J_points + labels4
map.IJ.sym333 <- symMa$baseMap + symMa$I_points +
symMa$J_labels + symMa$J_points + labels4
print(map.IJ.sym111)
print(map.IJ.sym222)
print(map.IJ.sym333)
## Biplot for symmetrical plot for component 2 and 3 for Gender datatype
symMap21 <- createFactorMapIJ(resMCA.sym1$ExPosition.Data$fi,resMCA.sym1$ExPosition.Data$fj,
col.points.i = DESIGN1$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN1$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2, axis1 = 2,axis2 = 3, title = "Symmetrical plot for components 2 and 3 for Gender Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA22 <- createxyLabels(resCA = resMCA.sym1, x_axis = 2, y_axis = 3)
map.IJ.sym1111 <- symMap21$baseMap + symMap21$I_points
map.IJ.sym211 <- symMap21$baseMap + symMap21$J_labels + symMap21$J_points + labels4MCA22
map.IJ.sym311 <- symMap21$baseMap + symMap21$I_points +
symMap21$J_labels + symMap21$J_points + labels4MCA22
print(map.IJ.sym1111)
print(map.IJ.sym211)
print(map.IJ.sym311)
## Biplot for symmetrical plot for component 1 and 3 for Gender datatype
symMap31 <- createFactorMapIJ(resMCA.sym1$ExPosition.Data$fi,resMCA.sym1$ExPosition.Data$fj,
col.points.i = DESIGN1$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN1$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2, axis1 = 1,axis2 = 3, title = "Symmetrical plot for components 1 and 3 for Gender Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA31 <- createxyLabels(resCA = resMCA.sym1, x_axis = 1,y_axis = 3)
map.IJ.sym311 <- symMap31$baseMap + symMap31$I_points
map.IJ.sym321 <- symMap31$baseMap + symMap31$J_labels + symMap31$J_points + labels4MCA31
map.IJ.sym331 <- symMap31$baseMap + symMap31$I_points +
symMap31$J_labels + symMap31$J_points + labels4MCA31
print(map.IJ.sym311)
print(map.IJ.sym321)
print(map.IJ.sym331)
## Biplot for asymmetrical plot for component 1
asymMap1 <- createFactorMapIJ(resMCA.asym1$ExPosition.Data$fi,resMCA.asym1$ExPosition.Data$fj,
col.points.i = DESIGN1$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN1$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2,
alpha.axes = 0.2,alpha.points.i = 1, axis1 = 1, axis2 = 2, title = " Asymmetrical plot for components 1 and 2")
alabels4MCA1 <- createxyLabels(resCA = resMCA.asym1)
map.IJ.asym1 <- asymMap1$baseMap + asymMap1$I_points +
asymMap1$J_labels + asymMap1$J_points + alabels4MCA1
print(map.IJ.asym1)
## Bootstrap Interval
constraints.mca <- minmaxHelper(mat1 = resMCA.sym$ExPosition.Data$fi, mat2 = resMCA.sym$ExPosition.Data$fj)
color <- prettyGraphsColorSelection(NCOL(data1))
baseMap.i1 <- createFactorMap(resMCA.sym$ExPosition.Data$fi,constraints = constraints.mca,
col.points = DESIGN$rows$Region$color_observ , axis1 = 1, axis2 = 2,
cex = 1, pch = 20,
display.labels = FALSE
)
label4Map1 <- createxyLabels.gen(1,2,
lambda =resMCA.sym$ExPosition.Data$eigs,
tau = resMCA.sym$ExPosition.Data$t)
a1 <- baseMap.i1$zeMap + baseMap.i1$zeMap_dots +label4Map1
BootCube.Gr <- Boot4Mean(resMCA.sym$ExPosition.Data$fi,
design = my_data$Department,
niter = 100,
suppressProgressBar = TRUE)
#_____________________________________________________________________
# Bootstrap ratios ----
bootRatios.Gr <- boot.ratio.test(BootCube.Gr$BootCube)
#*********************************************************************
# Mean Map
# create the map for the means
# get the means by groups
dataMeans <- getMeans(resMCA.sym$ExPosition.Data$fi, my_data$Department)
# a vector of color for the means
col4data <- DESIGN$rows$Region$color_observ
col4Means <- unique(col4data)
# the map
MapGroup <- createFactorMap(dataMeans,
# use the constraint from the main map
constraints = constraints.mca,
col.points = col4Means,
cex = 7, # size of the dot (bigger)
col.labels = col4Means,
text.cex = 6)
# The map with observations and group means
a003.Map.I.withMeans <- a1 +
MapGroup$zeMap_dots + MapGroup$zeMap_text
print(a003.Map.I.withMeans)
#_____________________________________________________________________
# Create the ellipses
# Bootstrapped CI ----
#_____________________________________________________________________
# Create Confidence Interval Plots
# use function MakeCIEllipses from package PTCA4CATA
GraphElli <- MakeCIEllipses(BootCube.Gr$BootCube[,1:2,],
names.of.factors = c("Dimension 1","Dimension 2"),
col = col4Means,
p.level = .95
)
#_____________________________________________________________________
# create the I-map with Observations, means and confidence intervals
#
a004.Map.I.withCI <- a1 + MapGroup$zeMap_text + GraphElli
#_____________________________________________________________________
# plot it!
dev.new()
print(a004.Map.I.withCI)
GraphTI.Hull <- PTCA4CATA::MakeToleranceIntervals(resMCA.sym$ExPosition.Data$fi,
design = my_data$Department,
# line below is needed
names.of.factors = c("Dim1","Dim2"), # needed
col = col4Means,
line.size = .50,
line.type = 3,
alpha.ellipse = .2,
alpha.line = .4,
p.level = .75)
#_____________________________________________________________________
# Create the map:
a005.Map.I.withTIHull <-a1 +
GraphTI.Hull + MapGroup$zeMap_dots +
MapGroup$zeMap_text + MapGroup$zeMap_dots
#_____________________________________________________________________
# plot it
print(a005.Map.I.withTIHull)
# for gender
constraints.mca1 <- minmaxHelper(mat1 = resMCA.sym1$ExPosition.Data$fi, mat2 = resMCA.sym1$ExPosition.Data$fj)
baseMap.i2 <- createFactorMap(resMCA.sym1$ExPosition.Data$fi,constraints = constraints.mca1,
col.points = DESIGN1$rows$Region$color_observ, axis1 = 1, axis2 = 2,
cex = 1, pch = 20,
display.labels = FALSE
)
label4Map2 <- createxyLabels.gen(1,2,
lambda =resMCA.sym1$ExPosition.Data$eigs,
tau = resMCA.sym1$ExPosition.Data$t)
a2 <- baseMap.i2$zeMap + baseMap.i2$zeMap_dots +label4Map2
BootCube.Gr1 <- Boot4Mean(resMCA.sym1$ExPosition.Data$fi,
design = my_data$Gender,
niter = 100,
suppressProgressBar = TRUE)
#_____________________________________________________________________
# Bootstrap ratios ----
bootRatios.Gr1 <- boot.ratio.test(BootCube.Gr1$BootCube)
#*********************************************************************
# Mean Map
# create the map for the means
# get the means by groups
dataMeans1 <- getMeans(resMCA.sym1$ExPosition.Data$fi, my_data$Gender)
# a vector of color for the means
col4data1 <- DESIGN1$rows$Region$color_observ
col4Means1 <- unique(col4data1)
# the map
MapGroup1 <- createFactorMap(dataMeans1,
# use the constraint from the main map
constraints = constraints.mca1,
col.points = col4Means1,
cex = 7, # size of the dot (bigger)
col.labels = col4Means1,
text.cex = 6)
# The map with observations and group means
a003.Map.I.withMeans1 <- a2 +
MapGroup1$zeMap_dots + MapGroup1$zeMap_text
print(a003.Map.I.withMeans1)
#_____________________________________________________________________
# Create the ellipses
# Bootstrapped CI ----
#_____________________________________________________________________
# Create Confidence Interval Plots
# use function MakeCIEllipses from package PTCA4CATA
GraphElli1 <- MakeCIEllipses(BootCube.Gr1$BootCube[,1:2,],
names.of.factors = c("Dimension 1","Dimension 2"),
col = col4Means1,
p.level = .95
)
#_____________________________________________________________________
# create the I-map with Observations, means and confidence intervals
#
a004.Map.I.withCI1 <- a2 + MapGroup1$zeMap_text + GraphElli1
#_____________________________________________________________________
# plot it!
print(a004.Map.I.withCI1)
GraphTI.Hull1 <- PTCA4CATA::MakeToleranceIntervals(resMCA.sym1$ExPosition.Data$fi,
design = my_data$Gender,
# line below is needed
names.of.factors = c("Dim1","Dim2"), # needed
col = col4Means1,
line.size = .50,
line.type = 3,
alpha.ellipse = .2,
alpha.line = .4,
p.level = .75)
#_____________________________________________________________________
# Create the map:
a005.Map.I.withTIHull1 <-a2 +
GraphTI.Hull1 + MapGroup1$zeMap_dots +
MapGroup1$zeMap_text + MapGroup1$zeMap_dots
#_____________________________________________________________________
# plot it
print(a005.Map.I.withTIHull1)
## Contribution for variables
signed.ctrJ <- resMCA.sym$ExPosition.Data$cj * sign(resMCA.sym$ExPosition.Data$fj)
b003.ctrJ.s.1 <- PrettyBarPlot2(signed.ctrJ[,1],
threshold = 1 / NROW(signed.ctrJ),
font.size = 5,
# color4bar = gplots::col2hex(col4J.ibm), # we need hex code
                                main = 'MCA on the IBM-No-Attrition data Set: Variable Contributions (Signed)',
ylab = 'Contributions',
ylim = c(1.2*min(signed.ctrJ), 1.2*max(signed.ctrJ))
)
print(b003.ctrJ.s.1)
b004.ctrJ.s.2 <- PrettyBarPlot2(signed.ctrJ[,2],
threshold = 1 / NROW(signed.ctrJ),
font.size = 5,
# color4bar = gplots::col2hex(col4J.ibm), # we need hex code
                                main = 'MCA on the IBM-No-Attrition data Set: Variable Contributions (Signed)',
ylab = 'Contributions',
ylim = c(1.2*min(signed.ctrJ), 1.2*max(signed.ctrJ))
)
print(b004.ctrJ.s.2)
b004.ctrJ.s.3 <- PrettyBarPlot2(signed.ctrJ[,3],
threshold = 1 / NROW(signed.ctrJ),
font.size = 5,
# color4bar = gplots::col2hex(col4J.ibm), # we need hex code
                                main = 'MCA on the IBM-No-Attrition data Set: Variable Contributions (Signed)',
ylab = 'Contributions',
ylim = c(1.2*min(signed.ctrJ), 1.2*max(signed.ctrJ))
)
print(b004.ctrJ.s.3)
## Bootstrap Ratios for Variables
BR <- resMCA.inf$Inference.Data$fj.boots$tests$boot.ratios
laDim = 1
ba001.BR1 <- PrettyBarPlot2(BR[,laDim],
threshold = 2,
font.size = 5,
#color4bar = gplots::col2hex(col4J.ibm),
main = paste0( 'MCA on the IBM-NoAttrition data Set: Bootstrap ratio ',laDim),
ylab = 'Bootstrap ratios'
#ylim = c(1.2*min(BR[,laDim]), 1.2*max(BR[,laDim]))
)
print(ba001.BR1)
#
laDim = 2
ba002.BR2 <- PrettyBarPlot2(BR[,laDim],
threshold = 2,
font.size = 5,
#color4bar = gplots::col2hex(col4J.ibm),
main = paste0(
'MCA on the IBM-NoAttrition data Set: Bootstrap ratio ',laDim),
ylab = 'Bootstrap ratios'
)
print(ba002.BR2)
laDim = 3
ba002.BR3 <- PrettyBarPlot2(BR[,laDim],
threshold = 2,
font.size = 5,
main = paste0(
'MCA on the IBM-NoAttrition data Set: Bootstrap ratio ',laDim),
ylab = 'Bootstrap ratios'
)
print(ba002.BR3)
|
/MCA./mcappt.r
|
permissive
|
richars7/Advanced-Research-Methods-in-Behavioral-and-Brain-Science
|
R
| false | false | 25,736 |
r
|
rm(list = ls())
graphics.off()
my_data <- read.csv("IBM-HR-Emplyee-NoAttrition.csv")
cols <- colnames(my_data)
rownames(my_data) <- my_data$Subj
data1 <- my_data[,11:32]
data1$Age <- my_data$Age
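# Discretize the continuous variables into a small number of ordinal bins
# (coded 1, 2, 3) so that they can be treated as nominal variables in the MCA
# below; the cut points appear to be ad hoc splits of the observed ranges.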
data1$MonthlyIncome <- cut(data1$MonthlyIncome,breaks = c(min(data1$MonthlyIncome)-1,5000 ,10000, max(data1$MonthlyIncome)+1),labels = c(1,2,3))
data1$DailyRate <- cut(data1$DailyRate,breaks = c(min(data1$DailyRate)-1,600 ,1100, max(data1$DailyRate)+1),labels = c(1,2,3))
data1$HourlyRate <- cut(data1$HourlyRate,breaks = c(min(data1$HourlyRate)-1,55 ,80, max(data1$HourlyRate)+1),labels = c(1,2,3))
data1$MonthlyRate <- cut(data1$MonthlyRate,breaks = c(min(data1$MonthlyRate)-1,10000 ,17500, max(data1$MonthlyRate)+1),labels = c(1,2,3))
data1$DistanceFromHome <- cut(data1$DistanceFromHome,breaks = c(min(data1$DistanceFromHome)-1,6 , max(data1$DistanceFromHome)+1),labels = c(1,2))
data1$Education <- cut(data1$Education,breaks = c(min(data1$Education)-1, 2,3 , max(data1$Education)+1),labels = c(1,2,3))
data1$JobInvolvement <- cut(data1$JobInvolvement,breaks = c(min(data1$JobInvolvement)-1,2.5,3.5, max(data1$JobInvolvement)+1),labels = c(1,2,3))
data1$JobLevel <- cut(data1$JobLevel,breaks = c(min(data1$JobLevel)-1,2,4, max(data1$JobLevel)+1),labels = c(1,2,3))
data1$NumCompaniesWorked <- cut(data1$NumCompaniesWorked,breaks = c(min(data1$NumCompaniesWorked)-1, 2,6, max(data1$NumCompaniesWorked)+1),labels = c(1,2,3))
data1$TotalWorkingYears <- cut(data1$TotalWorkingYears,breaks = c(min(data1$TotalWorkingYears)-1, 10, max(data1$TotalWorkingYears)+1),labels = c(1,2))
data1$PercentSalaryHike <- cut(data1$PercentSalaryHike,breaks = c(min(data1$PercentSalaryHike)-1, 13,19, max(data1$PercentSalaryHike)+1),labels = c(1,2,3))
data1$WorkLifeBalance <- cut(data1$WorkLifeBalance,breaks = c(min(data1$WorkLifeBalance)-1, 2, max(data1$WorkLifeBalance)+1),labels = c(1,2))
data1$Age <- cut(data1$Age,breaks = c(min(data1$Age)-1, 30,40, max(data1$Age)+1),labels = c(1,2,3))
data1$TrainingTimesLastYear <- cut(data1$TrainingTimesLastYear,breaks = c(min(data1$TrainingTimesLastYear)-1, 2,4, max(data1$TrainingTimesLastYear)+1),labels = c(1,2,3))
data1$YearsAtCompany <- cut(data1$YearsAtCompany,breaks = c(min(data1$YearsAtCompany)-1, 5,15, max(data1$YearsAtCompany)+1),labels = c(1,2,3))
data1$YearsInCurrentRole <- cut(data1$YearsInCurrentRole,breaks = c(min(data1$YearsInCurrentRole)-1, 4,10, max(data1$YearsInCurrentRole)+1),labels = c(1,2,3))
data1$YearsSinceLastPromotion <- cut(data1$YearsSinceLastPromotion,breaks = c(min(data1$YearsSinceLastPromotion)-1, 2,7, max(data1$YearsSinceLastPromotion)+1),labels = c(1,2,3))
data1$YearsWithCurrManager <- cut(data1$YearsWithCurrManager,breaks = c(min(data1$YearsWithCurrManager)-1, 4,10, max(data1$YearsWithCurrManager)+1),labels = c(1,2,3))
library(corrplot)
# Assumed dependencies for the MCA analysis below (only corrplot was loaded in the
# original script): ExPosition/InPosition for epMCA() and epMCA.inference.battery(),
# prettyGraphs for the color selection helpers, and PTCA4CATA for the factor maps,
# scree plot and bootstrap graphics; recode() is called with the dplyr:: prefix.
library(ExPosition)
library(InPosition)
library(prettyGraphs)
library(PTCA4CATA)
data2 <- my_data[,11:32]
data2$Age <- my_data$Age
cor.my_data <- cor(data2)
corrplot(cor.my_data, method = "ellipse")
data1$PerformanceRating <- as.factor(data1$PerformanceRating)
data1$JobInvolvement <- as.factor(data1$JobInvolvement)
data1$StockOptionLevel <- as.factor(data1$StockOptionLevel)
data1$EnvironmentSatisfaction <- as.factor(data1$EnvironmentSatisfaction)
data1$JobSatisfaction <- as.factor(data1$JobSatisfaction)
data1$RelationshipSatisfaction <- as.factor(data1$RelationshipSatisfaction)
resMCA.sym <- epMCA(data1 ,make_data_nominal = TRUE ,DESIGN = my_data$Department ,make_design_nominal = TRUE,graphs = FALSE, symmetric = TRUE)
resMCA.asym <- epMCA(data1 ,make_data_nominal = TRUE ,DESIGN = my_data$Department ,make_design_nominal = TRUE,graphs = FALSE, symmetric = FALSE)
resMCA.inf <- epMCA.inference.battery(data1, make_data_nominal = TRUE, DESIGN = my_data$Department ,make_design_nominal =TRUE,graphs = FALSE)
DESIGN <- list()
DESIGN$rows$Region$labels <- unique(my_data$Department)
DESIGN$rows$Region$vec <- my_data$Department
#Convert the vector to a matrix
DESIGN$rows$Region$mat <- makeNominalData(as.matrix(DESIGN$rows$Region$vec))
automatic_colors <- createColorVectorsByDesign(DESIGN$rows$Region$mat)
DESIGN$rows$Region$color_groups <- c("red","blue","yellow")
DESIGN$rows$Region$color_observ <- as.matrix(DESIGN$rows$Region$vec)
DESIGN$rows$Region$color_observ[which(DESIGN$rows$Region$vec=="Research & Development")] <- DESIGN$rows$Region$color_groups[1]
DESIGN$rows$Region$color_observ[which(DESIGN$rows$Region$vec=="Human Resources")] <- DESIGN$rows$Region$color_groups[2]
DESIGN$rows$Region$color_observ[which(DESIGN$rows$Region$vec=="Sales")] <- DESIGN$rows$Region$color_groups[3]
PlotScree(ev = resMCA.sym$ExPosition.Data$eigs,
p.ev = resMCA.inf$Inference.Data$components$p.vals,
          title = 'IBM-No-Attrition data Set. Eigenvalues Inference',
plotKaiser = TRUE
)
MonthlyIncome1 <- "red"
DailyRate1 <- "pink"
HourlyRate1 <- "blue"
MonthlyRate1 <- "skyblue"
DistanceFromHome1 <- "green"
PerformanceRating1 <- "navyblue"
Education1 <- "darkolivegreen4"
JobInvolvement1 <- "darkgoldenrod3"
JobLevel1 <- "brown"
StockOptionLevel1 <- "orange"
NumCompaniesWorked1 <- "cornflowerblue"
PercentSalaryHike1 <- "chartreuse3"
TotalWorkingYears1 <- "peachpuff3"
TrainingTimesLastYear1 <- "mediumorchid2"
WorkLifeBalance1 <- "turquoise3"
YearsAtCompany1 <- "wheat4"
YearsInCurrentRole1 <-"slategray2"
YearsSinceLastPromotion1 <- "purple"
YearsWithCurrManager1 <- "magenta"
EnvironmentSatisfaction1 <- "teal"
JobSatisfaction1 <- "chromeyellow"
RelationshipSatisfaction1 <- "yellow"
Age1 <- "black"
col4J <- dplyr::recode(resMCA.sym$Plotting.Data$fj.col,
'JobLevel1' = JobLevel1 , 'MonthlyIncome1' = MonthlyIncome1, 'DailyRate1' = DailyRate1, 'HourlyRate'=HourlyRate1, 'MonthlyRate' =MonthlyRate1, 'DistanceFromHome'= DistanceFromHome1,
'PerformanceRating'= PerformanceRating1, 'Education' =Education1, 'JobInvolvement'=JobInvolvement1, 'StockOptionLevel'=StockOptionLevel1,
'NumCompaniesWorked'= NumCompaniesWorked1 , 'PercentSalaryHike'= PercentSalaryHike1 ,'TotalWorkingYears'= TotalWorkingYears1, 'TrainingTimesLastYear'=TrainingTimesLastYear1,
'WorkLifeBalance'=WorkLifeBalance1, 'YearsAtCompany'= YearsAtCompany1, 'YearsInCurrentRole'= YearsInCurrentRole1,
'YearsSinceLastPromotion'= YearsSinceLastPromotion1, 'YearsWithCurrManager'=YearsWithCurrManager1 ,'EnvironmentSatisfaction'= EnvironmentSatisfaction1,
'JobSatisfaction'=JobSatisfaction1 ,'RelationshipSatisfaction'=RelationshipSatisfaction1,'Age'= Age1 )
## Biplot for symmetrical plot for component 1 and 2 for Department datatype
col4I <- prettyGraphsColorSelection(NCOL(resMCA.sym$Plotting.Data$fj.col))
symMap1 <- createFactorMapIJ(resMCA.sym$ExPosition.Data$fi,resMCA.sym$ExPosition.Data$fj,
col.points.i = DESIGN$rows$Region$color_observ,
col.points.j = col4I,
col.labels.i = DESIGN$rows$Region$color_observ ,
col.labels.j = col4I ,
cex.i = 2.5, pch.i = 20,
pch.j = 21, cex.j = 2.5,text.cex.j =2, axis1 = 1,axis2 = 2, title = "Symmetrical plot for components 1 and 2 for Department Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA1 <- createxyLabels(resCA = resMCA.sym, x_axis = 1,y_axis = 2)
map.IJ.sym1 <- symMap1$baseMap + symMap1$I_points
map.IJ.sym2 <- symMap1$baseMap + symMap1$J_labels + symMap1$J_points + labels4MCA1
map.IJ.sym3 <- symMap1$baseMap + symMap1$I_points +
symMap1$J_labels + symMap1$J_points + labels4MCA1
print(map.IJ.sym1)
print(map.IJ.sym2)
print(map.IJ.sym3)
## Biplot for symmetrical plot for component 2 and 3 for Department datatype
symMap2 <- createFactorMapIJ(resMCA.sym$ExPosition.Data$fi,resMCA.sym$ExPosition.Data$fj,
col.points.i = DESIGN$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2, axis1 = 2,axis2 = 3, title = "Symmetrical plot for components 2 and 3 for Department Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA2 <- createxyLabels(resCA = resMCA.sym, x_axis = 2, y_axis = 3)
map.IJ.sym11 <- symMap2$baseMap + symMap2$I_points
map.IJ.sym21 <- symMap2$baseMap + symMap2$J_labels + symMap2$J_points + labels4MCA2
map.IJ.sym31 <- symMap2$baseMap + symMap2$I_points +
symMap2$J_labels + symMap2$J_points + labels4MCA2
print(map.IJ.sym11)
print(map.IJ.sym21)
print(map.IJ.sym31)
## Biplot for symmetrical plot for component 1 and 3 for Department datatype
symMap3 <- createFactorMapIJ(resMCA.sym$ExPosition.Data$fi,resMCA.sym$ExPosition.Data$fj,
col.points.i = DESIGN$rows$Region$color_observ ,
col.points.j = "black",
col.labels.i = DESIGN$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2, axis1 = 1,axis2 = 3, title = "Symmetrical plot for components 1 and 3 for Department Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA3 <- createxyLabels(resCA = resMCA.sym, x_axis = 1,y_axis = 3)
map.IJ.sym31 <- symMap3$baseMap + symMap3$I_points
map.IJ.sym32 <- symMap3$baseMap + symMap3$J_labels + symMap3$J_points + labels4MCA3
map.IJ.sym33 <- symMap3$baseMap + symMap3$I_points +
symMap3$J_labels + symMap3$J_points + labels4MCA3
print(map.IJ.sym31)
print(map.IJ.sym32)
print(map.IJ.sym33)
# color for gender
DESIGN1 <- list()
DESIGN1$rows$Region$labels <- unique(my_data$Gender)
DESIGN1$rows$Region$vec <- my_data$Gender
#Convert the vector to a matrix
DESIGN1$rows$Region$mat <- makeNominalData(as.matrix(DESIGN1$rows$Region$vec))
automatic_colors <- createColorVectorsByDesign(DESIGN1$rows$Region$mat)
DESIGN1$rows$Region$color_groups <- c("skyblue","pink")
DESIGN1$rows$Region$color_observ <- as.matrix(DESIGN1$rows$Region$vec)
DESIGN1$rows$Region$color_observ[which(DESIGN1$rows$Region$vec=="Male")] <- DESIGN1$rows$Region$color_groups[1]
DESIGN1$rows$Region$color_observ[which(DESIGN1$rows$Region$vec=="Female")] <- DESIGN1$rows$Region$color_groups[2]
# inference
resMCA.sym1 <- epMCA(data1 ,make_data_nominal = TRUE ,DESIGN = my_data$Gender ,make_design_nominal = TRUE,graphs = FALSE, symmetric = TRUE)
resMCA.asym1 <- epMCA(data1 ,make_data_nominal = TRUE ,DESIGN = my_data$Gender ,make_design_nominal = TRUE,graphs = FALSE, symmetric = FALSE)
resMCA.inf1 <- epMCA.inference.battery(data1, make_data_nominal = TRUE, DESIGN = my_data$Gender ,make_design_nominal =TRUE,graphs = FALSE)
## Biplot for symmetrical plot for component 1 and 2 for Gender datatype
symMa <- createFactorMapIJ(resMCA.sym1$ExPosition.Data$fi,resMCA.sym1$ExPosition.Data$fj,
col.points.i = DESIGN1$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN1$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 2.5, pch.i = 20,
pch.j = 21, cex.j = 2.5,text.cex.j =2, axis1 = 1,axis2 = 2, title = "Symmetrical plot for components 1 and 2 for Gender Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4 <- createxyLabels(resCA = resMCA.sym1, x_axis = 1,y_axis = 2)
map.IJ.sym111 <- symMa$baseMap + symMa$I_points
map.IJ.sym222 <- symMa$baseMap + symMa$J_labels + symMa$J_points + labels4
map.IJ.sym333 <- symMa$baseMap + symMa$I_points +
symMa$J_labels + symMa$J_points + labels4
print(map.IJ.sym111)
print(map.IJ.sym222)
print(map.IJ.sym333)
## Biplot for symmetrical plot for component 2 and 3 for Gender datatype
symMap21 <- createFactorMapIJ(resMCA.sym1$ExPosition.Data$fi,resMCA.sym1$ExPosition.Data$fj,
col.points.i = DESIGN1$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN1$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2, axis1 = 2,axis2 = 3, title = "Symmetrical plot for components 2 and 3 for Gender Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA22 <- createxyLabels(resCA = resMCA.sym1, x_axis = 2, y_axis = 3)
map.IJ.sym1111 <- symMap21$baseMap + symMap21$I_points
map.IJ.sym211 <- symMap21$baseMap + symMap21$J_labels + symMap21$J_points + labels4MCA22
map.IJ.sym311 <- symMap21$baseMap + symMap21$I_points +
symMap21$J_labels + symMap21$J_points + labels4MCA22
print(map.IJ.sym1111)
print(map.IJ.sym211)
print(map.IJ.sym311)
## Biplot for symmetrical plot for component 1 and 3 for Gender datatype
symMap31 <- createFactorMapIJ(resMCA.sym1$ExPosition.Data$fi,resMCA.sym1$ExPosition.Data$fj,
col.points.i = DESIGN1$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN1$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2, axis1 = 1,axis2 = 3, title = "Symmetrical plot for components 1 and 3 for Gender Type",
alpha.axes = 0.2,alpha.points.i = 1)
labels4MCA31 <- createxyLabels(resCA = resMCA.sym1, x_axis = 1,y_axis = 3)
map.IJ.sym311 <- symMap31$baseMap + symMap31$I_points
map.IJ.sym321 <- symMap31$baseMap + symMap31$J_labels + symMap31$J_points + labels4MCA31
map.IJ.sym331 <- symMap31$baseMap + symMap31$I_points +
symMap31$J_labels + symMap31$J_points + labels4MCA31
print(map.IJ.sym311)
print(map.IJ.sym321)
print(map.IJ.sym331)
## Biplot for asymmetrical plot for component 1
asymMap1 <- createFactorMapIJ(resMCA.asym1$ExPosition.Data$fi,resMCA.asym1$ExPosition.Data$fj,
col.points.i = DESIGN1$rows$Region$color_observ,
col.points.j = "black",
col.labels.i = DESIGN1$rows$Region$color_observ ,
col.labels.j = "black" ,
cex.i = 5, pch.i = 20,
pch.j = 21, text.cex.j =2,
alpha.axes = 0.2,alpha.points.i = 1, axis1 = 1, axis2 = 2, title = " Asymmetrical plot for components 1 and 2")
alabels4MCA1 <- createxyLabels(resCA = resMCA.asym1)
map.IJ.asym1 <- asymMap1$baseMap + asymMap1$I_points +
asymMap1$J_labels + asymMap1$J_points + alabels4MCA1
print(map.IJ.asym1)
## Bootstrap Interval
constraints.mca <- minmaxHelper(mat1 = resMCA.sym$ExPosition.Data$fi, mat2 = resMCA.sym$ExPosition.Data$fj)
color <- prettyGraphsColorSelection(NCOL(data1))
baseMap.i1 <- createFactorMap(resMCA.sym$ExPosition.Data$fi,constraints = constraints.mca,
col.points = DESIGN$rows$Region$color_observ , axis1 = 1, axis2 = 2,
cex = 1, pch = 20,
display.labels = FALSE
)
label4Map1 <- createxyLabels.gen(1,2,
lambda =resMCA.sym$ExPosition.Data$eigs,
tau = resMCA.sym$ExPosition.Data$t)
a1 <- baseMap.i1$zeMap + baseMap.i1$zeMap_dots +label4Map1
BootCube.Gr <- Boot4Mean(resMCA.sym$ExPosition.Data$fi,
design = my_data$Department,
niter = 100,
suppressProgressBar = TRUE)
#_____________________________________________________________________
# Bootstrap ratios ----
bootRatios.Gr <- boot.ratio.test(BootCube.Gr$BootCube)
#*********************************************************************
# Mean Map
# create the map for the means
# get the means by groups
dataMeans <- getMeans(resMCA.sym$ExPosition.Data$fi, my_data$Department)
# a vector of color for the means
col4data <- DESIGN$rows$Region$color_observ
col4Means <- unique(col4data)
# the map
MapGroup <- createFactorMap(dataMeans,
# use the constraint from the main map
constraints = constraints.mca,
col.points = col4Means,
cex = 7, # size of the dot (bigger)
col.labels = col4Means,
text.cex = 6)
# The map with observations and group means
a003.Map.I.withMeans <- a1 +
MapGroup$zeMap_dots + MapGroup$zeMap_text
print(a003.Map.I.withMeans)
#_____________________________________________________________________
# Create the ellipses
# Bootstrapped CI ----
#_____________________________________________________________________
# Create Confidence Interval Plots
# use function MakeCIEllipses from package PTCA4CATA
GraphElli <- MakeCIEllipses(BootCube.Gr$BootCube[,1:2,],
names.of.factors = c("Dimension 1","Dimension 2"),
col = col4Means,
p.level = .95
)
#_____________________________________________________________________
# create the I-map with Observations, means and confidence intervals
#
a004.Map.I.withCI <- a1 + MapGroup$zeMap_text + GraphElli
#_____________________________________________________________________
# plot it!
dev.new()
print(a004.Map.I.withCI)
GraphTI.Hull <- PTCA4CATA::MakeToleranceIntervals(resMCA.sym$ExPosition.Data$fi,
design = my_data$Department,
# line below is needed
names.of.factors = c("Dim1","Dim2"), # needed
col = col4Means,
line.size = .50,
line.type = 3,
alpha.ellipse = .2,
alpha.line = .4,
p.level = .75)
#_____________________________________________________________________
# Create the map:
a005.Map.I.withTIHull <-a1 +
GraphTI.Hull + MapGroup$zeMap_dots +
MapGroup$zeMap_text + MapGroup$zeMap_dots
#_____________________________________________________________________
# plot it
print(a005.Map.I.withTIHull)
# for gender
constraints.mca1 <- minmaxHelper(mat1 = resMCA.sym1$ExPosition.Data$fi, mat2 = resMCA.sym1$ExPosition.Data$fj)
baseMap.i2 <- createFactorMap(resMCA.sym1$ExPosition.Data$fi,constraints = constraints.mca1,
col.points = DESIGN1$rows$Region$color_observ, axis1 = 1, axis2 = 2,
cex = 1, pch = 20,
display.labels = FALSE
)
label4Map2 <- createxyLabels.gen(1,2,
lambda =resMCA.sym1$ExPosition.Data$eigs,
tau = resMCA.sym1$ExPosition.Data$t)
a2 <- baseMap.i2$zeMap + baseMap.i2$zeMap_dots +label4Map2
BootCube.Gr1 <- Boot4Mean(resMCA.sym1$ExPosition.Data$fi,
design = my_data$Gender,
niter = 100,
suppressProgressBar = TRUE)
#_____________________________________________________________________
# Bootstrap ratios ----
bootRatios.Gr1 <- boot.ratio.test(BootCube.Gr1$BootCube)
#*********************************************************************
# Mean Map
# create the map for the means
# get the means by groups
dataMeans1 <- getMeans(resMCA.sym1$ExPosition.Data$fi, my_data$Gender)
# a vector of color for the means
col4data1 <- DESIGN1$rows$Region$color_observ
col4Means1 <- unique(col4data1)
# the map
MapGroup1 <- createFactorMap(dataMeans1,
# use the constraint from the main map
constraints = constraints.mca1,
col.points = col4Means1,
cex = 7, # size of the dot (bigger)
col.labels = col4Means1,
text.cex = 6)
# The map with observations and group means
a003.Map.I.withMeans1 <- a2 +
MapGroup1$zeMap_dots + MapGroup1$zeMap_text
print(a003.Map.I.withMeans1)
#_____________________________________________________________________
# Create the ellipses
# Bootstrapped CI ----
#_____________________________________________________________________
# Create Confidence Interval Plots
# use function MakeCIEllipses from package PTCA4CATA
GraphElli1 <- MakeCIEllipses(BootCube.Gr1$BootCube[,1:2,],
names.of.factors = c("Dimension 1","Dimension 2"),
col = col4Means1,
p.level = .95
)
#_____________________________________________________________________
# create the I-map with Observations, means and confidence intervals
#
a004.Map.I.withCI1 <- a2 + MapGroup1$zeMap_text + GraphElli1
#_____________________________________________________________________
# plot it!
print(a004.Map.I.withCI1)
GraphTI.Hull1 <- PTCA4CATA::MakeToleranceIntervals(resMCA.sym1$ExPosition.Data$fi,
design = my_data$Gender,
# line below is needed
names.of.factors = c("Dim1","Dim2"), # needed
col = col4Means1,
line.size = .50,
line.type = 3,
alpha.ellipse = .2,
alpha.line = .4,
p.level = .75)
#_____________________________________________________________________
# Create the map:
a005.Map.I.withTIHull1 <-a2 +
GraphTI.Hull1 + MapGroup1$zeMap_dots +
MapGroup1$zeMap_text + MapGroup1$zeMap_dots
#_____________________________________________________________________
# plot it
print(a005.Map.I.withTIHull1)
## Contribution for variables
signed.ctrJ <- resMCA.sym$ExPosition.Data$cj * sign(resMCA.sym$ExPosition.Data$fj)
b003.ctrJ.s.1 <- PrettyBarPlot2(signed.ctrJ[,1],
threshold = 1 / NROW(signed.ctrJ),
font.size = 5,
# color4bar = gplots::col2hex(col4J.ibm), # we need hex code
                                main = 'MCA on the IBM-No-Attrition data Set: Variable Contributions (Signed)',
ylab = 'Contributions',
ylim = c(1.2*min(signed.ctrJ), 1.2*max(signed.ctrJ))
)
print(b003.ctrJ.s.1)
b004.ctrJ.s.2 <- PrettyBarPlot2(signed.ctrJ[,2],
threshold = 1 / NROW(signed.ctrJ),
font.size = 5,
# color4bar = gplots::col2hex(col4J.ibm), # we need hex code
                                main = 'MCA on the IBM-No-Attrition data Set: Variable Contributions (Signed)',
ylab = 'Contributions',
ylim = c(1.2*min(signed.ctrJ), 1.2*max(signed.ctrJ))
)
print(b004.ctrJ.s.2)
b004.ctrJ.s.3 <- PrettyBarPlot2(signed.ctrJ[,3],
threshold = 1 / NROW(signed.ctrJ),
font.size = 5,
# color4bar = gplots::col2hex(col4J.ibm), # we need hex code
                                main = 'MCA on the IBM-No-Attrition data Set: Variable Contributions (Signed)',
ylab = 'Contributions',
ylim = c(1.2*min(signed.ctrJ), 1.2*max(signed.ctrJ))
)
print(b004.ctrJ.s.3)
## Bootstrap Ratios for Variables
BR <- resMCA.inf$Inference.Data$fj.boots$tests$boot.ratios
laDim = 1
ba001.BR1 <- PrettyBarPlot2(BR[,laDim],
threshold = 2,
font.size = 5,
#color4bar = gplots::col2hex(col4J.ibm),
main = paste0( 'MCA on the IBM-NoAttrition data Set: Bootstrap ratio ',laDim),
ylab = 'Bootstrap ratios'
#ylim = c(1.2*min(BR[,laDim]), 1.2*max(BR[,laDim]))
)
print(ba001.BR1)
#
laDim = 2
ba002.BR2 <- PrettyBarPlot2(BR[,laDim],
threshold = 2,
font.size = 5,
#color4bar = gplots::col2hex(col4J.ibm),
main = paste0(
'MCA on the IBM-NoAttrition data Set: Bootstrap ratio ',laDim),
ylab = 'Bootstrap ratios'
)
print(ba002.BR2)
laDim = 3
ba002.BR3 <- PrettyBarPlot2(BR[,laDim],
threshold = 2,
font.size = 5,
main = paste0(
'MCA on the IBM-NoAttrition data Set: Bootstrap ratio ',laDim),
ylab = 'Bootstrap ratios'
)
print(ba002.BR3)
|
# Simple MACD strategy
#
# MACD may be used in many ways; this demo uses it as a trend indicator.
#
# Traditionally, the MACD signal line crossing zero indicates the establishment of a positive trend.
#
# We'll buy on a positive threshold crossover of the 'signal' column, and sell on a negative threshold crossover.
#
# Author: brian
###############################################################################
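# Illustrative sketch (not part of the strategy): how the MACD columns referenced
# below are produced. TTR::MACD() returns 'macd' and 'signal' columns, and the
# sigThreshold signals defined later test the 'signal' column for a zero cross.
# Kept commented out so the demo itself is unchanged; assumes quantmod and TTR
# are installed.
# ex.prices <- quantmod::getSymbols("AAPL", auto.assign = FALSE)
# ex.macd <- TTR::MACD(quantmod::Cl(ex.prices), nFast = 12, nSlow = 26, nSig = 9, maType = "EMA")
# tail(ex.macd)  # columns: macd, signal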
require(quantstrat)
suppressWarnings(rm("order_book.macd",pos=.strategy))
suppressWarnings(rm("account.macd","portfolio.macd",pos=.blotter))
suppressWarnings(rm("account.st","portfolio.st","stock.str","stratMACD","initDate","initEq",'start_t','end_t'))
stock.str='AAPL' # what are we trying it on
#MA parameters for MACD
fastMA = 12
slowMA = 26
signalMA = 9
maType="EMA"
currency('USD')
stock(stock.str,currency='USD',multiplier=1)
#or use fake data
#stock.str='sample_matrix' # what are we trying it on
#data(sample_matrix) # data included in package xts
#sample_matrix<-as.xts(sample_matrix)
##### PLACE DEMO AND TEST DATES HERE #################
#
#if(isTRUE(options('in_test')$in_test))
# # use test dates
# {initDate="2011-01-01"
# endDate="2012-12-31"
# } else
# # use demo defaults
# {initDate="1999-12-31"
# endDate=Sys.Date()}
initDate='2006-12-31'
initEq=1000000
portfolio.st='macd'
account.st='macd'
initPortf(portfolio.st,symbols=stock.str, initDate=initDate)
initAcct(account.st,portfolios=portfolio.st, initDate=initDate)
initOrders(portfolio=portfolio.st,initDate=initDate)
strat.st<-portfolio.st
# define the strategy
strategy(strat.st, store=TRUE)
#one indicator
add.indicator(strat.st, name = "MACD", arguments = list(x=quote(Cl(mktdata))) )
#two signals
add.signal(strat.st,name="sigThreshold",
arguments = list(column="signal.MACD.ind",
relationship="gt",
threshold=0,
cross=TRUE),
label="signal.gt.zero")
add.signal(strat.st,name="sigThreshold",
arguments = list(column="signal.MACD.ind",
relationship="lt",
threshold=0,
cross=TRUE),
label="signal.lt.zero")
####
# add rules
# entry
add.rule(strat.st,name='ruleSignal',
arguments = list(sigcol="signal.gt.zero",
sigval=TRUE, orderqty=1000000,
ordertype='market',
orderside='long',
threshold=NULL,
osFUN='osMaxPos'),
type='enter',
label='enter')
#alternatives for risk stops:
# simple stoplimit order, with threshold multiplier
#add.rule(strat.st,name='ruleSignal', arguments = list(sigcol="signal.gt.zero",sigval=TRUE, orderqty='all', ordertype='stoplimit', orderside='long', threshold=-.05,tmult=TRUE, orderset='exit2'),type='risk',label='risk',storefun=FALSE)
# alternately, use a trailing order, also with a threshold multiplier
#add.rule(strat.st,name='ruleSignal', arguments = list(sigcol="signal.gt.zero",sigval=TRUE, orderqty='all', ordertype='stoptrailing', orderside='long', threshold=-.15,tmult=TRUE, orderset='exit2'),type='risk',label='trailingexit')
# exit
add.rule(strat.st,name='ruleSignal',
arguments = list(sigcol="signal.lt.zero",
sigval=TRUE, orderqty='all',
ordertype='market',
orderside='long',
threshold=NULL,
orderset='exit2'),
type='exit',
label='exit')
add.rule(strat.st, 'rulePctEquity',
arguments=list(rebalance_on='months',
trade.percent=.02,
refprice=quote(last(getPrice(mktdata)[paste('::',curIndex,sep='')])),
digits=0
),
type='rebalance',
label='rebalance'
)
#end rules
####
getSymbols(stock.str,from=initDate,src='yahoo')
start_t<-Sys.time()
out<-applyStrategy.rebalancing(strat.st , portfolios=portfolio.st,parameters=list(nFast=fastMA, nSlow=slowMA, nSig=signalMA,maType=maType),verbose=TRUE)
end_t<-Sys.time()
print(end_t-start_t)
start_t<-Sys.time()
updatePortf(Portfolio=portfolio.st,Dates=paste('::',as.Date(Sys.time()),sep=''))
end_t<-Sys.time()
print("trade blotter portfolio update:")
print(end_t-start_t)
chart.Posn(Portfolio=portfolio.st,Symbol=stock.str)
plot(add_MACD(fast=fastMA, slow=slowMA, signal=signalMA,maType="EMA"))
#look at the order book
getOrderBook('macd')
###############################################################################
# R (http://r-project.org/) Quantitative Strategy Model Framework
#
# Copyright (c) 2009-2012
# Peter Carl, Dirk Eddelbuettel, Brian G. Peterson, Jeffrey Ryan, and Joshua Ulrich
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
##############################################################################
##### PLACE THIS BLOCK AT END OF DEMO SCRIPT ###################
#
# book = getOrderBook(port)
# stats = tradeStats(port)
# rets = PortfReturns(acct)
################################################################
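###############################################################################
# Optional post-run inspection (a sketch, not part of the original demo).
# The template above suggests pulling the order book, trade statistics and
# portfolio returns; assuming the account is also brought up to date first,
# the blotter accessors could be used roughly like this:
# updateAcct(account.st)
# updateEndEq(account.st)
# book  <- getOrderBook(portfolio.st)
# stats <- tradeStats(portfolio.st)
# rets  <- PortfReturns(account.st)
###############################################################################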
|
/demo/macd2.R
|
no_license
|
milktrader/A-Mustering-of-Storks
|
R
| false | false | 5,279 |
r
|
##' Turn tree data in catalog form into a tree data structure
##'
##' This would normally be applied to output from \code{\link{data2catalog}} (which determines the identity of root and reshapes data into catalog form). Catalog form (input) is node-by-node data with information on children and their probabilities. The tree structure returned is the same data in a recursive list, for onward use in the functions \code{\link{getQstring}} and \code{\link{getCstring}}.
##'
##' @title Make a tree
##' @param nm a character specifying the tree root
##' @param x tree data in catalog form
##' @return tree
##' @author Pete Dodd
##' @export
makeTree <- function(nm,x){
tree <- list()
tree[[nm]] <- list(n=x[[nm]]$n, #names
c=x[[nm]]$c, #cost
q=x[[nm]]$q, #qol
p=x[[nm]]$p, #prob
k=list() #kids
)
for(K in x[[nm]]$k) #recurse for children
tree[[nm]]$k <- c(tree[[nm]]$k,makeTree(K,x=x))
tree
}
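# A minimal illustrative sketch (not part of the package): a hypothetical
# two-node catalog in the format read above (fields n, c, q, p, k), and the
# tree built from it. Real catalogs would normally come from data2catalog().
# toyCatalog <- list(
#   root = list(n = "root", c = 0,  q = 1,   p = 1,   k = c("leaf")),
#   leaf = list(n = "leaf", c = 10, q = 0.8, p = 0.5, k = character(0))
# )
# toyTree <- makeTree("root", x = toyCatalog)
# str(toyTree, max.level = 4)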
|
/R/makeTree.R
|
permissive
|
petedodd/dtree
|
R
| false | false | 1,049 |
r
|
\name{ChisqSupp}
\alias{ChisqSupp}
\alias{mchisq}
\alias{levchisq}
\alias{mgfchisq}
\title{Moments and Moment Generating Function of the (non-central) Chi-Squared Distribution}
\description{
Raw moments, limited moments and moment generating function for the
chi-squared (\eqn{\chi^2}{chi^2}) distribution with \code{df} degrees
of freedom and optional non-centrality parameter \code{ncp}.
}
\usage{
mchisq(order, df, ncp = 0)
levchisq(limit, df, ncp = 0, order = 1)
mgfchisq(x, df, ncp = 0, log= FALSE)
}
\arguments{
\item{order}{order of the moment.}
\item{limit}{limit of the loss variable.}
\item{df}{degrees of freedom (non-negative, but can be non-integer).}
\item{ncp}{non-centrality parameter (non-negative).}
\item{x}{numeric vector.}
\item{log}{logical; if \code{TRUE}, the cumulant generating function
is returned.}
}
\details{
The \eqn{k}th raw moment of the random variable \eqn{X} is
\eqn{E[X^k]}{E[X^k]}, the \eqn{k}th limited moment at some limit
\eqn{d} is \eqn{E[\min(X, d)]}{E[min(X, d)]} and the moment generating
function is \eqn{E[e^{xX}]}.
  Only integer moments are supported for the non-central chi-squared
  distribution (\code{ncp > 0}).
  The limited expected value is supported only for the central chi-squared
  distribution (\code{ncp = 0}).
}
\value{
\code{mchisq} gives the \eqn{k}th raw moment,
\code{levchisq} gives the \eqn{k}th moment of the limited loss
variable, and
\code{mgfchisq} gives the moment generating function in \code{x}.
Invalid arguments will result in return value \code{NaN}, with a warning.
}
\seealso{
\code{\link[stats]{Chisquare}}
}
\references{
Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2008),
\emph{Loss Models, From Data to Decisions, Third Edition}, Wiley.
Johnson, N. L. and Kotz, S. (1970), \emph{Continuous Univariate
Distributions, Volume 1}, Wiley.
}
\author{
Christophe Dutang, Vincent Goulet \email{vincent.goulet@act.ulaval.ca}
}
\examples{
mchisq(2, 3, 4)
levchisq(10, 3, order = 2)
mgfchisq(0.25, 3, 2)
}
\keyword{distribution}
|
/man/ChisqSupp.Rd
|
no_license
|
mrthat/actuar
|
R
| false | false | 2,060 |
rd
|
## These functions together allow caching the inverse of a matrix so that it
## doesn't need to be recomputed.
## The makeCacheMatrix function creates a matrix object that is able to compute
## an inverse, taking advantage of the fact that if the inverse has been
## computed once, it can simply be cached and returned rather than recomputed
makeCacheMatrix <- function(theMatrix = matrix()) {
# A place to store the matrix inverse
theInverse <- NULL
# Save the matrix and remove any cached inverse
set <- function(x) {
theMatrix <<- x
theInverse <<- NULL
}
# Return the current contents of the matrix
getMatrix <- function() theMatrix
# Save and return the matrix inverse
setInverse <- function(y) theInverse <<- y
getInverse <- function() theInverse
# Return the list of functions for the special matrix object
list (set = set,
getMatrix = getMatrix,
setInverse = setInverse,
getInverse = getInverse)
}
## The cacheSolve function looks to see if a matrix inverse has already been
## computed. If it has, the function avoids the computation load and returns
## the cached inverse. If not, it computes, then saves the matrix inverse.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'. Get it from the cache
## if possible
# First look in the cache
xInv <- x$getInverse()
# If there's an inverse there, then return it
if (!is.null(xInv)) {
message('Returning cached inverse')
return(xInv)
}
# Nothing in the cache, so get the matrix and compute its inverse
xMat <- x$getMatrix()
xInv <- solve(xMat)
# Put the inverse in the cache in case we need it
x$setInverse(xInv)
# Return the computed inverse
xInv
}
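## Example usage (a small sketch, not required by the assignment):
## the first call to cacheSolve computes and stores the inverse, the second
## returns the cached copy and prints the message.
# cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2, ncol = 2))
# cacheSolve(cm)   # computes the inverse and caches it
# cacheSolve(cm)   # "Returning cached inverse"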
|
/cachematrix.R
|
no_license
|
braynebuddy/ProgrammingAssignment2
|
R
| false | false | 1,805 |
r
|
# scrape_weedmaps
library(tidyverse)
library(rvest)
library(testthat)
library(tidyverse)
source('code/functions.R')
# Direct remote driver to index page --------------------------------------
# docker run -d -p 4445:4444 selenium/standalone-firefox:2.53.0
source("code/start_remote_driver.R")
# To prevent going to mobile site, set window to a big size!
remDr$setWindowSize(width=1600, height=900, winHand = "current")
wm_index <- "https://weedmaps.com/dispensaries/in/united-states/california"
remDr$navigate(wm_index) # used to work
remDr$navigate("weedmaps.com")
remDr$screenshot(display = TRUE)
# Click on RAND splash screen
remDr$findElement("css", "a")$clickElement()
remDr$screenshot(display = TRUE)
wm_index_html <- read_html_safely(remDr$getPageSource() %>% unlist) # read HTML after loading
# Compile City Index URLs -----------------------------------
# Grab links from left sidebar
city_links_CSS <- "ion-nav-view.pane li a"
city_links <- paste0("https://weedmaps.com",
wm_index_html %>%
html_nodes(city_links_CSS) %>%
html_attr("href") %>%
str_subset("dispensaries") %>%
str_replace_all("%2F", "/"))
write.csv(city_links, "data/weedmaps_city_links_111118.csv")
# Scrape Store URLs, iterating through city index URLs --------------------
city_links <- read_csv("data/weedmaps_city_links_111118.csv") %>%
select(-X1) %>% unlist # drop empty column
city_index_url <- city_links[1]
######### DO ONCE FOR ALL STORES (MED OR REC)
# Get details for all stores (med/rec), deploying function for each (gives 1003 links)
links <- list()
for (i in 1:length(city_links)) {
links[[i]] <- get_city_store_links(city_index_url = city_links[[i]], rec_only = FALSE, delay = 1) %>%
unlist %>% unique
if (i %%10 == 0) print(i)
}
# Add link prefix and details page suffix (which can be statically scraped)
all_store_links <- links %>% lapply(function (x) paste0(x, "#/details")) %>%
lapply(as.data.frame) %>% bind_rows
all_store_links <- all_store_links %>% map(function(x) str_replace(x, "#/details", "/about")) %>% unlist %>% unique
write.csv(all_store_links, "data/weedmaps_store_links_111518.csv"); beepr::beep(5)
######### DO ONCE FOR ONLY REC
# Get store links for recreational-only listings, deploying the function for each city
rec_links <- list()
for (i in 1:length(city_links)) {
rec_links[[i]] <- get_city_store_links(city_index_url = city_links[[i]],
rec_only = TRUE, delay = 1.5, screenshot=TRUE) %>%
unlist %>% unique
if (i %%10 == 0) print(i)
}
# Add link prefix and details page suffix (which can be statically scraped)
rec_store_links <- rec_links %>% lapply( function (x) paste0(x, "#/details")) %>%
lapply(as.data.frame) %>% bind_rows
rec_store_links <- rec_store_links %>% map(function(x) str_replace(x, "#/details", "/about")) %>% unlist %>% unique
write.csv(rec_store_links, "data/weedmaps_store_links_111518_v2_rec.csv"); beepr::beep(5)
########## DO ONCE FOR ONLY MED
# Get store links for medical-only listings, deploying the function for each city
med_links <- list()
for (i in 153:length(city_links)) {
med_links[[i]] <- get_city_store_links(city_index_url = city_links[[i]],
med_only = TRUE, delay = 1.5, screenshot=TRUE) %>%
unlist %>% unique
if (i %%10 == 0) print(i)
}
# Add link prefix and details page suffix (which can be statically scraped)
med_store_links <- med_links %>% lapply( function (x) paste0(x, "#/details")) %>%
lapply(as.data.frame) %>% bind_rows
med_store_links <- med_store_links %>% map(function(x) str_replace(x, "#/details", "/about")) %>% unlist %>% unique
write.csv(med_store_links, "data/weedmaps_store_links_111518_v2_med.csv"); beepr::beep(5)
# Combine links -----------------------------------------------------------
# Because you might not get the full list from a single scrape...
# combine all the med attempts and all the rec attempts
# "Generic" search (med or rec)
generic15 <- read_csv("data/weedmaps_store_links_111518.csv") %>% select(x) %>% unlist
generic11 <- read_csv("data/weedmaps_store_links_111118.csv") %>% select(x) %>% unlist
generic <- c(generic11, generic15) %>% map(function(x) str_replace(x, "/about/about", "/about")) %>%
unlist %>% unique %>% str_subset("^h")
# Rec only
# rec14 <- read_csv("data/weedmaps_store_links_111418_rec.csv")%>% select(x) %>% unlist
rec15 <- read_csv("data/weedmaps_store_links_111518_rec.csv")%>% select(x) %>% unlist
rec15_2 <- read_csv("data/weedmaps_store_links_111518_v2_rec.csv")%>% select(x) %>% unlist
rec <- c(rec15, rec15_2) %>% unique %>% str_subset("^h")
# Deleted validation step:
# Why the big diff b/w rec14 and rec15? rec14 included false positives!
# So we are discarding that, bc rec15 and 15_2 agreed and spot checks of rec14 diff are med only
# setdiff(rec14, rec15)[15]
# Med only
med14 <- read_csv("data/weedmaps_store_links_111418_med.csv")%>% select(x) %>% unlist
med15 <- read_csv("data/weedmaps_store_links_111518_med.csv")%>% select(x) %>% unlist
med15_2 <- read_csv("data/weedmaps_store_links_111518_v2_med.csv")%>% select(x) %>% unlist
med <- c(med14, med15, med15_2) %>% unique %>% str_subset("^h")
length(generic)
length(med)
length(rec)
wm_links_type <- full_join(
data.frame(url=med, med=TRUE),
data.frame(url=rec, rec=TRUE)) %>%
full_join(data.frame(url=generic, generic=TRUE))
write_csv(wm_links_type, "data/weedmaps_store_links_111518_type_validated.csv")
# Scrape store info from store URLs ---------------------------------------
# Load list of URLS, w info by if returned from med/rec/generic search
wm_links_type <- read_csv("data/weedmaps_store_links_111518_type_validated.csv")
# Iterate through each URL, collecting store info.
store_details_list <- list()
for (i in 1:length(wm_links_type$url)) {
store_details_list[[i]] <- tryCatch(get_store_details_wm(wm_links_type$url[[i]], delay = .5),
error = function (e) data.frame(url=wm_links_type$url[[i]]))
if (i %%10 == 0) print(i)
}
i <- 1038
# Restore to data-frame, add on URL info
stores_wm <- lapply(store_details_list, data.frame) %>%
bind_rows() %>%
full_join(wm_links_type)
stores_wm %>% tail(30)
# Export for cleaning
write.csv(stores_wm, "data/store_details_wm_nov15.csv"); beepr::beep(5)
beepr::beep(5)
|
/code/scrape_weedmaps.R
|
no_license
|
ssdavenport/WeedScrape
|
R
| false | false | 6,455 |
r
|
\name{BasicMonteCarlo}
\alias{BasicMonteCarlo}
\title{The \code{BasicMonteCarlo} design method}
\description{A native \code{mtk} design method to generate Monte Carlo samples.}
\section{Usage}{
\itemize{
\item mtkBasicMonteCarloDesigner(listParameters=NULL)
\item mtkNativeDesigner(design="BasicMonteCarlo", information=NULL)
}
}
\section{Parameters}{
\describe{
\item{size :}{the sample size.}
}}
\section{Details}{
\enumerate{
\item The \code{mtk} implementation of the \code{Basic Monte-Carlo} method includes the following classes:
\itemize{
\item \code{\linkS4class{mtkBasicMonteCarloDesigner}} for Basic Monte-Carlo design processes.
\item \code{\linkS4class{mtkBasicMonteCarloDesignerResult}} to store and manage the design.
}
\item Many ways to create a \code{Basic Monte-Carlo} designer are available in \code{mtk}, but we recommend the following class constructors:
\code{\link{mtkBasicMonteCarloDesigner}} or \code{\link{mtkNativeDesigner}}.
}
}
\references{
\enumerate{
\item A. Saltelli, K. Chan and E. M. Scott (2000). Sensitivity Analysis. Wiley, New York.
\item J. Wang, H. Richard, R. Faivre, H. Monod (2013). Le package \code{mtk}, une bibliothèque R pour l'exploration numérique des modèles.
\emph{In:} Analyse de sensibilité et exploration de modèles : Application aux sciences de la nature et de l'environnement
(R. Faivre, B. Iooss, S. Mahévas, D. Makowski, H. Monod, Eds). Editions Quae, Versailles.
}
}
\examples{
## Experiments design with the "Basic Monte-Carlo" method for the "Ishigami" model
# Example I: by using the class constructors: mtkBasicMonteCarloDesigner()
# 1) Create a designer process based on the Basic Monte-Carlo method
MCdesign <- mtkBasicMonteCarloDesigner(listParameters = list(size=20))
# 2) Import the input factors of the "Ishigami" model
data(Ishigami.factors)
# 3) Build and run the workflow
exp1 <- mtkExpWorkflow(expFactors = Ishigami.factors,
processesVector = c(design=MCdesign))
run(exp1)
# 4) Report and plot the design
show(exp1)
plot(exp1)
# Example II: by using the class constructors: mtkNativeDesigner()
# 1) Create a designer process based on the Basic Monte-Carlo method
MCdesign <- mtkNativeDesigner("BasicMonteCarlo", information = list(size=20))
# 2) Import the input factors of the "Ishigami" model
data(Ishigami.factors)
# 3) Build and run the workflow
exp1 <- mtkExpWorkflow(expFactors = Ishigami.factors,
processesVector = c(design=MCdesign))
run(exp1)
# 4) Print and plot the design
print(exp1)
plot(exp1)
}
|
/man/BasicMonteCarlo.Rd
|
no_license
|
santoshpanda15/mtk
|
R
| false | false | 2,617 |
rd
|
# what if debris flow information is in the remarks?
debrisFlow <- grep("debris flow", stormData$remarks)
stormData$evtype[debrisFlow]
# debris flows occur in many remarks related to flooding. It's likely that flooding or heavy rain caused debris flows. We shouldn't, however, change that initial classification to debris flow. The document stated that 'landslide' was changed to 'debris flow', so we should only change those.
landslide <- which(stormData$evtype == 'landslide')
stormData$evtype[landslide] <- "debris flow"
flathead <- subset(stormData, stormData$countyname == "FLATHEAD")
unique(flathead$evtype)
length(unique(amatch(stormData$evtype, eventTypes, maxDist = 1)))
small <- stormData[1:5000,]
unique(small[['evtype']])
length(unique(small[['evtype']]))
termCorrection <- function(x, table) {
pb <- txtProgressBar(min = 1, max = length(table), style = 3)
uniqueEvents <- length(unique(x[['evtype']]))
print(paste("Unique events before corrections:", uniqueEvents))
for(i in 1:length(table)) {
type <- table[i]
if(nchar(type) / 2 <= 2) {
maxDist <- 2
} else {
      maxDist <- ceiling(nchar(type)/3) - 1 # this sets astronomical high tides to low tides, which doesn't make sense
}
matches <- amatch(x[['evtype']], type, maxDist = maxDist)
matchIdx <- which(!is.na(matches))
x[['evtype']][matchIdx] <- type
setTxtProgressBar(pb, i)
}
close(pb)
uniqueEvents <- length(unique(x[['evtype']]))
print(paste("Unique events after corrections:", uniqueEvents))
x
}
tstmWindHail <- check(copy, "tstm wind/hail", 4)
length(tstmWindHail)
length(grep("hail", copy$remarks[tstmWindHail]))
# should we make a function that finds all non-matches, then uses a regular expression to see what odd categories could possibly be corrected?
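# A rough sketch of that idea (hypothetical, untested): list the evtype values
# that fail to fuzzy-match any official category, then inspect them with
# regular expressions to see which could plausibly be corrected.
findNonMatches <- function(x, table, maxDist = 2) {
    matches <- amatch(x[['evtype']], table, maxDist = maxDist)
    sort(unique(x[['evtype']][is.na(matches)]))
}
# nonMatches <- findNonMatches(stormData, eventTypes)
# grep("wind", nonMatches, value = TRUE) # e.g. odd wind-related categories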
|
/random.R
|
no_license
|
ahesse2567/RepData_PeerAssessment2
|
R
| false | false | 1,866 |
r
|
# Define Classes and Set Methods for DLMtool package
# January 2016
# Tom Carruthers UBC (t.carruthers@fisheries.ubc.ca)
# Adrian Hordyk (a.hordyk@murdoch.edu.au)
# DLMtool Classes:
# DLM_data - An object for storing data for analysis using data-limited methods
# Stock - An operating model component that specifies the parameters of the
# population dynamics model
# Fleet - The component of the operating model that controls fishing dynamics
# Observation - An operating model component that controls the observation model
# OM - An object containing all the parameters needed to control the MSE which
# is built from components: Stock, Fleet and Observation objects.
# MSE - A Management Strategy Evaluation object that contains information about
# simulation conditions and performance of data-limited methods
# DLM_fease - An object for storing information about what data are available
# or might be available
# DLM_general - An object for storing general toolkit data. The data are stored
# in the right format in the slot data
# lmmodel - An object for storing fitted linear model objects in this case the
# relationship between M, age-at-maturity and the von B. K parameter.
# Create DLM_data class
setClass("DLM_data", representation(Name = "character", Year = "vector",
Cat = "matrix", Ind = "matrix", Rec = "matrix", t = "vector",
AvC = "vector", Dt = "vector", Mort = "vector", FMSY_M = "vector",
BMSY_B0 = "vector", Cref = "vector", Bref = "vector", Iref = "vector",
L50 = "vector", L95 = "vector", LFC = "vector", LFS = "vector",
CAA = "array", Dep = "vector", Abun = "vector", vbK = "vector",
vbLinf = "vector", vbt0 = "vector", wla = "vector", wlb = "vector",
steep = "vector", CV_Cat = "vector", CV_Dt = "vector", CV_AvC = "vector",
CV_Ind = "vector", CV_Mort = "vector", CV_FMSY_M = "vector",
CV_BMSY_B0 = "vector", CV_Cref = "vector", CV_Bref = "vector",
CV_Iref = "vector", CV_Rec = "vector", CV_Dep = "vector",
CV_Abun = "vector", CV_vbK = "vector", CV_vbLinf = "vector",
CV_vbt0 = "vector", CV_L50 = "vector", CV_LFC = "vector",
CV_LFS = "vector", CV_wla = "vector", CV_wlb = "vector",
CV_steep = "vector", sigmaL = "vector", MaxAge = "vector",
Units = "character", Ref = "numeric", Ref_type = "character",
Log = "list", params = "list", PosMPs = "vector", MPs = "vector",
OM = "data.frame", Obs = "data.frame", TAC = "array", TACbias = "array",
Sense = "array", CAL_bins = "numeric", CAL = "array", MPrec = "vector",
ML = "array", Lbar = "array", Lc = "array", LHYear = "numeric",
Misc = "list"))
# initialize DLM_data
setMethod("initialize", "DLM_data", function(.Object,stock="nada"){
#.Object
#})
#.Object<-new('DLM_data')
# run an error check here
if(file.exists(stock)){
dat <- read.csv(stock,header=F,colClasses="character") # read 1st sheet
dname<-dat[,1]
dat<-dat[,2:ncol(dat)]
.Object@Name<-dat[match("Name", dname),1]
.Object@Year <-as.numeric(dat[match("Year",dname),dat[match("Year",dname),]!=""])
.Object@Cat <-matrix(as.numeric(dat[match("Catch",dname),dat[match("Catch",dname),]!=""]),nrow=1)
.Object@Ind<-matrix(as.numeric(dat[match("Abundance index",dname),1:length(.Object@Year)]),nrow=1)
.Object@Rec<-matrix(as.numeric(dat[match("Recruitment",dname),1:length(.Object@Year)]),nrow=1)
.Object@t<-as.numeric(dat[match("Duration t",dname),1])
.Object@AvC<-as.numeric(dat[match("Average catch over time t",dname),1])
.Object@Dt<-as.numeric(dat[match("Depletion over time t",dname),1])
.Object@Mort<-as.numeric(dat[match("M",dname),1])
.Object@FMSY_M<-as.numeric(dat[match("FMSY/M",dname),1])
.Object@BMSY_B0<-as.numeric(dat[match("BMSY/B0",dname),1])
.Object@Cref<-as.numeric(dat[match("Cref",dname),1])
.Object@Bref<-as.numeric(dat[match("Bref",dname),1])
.Object@Iref<-as.numeric(dat[match("Iref",dname),1])
.Object@L50<-as.numeric(dat[match("Length at 50% maturity",dname),1])
.Object@L95<-as.numeric(dat[match("Length at 95% maturity",dname),1])
.Object@LFC<-as.numeric(dat[match("Length at first capture",dname),1])
.Object@LFS<-as.numeric(dat[match("Length at full selection",dname),1])
.Object@Dep<-as.numeric(dat[match("Current stock depletion",dname),1])
.Object@Abun<-as.numeric(dat[match("Current stock abundance",dname),1])
.Object@vbK<-as.numeric(dat[match("Von Bertalanffy K parameter", dname),1])
.Object@vbLinf<-as.numeric(dat[match("Von Bertalanffy Linf parameter", dname),1])
.Object@vbt0<-as.numeric(dat[match("Von Bertalanffy t0 parameter", dname),1])
.Object@wla<-as.numeric(dat[match("Length-weight parameter a", dname),1])
.Object@wlb<-as.numeric(dat[match("Length-weight parameter b", dname),1])
.Object@steep<-as.numeric(dat[match("Steepness", dname),1])
.Object@sigmaL<-as.numeric(dat[match("Sigma length composition", dname),1])
.Object@CV_Cat<-as.numeric(dat[match("CV Catch", dname),1])
.Object@CV_Dt<-as.numeric(dat[match("CV Depletion over time t", dname),1])
.Object@CV_AvC<-as.numeric(dat[match("CV Average catch over time t", dname),1])
.Object@CV_Ind<-as.numeric(dat[match("CV Abundance index", dname),1])
.Object@CV_Mort<-as.numeric(dat[match("CV M", dname),1])
.Object@CV_Rec<-as.numeric(dat[match("CV Rec", dname),1])
.Object@CV_FMSY_M<-as.numeric(dat[match("CV FMSY/M", dname),1])
.Object@CV_BMSY_B0<-as.numeric(dat[match("CV BMSY/B0", dname),1])
.Object@CV_Cref<-as.numeric(dat[match("CV Cref", dname),1])
.Object@CV_Bref<-as.numeric(dat[match("CV Bref", dname),1])
.Object@CV_Iref<-as.numeric(dat[match("CV Iref", dname),1])
.Object@CV_Dep<-as.numeric(dat[match("CV current stock depletion", dname),1])
.Object@CV_Abun<-as.numeric(dat[match("CV current stock abundance", dname),1])
.Object@CV_vbK<-as.numeric(dat[match("CV von B. K parameter", dname),1])
.Object@CV_vbLinf<-as.numeric(dat[match("CV von B. Linf parameter", dname),1])
.Object@CV_vbt0<-as.numeric(dat[match("CV von B. t0 parameter", dname),1])
.Object@CV_L50<-as.numeric(dat[match("CV Length at 50% maturity", dname),1])
.Object@CV_LFC<-as.numeric(dat[match("CV Length at first capture", dname),1])
.Object@CV_LFS<-as.numeric(dat[match("CV Length at full selection", dname),1])
.Object@CV_wla<-as.numeric(dat[match("CV Length-weight parameter a", dname),1])
.Object@CV_wlb<-as.numeric(dat[match("CV Length-weight parameter b", dname),1])
.Object@CV_steep<-as.numeric(dat[match("CV Steepness", dname),1])
.Object@MaxAge<-as.numeric(dat[match("Maximum age", dname),1])
.Object@MPrec<-as.numeric(dat[match("MPrec", dname),1])
if(length(grep("CAL",dname))>1){
CAL_bins<-as.numeric(dat[match("CAL_bins",dname),dat[match("CAL_bins",dname),]!=""])
nCAL<-length(CAL_bins)-1
.Object@CAL_bins<-CAL_bins
CALdat<-grep("CAL ",dname)
.Object@CAL<-array(as.numeric(as.matrix(dat[CALdat,1:nCAL])),dim=c(1,length(CALdat),nCAL))
}
CAAy<-grep("CAA",dname)[1:length(grep("CAA",dname))]
CAAa<-sum(dat[CAAy[1],]!="")
if(!is.na(CAAa)){
.Object@CAA<-array(as.numeric(as.matrix(dat[CAAy,1:CAAa])),dim=c(1,length(CAAy),CAAa))
}
.Object@ML<-matrix(as.numeric(dat[match("Mean length",dname),1:length(.Object@Year)]),nrow=1)
.Object@Lbar<-matrix(as.numeric(dat[match("Mean length Lc",dname),1:length(.Object@Year)]),nrow=1)
.Object@Lc<-matrix(as.numeric(dat[match("Modal length",dname),1:length(.Object@Year)]),nrow=1)
.Object@LHYear<-as.numeric(dat[match("LHYear",dname),1])
.Object@Units<-dat[match("Units", dname),1]
.Object@Ref<-as.numeric(dat[match("Reference TAC",dname),1])
.Object@Ref_type<-dat[match("Reference TAC type",dname),1]
.Object@Log[[1]]<-paste("Created:", Sys.time())
.Object@params<-new('list')
.Object@OM<-data.frame(NA)
.Object@Obs<-data.frame(NA)
.Object@TAC<-array(NA,dim=c(1,1,1))
.Object@TACbias<-array(NA,dim=c(1,1,1))
.Object@Sense<-array(NA,dim=c(1,1,1))
.Object@PosMPs<-NA
.Object@MPs<-NA
}else{
if(stock!="MSE"){
if(!is.na(stock))print("Couldn't find specified csv file, blank DLM object created")
}
}
# Default values -------------------------------------------------------------
if(NAor0(.Object@CV_Cat)).Object@CV_Cat<-0.2
if(NAor0(.Object@CV_Dt)).Object@CV_Dt<-0.25
if(NAor0(.Object@CV_AvC)).Object@CV_AvC<-0.2
if(NAor0(.Object@CV_Ind)).Object@CV_Ind<-0.2
if(NAor0(.Object@CV_Mort)).Object@CV_Mort<-0.2
if(NAor0(.Object@CV_FMSY_M)).Object@CV_FMSY_M<-0.2
if(NAor0(.Object@CV_BMSY_B0)).Object@CV_BMSY_B0<-0.045
if(NAor0(.Object@CV_Cref)).Object@CV_Cref<-0.2
if(NAor0(.Object@CV_Bref)).Object@CV_Bref<-0.2
if(NAor0(.Object@CV_Iref)).Object@CV_Iref<-0.2
if(NAor0(.Object@CV_Rec)).Object@CV_Rec<-0.2
if(NAor0(.Object@CV_Dep)).Object@CV_Dep<-0.25
if(NAor0(.Object@CV_Abun)).Object@CV_Abun<-0.25
if(NAor0(.Object@CV_vbK)).Object@CV_vbK<-0.1
if(NAor0(.Object@CV_vbLinf)).Object@CV_vbLinf<-0.1
if(NAor0(.Object@CV_vbt0)).Object@CV_vbt0<-0.1
if(NAor0(.Object@CV_L50)).Object@CV_L50<-0.1
if(NAor0(.Object@CV_LFC)).Object@CV_LFC<-0.2
if(NAor0(.Object@CV_LFS)).Object@CV_LFS<-0.2
if(NAor0(.Object@CV_wla)).Object@CV_wla<-0.1
if(NAor0(.Object@CV_wlb)).Object@CV_wlb<-0.1
if(NAor0(.Object@CV_steep)).Object@CV_steep<-0.2
if(length(.Object@sigmaL)==0).Object@sigmaL<-0.2
if(length(.Object@CAA)==0).Object@CAA<-array(NA,c(1,1,1))
if(length(.Object@CAL)==0).Object@CAL<-array(NA,c(1,1,1))
if(length(.Object@CAL_bins)==0).Object@CAL_bins<-1
if(length(.Object@TAC)==0).Object@TAC<-array(1,c(1,1))
if(length(.Object@TACbias)==0).Object@TACbias<-array(1,c(1,1))
if(length(.Object@Sense)==0).Object@Sense<-array(1,c(1,1))
if(length(.Object@ML)==0).Object@ML<-array(NA,c(1,1))
if(length(.Object@Lbar)==0).Object@Lbar<-array(NA,c(1,1))
if(length(.Object@Lc)==0).Object@Lc<-array(NA,c(1,1))
.Object
})
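# Example (a sketch; the file name is hypothetical): a DLM_data object can be
# built directly from a correctly formatted csv file, which populates the
# slots read above.
# dat <- new("DLM_data", stock = "MyFishery.csv")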
# Create Stock class
setClass("Stock",representation(Name="character",maxage="numeric",R0="numeric",
M="numeric", Msd="numeric",Mgrad="numeric",h="numeric",SRrel="numeric",
Linf="numeric",K="numeric",t0="numeric", Ksd="numeric",Kgrad="numeric",
Linfsd="numeric",Linfgrad="numeric",recgrad="numeric", a="numeric",
b="numeric",D="numeric",Perr="numeric", Period="numeric", Amplitude="numeric",
Size_area_1="numeric",
Frac_area_1="numeric", Prob_staying="numeric",AC="numeric",
L50="numeric", L50_95="numeric",Source="character"))
# initialize Stock
setMethod("initialize", "Stock", function(.Object,file=NA){
if (!is.na(file)) {
if (file.exists(file)) {
dat <- read.csv(file,header=F,colClasses="character") # read 1st sheet
dname<-dat[,1]
dat<-dat[,2:ncol(dat)]
.Object@Name<-dat[match("Name", dname),1]
.Object@maxage<-as.numeric(dat[match("maxage",dname),1])
.Object@R0<-as.numeric(dat[match("R0",dname),1])
.Object@M<-as.numeric(dat[match("M",dname),1:2])
.Object@Msd<-as.numeric(dat[match("Msd",dname),1:2])
.Object@Mgrad<-as.numeric(dat[match("Mgrad",dname),1:2])
.Object@h<-as.numeric(dat[match("h",dname),1:2])
.Object@SRrel<-as.numeric(dat[match("SRrel",dname),1])
.Object@Linf<-as.numeric(dat[match("Linf",dname),1:2])
.Object@K<-as.numeric(dat[match("K",dname),1:2])
.Object@t0<-as.numeric(dat[match("t0",dname),1:2])
.Object@Ksd<-as.numeric(dat[match("Ksd",dname),1:2])
.Object@Kgrad<-as.numeric(dat[match("Kgrad",dname),1:2])
.Object@Linfsd<-as.numeric(dat[match("Linfsd",dname),1:2])
.Object@Linfgrad<-as.numeric(dat[match("Linfgrad",dname),1:2])
.Object@recgrad<-as.numeric(dat[match("recgrad",dname),1:2])
.Object@a<-as.numeric(dat[match("a",dname),1])
.Object@b<-as.numeric(dat[match("b",dname),1])
.Object@D<-as.numeric(dat[match("D",dname),1:2])
.Object@Perr<-as.numeric(dat[match("Perr",dname),1:2])
.Object@Period<-as.numeric(dat[match("Period",dname),1:2])
.Object@Amplitude<-as.numeric(dat[match("Amplitude",dname),1:2])
.Object@AC<-as.numeric(dat[match("AC",dname),1:2])
.Object@Size_area_1<-as.numeric(dat[match("Size_area_1",dname),1:2])
.Object@Frac_area_1<-as.numeric(dat[match("Frac_area_1",dname),1:2])
.Object@Prob_staying<-as.numeric(dat[match("Prob_staying",dname),1:2])
.Object@L50<-as.numeric(dat[match("L50", dname),1:2])
.Object@L50_95<-as.numeric(dat[match("L50_95", dname),1:2])
.Object@Source<-dat[match("Source", dname),1]
} else {
message("File doesn't exist")
}
}
.Object
})
# Create Fleet class
setClass("Fleet",slots=c(Name="character",nyears="numeric", Spat_targ="numeric",
Fsd="numeric", qinc="numeric",qcv="numeric",
EffYears="numeric", EffLower="numeric", EffUpper="numeric",
SelYears="numeric", AbsSelYears="numeric", L5="numeric", LFS="numeric",
Vmaxlen="numeric", L5Lower="numeric", L5Upper="numeric", LFSLower="numeric",
LFSUpper="numeric", VmaxLower="numeric", VmaxUpper="numeric"))
# initialize Fleet
setMethod("initialize", "Fleet", function(.Object,file=NA){
if (!is.na(file)) {
if (file.exists(file)) {
dat <- read.csv(file,header=F,colClasses="character") # read 1st sheet
dname<-dat[,1]
dat<-dat[,2:ncol(dat)]
.Object@Name<-dat[match("Name", dname),1]
.Object@nyears <-as.numeric(dat[match("nyears",dname),1])
.Object@Spat_targ<-as.numeric(dat[match("Spat_targ",dname),1:2])
.Object@Fsd<-as.numeric(dat[match("Fsd",dname),1:2])
# .Object@Fgrad<-as.numeric(dat[match("Fgrad",dname),1:2])
nEffYears <- ncol(dat[match("EffYears",dname),])
oldw <- getOption("warn")
options(warn=-1)
chk <- as.numeric(dat[match("EffYears",dname),1:nEffYears])
options(warn = oldw)
ind <- which(!is.na(chk))
nEffYears <- length(ind)
.Object@EffYears <-as.numeric(dat[match("EffYears",dname),1:nEffYears])
.Object@EffLower <-as.numeric(dat[match("EffLower",dname),1:nEffYears])
.Object@EffUpper <-as.numeric(dat[match("EffUpper",dname),1:nEffYears])
.Object@qinc<-as.numeric(dat[match("qinc",dname),1:2])
.Object@qcv<-as.numeric(dat[match("qcv",dname),1:2])
chkName <- match("SelYears",dname) # Check if vector of selectivity years exists
if (is.finite(chkName)) {
nSelYears <- ncol(dat[match("SelYears",dname),])
oldw <- getOption("warn")
options(warn=-1)
chk <- as.numeric(dat[match("SelYears",dname),1:nSelYears])
options(warn = oldw)
ind <- which(is.finite(chk))
nSelYears <- length(ind)
chk <- length(ind)
if (is.finite(chk) & chk > 0) { # parameters for selectivity years exists
.Object@SelYears <- as.numeric(dat[match("SelYears",dname),1:nSelYears])
.Object@L5Lower <- as.numeric(dat[match("L5Lower",dname),1:nSelYears])
.Object@L5Upper <- as.numeric(dat[match("L5Upper",dname),1:nSelYears])
.Object@LFSLower <- as.numeric(dat[match("LFSLower",dname),1:nSelYears])
.Object@LFSUpper <- as.numeric(dat[match("LFSUpper",dname),1:nSelYears])
.Object@VmaxLower <- as.numeric(dat[match("VmaxLower",dname),1:nSelYears])
.Object@VmaxUpper <- as.numeric(dat[match("VmaxUpper",dname),1:nSelYears])
}
}
# These are ignored in MSE if L5Lower etc are set
.Object@L5 <- as.numeric(dat[match("L5",dname),1:2])
.Object@LFS <- as.numeric(dat[match("LFS",dname),1:2])
.Object@Vmaxlen <-as.numeric(dat[match("Vmaxlen",dname),1:2])
} else {
message("File doesn't exist")
}
}
.Object
})
# Create Observation class
setClass("Observation",representation(Name="character",LenMcv="numeric",
Cobs="numeric",Cbiascv="numeric",CAA_nsamp="numeric",CAA_ESS="numeric",
CAL_nsamp="numeric",CAL_ESS="numeric",CALcv="numeric",
Iobs="numeric",Mcv="numeric",Kcv="numeric",t0cv="numeric",Linfcv="numeric",
LFCcv="numeric",LFScv="numeric",B0cv="numeric",
FMSYcv="numeric",FMSY_Mcv="numeric",BMSY_B0cv="numeric",
rcv="numeric", Dbiascv="numeric",Dcv="numeric",
Btbias="numeric",Btcv="numeric",Fcurbiascv="numeric",Fcurcv="numeric",
hcv="numeric",Icv="numeric",maxagecv="numeric",Reccv="numeric",
Irefcv="numeric",Crefcv="numeric",Brefcv="numeric",beta="numeric"))
# initialize Observation
setMethod("initialize", "Observation", function(.Object,file=NA){
if (!is.na(file)) {
if (file.exists(file)) {
dat <- read.csv(file,header=F,colClasses="character") # read 1st sheet
dname<-dat[,1]
dat<-dat[,2:ncol(dat)]
.Object@Name<-dat[match("Name", dname),1]
.Object@LenMcv<-as.numeric(dat[match("LenMcv",dname),1])
.Object@Cobs<-as.numeric(dat[match("Cobs",dname),1:2])
.Object@Cbiascv<-as.numeric(dat[match("Cbiascv",dname),1])
.Object@CAA_nsamp<-as.numeric(dat[match("CAA_nsamp",dname),1:2])
.Object@CAA_ESS<-as.numeric(dat[match("CAA_ESS",dname),1:2])
    .Object@CAL_nsamp<-as.numeric(dat[match("CAA_nsamp",dname),1:2]) # note: reuses the CAA_nsamp row
    .Object@CAL_ESS<-as.numeric(dat[match("CAA_ESS",dname),1:2]) # note: reuses the CAA_ESS row
.Object@CALcv<-as.numeric(dat[match("CALcv",dname),1:2])
.Object@Iobs<-as.numeric(dat[match("Iobs",dname),1:2])
.Object@Mcv<-as.numeric(dat[match("Mcv",dname),1])
.Object@Kcv<-as.numeric(dat[match("Kcv",dname),1])
.Object@t0cv<-as.numeric(dat[match("t0cv",dname),1])
.Object@Linfcv<-as.numeric(dat[match("Linfcv",dname),1])
.Object@LFCcv<-as.numeric(dat[match("LFCcv",dname),1])
.Object@LFScv<-as.numeric(dat[match("LFScv",dname),1])
.Object@B0cv<-as.numeric(dat[match("B0cv",dname),1])
.Object@FMSYcv<-as.numeric(dat[match("FMSYcv",dname),1])
.Object@FMSY_Mcv<-as.numeric(dat[match("FMSY_Mcv",dname),1])
.Object@BMSY_B0cv<-as.numeric(dat[match("BMSY_B0cv",dname),1])
.Object@rcv<-as.numeric(dat[match("rcv",dname),1])
.Object@Dbiascv<-as.numeric(dat[match("Dbiascv",dname),1])
.Object@Dcv<-as.numeric(dat[match("Dcv",dname),1:2])
.Object@Btbias<-as.numeric(dat[match("Btbias",dname),1:2])
.Object@Btcv<-as.numeric(dat[match("Btcv",dname),1:2])
.Object@Fcurbiascv<-as.numeric(dat[match("Fcurbiascv",dname),1])
.Object@Fcurcv<-as.numeric(dat[match("Fcurcv",dname),1:2])
.Object@hcv<-as.numeric(dat[match("hcv",dname),1])
.Object@Icv<-as.numeric(dat[match("Icv",dname),1])
.Object@maxagecv<-as.numeric(dat[match("maxagecv",dname),1])
.Object@Reccv<-as.numeric(dat[match("Reccv",dname),1:2])
.Object@Irefcv<-as.numeric(dat[match("Irefcv",dname),1])
.Object@Crefcv<-as.numeric(dat[match("Crefcv",dname),1])
.Object@Brefcv<-as.numeric(dat[match("Brefcv",dname),1])
.Object@beta<-as.numeric(dat[match("beta",dname),1:2])
} else {
message("File doesn't exist")
}
}
.Object
})
# Create OM class
setClass("OM",representation(Name="character",nyears="numeric",maxage="numeric",
R0="numeric",M="numeric", Msd="numeric",Mgrad="numeric",h="numeric",
SRrel="numeric",Linf="numeric",K="numeric",t0="numeric", Ksd="numeric",
Kgrad="numeric",Linfsd="numeric",Linfgrad="numeric",recgrad="numeric",
a="numeric",b="numeric",D="numeric", Size_area_1="numeric",
Frac_area_1="numeric",Prob_staying="numeric", Source="character",
L50="numeric", L50_95="numeric", SelYears="numeric", AbsSelYears="numeric",
L5="numeric", LFS="numeric", Vmaxlen="numeric",
L5Lower="numeric", L5Upper="numeric", LFSLower="numeric",
LFSUpper="numeric", VmaxLower="numeric", VmaxUpper="numeric",
beta="numeric",
Spat_targ="numeric", Fsd="numeric", Period="numeric", Amplitude="numeric",
EffYears="numeric", EffLower="numeric", EffUpper="numeric",
# Fgrad="numeric",
qinc="numeric",qcv="numeric",AC="numeric", Cobs="numeric",Cbiascv="numeric",
CAA_nsamp="numeric",CAA_ESS="numeric", CAL_nsamp="numeric",CAL_ESS="numeric",
CALcv="numeric", Iobs="numeric",Perr="numeric", Mcv="numeric",Kcv="numeric",
t0cv="numeric",Linfcv="numeric", LFCcv="numeric", LFScv="numeric",
B0cv="numeric",FMSYcv="numeric",FMSY_Mcv="numeric",BMSY_B0cv="numeric",
LenMcv="numeric",rcv="numeric", Dbiascv="numeric",Dcv="numeric",
Btbias="numeric",Btcv="numeric", Fcurbiascv="numeric",Fcurcv="numeric",
hcv="numeric", Icv="numeric",maxagecv="numeric", Reccv="numeric",
Irefcv="numeric",Crefcv="numeric",Brefcv="numeric"))
# initialize OM
setMethod("initialize", "OM", function(.Object,Stock,Fleet,Observation){
if(class(Stock)!='Stock')print(paste('Could not build operating model:',deparse(substitute(Stock)),'not of class Stock'))
if(class(Fleet)!='Fleet')print(paste('Could not build operating model:',deparse(substitute(Fleet)),'not of class Fleet'))
if(class(Observation)!='Observation')print(paste('Could not build operating model:',deparse(substitute(Observation)),'not of class Observation'))
if(class(Stock)!='Stock'|class(Fleet)!='Fleet'|class(Observation)!='Observation')stop()
.Object@Name<-paste("Stock:",Stock@Name," Fleet:",Fleet@Name," Observation model:",Observation@Name,sep="")
# Now copy the values for stock, fleet and observation slots to same slots in the Sim object
Sslots<-slotNames(Stock)
for(i in 2:length(Sslots)) {
tt <- .hasSlot(Stock,Sslots[i]) # For back-compatibility
if (tt) slot(.Object,Sslots[i])<-slot(Stock,Sslots[i])
}
Fslots<-slotNames(Fleet)
for(i in 2:length(Fslots)) {
tt <- .hasSlot(Fleet,Fslots[i])
if (tt) slot(.Object,Fslots[i])<-slot(Fleet,Fslots[i])
}
Oslots<-slotNames(Observation)
for(i in 2:length(Oslots)) {
tt <- .hasSlot(Observation,Oslots[i])
if (tt) slot(.Object,Oslots[i])<-slot(Observation,Oslots[i])
}
.Object
})
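# Usage sketch (illustrative only; the object names below are assumptions, not
# part of this source). Called with no file argument, each constructor above
# returns an empty template object, which is enough to show how an operating
# model is assembled from its three components:
# exStock <- new("Stock")                        # empty Stock template
# exFleet <- new("Fleet")                        # empty Fleet template
# exObs   <- new("Observation")                  # empty Observation template
# exOM    <- new("OM", exStock, exFleet, exObs)  # combine into an operating model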
# Create MSE class
setClass("MSE",representation(Name="character",nyears="numeric",
proyears="numeric",nMPs="numeric",MPs="character", nsim="numeric",
OM="data.frame",Obs="data.frame",B_BMSY="array", F_FMSY="array",
B="array",FM="array",C="array",TAC="array",SSB_hist="array",
CB_hist="array",FM_hist="array"))
setMethod("initialize", "MSE", function(.Object,Name,nyears,proyears,nMPs,MPs,
nsim,OMtable,Obs,B_BMSYa,F_FMSYa,Ba,FMa,Ca,TACa,SSB_hist,CB_hist,FM_hist){
.Object@Name<-Name
.Object@nyears <-nyears
.Object@proyears<-proyears
.Object@nMPs<-nMPs
.Object@MPs<-MPs
.Object@nsim<-nsim
.Object@OM<-OMtable
.Object@Obs<-Obs
.Object@B_BMSY<-B_BMSYa
.Object@F_FMSY<-F_FMSYa
.Object@B<-Ba
.Object@FM<-FMa
.Object@C<-Ca
.Object@TAC<-TACa
.Object@SSB_hist<-SSB_hist
.Object@CB_hist<-CB_hist
.Object@FM_hist<-FM_hist
.Object
})
# Create DLM_fease class
setClass("DLM_fease",representation(Name="character",Case="character",Catch="numeric",
Index="numeric",Natural_mortality_rate="numeric",Maturity_at_length="numeric",
Growth="numeric",Length_weight_conversion="numeric",Fleet_selectivity="numeric",
Catch_at_length="numeric",Catch_at_age="numeric",Recruitment_index="numeric",
Stock_recruitment_relationship="numeric",Target_catch="numeric",Target_biomass="numeric",
Target_index="numeric",Abundance="numeric"))
# initialize DLM_fease
setMethod("initialize", "DLM_fease", function(.Object,file="nada",ncases=1){
# run an error check here
if(file.exists(file)){
dat <- read.csv(file,header=F,colClasses="character") # read 1st sheet
nr<-nrow(dat)
ncases=ncol(dat)-1
dname<-dat[,1]
if(ncases==1)dat<-array(dat[,2:ncol(dat)],dim=c(nr,ncases))
if(ncases>1)dat<-dat[,2:ncol(dat)]
.Object@Name<- dat[match("Name", dname),1]
.Object@Case<-as.character(dat[match("Case",dname),1:ncases])
.Object@Catch<-as.numeric(dat[match("Catch",dname),1:ncases])
.Object@Index<-as.numeric(dat[match("Index",dname),1:ncases])
.Object@Natural_mortality_rate<-as.numeric(dat[match("Natural_mortality_rate",dname),1:ncases])
.Object@Maturity_at_length<-as.numeric(dat[match("Maturity_at_length",dname),1:ncases])
.Object@Growth<-as.numeric(dat[match("Growth",dname),1:ncases])
.Object@Length_weight_conversion<-as.numeric(dat[match("Length_weight_conversion",dname),1:ncases])
.Object@Fleet_selectivity<-as.numeric(dat[match("Fleet_selectivity",dname),1:ncases])
.Object@Catch_at_length<-as.numeric(dat[match("Catch_at_length",dname),1:ncases])
.Object@Catch_at_age<-as.numeric(dat[match("Catch_at_age",dname),1:ncases])
.Object@Recruitment_index<-as.numeric(dat[match("Recruitment_index",dname),1:ncases])
.Object@Stock_recruitment_relationship<-as.numeric(dat[match("Stock_recruitment_relationship",dname),1:ncases])
.Object@Target_catch<-as.numeric(dat[match("Target_catch",dname),1:ncases])
.Object@Target_biomass<-as.numeric(dat[match("Target_biomass",dname),1:ncases])
.Object@Target_index<-as.numeric(dat[match("Target_index",dname),1:ncases])
.Object@Abundance<-as.numeric(dat[match("Abundance",dname),1:ncases])
}else{
.Object@Name<-"Blank DLM_Fease"
.Object@Case<-"Case 1"
.Object@Catch<-0
.Object@Index<-0
.Object@Natural_mortality_rate<-0
.Object@Maturity_at_length<-0
.Object@Growth<-0
.Object@Length_weight_conversion<-0
.Object@Fleet_selectivity<-0
.Object@Catch_at_length<-0
.Object@Catch_at_age<-0
.Object@Recruitment_index<-0
.Object@Stock_recruitment_relationship<-0
.Object@Target_catch<-0
.Object@Target_biomass<-0
.Object@Target_index<-0
.Object@Abundance<-0
}
.Object
})
# Create DLM_general object
setClass("DLM_general",representation(Name="character",data="list"))
# initialize DLM_general
setMethod("initialize", "DLM_general", function(.Object){
.Object
})
# Create lmmodel class
setClass("lmmodel",representation(Name="character",models="list"))
# initialize lmmodel
setMethod("initialize", "lmmodel", function(.Object,Name,models){
.Object@Name<-Name
.Object@models<-models
.Object
})
# Define generic plot method for DLM data objects
setMethod("plot",
signature(x = "DLM_data"),
function(x,funcs=NA,maxlines=6,perc=0.5,xlims=NA){
DLM_data<-x
cols<-rep(c('black','red','green','blue','orange','brown','purple','dark grey','violet','dark red','pink','dark blue','grey'),4)
ltys<-rep(1:4,each=13)
if(is.na(funcs[1]))funcs<-DLM_data@MPs
nMPs<-length(funcs)
nplots<-ceiling(nMPs/maxlines)
maxl<-ceiling(nMPs/nplots)
mbyp <- split(1:nMPs, ceiling(1:nMPs/maxl)) # assign methods to plots
if(is.na(xlims[1])|length(xlims)!=2){
xlims<-quantile(DLM_data@TAC,c(0.005,0.95),na.rm=T)
if(xlims[1]<0)xlims[1]<-0
}
if(!NAor0(DLM_data@Ref)){
if(xlims[1]>DLM_data@Ref)xlims[1]<-max(0,0.98*DLM_data@Ref)
if(xlims[2]<DLM_data@Ref)xlims[2]<-1.02*DLM_data@Ref
}
ylims<-c(0,1)
#for(m in 1:nMPs){
# if(sum(!is.na(DLM_data@TAC[m,,1]))>2){
# dens<-density(DLM_data@TAC[m,,1],na.rm=T)
#print(quantile(dens$y,0.99,na.rm=T))
# if(quantile(dens$y,0.9,na.rm=T)>ylims[2])ylims[2]<-quantile(dens$y,0.90,na.rm=T)
#}
#}
#dev.new2(width=10,height=0.5+7*nplots)
par(mfrow=c(ceiling(nplots/2),2),mai=c(0.4,0.4,0.01,0.01),omi=c(0.35,0.35,0.35,0.05))
for(p in 1:nplots){
m<-mbyp[[p]][1]
plot(NA,NA,xlim=xlims,ylim=ylims,main="",xlab="",ylab="",col="white",lwd=3,type="l")
abline(h=0)
if(!NAor0(DLM_data@Ref)){
abline(v=DLM_data@Ref,col="light grey",lwd=2)
if(!NAor0(DLM_data@Ref_type[1]))legend('right',DLM_data@Ref_type,text.col="grey",bty='n')
}
#plot(density(DLM@TAC[m,,1],from=0,na.rm=T),xlim=xlims,ylim=ylims,main="",xlab="",ylab="",col=coly[m],lty=ltyy[m],type="l")
if(!is.na(perc[1]))abline(v=quantile(DLM_data@TAC[m,,1],p=perc,na.rm=T),col=cols[m],lty=ltys[m])
#if(length(mbyp[[p]])>0){
for(ll in 1:length(mbyp[[p]])){
m<-mbyp[[p]][ll]
if(sum(!is.na(DLM_data@TAC[m,,1]))>10){ # only plot if there are sufficient non-NA TAC samples
x<-density(DLM_data@TAC[m,,1],from=0,na.rm=T)$x
y<-density(DLM_data@TAC[m,,1],from=0,na.rm=T)$y
y<-y/max(y)
lines(x,y,col=cols[ll])
}else{
print(paste("Method ",funcs[m]," produced too many NA TAC values for plotting densities",sep=""))
}
if(!is.na(perc[1]))abline(v=quantile(DLM_data@TAC[m,,1],p=perc,na.rm=T),col=cols[ll],lty=2)
}
#}
cind<-1:length(mbyp[[p]])
legend('topright',funcs[mbyp[[p]]],text.col=cols[cind],col=cols[cind],lty=1,bty='n',cex=0.75)
}
mtext(paste("TAC (",DLM_data@Units,")",sep=""),1,outer=T,line=0.5)
mtext(paste("Standardized relative frequency",sep=""),2,outer=T,line=0.5)
mtext(paste("TAC calculation for ",DLM_data@Name,sep=""),3,outer=T,line=0.5)
})
# Define generic summary method for DLM data objects
setMethod("summary",
signature(object = "DLM_data"),
function(object){
scols<-c('red','green','blue','orange','brown','purple','dark grey','violet','dark red','pink','dark blue','grey')
#dev.new2(width=8,height=4.5)
  par(mai=c(0.35,0.9,0.2,0.01),omi=c(0.3,0,0,0))
layout(matrix(c(1,2,1,2,1,2,3,3,3,3),nrow=2))
plot(object@Year,object@Cat[1,],col="blue",type="l",xlab="Year",ylab=paste("Catch (",object@Units,")",sep=""),ylim=c(0,max(object@Cat[1,],na.rm=T)))
plot(object@Year,object@Ind[1,],col="orange",type="l",xlab="Year",ylab="Relative abundance",ylim=c(0,max(object@Ind[1,],na.rm=T)))
slots<-c("Dep","Mort","FMSY_M","Dt","BMSY_B0","vbK")
namey<-c("Stock depletion", "Natural Mortality rate","Ratio of FMSY to M","Depletion over time t","BMSY relative to unfished","Von B. k parameter")
slotsCV<-c("CV_Dep","CV_Mort","CV_FMSY_M","CV_Dt","CV_BMSY_B0","CV_vbK")
ind<-rep(TRUE,length(slotsCV))
for(i in 1:length(slotsCV))if(NAor0(attr(object,slots[i]))|NAor0(attr(object,slotsCV[i])))ind[i]<-FALSE
slots<-slots[ind]
slotsCV<-slotsCV[ind]
nrep<-150
xstore<-array(NA,c(length(slots),nrep))
ystore<-array(NA,c(length(slots),nrep))
for(i in 1:length(slots)){
mu<-attr(object,slots[i])
cv<-attr(object,slotsCV[i])
xstore[i,]<-qlnorm(seq(0,1,length.out=nrep),mconv(mu,cv),sdconv(mu,cv))
ystore[i,]<-dlnorm(xstore[i,],mconv(mu,cv),sdconv(mu,cv))
}
plot(xstore[1,],ystore[1,],type="l",xlim=c(0,1.2),ylim=c(0,quantile(ystore,0.97)),xlab="",ylab="Relative frequency",col=scols[1])
if(length(slots)>1){
for(i in 2:length(slots)) lines(xstore[i,],ystore[i,],col=scols[i])
}
legend('topright',legend=namey[ind],text.col=scols[1:length(slots)],bty='n')
  mtext(paste("Data summary for",object@Name,sep=" "),3,font=2,line=0.25,outer=T)
})
# Define generic summary method for MSE objects
setMethod("summary",
signature(object = "MSE"),
function(object){
MSEobj<-object
nm<-MSEobj@nMPs
nsim<-MSEobj@nsim
proyears<-MSEobj@proyears
Yd<-P10<-P50<-P100<-POF<-LTY<-STY<-VY<-array(NA,c(nm,nsim))
yind<-max(MSEobj@proyears-4,1):MSEobj@proyears
RefYd<-MSEobj@OM$RefY
yend<-max(MSEobj@proyears-9,1):MSEobj@proyears
ystart<-1:10
y1<-1:(MSEobj@proyears-1)
y2<-2:MSEobj@proyears
for(m in 1:nm){
Yd[m,]<-round(apply(MSEobj@C[,m,yind],1,mean,na.rm=T)/RefYd*100,1)
POF[m,]<-round(apply(MSEobj@F_FMSY[,m,]>1,1,sum,na.rm=T)/proyears*100,1)
P10[m,]<-round(apply(MSEobj@B_BMSY[,m,]<0.1,1,sum,na.rm=T)/proyears*100,1)
P50[m,]<-round(apply(MSEobj@B_BMSY[,m,]<0.5,1,sum,na.rm=T)/proyears*100,1)
P100[m,]<-round(apply(MSEobj@B_BMSY[,m,]<1,1,sum,na.rm=T)/proyears*100,1)
LTY[m]<-round(sum(MSEobj@C[,m,yend]/RefYd>0.5)/(MSEobj@nsim*length(yend))*100,1)
STY[m]<-round(sum(MSEobj@C[,m,ystart]/RefYd>0.5)/(MSEobj@nsim*length(ystart))*100,1)
AAVY<-apply(((MSEobj@C[,m,y1]-MSEobj@C[,m,y2])^2)^0.5,1,mean)/apply(MSEobj@C[,m,y2],1,mean)
VY[m]<-round(sum(AAVY<0.1)/MSEobj@nsim*100,1)
}
nr<-2
out<-cbind(MSEobj@MPs,round(apply(Yd,1,mean,na.rm=T),nr),round(apply(Yd,1,sd,na.rm=T),nr),
round(apply(POF,1,mean,na.rm=T),nr),round(apply(POF,1,sd,na.rm=T),nr),
round(apply(P10,1,mean,na.rm=T),nr),round(apply(P10,1,sd,na.rm=T),nr),
round(apply(P50,1,mean,na.rm=T),nr),round(apply(P50,1,sd,na.rm=T),nr),
round(apply(P100,1,mean,na.rm=T),nr),round(apply(P100,1,sd,na.rm=T),nr),
round(apply(LTY,1,mean,na.rm=T),nr),
round(apply(STY,1,mean,na.rm=T),nr),
round(apply(VY,1,mean,na.rm=T),nr))
out<-as.data.frame(out)
names(out)<-c("MP","Yield","stdev","POF","stdev ","P10","stdev",
"P50","stdev","P100","stdev","LTY","STY","VY")
out[,1]<-as.character(out[,1])
for(i in 2:ncol(out))out[,i]<-as.numeric(as.character(out[,i]))
out
})
# Plotting code for MSE object
setMethod("plot",
signature(x = "MSE"),
function(x){
MSEobj<-x
Pplot(MSEobj)
Kplot(MSEobj)
Tplot(MSEobj)
})
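# Usage sketch (assumption: MSEobj is a populated MSE object; none is created
# in this file). The two generics defined above then give a quick overview:
# summary(MSEobj)   # performance table per management procedure
# plot(MSEobj)      # Pplot, Kplot and Tplot projection panels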
|
/DLMtool/R/ClassMeths.r
|
no_license
|
ingted/R-Examples
|
R
| false | false | 33,787 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LobsterSurveyProcess.r
\name{LobsterSurveyProcess}
\alias{LobsterSurveyProcess}
\title{LobsterSurveyProcess}
\usage{
LobsterSurveyProcess(
species = 2550,
size.range = c(0, 200),
lfa = "34",
yrs,
mths = c("May", "Jun", "Jul", "Aug", "Sep", "Oct"),
gear.type = NULL,
sex = c(1:3, NA),
bin.size = 5,
LFS = 160,
Net = NULL,
comparative = F,
biomass = F
)
}
\arguments{
\item{size.range}{defines the minimum and maximum value and is a filter (default is 0, 200mm CW)}
\item{lfa}{defines the specific LFA for the ILTS}
\item{yrs}{is the survey years to estimate}
\item{gear.type}{survey trawl net identification (if !NULL options are '280 BALLOON' or 'NEST')}
\item{bin.size}{aggregates the abundance into size bins (default is 5mm bins)}
\item{mths}{months of the survey to include (default is May through October)}
}
\value{
data.frame of survey data called 'surveyLobsters'
}
\description{
LobsterSurveyProcess
}
\author{
Brad Hubley & Manon Cassista-Da Ros
}
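% Hypothetical usage sketch; the argument values below are illustrative
% assumptions, not values taken from the package itself.
\examples{
\dontrun{
surveyLobsters <- LobsterSurveyProcess(lfa = "34", yrs = 2016:2019,
                                       mths = c("Jun", "Jul", "Aug"),
                                       bin.size = 5)
}
}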
|
/man/LobsterSurveyProcess.Rd
|
no_license
|
LobsterScience/bio.lobster
|
R
| false | true | 1,048 |
rd
|
# RMSE
The residual standard error reported for the regression model for poverty rate of U.S. counties in terms of high school graduation rate is 4.67. What does this mean?
# The typical difference between the observed poverty rate and the poverty rate predicted by the model is about 4.67 percentage points.
-----
# Standard error of residuals
One way to assess strength of fit is to consider how far off the model is for a typical case. That is, for some observations, the fitted value will be very close to the actual value, while for others it will not. The magnitude of a typical residual can give us a sense of generally how close our estimates are.
However, recall that some of the residuals are positive, while others are negative. In fact, it is guaranteed by the least squares fitting procedure that the mean of the residuals is zero. Thus, it makes more sense to compute the square root of the mean squared residual, or root mean squared error (RMSE). R calls this quantity the residual standard error.
To make this estimate unbiased, you have to divide the sum of the squared residuals by the degrees of freedom in the model. Thus,
RMSE = \sqrt{ \frac{\sum_i{e_i^2}}{d.f.} } = \sqrt{ \frac{SSE}{d.f.} }
You can recover the residuals from mod with residuals(), and the degrees of freedom with df.residual().
# View summary of model
summary(mod)
# Compute the mean of the residuals
mean(residuals(mod))
# Compute RMSE
sqrt(sum(residuals(mod)^2) / df.residual(mod))
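As a cross-check (assuming mod is an ordinary lm fit, as in the exercise), the same quantity is reported by the model object itself, so the manual calculation above should agree with:
# Residual standard error extracted directly from the fitted model
sigma(mod)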
-----
# Assessing simple linear model fit
Recall that the coefficient of determination, R^2, can be computed as
R^2 = 1 - \frac{SSE}{SST} = 1 - \frac{Var(e)}{Var(y)} \,,
where e is the vector of residuals and y is the response variable. This gives us the interpretation of R^2 as the percentage of the variability in the response that is explained by the model, since the residuals are the part of that variability that remains unexplained by the model.
# View model summary
summary(mod)
# Compute R-squared
bdims_tidy %>%
summarize(var_y = var(wgt), var_e = var(.resid)) %>%
mutate(R_squared = 1 - (var_e/var_y))
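As a sanity check (again assuming mod is the underlying lm fit), the same value is available straight from the model summary:
# R-squared reported by the fitted model
summary(mod)$r.squared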
-----
# Interpretation of R^2
The R^2 reported for the regression model for poverty rate of U.S. counties in terms of high school graduation rate is 0.464.
lm(formula = poverty ~ hs_grad, data = countyComplete) %>%
summary()
How should this result be interpreted?
# 46.4% of the variability in poverty rate among U.S. counties can be explained by high school graduation rate.
-----
# Linear vs. average
The R^2 gives us a numerical measurement of the strength of fit relative to a null model based on the average of the response variable:
\hat{y}_{null} = \bar{y}
This model has an R^2 of zero because SSE = SST. That is, since the fitted values (\hat{y}_{null}) are all equal to the average (\bar{y}), the residual for each observation is the distance between that observation and the mean of the response. Since we can always fit the null model, it serves as a baseline against which all other models will be compared.
In the graphic, we visualize the residuals for the null model (mod_null at left) vs. the simple linear regression model (mod_hgt at right) with height as a single explanatory variable. Try to convince yourself that, if you squared the lengths of the grey arrows on the left and summed them up, you would get a larger value than if you performed the same operation on the grey arrows on the right.
It may be useful to preview these augment()-ed data frames with glimpse():
glimpse(mod_null)
glimpse(mod_hgt)
# Compute SSE for null model
mod_null %>%
summarize(SSE = sum(.resid^2))
# Compute SSE for regression model
mod_hgt %>%
summarize(SSE = sum(.resid^2))
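Because the SSE of the null model is the total sum of squares, the two values above are enough to recover R^2 for the height model. A small follow-on sketch (assuming mod_null and mod_hgt are the augment()-ed data frames described above):
# R-squared as 1 - SSE(model)/SSE(null)
sse_null <- mod_null %>% summarize(SSE = sum(.resid^2)) %>% pull(SSE)
sse_hgt <- mod_hgt %>% summarize(SSE = sum(.resid^2)) %>% pull(SSE)
1 - sse_hgt / sse_null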
-----
# Leverage
The leverage of an observation in a regression model is defined entirely in terms of the distance of that observation from the mean of the explanatory variable. That is, observations close to the mean of the explanatory variable have low leverage, while observations far from the mean of the explanatory variable have high leverage. Points of high leverage may or may not be influential.
The augment() function from the broom package will add the leverage scores (.hat) to a model data frame.
# Rank points of high leverage
mod %>%
augment %>%
arrange(desc(.hat)) %>%
head()
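Equivalently (assuming mod is an lm object), base R exposes the same leverage scores without broom:
# Leverage (hat) values straight from the fitted model
head(sort(hatvalues(mod), decreasing = TRUE))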
-----
# Influence
As noted previously, observations of high leverage may or may not be influential. The influence of an observation depends not only on its leverage, but also on the magnitude of its residual. Recall that while leverage only takes into account the explanatory variable (x), the residual depends on the response variable (y) and the fitted value (\hat{y}).
Influential points are likely to have high leverage and deviate from the general relationship between the two variables. We measure influence using Cook's distance, which incorporates both the leverage and residual of each observation.
# Rank influential points
mod %>%
augment() %>%
arrange(desc(.cooksd)) %>%
head()
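The same measure is also available directly from base R (again assuming mod is an lm fit):
# Cook's distance for every observation
head(sort(cooks.distance(mod), decreasing = TRUE))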
-----
# Removing outliers
Observations can be outliers for a number of different reasons. Statisticians must always be careful—and more importantly, transparent—when dealing with outliers. Sometimes, a better model fit can be achieved by simply removing outliers and re-fitting the model. However, one must have strong justification for doing this. A desire to have a higher R^2 is not a good enough reason!
In the mlbBat10 data, the outlier with an OBP of 0.550 is Bobby Scales, an infielder who had four hits in 13 at-bats for the Chicago Cubs. Scales also walked seven times, resulting in his unusually high OBP. The justification for removing Scales here is weak. While his performance was unusual, there is nothing to suggest that it is not a valid data point, nor is there a good reason to think that somehow we will learn more about Major League Baseball players by excluding him.
Nevertheless, we can demonstrate how removing him will affect our model.
# Create nontrivial_players
nontrivial_players <- mlbBat10 %>%
filter(AB >= 10, OBP < 0.5)
# Fit model to new data
mod_cleaner <- lm(SLG ~ OBP, data = nontrivial_players)
# View model summary
summary(mod_cleaner)
# Visualize new model
ggplot(data = nontrivial_players, aes(x = OBP, y = SLG)) +
geom_point() +
geom_smooth(method = "lm")
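To see how much the removal actually matters, it can help to compare the fitted coefficients side by side (assuming mod is the model fit to the full mlbBat10 data in the earlier exercises):
# Slope and intercept with and without the outlier
coef(mod)
coef(mod_cleaner)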
-----
# High leverage points
Not all points of high leverage are influential. While the high leverage observation corresponding to Bobby Scales in the previous exercise is influential, the three observations for players with OBP and SLG values of 0 are not influential.
This is because they happen to lie right near the regression line anyway. Thus, while their extremely low OBP gives them the power to exert influence over the slope of the regression line, their low SLG prevents them from using it.
# Rank high leverage points
mod %>%
augment() %>%
arrange(desc(.hat), .cooksd) %>%
head()
-----
|
/1.R_Courses/Correlation_and_Regression_in_R/5.Model_fit.r
|
no_license
|
satyamchaturvedi/datacamp
|
R
| false | false | 6,900 |
r
|
# RMSE
The residual standard error reported for the regression model for poverty rate of U.S. counties in terms of high school graduation rate is 4.67. What does this mean?
# The typical difference between the observed poverty rate and the poverty rate predicted by the model is about 4.67 percentage points.
-----
# Standard error of residuals
One way to assess strength of fit is to consider how far off the model is for a typical case. That is, for some observations, the fitted value will be very close to the actual value, while for others it will not. The magnitude of a typical residual can give us a sense of generally how close our estimates are.
However, recall that some of the residuals are positive, while others are negative. In fact, it is guaranteed by the least squares fitting procedure that the mean of the residuals is zero. Thus, it makes more sense to compute the square root of the mean squared residual, or root mean squared error (RMSE). R calls this quantity the residual standard error.
To make this estimate unbiased, you have to divide the sum of the squared residuals by the degrees of freedom in the model. Thus,
RMSE = \sqrt{ \frac{\sum_i{e_i^2}}{d.f.} } = \sqrt{ \frac{SSE}{d.f.} }
You can recover the residuals from mod with residuals(), and the degrees of freedom with df.residual().
# View summary of model
summary(mod)
# Compute the mean of the residuals
mean(residuals(mod))
# Compute RMSE
sqrt(sum(residuals(mod)^2) / df.residual(mod))
-----
# Assessing simple linear model fit
Recall that the coefficient of determination (R^2), can be computed as
R^2 = 1 - \frac{SSE}{SST} = 1 - \frac{Var(e)}{Var(y)} \,,
where is the vector of residuals and is the response variable. This gives us the interpretation of R^2 as the percentage of the variability in the response that is explained by the model, since the residuals are the part of that variability that remains unexplained by the model.
# View model summary
summary(mod)
# Compute R-squared
bdims_tidy %>%
summarize(var_y = var(wgt), var_e = var(.resid)) %>%
mutate(R_squared = 1 - (var_e/var_y))
-----
# Interpretation of R^2
The R^2 reported for the regression model for poverty rate of U.S. counties in terms of high school graduation rate is 0.464.
lm(formula = poverty ~ hs_grad, data = countyComplete) %>%
summary()
How should this result be interpreted?
# 46.4% of the variability in poverty rate among U.S. counties can be explained by high school graduation rate.
-----
# Linear vs. average
The R^2 gives us a numerical measurement of the strength of fit relative to a null model based on the average of the response variable:
\hat{y}_{null} = \bar{y}
This model has an R^2 of zero because . That is, since the fitted values (\hat{y}_{null}) are all equal to the average (\bar{y}), the residual for each observation is the distance between that observation and the mean of the response. Since we can always fit the null model, it serves as a baseline against which all other models will be compared.
In the graphic, we visualize the residuals for the null model (mod_null at left) vs. the simple linear regression model (mod_hgt at right) with height as a single explanatory variable. Try to convince yourself that, if you squared the lengths of the grey arrows on the left and summed them up, you would get a larger value than if you performed the same operation on the grey arrows on the right.
It may be useful to preview these augment()-ed data frames with glimpse():
glimpse(mod_null)
glimpse(mod_hgt)
# Compute SSE for null model
mod_null %>%
summarize(SSE = sum(.resid^2))
# Compute SSE for regression model
mod_hgt %>%
summarize(SSE = sum(.resid^2))
-----
# Leverage
The leverage of an observation in a regression model is defined entirely in terms of the distance of that observation from the mean of the explanatory variable. That is, observations close to the mean of the explanatory variable have low leverage, while observations far from the mean of the explanatory variable have high leverage. Points of high leverage may or may not be influential.
The augment() function from the broom package will add the leverage scores (.hat) to a model data frame.
# Rank points of high leverage
mod %>%
augment %>%
arrange(desc(.hat)) %>%
head()
-----
# Influence
As noted previously, observations of high leverage may or may not be influential. The influence of an observation depends not only on its leverage, but also on the magnitude of its residual. Recall that while leverage only takes into account the explanatory variable (), the residual depends on the response variable () and the fitted value ().
Influential points are likely to have high leverage and deviate from the general relationship between the two variables. We measure influence using Cooks distance, which incorporates both the leverage and residual of each observation.
# Rank influential points
mod %>%
augment() %>%
arrange(desc(.cooksd)) %>%
head()
-----
# Removing outliers
Observations can be outliers for a number of different reasons. Statisticians must always be careful—and more importantly, transparent—when dealing with outliers. Sometimes, a better model fit can be achieved by simply removing outliers and re-fitting the model. However, one must have strong justification for doing this. A desire to have a higher R^2 is not a good enough reason!
In the mlbBat10 data, the outlier with an OBP of 0.550 is Bobby Scales, an infielder who had four hits in 13 at-bats for the Chicago Cubs. Scales also walked seven times, resulting in his unusually high OBP. The justification for removing Scales here is weak. While his performance was unusual, there is nothing to suggest that it is not a valid data point, nor is there a good reason to think that somehow we will learn more about Major League Baseball players by excluding him.
Nevertheless, we can demonstrate how removing him will affect our model.
# Create nontrivial_players
nontrivial_players <- mlbBat10 %>%
filter(AB >= 10, OBP < 0.5)
# Fit model to new data
mod_cleaner <- lm(SLG ~ OBP, data = nontrivial_players)
# View model summary
summary(mod_cleaner)
# Visualize new model
ggplot(data = nontrivial_players, aes(x = OBP, y = SLG)) +
geom_point() +
geom_smooth(method = "lm")
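To see how much the fit actually changes, we can compare the coefficients of the two models (assuming mod is the model fit before removing Scales):
# Compare coefficients before and after removing the outlier (sketch)
coef(mod)
coef(mod_cleaner)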
-----
# High leverage points
Not all points of high leverage are influential. While the high leverage observation corresponding to Bobby Scales in the previous exercise is influential, the three observations for players with OBP and SLG values of 0 are not influential.
This is because they happen to lie right near the regression line anyway. Thus, while their extremely low OBP gives them the power to exert influence over the slope of the regression line, their low SLG prevents them from using it.
# Rank high leverage points
mod %>%
augment() %>%
arrange(desc(.hat), .cooksd) %>%
head()
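We can confirm this numerically by inspecting the leverage and Cook's distance of just the zero-OBP observations (a sketch, assuming mod was fit with OBP as the explanatory variable and SLG as the response):
# High leverage but low influence (sketch)
mod %>%
  augment() %>%
  filter(OBP == 0) %>%
  select(OBP, SLG, .hat, .cooksd)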
-----
|
library(dada2); packageVersion("dada2")
# File parsing
filtpath <- "input/filtered"
filtFs <- list.files(filtpath, pattern="_F_filt.fastq.gz", full.names = TRUE)
filtRs <- list.files(filtpath, pattern="_R_filt.fastq.gz", full.names = TRUE)
sample.names <- sapply(strsplit(basename(filtFs), "_F_filt"), `[`, 1)
names(filtFs) <- sample.names
names(filtRs) <- sample.names
set.seed(100)
# Learn forward error rates
errF <- learnErrors(filtFs, nbases=1e8, multithread=TRUE)
# Learn reverse error rates
errR <- learnErrors(filtRs, nbases=1e8, multithread=TRUE)
# Sample inference and merger of paired-end reads
mergers <- vector("list", length(sample.names))
names(mergers) <- sample.names
for(sam in sample.names) {
cat("Processing:", sam, "\n")
derepF <- derepFastq(filtFs[[sam]])
ddF <- dada(derepF, err=errF, multithread=TRUE)
derepR <- derepFastq(filtRs[[sam]])
ddR <- dada(derepR, err=errR, multithread=TRUE)
merger <- mergePairs(ddF, derepF, ddR, derepR)
mergers[[sam]] <- merger
}
rm(derepF); rm(derepR)
# Construct sequence table and remove chimeras
seqtab <- makeSequenceTable(mergers)
saveRDS(seqtab, "input/seqtab.rds")
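# (Added note) Chimera removal is mentioned above but not performed in this script;
# if it is not handled in a downstream step, a typical call would be:
# seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE)
# saveRDS(seqtab.nochim, "input/seqtab_nochim.rds")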
# print statistics
getN <- function(x) sum(getUniques(x))
out <- cbind(sapply(mergers, getN))
colnames(out) <- c("merged")
write.table(out, "input/stats_merge.tsv", sep="\t", quote=F, col.names=NA)
|
/2_FilterError.R
|
permissive
|
aponsero/dada2_16Sprocessing
|
R
| false | false | 1,359 |
r
|
library(dada2); packageVersion("dada2")
# File parsing
filtpath <- "input/filtered"
filtFs <- list.files(filtpath, pattern="_F_filt.fastq.gz", full.names = TRUE)
filtRs <- list.files(filtpath, pattern="_R_filt.fastq.gz", full.names = TRUE)
sample.names <- sapply(strsplit(basename(filtFs), "_F_filt"), `[`, 1)
names(filtFs) <- sample.names
names(filtRs) <- sample.names
set.seed(100)
# Learn forward error rates
errF <- learnErrors(filtFs, nbases=1e8, multithread=TRUE)
# Learn reverse error rates
errR <- learnErrors(filtRs, nbases=1e8, multithread=TRUE)
# Sample inference and merger of paired-end reads
mergers <- vector("list", length(sample.names))
names(mergers) <- sample.names
for(sam in sample.names) {
cat("Processing:", sam, "\n")
derepF <- derepFastq(filtFs[[sam]])
ddF <- dada(derepF, err=errF, multithread=TRUE)
derepR <- derepFastq(filtRs[[sam]])
ddR <- dada(derepR, err=errR, multithread=TRUE)
merger <- mergePairs(ddF, derepF, ddR, derepR)
mergers[[sam]] <- merger
}
rm(derepF); rm(derepR)
# Construct sequence table and remove chimeras
seqtab <- makeSequenceTable(mergers)
saveRDS(seqtab, "input/seqtab.rds")
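# (Added note) Chimera removal is mentioned above but not performed in this script;
# if it is not handled in a downstream step, a typical call would be:
# seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE)
# saveRDS(seqtab.nochim, "input/seqtab_nochim.rds")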
# print statistics
getN <- function(x) sum(getUniques(x))
out <- cbind(sapply(mergers, getN))
colnames(out) <- c("merged")
write.table(out, "input/stats_merge.tsv", sep="\t", quote=F, col.names=NA)
|
library(foreign)
library(haven)
library(sandwich)
library(lmtest)
library(naniar)
library(dplyr)
library(cobalt)
library(boot)
library(stats)
library(plm)
# library(vcov)  # no CRAN package 'vcov'; the vcov() function comes from the stats package
library(multiwayvcov)
library(gdata)
library(car)
#Part 1
#Q1
dat <- read_dta("simulated0_2019.dta")
df<- data.frame(dat)
#a
cor(df$u0,df$group)
#b
A <- lm(y0~treat,data=df)
summary(A)
#c
coeftest(A,cluster.vcov(A,cluster=df$group))
#Q2
dat <- read_dta("simulated1_2019.dta")
df<- data.frame(dat)
#a
cor(df$u,df$group)
#b
A <- lm(y~treat,data=df)
summary(A)
#c
coeftest(A,cluster.vcov(A,cluster=df$group))
#d
#e
cov <- vector("list",1000)
i=1
B=1000
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-summary(reg)$coefficients[, 2]
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
cov_boot.fin
#f
B=1000
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
cov_boot.fin
#g
A <- data.frame(z1=double(),z2=double(),z3=double(),z4=double(),z5=double(),z6=double(),z7=double(),z8=double(),
stringsAsFactors=FALSE)
j=1
for(j in 1:100) {
B=5
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[1,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=10
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[2,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=15
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[3,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=20
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[4,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=30
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[5,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=50
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[6,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=80
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[7,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=100
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[8,j] <- cov_boot.fin
j=j+1
}
A<-t(A)
boxplot(A,names=c("B=5","B=10","B=15","B=20","B=30","B=50","B=80","B=100"))
#Part 2
#Q1
#a
#b
#Q2
#a
#b
#c
#Q3
dat <- read_dta("traffic_safety2_2019.dta")
df<- data.frame(dat)
#a
reg1 <- lm(fatalities~primary,data=df)
summary(reg1)
df$time <- df$year-1981
df$time2<-df$time*df$time
df$time3<-df$time2*df$time
df$time4<-df$time3*df$time
reg2 <- lm(fatalities~primary+time+time2+time3+time4,data=df)
summary(reg2)
reg3 <- lm(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df)
summary(reg3)
#b
robust1<-hccm(reg1)
robust2<-hccm(reg2)
robust3<-hccm(reg3)
#manual clustering function
clustering <- function(dat,fn,cluster){
attach(dat, warn.conflicts = F)
G <- length(unique(cluster))
N <- length(cluster)
K <- fn$rank
uj <- apply(estfun(fn),2, function(x) tapply(x, cluster, sum));
vcovCL <- sandwich(fn, meat=crossprod(uj)/N)
coeftest(fn, vcovCL) }
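# (Added note) This manual estimator applies no small-sample correction; a common
# adjustment (as in Stata-style cluster-robust SEs) scales the meat by
# G/(G-1) * (N-1)/(N-K). cluster.vcov() below applies a finite-sample correction by
# default, so the two sets of clustered standard errors can differ slightly.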
summary(reg1)
robust1
clustering(df,reg1,state)
coeftest(reg1,cluster.vcov(reg1,cluster=df$state))
summary(reg2)
robust2
clustering(df,reg2,state)
coeftest(reg2,cluster.vcov(reg2,cluster=df$state))
summary(reg3)
robust3
clustering(df,reg3,state)
coeftest(reg3,cluster.vcov(reg3,cluster=df$state))
#c
btwn1 <- plm(fatalities~primary,data=df,model="between")
btwn2 <- plm(fatalities~primary+time+time2+time3+time4,data=df,model="between")
btwn3 <- plm(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df,model="between")
summary(btwn1)
summary(btwn2)
summary(btwn3)
#d
re1 <- plm(fatalities~primary,data=df,model="random")
re2 <- plm(fatalities~primary+time+time2+time3+time4,data=df,model="random")
re3 <- plm(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df,model="random")
summary(re1)
summary(re2)
summary(re3)
#e
coeftest(re1,vcov.=vcovHC(re1,cluster="group"))
coeftest(re2,vcov.=vcovHC(re2,cluster="group"))
coeftest(re3,vcov.=vcovHC(re3,cluster="group"))
#f
fe <- plm(fatalities~primary+time4,data=df,model="within")
summary(fe)
coeftest(fe,vcov.=vcovHC(fe,cluster="group"))
#g
fe1 <- plm(fatalities~primary,data=df,model="within")
fe2 <- plm(fatalities~primary+time+time2+time3+time4,data=df,model="within")
fe3 <- plm(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df,model="within")
summary(fe1)
summary(fe2)
summary(fe3)
coeftest(fe1,vcov.=vcovHC(fe1,cluster="group"))
coeftest(fe2,vcov.=vcovHC(fe2,cluster="group"))
coeftest(fe3,vcov.=vcovHC(fe3,cluster="group"))
#appendix
plot(fatalities~primary,data=df)
plot(fatalities~primary+time+time2+time3+time4,data=df)
plot(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df)
|
/Problem Set 4 code.R
|
no_license
|
spangler-morgan/Econometrics-Coursework
|
R
| false | false | 9,946 |
r
|
library(foreign)
library(haven)
library(sandwich)
library(lmtest)
library(naniar)
library(dplyr)
library(cobalt)
library(boot)
library(stats)
library(plm)
library(vcov)
library(multiwayvcov)
library(gdata)
library(car)
#Part 1
#Q1
dat <- read_dta("simulated0_2019.dta")
df<- data.frame(dat)
#a
cor(df$u0,df$group)
#b
A <- lm(y0~treat,data=df)
summary(A)
#c
coeftest(A,cluster.vcov(A,cluster=df$group))
#Q2
dat <- read_dta("simulated1_2019.dta")
df<- data.frame(dat)
#a
cor(df$u,df$group)
#b
A <- lm(y~treat,data=df)
summary(A)
#c
coeftest(A,cluster.vcov(A,cluster=df$group))
#d
#e
cov <- vector("list",1000)
i=1
B=1000
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-summary(reg)$coefficients[, 2]
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
cov_boot.fin
#f
B=1000
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
cov_boot.fin
#g
A <- data.frame(z1=double(),z2=double(),z3=double(),z4=double(),z5=double(),z6=double(),z7=double(),z8=double(),
stringsAsFactors=FALSE)
j=1
for(j in 1:100) {
B=5
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[1,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=10
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[2,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=15
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[3,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=20
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[4,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=30
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[5,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=50
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[6,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=80
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[7,j] <- cov_boot.fin
j=j+1
}
j=1
for(j in 1:100) {
B=100
cov <- vector("list",B)
i=1
for(i in 1:B) {
y <- df$y
treat <- df$treat
y_re <-sample(y,replace=TRUE)
treat_re <- sample(treat,replace=TRUE)
iter <- cbind(y_re,treat_re)
iter <- as.data.frame(iter)
reg <- lm(y_re~treat_re,data=iter)
X<-coeftest(reg,cluster.vcov(reg,cluster=df$group))
cov[i]<-X[2]
i=i+1
}
cov_boot <- as.data.frame(unlist(cov))
cov_boot.mean<- mean(unlist(cov))
cov_boot$dif <- cov_boot-cov_boot.mean
cov_boot$difsq <- (cov_boot[2])^2
cov_boot.sum <- colSums(cov_boot[3])
cov_boot.fin <- cov_boot.sum/(B-1)
A[8,j] <- cov_boot.fin
j=j+1
}
A<-t(A)
boxplot(A,names=c("B=5","B=10","B=15","B=20","B=30","B=50","B=80","B=100"))
#Part 2
#Q1
#a
#b
#Q2
#a
#b
#c
#Q3
dat <- read_dta("traffic_safety2_2019.dta")
df<- data.frame(dat)
#a
reg1 <- lm(fatalities~primary,data=df)
summary(reg1)
df$time <- df$year-1981
df$time2<-df$time*df$time
df$time3<-df$time2*df$time
df$time4<-df$time3*df$time
reg2 <- lm(fatalities~primary+time+time2+time3+time4,data=df)
summary(reg2)
reg3 <- lm(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df)
summary(reg3)
#b
robust1<-hccm(reg1)
robust2<-hccm(reg2)
robust3<-hccm(reg3)
#manual clustering function
clustering <- function(dat,fn,cluster){
attach(dat, warn.conflicts = F)
G <- length(unique(cluster))
N <- length(cluster)
K <- fn$rank
uj <- apply(estfun(fn),2, function(x) tapply(x, cluster, sum));
vcovCL <- sandwich(fn, meat=crossprod(uj)/N)
coeftest(fn, vcovCL) }
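# (Added note) This manual estimator applies no small-sample correction; a common
# adjustment (as in Stata-style cluster-robust SEs) scales the meat by
# G/(G-1) * (N-1)/(N-K). cluster.vcov() below applies a finite-sample correction by
# default, so the two sets of clustered standard errors can differ slightly.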
summary(reg1)
robust1
clustering(df,reg1,state)
coeftest(reg1,cluster.vcov(reg1,cluster=df$state))
summary(reg2)
robust2
clustering(df,reg2,state)
coeftest(reg2,cluster.vcov(reg2,cluster=df$state))
summary(reg3)
robust3
clustering(df,reg3,state)
coeftest(reg3,cluster.vcov(reg3,cluster=df$state))
#c
btwn1 <- plm(fatalities~primary,data=df,model="between")
btwn2 <- plm(fatalities~primary+time+time2+time3+time4,data=df,model="between")
btwn3 <- plm(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df,model="between")
summary(btwn1)
summary(btwn2)
summary(btwn3)
#d
re1 <- plm(fatalities~primary,data=df,model="random")
re2 <- plm(fatalities~primary+time+time2+time3+time4,data=df,model="random")
re3 <- plm(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df,model="random")
summary(re1)
summary(re2)
summary(re3)
#e
coeftest(re1,vcov.=vcovHC(re1,cluster="group"))
coeftest(re2,vcov.=vcovHC(re2,cluster="group"))
coeftest(re3,vcov.=vcovHC(re3,cluster="group"))
#f
fe <- plm(fatalities~primary+time4,data=df,model="within")
summary(fe)
coeftest(fe,vcov.=vcovHC(fe,cluster="group"))
#g
fe1 <- plm(fatalities~primary,data=df,model="within")
fe2 <- plm(fatalities~primary+time+time2+time3+time4,data=df,model="within")
fe3 <- plm(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df,model="within")
summary(fe1)
summary(fe2)
summary(fe3)
coeftest(fe1,vcov.=vcovHC(fe1,cluster="group"))
coeftest(fe2,vcov.=vcovHC(fe2,cluster="group"))
coeftest(fe3,vcov.=vcovHC(fe3,cluster="group"))
#appendix
plot(fatalities~primary,data=df)
plot(fatalities~primary+time+time2+time3+time4,data=df)
plot(fatalities~primary+time+time2+time3+time4+college+beer+precip+snow32+secondary+rural_speed+urban_speed+secondary+totalvmt,data=df)
|
library(dplyr)
library(caret)
# Read in results CSV
result <- read.csv("C:/Maxwell_Data/archive/chips2/test_result4.csv")
# Set reference and predicted columns to factors
result$class <- as.factor(result$class)
result$predicted <- as.factor(result$predicted)
# Use caret to create confusion matrix
cm <- confusionMatrix(data=result$predicted, reference=result$class, mode="everything")
# Print confusion matrix
cm
|
/PyTorch_SAT6/sat_6_conf_matrix.R
|
no_license
|
carlos-alberto-silva/wvview_geodl_examples
|
R
| false | false | 418 |
r
|
library(dplyr)
library(caret)
# Read in results CSV
result <- read.csv("C:/Maxwell_Data/archive/chips2/test_result4.csv")
# Set reference and predicted columns to factors
result$class <- as.factor(result$class)
result$predicted <- as.factor(result$predicted)
# Use caret to create confusion matrix
cm <- confusionMatrix(data=result$predicted, reference=result$class, mode="everything")
# Print confusion matrix
cm
|
library(Seurat)
library(viridis)
library(scales)
library(Matrix)
library(ComplexHeatmap)
library(BuenColors)
library(dplyr)
library(data.table)
"%ni%" <- Negate("%in%")
# Import ATAC processed data
mdf <- readRDS("../output/ArchR_main_metadata.rds")
mdf$barcode <- gsub("ASAP_marrow_hg38#", "", rownames(mdf))
# Look at the ADT data
adt_ss <- readRDS("../output/adt_mat/ASAP_bonemarrow_matrix.rds")[,mdf$barcode]
adt_simple_norm <- data.matrix(t(t(adt_ss)/colSums(adt_ss))*1000)
adtbm <- CreateSeuratObject(counts = adt_ss, assay = "ADT")
adtbm <- NormalizeData(adtbm, assay = "ADT", normalization.method = "CLR")
adtbm <- ScaleData(adtbm, assay="ADT")
mat <- data.matrix(adtbm@assays$ADT@scale.data)
# Compute mean to filter
m_ps <- mdf %>% dplyr::filter(!is.na(monocyte_PS)) %>% pull(barcode)
ps_mean_adt_count <- rowMeans(data.matrix(adt_simple_norm)[,m_ps])
names(ps_mean_adt_count) <- rownames(adt_simple_norm)
other_mean_adt_count <- rowMeans(data.matrix(adt_simple_norm)[,colnames(adt_simple_norm) %ni% m_ps])
names(other_mean_adt_count) <- rownames(adt_simple_norm)
# Process the gene scores and pseudotime axis
dat <- fread("../../multiple_datasets/data/marker_gene_mapping.tsv")
gs_mat <- readRDS("../../../asap_large_data_files/bonemarrow_data/output/signac_marrow_gene_scores.rds")
# Make a master data frame
master_df <- data.frame(
dat,
adt.idx = match(dat$Marker_name,rownames(mat)),
gs.idx = match(dat$Gene_symbol, rownames(gs_mat))
)
master_df$mean_adt <- ps_mean_adt_count[master_df$Marker_name]
master_df$mean_adt_other_cells <- other_mean_adt_count[master_df$Marker_name]
master_df <- master_df[complete.cases(master_df),] %>% mutate(ratio = mean_adt/mean_adt_other_cells)
# Order the cells and the gene / tags
ordered_cells <- mdf[!is.na(mdf$monocyte_PS),] %>% arrange((monocyte_PS))
gs_mat_ordered <- gs_mat[,ordered_cells$barcode]
adt_mat_ordered <- mat[,ordered_cells$barcode]
# Now group/order/smooth the cell states and make ArchR's nice heatmaps
# see: https://github.com/GreenleafLab/ArchR/blob/5f855d7b7ff3f57eb0d28f312c5ea5d373d27ebd/R/Trajectory.R
n_bins <- 100
groups_gs <- sapply(1:n_bins, function(idx){
multiplier <- 100/n_bins
bcs <- ordered_cells %>% dplyr::filter(monocyte_PS >= (idx-1)*multiplier & monocyte_PS < (idx*multiplier)) %>% pull(barcode)
print(length(bcs))
rs <- rowSums(gs_mat_ordered[,bcs, drop = FALSE], na.rm = TRUE)
log2(rs/sum(rs) *10000 + 1) # basic normalization
})
groups_adt <- sapply(1:n_bins, function(idx){
multiplier <- 100/n_bins
bcs <- ordered_cells %>% dplyr::filter(monocyte_PS >= (idx-1)*multiplier & monocyte_PS < idx*multiplier) %>% pull(barcode)
rowMeans(adt_mat_ordered[,bcs], na.rm = TRUE)
})
# Filter based on indices
groups_gs <- groups_gs[master_df$gs.idx,]
groups_adt <- groups_adt[master_df$adt.idx,]
smoothWindow = 11
smooth_groups_adt <- data.matrix((apply((groups_adt), 1, function(x) ArchR:::.centerRollMean((x), k = smoothWindow))))
smooth_groups_gs <- data.matrix((apply((groups_gs), 1, function(x) ArchR:::.centerRollMean((x), k = smoothWindow))))
smooth_groups_minmax_adt <- t(apply(smooth_groups_adt, 2, function(x)(x-min(x))/(max(x)-min(x))))
smooth_groups_minmax_gs <- t(apply(smooth_groups_gs, 2, function(x)(x-min(x))/(max(x)-min(x))))
time_df_50 <- data.frame(
gs_50 = max.col(smooth_groups_minmax_gs >=0.5, ties.method = "first"),
adt_50 = max.col(smooth_groups_minmax_adt >=0.5, ties.method = "first"),
adt_max = max.col(smooth_groups_minmax_adt),
gs_max = max.col(smooth_groups_minmax_gs),
master_df
) %>% mutate(adt_after = adt_50 >= gs_50)
time_df_50 %>%
filter(mean_adt > 1 & ratio > 1) %>%
filter(adt_max > 25) %>%
pull(adt_after) %>% table()
time_df_50 %>%
filter(mean_adt > 1 & ratio > 1) %>%
  filter(adt_max > 25)  # dangling pipe removed so this pipeline prints the filtered rows
pS <- time_df_50 %>%
filter(mean_adt > 1 & ratio > 1) %>%
filter(adt_max > 25) %>%
ggplot(aes(x = gs_50, y = adt_50, color = gs_50 >= adt_50)) + geom_point(size = 0.5) +
scale_y_continuous(limits = c(0, 100)) +
scale_x_continuous(limits = c(0, 100)) +
pretty_plot(fontsize = 8) + L_border() +
geom_abline(intercept = 0, slope = 1, linetype = 2) +
theme(legend.position = "none") +
labs(x = "Pseudotime of 50% max gene score", y = "Pseudotime of 50% max protein expression") +
scale_color_manual(values =c ("dodgerblue3", "firebrick"))
cowplot::ggsave2(pS, file = "../plots/adt_chromatin_timing_max50.pdf", width = 1.8, height = 1.8)
|
/bonemarow_asapseq/code/05a_monocyte_Aviv_tweak.R
|
no_license
|
liuxiaoping2020/asap_reproducibility
|
R
| false | false | 4,441 |
r
|
library(Seurat)
library(viridis)
library(scales)
library(Matrix)
library(ComplexHeatmap)
library(BuenColors)
library(dplyr)
library(data.table)
"%ni%" <- Negate("%in%")
# Import ATAC processed data
mdf <- readRDS("../output/ArchR_main_metadata.rds")
mdf$barcode <- gsub("ASAP_marrow_hg38#", "", rownames(mdf))
# Look at the ADT data
adt_ss <- readRDS("../output/adt_mat/ASAP_bonemarrow_matrix.rds")[,mdf$barcode]
adt_simple_norm <- data.matrix(t(t(adt_ss)/colSums(adt_ss))*1000)
adtbm <- CreateSeuratObject(counts = adt_ss, assay = "ADT")
adtbm <- NormalizeData(adtbm, assay = "ADT", normalization.method = "CLR")
adtbm <- ScaleData(adtbm, assay="ADT")
mat <- data.matrix(adtbm@assays$ADT@scale.data)
# Compute mean to filter
m_ps <- mdf %>% dplyr::filter(!is.na(monocyte_PS)) %>% pull(barcode)
ps_mean_adt_count <- rowMeans(data.matrix(adt_simple_norm)[,m_ps])
names(ps_mean_adt_count) <- rownames(adt_simple_norm)
other_mean_adt_count <- rowMeans(data.matrix(adt_simple_norm)[,colnames(adt_simple_norm) %ni% m_ps])
names(other_mean_adt_count) <- rownames(adt_simple_norm)
# Process the gene scores and pseudotime axis
dat <- fread("../../multiple_datasets/data/marker_gene_mapping.tsv")
gs_mat <- readRDS("../../../asap_large_data_files/bonemarrow_data/output/signac_marrow_gene_scores.rds")
# Make a master data frame
master_df <- data.frame(
dat,
adt.idx = match(dat$Marker_name,rownames(mat)),
gs.idx = match(dat$Gene_symbol, rownames(gs_mat))
)
master_df$mean_adt <- ps_mean_adt_count[master_df$Marker_name]
master_df$mean_adt_other_cells <- other_mean_adt_count[master_df$Marker_name]
master_df <- master_df[complete.cases(master_df),] %>% mutate(ratio = mean_adt/mean_adt_other_cells)
# Order the cells and the gene / tags
ordered_cells <- mdf[!is.na(mdf$monocyte_PS),] %>% arrange((monocyte_PS))
gs_mat_ordered <- gs_mat[,ordered_cells$barcode]
adt_mat_ordered <- mat[,ordered_cells$barcode]
# Now group/order/smooth the cell states and make ArchR's nice heatmaps
# see: https://github.com/GreenleafLab/ArchR/blob/5f855d7b7ff3f57eb0d28f312c5ea5d373d27ebd/R/Trajectory.R
n_bins <- 100
groups_gs <- sapply(1:n_bins, function(idx){
multiplier <- 100/n_bins
bcs <- ordered_cells %>% dplyr::filter(monocyte_PS >= (idx-1)*multiplier & monocyte_PS < (idx*multiplier)) %>% pull(barcode)
print(length(bcs))
rs <- rowSums(gs_mat_ordered[,bcs, drop = FALSE], na.rm = TRUE)
log2(rs/sum(rs) *10000 + 1) # basic normalization
})
groups_adt <- sapply(1:n_bins, function(idx){
multiplier <- 100/n_bins
bcs <- ordered_cells %>% dplyr::filter(monocyte_PS >= (idx-1)*multiplier & monocyte_PS < idx*multiplier) %>% pull(barcode)
rowMeans(adt_mat_ordered[,bcs], na.rm = TRUE)
})
# Filter based on indices
groups_gs <- groups_gs[master_df$gs.idx,]
groups_adt <- groups_adt[master_df$adt.idx,]
smoothWindow = 11
smooth_groups_adt <- data.matrix((apply((groups_adt), 1, function(x) ArchR:::.centerRollMean((x), k = smoothWindow))))
smooth_groups_gs <- data.matrix((apply((groups_gs), 1, function(x) ArchR:::.centerRollMean((x), k = smoothWindow))))
smooth_groups_minmax_adt <- t(apply(smooth_groups_adt, 2, function(x)(x-min(x))/(max(x)-min(x))))
smooth_groups_minmax_gs <- t(apply(smooth_groups_gs, 2, function(x)(x-min(x))/(max(x)-min(x))))
time_df_50 <- data.frame(
gs_50 = max.col(smooth_groups_minmax_gs >=0.5, ties.method = "first"),
adt_50 = max.col(smooth_groups_minmax_adt >=0.5, ties.method = "first"),
adt_max = max.col(smooth_groups_minmax_adt),
gs_max = max.col(smooth_groups_minmax_gs),
master_df
) %>% mutate(adt_after = adt_50 >= gs_50)
time_df_50 %>%
filter(mean_adt > 1 & ratio > 1) %>%
filter(adt_max > 25) %>%
pull(adt_after) %>% table()
time_df_50 %>%
filter(mean_adt > 1 & ratio > 1) %>%
  filter(adt_max > 25)  # dangling pipe removed so this pipeline prints the filtered rows
pS <- time_df_50 %>%
filter(mean_adt > 1 & ratio > 1) %>%
filter(adt_max > 25) %>%
ggplot(aes(x = gs_50, y = adt_50, color = gs_50 >= adt_50)) + geom_point(size = 0.5) +
scale_y_continuous(limits = c(0, 100)) +
scale_x_continuous(limits = c(0, 100)) +
pretty_plot(fontsize = 8) + L_border() +
geom_abline(intercept = 0, slope = 1, linetype = 2) +
theme(legend.position = "none") +
labs(x = "Pseudotime of 50% max gene score", y = "Pseudotime of 50% max protein expression") +
scale_color_manual(values =c ("dodgerblue3", "firebrick"))
cowplot::ggsave2(pS, file = "../plots/adt_chromatin_timing_max50.pdf", width = 1.8, height = 1.8)
|
# HW1: data frames and tibbles
#
# 1. Convert the dataset `mtcars` to a tibble `t1`.
# 2. Create a tibble `t2` which contains all columns of `t1` with a miles per gallon higher than 18.
# 3. Create a tibble `t3` that contains all the values of `t2` truncated to the corresponding integer.
# 4. Copy `t3` into a tibble `t4` and merge its two columns 'vs' and 'am' by summing them, rename it 'vsam' and make sure it is `t4`'s first column.
## Do not modify this line! ## Write your code for 1. after this line! ##
library(tibble)
t1 <- as_tibble(mtcars)
## Do not modify this line! ## Write your code for 2. after this line! ##
t2 <- t1[t1$mpg > 18, ]
## Do not modify this line! ## Write your code for 3. after this line! ##
t3 <- floor(t2)
## Do not modify this line! ## Write your code for 4. after this line! ##
t4 <- t3
t4 <- add_column(t4, vsam = t3$vs + t3$am, .before = 1)
|
/HW1/hw1_8_data_frames_and_tibbles.R
|
no_license
|
QihangYang/Computational-Statistics
|
R
| false | false | 879 |
r
|
# HW1: data frames and tibbles
#
# 1. Convert the dataset `mtcars` to a tibble `t1`.
# 2. Create a tibble `t2` which contains all columns of `t1` with a miles per gallon higher than 18.
# 3. Create a tibble `t3` that contains all the values of `t2` truncated to the corresponding integer.
# 4. Copy `t3` into a tibble `t4` and merge its two columns 'vs' and 'am' by summing them, rename it 'vsam' and make sure it is `t4`'s first column.
## Do not modify this line! ## Write your code for 1. after this line! ##
library(tibble)
t1 <- as_tibble(mtcars)
## Do not modify this line! ## Write your code for 2. after this line! ##
t2 <- t1[t1$mpg > 18, ]
## Do not modify this line! ## Write your code for 3. after this line! ##
t3 <- floor(t2)
## Do not modify this line! ## Write your code for 4. after this line! ##
t4 <- t3
t4 <- add_column(t4, vsam = t3$vs + t3$am, .before = 1)
|
#' Dynamic Harmonic Regression for Time Series with Cross-Validation
#'
#' This function fits a dynamic harmonic regression model to time series data
#' and evaluates predictive performance using cross-validation.
#'
#' @param y A time series object (ts or msts).
#' @param h The number of periods for forecasting. If multiple values are provided, a rolling forecast origin is utilized.
#' @param lags Lags of the series. If provided, these will be added as regressors in the model.
#' @param x A data frame of external regressors. The number of rows must match the length of y.
#' @param x_include A vector of column names of x to include as regressors in the model.
#' By default, all columns of x are included as regressors in the model.
#' @param x_include_lags A vector of column names of x where lags of these columns are added as regressors in the model.
#' By default, only lags of the series are added as regressors when the lag argument is utilized.
#' This provides the option to also include lags of the external regressors.
#' @param x_include_dummy A vector of column names of x where these columns are transformed to a set of dummy regressors.
#' @param order See order in \link[forecast]{Arima}
#' @param s_order See seasonal in \link[forecast]{Arima}
#' @param s_period The seasonal frequency.
#' @param fourier_k The number of fourier terms to add as regressors.
#' This vector must have the same length as the number of seasonal periods of y,
#' and each value must not exceed half of the size of the seasonal period.
#' @param auto_season_dummy If true, dummy variables for the seasonal periods are added as regressors.
#' @param drift See include.drift in \link[forecast]{Arima}
#' @param constant See include.constant in \link[forecast]{Arima}
#' @param mean See include.mean in \link[forecast]{Arima}
#' @param roll_update Option to update the model when the forecast origin is rolled forward.
#' @param seed Option to specify the seed.
#' @param verbose Option to print info to console.
#' @param out_tbl_width Option to set the info table width when verbose = T.
#'
#' @return A list. See the following details: \cr
#' y - the original time series. \cr
#' x_train - the subset of regressors used in training. \cr
#' y_train - the subset of y used in training. \cr
#' x_test - the subset of regressors used in testing. \cr
#' y_test - the subset of y used in testing. \cr
#' y_fitted - the fitted values (from the training stage). \cr
#' y_hat - the predicted values (from the testing stage). \cr
#' train_rmse - the training root mean squared error. \cr
#' train_mape - the training mean absolute percentage error. \cr
#' test_rmse - the testing (cross-validated) root mean squared error. \cr
#' test_mape - the testing (cross-validated) mean absolute percentage error. \cr
#' mod - the final model (an Arima object). \cr
#' rt - the total runtime. \cr
#'
#' @export
#'
dhr_cv <- function(y, h, lags = NULL, x = NULL, x_include = names(x),
x_include_lags = NULL, x_include_dummy = NULL, order = c(0, 0, 0),
s_order = c(0, 0, 0), s_period = 1, fourier_k = NULL,
auto_season_dummy = F, drift = T, constant = T, mean = T,
roll_update = F, method = "ML",
seed = 1, verbose = F, out_tbl_width = 140) {
st <- Sys.time()
# starting message
if (verbose) {
pander::pander("\n")
pander::pander(paste0("Fitting DHR with ARIMA(", order[1], ", ",
order[2], ", ", order[3], ") errors \n"))
}
# empty vectors
y_hat <- numeric()
# initial data
data <- data.frame(y = as.numeric(y))
# add x if not null
if (!is.null(x)) {
if (nrow(x) != length(y)) {
stop(paste0("nrow(x) = ", nrow(x), " must match length(y) = ", length(y)))
}
# add variables
data <- dplyr::bind_cols(data, dplyr::select(x, c(x_include,
x_include_dummy,
x_include_lags)))
# add dummy variables
if (!is.null(x_include_dummy)) {
data <- create_dummies(data, x_include_dummy)
}
# add x lags
if (any(lags > 0) & !is.null(x_include_lags)) {
data <- create_lags(data, lags, x_include_lags)
# remove lag only predictors
x_rm <- x_include_lags[!x_include_lags %in% x_include]
data <- dplyr::select(data, -x_rm)
}
}
# seasonal periods from msts
periods <- attr(y, "msts")
# seasonal periods from ts
if (is.null(periods)) {
periods <- stats::frequency(y)
}
# add seasonal dummy predictors
if (auto_season_dummy) {
# seasonal factor variables
for (i in 1:length(periods)) {
data[[paste0("sp_", periods[i], "_")]] <- rep(1:periods[i],
length.out = nrow(data))
}
# seasonal dummy variables
dummy_vars <- names(dplyr::select(data, dplyr::starts_with("sp_")))
data <- create_dummies(data, dummy_vars)
}
# add fourier predictors
if (!is.null(fourier_k)) {
fourier_data <- data.frame(forecast::fourier(y, K = fourier_k))
data <- dplyr::bind_cols(data, fourier_data)
}
# add lag response predictors
if (any(lags > 0)) {
data <- create_lags(data, lags, "y")
}
# remove NA's
if (!is.null(lags)) {
data <- data[-c(1:max(lags)), ]
}
# n_train
n_train <- nrow(data) - sum(h)
# check train length
if (n_train <= 30) {
warning(paste0("Small training set: ", n_train, (" obs.")))
}
# empty mod
mod <- NULL
# rolling forecasts
for (i in 1:(length(h))) {
if (verbose) {
pander::pander(paste0("Forecasting time ", n_train + 1, " to ", n_train + h[i], "\n"))
}
# train data
x_train <- as.matrix(data[1:n_train, !names(data) %in% c("y"), drop = F])
# train response
y_train <- ts(as.numeric(data[1:n_train, "y"]), frequency = s_period)
# test data
x_test <- as.matrix(data[(1 + n_train):(n_train + h[i]),
!names(data) %in% c("y"), drop = F])
# model
if (is.null(mod)) {
if (ncol(x_train) == 0) {
mod <- forecast::Arima(y_train, order = order, seasonal = s_order,
include.mean = mean, include.drift = drift,
include.constant = constant, method = method)
y_hat <- c(y_hat, unname(forecast::forecast(mod, h = h[i])$mean))
} else {
mod <- forecast::Arima(y_train, order = order, xreg = x_train, seasonal = s_order,
include.mean = mean, include.drift = drift,
include.constant = constant, method = method)
y_hat <- c(y_hat, unname(forecast::forecast(mod, h = h[i], xreg = x_test)$mean))
}
} else {
if (ncol(x_train) == 0) {
mod <- forecast::Arima(y_train, order = order, seasonal = s_order,
include.mean = mean, include.drift = drift,
include.constant = constant, method = method,
model = if (roll_update) {NULL} else {mod})
y_hat <- c(y_hat, unname(forecast::forecast(mod, h = h[i])$mean))
} else {
mod <- forecast::Arima(y_train, order = order, xreg = x_train, seasonal = s_order,
include.mean = mean, include.drift = drift,
include.constant = constant, method = method,
model = if (roll_update) {NULL} else {mod})
y_hat <- c(y_hat, unname(forecast::forecast(mod, h = h[i], xreg = x_test)$mean))
}
}
# update training length
n_train <- n_train + h[i]
}
# observed response (test)
y_test <- tail(data$y, sum(h))
y_train <- head(data$y, nrow(data) - sum(h))
y_fitted <- mod$fitted[1:length(y_train)]
# train metrics
train_rmse <- sqrt(mean((y_train - y_fitted)^2))
train_mape <- mean(abs((y_train - y_fitted) / y_train)) * 100
# test metrics
test_rmse <- sqrt(mean((y_test - y_hat)^2))
test_mape <- mean(abs((y_test - y_hat) / y_test)) * 100
# print
if (verbose) {
pander::pander(paste("Mean of Test Series:", round(mean(y_test), 6), "\n"))
pander::pander(paste("Test RMSE:", round(test_rmse, 6), "\n"))
if (all(y_test != 0)) {
pander::pander(paste("Test MAPE:", round(test_mape, 6), "%\n"))
}
}
# runtime
rt <- difftime(Sys.time(), st, units = "mins")
# output list
list(y = y, x_train = x_train, y_train = y_train, x_test = x_test, y_test = y_test,
y_fitted = y_fitted, y_hat = y_hat,
train_rmse = train_rmse, train_mape = train_mape,
test_rmse = test_rmse, test_mape = test_mape,
mod = mod, rt = rt)
}
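# Example (added sketch, not from the original package docs): a monthly series with a
# 12-step holdout, ARIMA(1,0,0) errors, and 2 Fourier pairs; 'AirPassengers' is used
# purely for illustration and is an assumption, not part of this package.
# fit <- dhr_cv(y = AirPassengers, h = 12, order = c(1, 0, 0),
#               s_period = 12, fourier_k = 2, verbose = TRUE)
# fit$test_rmse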
|
/R/dhr_cv.R
|
no_license
|
ebrist/mlts
|
R
| false | false | 8,786 |
r
|
#' Dynamic Harmonic Regression for Time Series with Cross-Validation
#'
#' This function fits a dynamic harmonic regression model to time series data
#' and evaluates predictive performance using cross-validation.
#'
#' @param y A time series object (ts or msts).
#' @param h The number of periods for forecasting. If multiple values are provided, a rolling forecast origin is utilized.
#' @param lags Lags of the series. If provided, these will be added as regressors in the model.
#' @param x A data frame of external regressors. The number of rows must match the length of y.
#' @param x_include A vector of column names of x to include as regressors in the model.
#' By default, all columns of x are included as regressors in the model.
#' @param x_include_lags A vector of column names of x where lags of these columns are added as regressors in the model.
#' By default, only lags of the series are added as regressors when the lag argument is utilized.
#' This provides the option to also include lags of the external regressors.
#' @param x_include_dummy A vector of column names of x where these columns are transformed to a set of dummy regressors.
#' @param order See order in \link[forecast]{Arima}
#' @param s_order See seasonal in \link[forecast]{Arima}
#' @param s_period The seasonal frequency.
#' @param fourier_k The number of fourier terms to add as regressors.
#' This vector must have the same length as the number of seasonal periods of y,
#' and each value must not exceed half of the size of the seasonal period.
#' @param auto_season_dummy If true, dummy variables for the seasonal periods are added as regressors.
#' @param drift See include.drift in \link[forecast]{Arima}
#' @param constant See include.constant in \link[forecast]{Arima}
#' @param mean See include.mean in \link[forecast]{Arima}
#' @param roll_update Option to update the model when the forecast origin is rolled forward.
#' @param seed Option to specify the seed.
#' @param verbose Option to print info to console.
#' @param out_tbl_width Option to set the info table width when verbose = T.
#'
#' @return A list. See the following details: \cr
#' y - the original time series. \cr
#' x_train - the subset of regressors used in training. \cr
#' y_train - the subset of y used in training. \cr
#' x_test - the subset of regressors used in testing. \cr
#' y_test - the subset of y used in testing. \cr
#' y_fitted - the fitted values (from the training stage). \cr
#' y_hat - the predicted values (from the testing stage). \cr
#' train_rmse - the training root mean squared error. \cr
#' train_mape - the training mean absolute percentage error. \cr
#' test_rmse - the testing (cross-validated) root mean squared error. \cr
#' test_mape - the testing (cross-validated) mean absolute percentage error. \cr
#' mod - the final model (an Arima object). \cr
#' rt - the total runtime. \cr
#'
#' @export
#'
dhr_cv <- function(y, h, lags = NULL, x = NULL, x_include = names(x),
x_include_lags = NULL, x_include_dummy = NULL, order = c(0, 0, 0),
s_order = c(0, 0, 0), s_period = 1, fourier_k = NULL,
auto_season_dummy = F, drift = T, constant = T, mean = T,
roll_update = F, method = "ML",
seed = 1, verbose = F, out_tbl_width = 140) {
st <- Sys.time()
# starting message
if (verbose) {
pander::pander("\n")
pander::pander(paste0("Fitting DHR with ARIMA(", order[1], ", ",
order[2], ", ", order[3], ") errors \n"))
}
# empty vectors
y_hat <- numeric()
# initial data
data <- data.frame(y = as.numeric(y))
# add x if not null
if (!is.null(x)) {
if (nrow(x) != length(y)) {
stop(paste0("nrow(x) = ", nrow(x), " must match length(y) = ", length(y)))
}
# add variables
data <- dplyr::bind_cols(data, dplyr::select(x, c(x_include,
x_include_dummy,
x_include_lags)))
# add dummy variables
if (!is.null(x_include_dummy)) {
data <- create_dummies(data, x_include_dummy)
}
# add x lags
if (any(lags > 0) & !is.null(x_include_lags)) {
data <- create_lags(data, lags, x_include_lags)
# remove lag only predictors
x_rm <- x_include_lags[!x_include_lags %in% x_include]
data <- dplyr::select(data, -x_rm)
}
}
# seasonal periods from msts
periods <- attr(y, "msts")
# seasonal periods from ts
if (is.null(periods)) {
periods <- stats::frequency(y)
}
# add seasonal dummy predictors
if (auto_season_dummy) {
# seasonal factor variables
for (i in 1:length(periods)) {
data[[paste0("sp_", periods[i], "_")]] <- rep(1:periods[i],
length.out = nrow(data))
}
# seasonal dummy variables
dummy_vars <- names(dplyr::select(data, dplyr::starts_with("sp_")))
data <- create_dummies(data, dummy_vars)
}
# add fourier predictors
if (!is.null(fourier_k)) {
fourier_data <- data.frame(forecast::fourier(y, K = fourier_k))
data <- dplyr::bind_cols(data, fourier_data)
}
# add lag response predictors
if (any(lags > 0)) {
data <- create_lags(data, lags, "y")
}
# remove NA's
if (!is.null(lags)) {
data <- data[-c(1:max(lags)), ]
}
# n_train
n_train <- nrow(data) - sum(h)
# check train length
if (n_train <= 30) {
warning(paste0("Small training set: ", n_train, (" obs.")))
}
# empty mod
mod <- NULL
# rolling forecasts
for (i in 1:(length(h))) {
if (verbose) {
pander::pander(paste0("Forecasting time ", n_train + 1, " to ", n_train + h[i], "\n"))
}
# train data
x_train <- as.matrix(data[1:n_train, !names(data) %in% c("y"), drop = F])
# train response
y_train <- ts(as.numeric(data[1:n_train, "y"]), frequency = s_period)
# test data
x_test <- as.matrix(data[(1 + n_train):(n_train + h[i]),
!names(data) %in% c("y"), drop = F])
# model
if (is.null(mod)) {
if (ncol(x_train) == 0) {
mod <- forecast::Arima(y_train, order = order, seasonal = s_order,
include.mean = mean, include.drift = drift,
include.constant = constant, method = method)
y_hat <- c(y_hat, unname(forecast::forecast(mod, h = h[i])$mean))
} else {
mod <- forecast::Arima(y_train, order = order, xreg = x_train, seasonal = s_order,
include.mean = mean, include.drift = drift,
include.constant = constant, method = method)
y_hat <- c(y_hat, unname(forecast::forecast(mod, h = h[i], xreg = x_test)$mean))
}
} else {
if (ncol(x_train) == 0) {
mod <- forecast::Arima(y_train, order = order, seasonal = s_order,
include.mean = mean, include.drift = drift,
include.constant = constant, method = method,
model = if (roll_update) {NULL} else {mod})
y_hat <- c(y_hat, unname(forecast::forecast(mod, h = h[i])$mean))
} else {
mod <- forecast::Arima(y_train, order = order, xreg = x_train, seasonal = s_order,
include.mean = mean, include.drift = drift,
include.constant = constant, method = method,
model = if (roll_update) {NULL} else {mod})
y_hat <- c(y_hat, unname(forecast::forecast(mod, h = h[i], xreg = x_test)$mean))
}
}
# update training length
n_train <- n_train + h[i]
}
# observed response (test)
y_test <- tail(data$y, sum(h))
y_train <- head(data$y, nrow(data) - sum(h))
y_fitted <- mod$fitted[1:length(y_train)]
# train metrics
train_rmse <- sqrt(mean((y_train - y_fitted)^2))
train_mape <- mean(abs((y_train - y_fitted) / y_train)) * 100
# test metrics
test_rmse <- sqrt(mean((y_test - y_hat)^2))
test_mape <- mean(abs((y_test - y_hat) / y_test)) * 100
# print
if (verbose) {
pander::pander(paste("Mean of Test Series:", round(mean(y_test), 6), "\n"))
pander::pander(paste("Test RMSE:", round(test_rmse, 6), "\n"))
if (all(y_test != 0)) {
pander::pander(paste("Test MAPE:", round(test_mape, 6), "%\n"))
}
}
# runtime
rt <- difftime(Sys.time(), st, units = "mins")
# output list
list(y = y, x_train = x_train, y_train = y_train, x_test = x_test, y_test = y_test,
y_fitted = y_fitted, y_hat = y_hat,
train_rmse = train_rmse, train_mape = train_mape,
test_rmse = test_rmse, test_mape = test_mape,
mod = mod, rt = rt)
}
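# Example (added sketch, not from the original package docs): a monthly series with a
# 12-step holdout, ARIMA(1,0,0) errors, and 2 Fourier pairs; 'AirPassengers' is used
# purely for illustration and is an assumption, not part of this package.
# fit <- dhr_cv(y = AirPassengers, h = 12, order = c(1, 0, 0),
#               s_period = 12, fourier_k = 2, verbose = TRUE)
# fit$test_rmse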
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appengine_objects.R
\name{Instance}
\alias{Instance}
\title{Instance Object}
\usage{
Instance(name = NULL, id = NULL, appEngineRelease = NULL,
availability = NULL, vmName = NULL, vmZoneName = NULL, vmId = NULL,
startTime = NULL, requests = NULL, errors = NULL, qps = NULL,
averageLatency = NULL, memoryUsage = NULL, vmStatus = NULL,
vmDebugEnabled = NULL)
}
\arguments{
\item{name}{Full path to the Instance resource in the API}
\item{id}{Relative name of the instance within the version}
\item{appEngineRelease}{App Engine release this instance is running on}
\item{availability}{Availability of the instance}
\item{vmName}{Name of the virtual machine where this instance lives}
\item{vmZoneName}{Zone where the virtual machine is located}
\item{vmId}{Virtual machine ID of this instance}
\item{startTime}{Time that this instance was started}
\item{requests}{Number of requests since this instance was started}
\item{errors}{Number of errors since this instance was started}
\item{qps}{Average queries per second (QPS) over the last minute}
\item{averageLatency}{Average latency (ms) over the last minute}
\item{memoryUsage}{Total memory in use (bytes)}
\item{vmStatus}{Status of the virtual machine where this instance lives}
\item{vmDebugEnabled}{Whether this instance is in debug mode}
}
\value{
Instance object
}
\description{
Instance Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
An Instance resource is the computing unit that App Engine uses to automatically scale an application.
}
|
/googleappenginev1.auto/man/Instance.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false | true | 1,634 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appengine_objects.R
\name{Instance}
\alias{Instance}
\title{Instance Object}
\usage{
Instance(name = NULL, id = NULL, appEngineRelease = NULL,
availability = NULL, vmName = NULL, vmZoneName = NULL, vmId = NULL,
startTime = NULL, requests = NULL, errors = NULL, qps = NULL,
averageLatency = NULL, memoryUsage = NULL, vmStatus = NULL,
vmDebugEnabled = NULL)
}
\arguments{
\item{name}{Full path to the Instance resource in the API}
\item{id}{Relative name of the instance within the version}
\item{appEngineRelease}{App Engine release this instance is running on}
\item{availability}{Availability of the instance}
\item{vmName}{Name of the virtual machine where this instance lives}
\item{vmZoneName}{Zone where the virtual machine is located}
\item{vmId}{Virtual machine ID of this instance}
\item{startTime}{Time that this instance was started}
\item{requests}{Number of requests since this instance was started}
\item{errors}{Number of errors since this instance was started}
\item{qps}{Average queries per second (QPS) over the last minute}
\item{averageLatency}{Average latency (ms) over the last minute}
\item{memoryUsage}{Total memory in use (bytes)}
\item{vmStatus}{Status of the virtual machine where this instance lives}
\item{vmDebugEnabled}{Whether this instance is in debug mode}
}
\value{
Instance object
}
\description{
Instance Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
An Instance resource is the computing unit that App Engine uses to automatically scale an application.
}
|
library(plot3D)
library(TeachingDemos)
library(mvtnorm)
library(magick)
library(purrr)
library(ape)
library(RColorBrewer)
setwd("/Users/nikolai/3DBM_visual")
#SIMULATE DATA
for(run in 1:17){
print(run)
#subtending branch to root -- 0.25 BLs
stepsPerBL <- 100
rm <- matrix(data = c(1,0.5,0.5,2), nrow = 2)
rootMean <- c(4,4)
BL1 <- 0.25
nsteps <- BL1*stepsPerBL
t1 <- seq(0, BL1, by = 1 / stepsPerBL)
hw1 <- rbind(rootMean, rmvnorm(n = nsteps, mean = c(0,0), sigma = rm / stepsPerBL))
hw1 <- cbind(cumsum(hw1[,1]), cumsum(hw1[,2]))
#split 1, 0.5 BL on branch 2 to split 2, 1 BL on branch 3 to tip 1
BL2 <- 0.5
nsteps <- BL2*stepsPerBL
t2 <- seq(tail(t1, 1), tail(t1, 1) + BL2, by = 1 / stepsPerBL)
hw2 <- rbind(hw1[dim(hw1)[1],], rmvnorm(n = nsteps, mean = c(0,0), sigma = rm / stepsPerBL))
hw2 <- cbind(cumsum(hw2[,1]), cumsum(hw2[,2]))
BL3 <- 1
nsteps <- BL3*stepsPerBL
t3 <- seq(tail(t1, 1), tail(t1, 1) + BL3, by = 1 / stepsPerBL)
hw3 <- rbind(hw1[dim(hw1)[1],], rmvnorm(n = nsteps, mean = c(0,0), sigma = rm / stepsPerBL))
hw3 <- cbind(cumsum(hw3[,1]), cumsum(hw3[,2]))
# split 2 to tips 2 and 3, each branch with 0.5 BLs
BL4 <- 0.5
nsteps <- BL4*stepsPerBL
t4 <- seq(tail(t2, 1), tail(t2, 1) + BL4, by = 1 / stepsPerBL)
hw4 <- rbind(hw2[dim(hw2)[1],], rmvnorm(n = nsteps, mean = c(0,0), sigma = rm / stepsPerBL))
hw4 <- cbind(cumsum(hw4[,1]), cumsum(hw4[,2]))
BL5 <- 0.5
nsteps <- BL5*stepsPerBL
t5 <- seq(tail(t2, 1), tail(t2, 1) + BL5, by = 1 / stepsPerBL)
hw5 <- rbind(hw2[dim(hw2)[1],], rmvnorm(n = nsteps, mean = c(0,0), sigma = rm / stepsPerBL))
hw5 <- cbind(cumsum(hw5[,1]), cumsum(hw5[,2]))
tree <- read.tree(text = "((A:0.5,B:0.5):0.5,C:1):0.25;")
colorChoices <- brewer.pal(5, "Dark2")  # define before first use so the tree plot runs in a fresh session (reassigned below)
plot(tree, type = "cladogram", root.edge = T, edge.width = 4, font = 2, cex = 3, direction = "up", tip.color = colorChoices[c(4,5,3)], edge.color = colorChoices[c(2,4,5,3)], srt = -90, label.offset = .06)
axisPhylo(2,backward = F)
####################################################################################################
####################################################################################################
####################################################################################################
#ANIMATE
colorChoices <- c(1,2,3,4,5)
colorChoices <- brewer.pal(5, "Dark2")
colorChoices <- sample(c(brewer.pal(9, "Set1"),brewer.pal(8, "Set2"),brewer.pal(12, "Set3"), brewer.pal(8, "Dark2")), size = 5)
pngWidth <- 360
pngHeight <- 360
zlabel <- ""
#find animation parameters
project = c("xy0", "yz0", "xz0", "xy1", "yz1", "xz1") #all planes projected upon
project = c("xz1", "yz0")
# project = ""
wigglyLineThickness <- 2
all <- rbind(hw1, hw2, hw3, hw4, hw5)
xl <- c(min(all[,1]), max(all[,1]))
yl <- c(min(all[,2]), max(all[,2]))
rotate <- TRUE
bounce <- TRUE
if(rotate){
if (bounce){
rotateDegrees <- seq(0, 22.5, length.out = (length(t1) + length(t2) + length(t4) - 3)/2)
rotateDegrees <- c(rotateDegrees, rev(rotateDegrees))
} else {
rotateDegrees <- seq(0, 90, length.out = (length(t1) + length(t2) + length(t4) - 3))
}
} else {
rotateDegrees <- seq(0, 0, length.out = (length(t1) + length(t2) + length(t4) - 3))
}
counter <- 0
for(i in 2:length(t1)){
counter <- counter + 1
png(width = pngWidth, height = pngHeight, paste0("mvBM", paste0(rep(0, (6 - nchar(counter))), collapse = ""), counter, ".png"))
#main squigglies
scatter3D(x = hw1[1:i,1], y = hw1[1:i,2], z = t1[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t1[i]),
ticktype = "detailed", lwd = wigglyLineThickness, zlab = zlabel, xlab = "", ylab = "", add = F, col = colorChoices[1], phi = 45, theta = 45 + rotateDegrees[counter])
#projections
projLWD <- 0.4
if(any(project == "xy0")){
scatter3D(x = hw1[1:i,1], y = hw1[1:i,2], z = rep(0, length(hw1[1:i,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
}
if(any(project == "xy1")){
scatter3D(x = hw1[1:i,1], y = hw1[1:i,2], z = rep(t1[i], length(hw1[1:i,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
}
if(any(project == "xz0")){
scatter3D(x = hw1[1:i,1], y =rep(yl[1], length(hw1[1:i,1])), z = t1[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
}
if(any(project == "xz1")){
scatter3D(x = hw1[1:i,1], y =rep(yl[2], length(hw1[1:i,1])), z = t1[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
}
if(any(project == "yz0")){
scatter3D(x = rep(xl[1], length(hw1[1:i,1])), y = hw1[1:i,2], z = t1[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
}
if(any(project == "yz1")){
scatter3D(x = rep(xl[2], length(hw1[1:i,1])), y = hw1[1:i,2], z = t1[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
}
dev.off()
}
for(i in 2:length(t2)){
counter <- counter + 1
png(width = pngWidth, height = pngHeight, paste0("mvBM", paste0(rep(0, (6 - nchar(counter))), collapse = ""), counter, ".png"))
#main squigglies
scatter3D(x = hw1[,1], y = hw1[,2], z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = 2, zlab = zlabel, xlab = "", ylab = "", add = F, col = colorChoices[1], phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[1:i,1], y = hw2[1:i,2], z = t2[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = 2, zlab = zlabel, xlab = "", ylab = "", col = colorChoices[2], phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:i,1], y = hw3[1:i,2], z = t3[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = 2, zlab = zlabel, xlab = "", ylab = "", col = colorChoices[3], phi = 45, theta = 45 + rotateDegrees[counter], add = T)
#projections
projLWD <- 0.3
if(any(project == "xy0")){
scatter3D(x = hw1[,1], y = hw1[,2], z = rep(0, length(hw1[,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[1:i,1], y = hw2[1:i,2], z = rep(0, length(hw2[1:i,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:i,1], y = hw3[1:i,2], z = rep(0, length(hw3[1:i,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "xy1")){
scatter3D(x = hw1[,1], y = hw1[,2], z = rep(t2[i], length(hw1[,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[1:i,1], y = hw2[1:i,2], z = rep(t2[i], length(hw2[1:i,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:i,1], y = hw3[1:i,2], z = rep(t2[i], length(hw3[1:i,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "xz0")){
scatter3D(x = hw1[,1], y =rep(yl[1], length(hw1[,1])), z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[1:i,1], y = rep(yl[1], length(hw2[1:i,1])), z = t2[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:i,1], y = rep(yl[1], length(hw3[1:i,1])), z = t3[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "xz1")){
scatter3D(x = hw1[,1], y =rep(yl[2], length(hw1[,1])), z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[1:i,1], y = rep(yl[2], length(hw2[1:i,1])), z = t2[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:i,1], y = rep(yl[2], length(hw3[1:i,1])), z = t3[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "yz0")){
scatter3D(x = rep(xl[1], length(hw1[,1])), y = hw1[,2], z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = rep(xl[1], length(hw2[1:i,1])), y = hw2[1:i,2], z = t2[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = rep(xl[1], length(hw3[1:i,1])), y = hw3[1:i,2], z = t3[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "yz1")){
scatter3D(x = rep(xl[2], length(hw1[,1])), y = hw1[,2], z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = rep(xl[2], length(hw2[1:i,1])), y = hw2[1:i,2], z = t2[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = rep(xl[2], length(hw3[1:i,1])), y = hw3[1:i,2], z = t3[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t2[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
dev.off()
}
for(i in 2:length(t4)){
counter <- counter + 1
png(width = pngWidth, height = pngHeight, paste0("mvBM", paste0(rep(0, (6 - nchar(counter))), collapse = ""), counter, ".png"))
#main squigglies
scatter3D(x = hw1[,1], y = hw1[,2], z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = wigglyLineThickness, zlab = zlabel, xlab = "", ylab = "", add = F, col = colorChoices[1], phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[,1], y = hw2[,2], z = t2, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = wigglyLineThickness, zlab = zlabel, xlab = "", ylab = "", col = colorChoices[2], phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:(length(t2)+i-1),1], y = hw3[1:(length(t2)+i-1),2], z = t3[1:(length(t2)+i-1)], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = wigglyLineThickness, zlab = zlabel, xlab = "", ylab = "", col = colorChoices[3], phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw4[1:i,1], y = hw4[1:i,2], z = t4[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = wigglyLineThickness, zlab = zlabel, xlab = "", ylab = "", col = colorChoices[4], phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw5[1:i,1], y = hw5[1:i,2], z = t5[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = wigglyLineThickness, zlab = zlabel, xlab = "", ylab = "", col = colorChoices[5], phi = 45, theta = 45 + rotateDegrees[counter], add = T)
#projections
projLWD <- 0.2
if(any(project == "xy0")){
scatter3D(x = hw1[,1], y = hw1[,2], z = rep(0,length(t1)), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[,1], y = hw2[,2], z = rep(0,length(t2)), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:(length(t2)+i-1),1], y = hw3[1:(length(t2)+i-1),2], z = rep(0,length(t3[1:(length(t2)+i-1)])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw4[1:i,1], y = hw4[1:i,2], z = rep(0,length(t4[1:i])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[4], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw5[1:i,1], y = hw5[1:i,2], z = rep(0,length(t5[1:i])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[5], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "xy1")){
scatter3D(x = hw1[,1], y = hw1[,2], z = rep(t4[i],length(t1)), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[,1], y = hw2[,2], z = rep(t4[i],length(t2)), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:(length(t2)+i-1),1], y = hw3[1:(length(t2)+i-1),2], z = rep(t4[i],length(t3[1:(length(t2)+i-1)])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw4[1:i,1], y = hw4[1:i,2], z = rep(t4[i], length(hw4[1:i,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[4], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw5[1:i,1], y = hw5[1:i,2], z = rep(t5[i], length(hw5[1:i,1])), type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = projLWD, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[5], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "xz0")){
scatter3D(x = hw1[,1], y = rep(yl[1], length(hw1[,2])), z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[,1], y = rep(yl[1], length(hw2[,2])), z = t2, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:(length(t2)+i-1),1], y = rep(yl[1], length(hw3[1:(length(t2)+i-1),2])), z = t3[1:(length(t2)+i-1)], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw4[1:i,1], y = rep(yl[1], length(hw4[1:i,2])), z = t4[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[4], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw5[1:i,1], y = rep(yl[1], length(hw5[1:i,2])), z = t5[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[5], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "xz1")){
scatter3D(x = hw1[,1], y = rep(yl[2], length(hw1[,2])), z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = hw2[,1], y = rep(yl[2], length(hw2[,2])), z = t2, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw3[1:(length(t2)+i-1),1], y = rep(yl[2], length(hw3[1:(length(t2)+i-1),2])), z = t3[1:(length(t2)+i-1)], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw4[1:i,1], y = rep(yl[2], length(hw4[1:i,2])), z = t4[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[4], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = hw5[1:i,1], y = rep(yl[2], length(hw5[1:i,2])), z = t5[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[5], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "yz0")){
scatter3D(x = rep(xl[1], length(hw1[,1])), y = hw1[,2], z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = rep(xl[1], length(hw2[,1])), y = hw2[,2], z = t2, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = rep(xl[1], length(hw3[1:(length(t2)+i-1),1])), y = hw3[1:(length(t2)+i-1),2], z = t3[1:(length(t2)+i-1)], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = rep(xl[1], length(hw4[1:i,1])), y = hw4[1:i,2], z = t4[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[4], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = rep(xl[1], length(hw5[1:i,1])), y = hw5[1:i,2], z = t5[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[5], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
if(any(project == "yz1")){
scatter3D(x = rep(xl[2], length(hw1[,1])), y = hw1[,2], z = t1, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", add = T, col = adjustcolor(colorChoices[1], .5), phi = 45, theta = 45 + rotateDegrees[counter])
scatter3D(x = rep(xl[2], length(hw2[,1])), y = hw2[,2], z = t2, type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[2], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = rep(xl[2], length(hw3[1:(length(t2)+i-1),1])), y = hw3[1:(length(t2)+i-1),2], z = t3[1:(length(t2)+i-1)], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[3], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = rep(xl[2], length(hw4[1:i,1])), y = hw4[1:i,2], z = t4[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[4], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
scatter3D(x = rep(xl[2], length(hw5[1:i,1])), y = hw5[1:i,2], z = t5[1:i], type = "l", xlim = xl, ylim = yl, zlim = c(0, t4[i]),
ticktype = "detailed", lwd = 0.2, zlab = zlabel, xlab = "", ylab = "", col = adjustcolor(colorChoices[5], .5), phi = 45, theta = 45 + rotateDegrees[counter], add = T)
}
dev.off()
}
list.files(pattern = "*.png", full.names = T) %>%
map(image_read) %>% # reads each path file
image_join() %>% # joins image
image_animate(fps=10) %>% # animates, can opt for number of loops
image_write(paste0("manyTree", run, ".gif")) # write to current dir
file.remove(list.files(pattern=".png"))
}
|
/3dPlots_Array.R
|
no_license
|
NikVetr/dissertation_work
|
R
| false | false | 24,580 |
r
|
library(raster); library(doParallel); library(spatstat)
l.sum <- read.csv("Simulated-layers/Layer-summaries.csv")
all.layers <- readRDS("Simulated-layers/All-simulated-layers.rds")
config <- readRDS("Simulated-species/Sim-config-species-list-RandomCentroids.rds")
spp.layers <- lapply(1:ncol(config$layers), function(x){dropLayer(all.layers, i = c(which(! 1:nlayers(all.layers) %in% config$layers[, x])))})
spp.points <- readRDS("Simulated-species/Species-presences-RandomCentroids.rds")
p.spp <- readRDS("Simulated-species/P-presence-RandomCentroids.rds")
spp.cent.cov <- readRDS("Simulated-species/Spp-cent-covs-RandomCentroids.rds")
### Formatting data for spatstat
spp.lay.df <- lapply(spp.layers, function(x){data.frame(rasterToPoints(x))}) #Transform layers to dataframes
spp.point.vals <- lapply(seq_along(spp.layers), function(x){extract(spp.layers[[x]], spp.points[[x]])}) #Extracting values at points
## Coercing raster package data to spatstat
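# spatstat works with its own classes: the analysis window must be an owin, the point
# locations a ppp, and each covariate an im pixel image, so the raster grid is re-indexed
# below by its sorted unique x/y coordinates before conversion.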
ux = sort(unique(spp.lay.df[[1]]$x)) #Extracting unique coordinates
uy = sort(unique(spp.lay.df[[1]]$y))
nx = length(ux) #length of unique coordinates
ny = length(uy)
ref.cols = match(spp.lay.df[[1]]$x, ux) #position of every data point
ref.lines = match(spp.lay.df[[1]]$y, uy)
vec = rep(NA, max(ref.lines)*max(ref.cols)) # A vector with the length of data points
ref.vec = (ref.cols - 1)*max(ref.lines) + ref.lines
vec[ref.vec] = 1
data.mask = matrix(vec, max(ref.lines), max(ref.cols), dimnames = list(uy, ux))
win = as.owin(im(data.mask, xcol = ux, yrow = uy)) #Data analysis window
## transforming species points to a planar point pattern
spp.ppp <- lapply(spp.points, function(x){
ppp(x[, 'x'], x[, 'y'], window = win, check = F)
})
## Transforming species' layers to spatstat images
spp.lay.im <- lapply(spp.lay.df, function(x){
require(foreach)
names(x) <- c("x", "y", "a", "b", "c")
X <- with(x, cbind(a, b, c))
lay.im.list <- foreach(i = 1:ncol(X)) %do% {
vec.all = rep(NA, max(ref.lines)*max(ref.cols))
    vec.ref = (ref.cols - 1)*max(ref.lines) + ref.lines
    vec.all[vec.ref] = X[,i] # use the locally computed index (identical to the global ref.vec, but keeps the function self-contained)
lay <- im(matrix(vec.all, max(ref.lines), max(ref.cols),
dimnames = list(uy, ux)), xcol = ux, yrow = uy)
return(lay)
}
names(lay.im.list) <- c("a", "b", "c")
return(lay.im.list)
})
## Fitting ppms
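# ppm() fits a point process model whose log-intensity is linear in the supplied trend terms;
# here the trend is a full quadratic polynomial in the covariates a, b and c, and since no
# interaction is specified the fitted model is an inhomogeneous Poisson process.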
spp.ppms <- lapply(seq_along(spp.ppp), function(i){
ppm.mod <- ppm(spp.ppp[[i]],
trend = ~ a + b + c +
I(a^2) + I(b^2) + I(c^2),
covariates = spp.lay.im[[i]])
coef <- coefficients(ppm.mod)
rast <- raster(predict(ppm.mod, type = "trend", ngrid = c(100, 100)))
return(list(pred = rast,
coef = coef))
})
dir.create("../Resultados/Analysis-centroids/Fitted-Random-Saturated-PPMs")
for(i in seq_along(spp.ppms)){
saveRDS(spp.ppms[[i]], paste0("../Resultados/Analysis-centroids/Fitted-Random-Saturated-PPMs/PPM-", i, ".rds"))
}
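# Quick sanity check (illustrative, not part of the original pipeline): reload one of the
# saved fits and inspect its coefficients and predicted trend surface; the index 1 is arbitrary.
ppm.check <- readRDS("../Resultados/Analysis-centroids/Fitted-Random-Saturated-PPMs/PPM-1.rds")
print(ppm.check$coef)
plot(ppm.check$pred, main = "Predicted trend, species 1")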
|
/PPMs/Analysing-Random-spps-Saturated-ppms.R
|
no_license
|
gerardommc/Niche-centroids
|
R
| false | false | 3,025 |
r
|
library(reshape2)
# Load activity labels + features
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activityLabels[,2] <- as.character(activityLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# Extract only the data on mean and standard deviation
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
featuresWanted.names = gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names = gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
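# e.g. "tBodyAcc-mean()-X" becomes "tBodyAccMeanX" after the three substitutions above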
# Load the datasets
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
trainActivities <- read.table("UCI HAR Dataset/train/y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
testActivities <- read.table("UCI HAR Dataset/test/y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge datasets and add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWanted.names)
# turn activities & subjects into factors
allData$activity <- factor(allData$activity, levels = activityLabels[,1], labels = activityLabels[,2])
allData$subject <- as.factor(allData$subject)
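# Reshape to long format, then average every retained feature within each subject/activity
# pair, giving one row per pair in the tidy output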
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
if (!requireNamespace("dataMaid", quietly = TRUE)) install.packages("dataMaid")
library(dataMaid)
makeCodebook(allData)
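# Optional check (not part of the original assignment script): read the tidy file back and
# confirm it has one row per subject/activity pair (30 subjects x 6 activities = 180 rows)
tidy <- read.table("tidy.txt", header = TRUE)
dim(tidy)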
|
/Run_Analysis.R
|
no_license
|
Knotaprettygirl/DataAssignment
|
R
| false | false | 1,778 |
r
|
##########################################################################################################################################
## This R script will perform in-memory scoring for batch scoring or for scoring remotely with a web service.
##########################################################################################################################################
# Inputs of the function:
## Untagged_Transactions_df: data frame with the untagged transactions to be scored.
## Account_Info_df: data frame with the account information for the untagged transactions.
## Stage: "Prod" for batch scoring, or "Web" for scoring remotely with web service.
in_memory_scoring <- function(Untagged_Transactions_df,
Account_Info_df,
Stage)
{
# Load library.
library(RevoScaleR)
library(MicrosoftML)
# Set the compute context to local.
rxSetComputeContext('local')
# Create transactionDateTime or recordDateTime. This is done by:
## converting transactionTime into a 6 digit time.
## concatenating transactionDate and transactionTime.
## converting it to a DateTime "%Y%m%d %H%M%S" format.
Untagged_Transactions_df$transactionDateTime = as.POSIXct(paste(Untagged_Transactions_df$transactionDate, sprintf("%06d", as.numeric(Untagged_Transactions_df$transactionTime)), sep=""), format = "%Y%m%d %H%M%S", tz = "GMT")
Account_Info_df$recordDateTime = as.POSIXct(paste(Account_Info_df$transactionDate, sprintf("%06d", as.numeric(Account_Info_df$transactionTime)), sep=""), format = "%Y%m%d %H%M%S", tz = "GMT")
# Load variables from Development Stage.
if(Stage == "Web"){
Risk_list <- model_objects$Risk_list
boosted_fit <- model_objects$boosted_fit
}
if(Stage == "Prod"){
# Directory that holds the tables and model from the Development stage.
LocalModelsDir <- file.path(LocalWorkDir, "model")
Risk_list <- readRDS(file.path(LocalModelsDir, "Risk_list.rds"))
boosted_fit <- readRDS(file.path(LocalModelsDir, "gbt_model.rds"))
}
############################################################################################################################################
## The block below will do the following:
## 1. Merge the two tables Untagged_Transactions and Account_Info.
## 2. Remove duplicates from the table.
############################################################################################################################################
# Merge Untagged_Transactions and Account_Info.
## Merge the input tables on accountID.
Untagged_Account_All_df = merge(x = Untagged_Transactions_df, y = Account_Info_df, by = "accountID", all.x = TRUE)
## Keep rows where recordDateTime <= transactionDateTime for every accountID, transactionID.
Untagged_Account_All_df = Untagged_Account_All_df[difftime(Untagged_Account_All_df$transactionDateTime, Untagged_Account_All_df$recordDateTime, units = "days") >= 0 , ]
## Get the highest recordDateTime for every accountID, transactionID.
Latest_Record_df <- aggregate(Untagged_Account_All_df$recordDateTime, by = list(Untagged_Account_All_df$accountID, Untagged_Account_All_df$transactionID), FUN = max)
colnames(Latest_Record_df) <- c("accountID", "transactionID", "recordDateTime")
## Merge with the Untagged_Account.
Untagged_Latest_Record_df = merge(x = Untagged_Transactions_df, y = Latest_Record_df, by = c("accountID", "transactionID"), all.x = TRUE)
## Add the data from Account_Info.
Untagged_Account_df = merge(x = Untagged_Latest_Record_df, y = Account_Info_df, by = c("accountID", "recordDateTime"), all.x = TRUE)
## Remove some columns.
Untagged_Account_df$recordDateTime <- NULL
Untagged_Account_df$transactionTime.y <- NULL
Untagged_Account_df$transactionDate.y <- NULL
## Select specific columns and remove observations when an ID variable is missing or when the transaction amount is negative.
to_keep <- c("accountID", "transactionID", "transactionDateTime", "isProxyIP", "paymentInstrumentType", "cardType", "paymentBillingAddress",
"paymentBillingPostalCode", "paymentBillingCountryCode", "paymentBillingName", "accountAddress", "accountPostalCode",
"accountCountry", "accountOwnerName", "shippingAddress", "transactionCurrencyCode","localHour", "ipState", "ipPostcode",
"ipCountryCode", "browserLanguage", "paymentBillingState", "accountState", "transactionAmountUSD", "digitalItemCount",
"physicalItemCount", "accountAge", "paymentInstrumentAgeInAccount", "numPaymentRejects1dPerUser", "isUserRegistered",
"transactionDate.x", "transactionTime.x")
Untagged_Account_df <- Untagged_Account_df[!(is.na(Untagged_Account_df$accountID) | is.na(Untagged_Account_df$transactionID) | is.na(Untagged_Account_df$transactionDateTime) | Untagged_Account_df$transactionAmountUSD < 0) , to_keep]
# Remove duplicates.
Untagged_Account_df <- Untagged_Account_df[!duplicated(Untagged_Account_df[, c("transactionID", "accountID", "transactionDateTime", "transactionAmountUSD")]), ]
############################################################################################################################################
## The block below will clean the merged data set.
############################################################################################################################################
# For coherence with the development code that used Hive tables, we change all the variable names to lower case.
colnames(Untagged_Account_df) <- unlist(sapply(colnames(Untagged_Account_df), tolower))
# Get the variable names with NA.
no_of_NA <- sapply(Untagged_Account_df, function(x) sum(is.na(x)))
var_with_NA <- names(no_of_NA[no_of_NA > 0])
# Cleaning and preprocessing function.
preprocessing <- function(data) {
# Replace missing values with 0 except for localHour with -99.
if(length(var_with_NA) > 0){
for(i in 1:length(var_with_NA)){
row_na <- which(is.na(data[, var_with_NA[i]]) == TRUE)
        if(var_with_NA[i] == "localhour"){
data[row_na, var_with_NA[i]] <- "-99"
} else{
data[row_na, var_with_NA[i]] <- "0"
}
}
}
# Fix some data entries in isUserRegistered, which should be binary.
row_na <- which(data[, c("isuserregistered")] %in% as.character(seq(1, 9)))
data[row_na, c("isuserregistered")] <- "0"
    # Convert a few variables to numeric, replacing non-numeric or missing entries with 0.
numeric_to_fix <- c("accountage", "paymentinstrumentageinaccount", "numpaymentrejects1dperuser", "transactionamountusd",
"digitalitemcount", "physicalitemcount")
for(i in 1:length(numeric_to_fix)){
data[, numeric_to_fix[i]] <- as.numeric(data[, numeric_to_fix[i]])
row_na <- which(is.na(as.numeric(data[, numeric_to_fix[i]])) == TRUE)
data[row_na, numeric_to_fix[i]] <- 0
}
return(data)
}
# Apply the function.
Untagged_Account_Preprocessed_df <- preprocessing(Untagged_Account_df)
############################################################################################################################################
## The block below will add risk values and create mismatch address flags and a high amount flag.
############################################################################################################################################
# Variables to which we will add risk values.
risk_vars <- c("transactioncurrencycode", "localhour", "ipstate", "ippostcode","ipcountrycode", "browserlanguage",
"accountpostalcode", "accountstate", "accountcountry", "paymentbillingpostalcode", "paymentbillingstate",
"paymentbillingcountrycode")
# Function to assign risks.
assign_risk <- function(data){
for(name in risk_vars){
# Get the appropriate Risk Table from the Risk_list.
Risk_df <- Risk_list[[name]]
# Perform a left outer join with the Risk table. This will assign the risk value to every level of the variable.
data <- merge(data, Risk_df, by = name, all.x = TRUE)
new_name <- paste(name, "risk", sep ="")
colnames(data)[ncol(data)] <- new_name
      # If a new level is found in the data, the join leaves the assigned risk as NA. We convert it to 0.
row_na <- which(is.na(data[, new_name]))
data[row_na, new_name] <- 0
}
return(data)
}
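  # Illustrative sketch (not part of the original pipeline): a toy risk table and a toy data frame
  # showing how the left outer join above assigns risks, and how a level unseen in Development ends
  # up as NA and is then mapped to 0. The names and values below are hypothetical; the block is
  # wrapped in if(FALSE) so it never runs during scoring.
  if (FALSE) {
    toy_risk <- data.frame(ipcountrycode = c("us", "gb"), risk = c(0.1, 0.3))    # hypothetical risk table
    toy_data <- data.frame(ipcountrycode = c("us", "zz"))                        # "zz" was never seen in Development
    toy_merged <- merge(toy_data, toy_risk, by = "ipcountrycode", all.x = TRUE)  # "zz" gets an NA risk ...
    toy_merged$risk[is.na(toy_merged$risk)] <- 0                                 # ... which is converted to 0
    toy_merged
  }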
# Apply the function.
Untagged_Account_Features1_df <- assign_risk(Untagged_Account_Preprocessed_df)
# Create other variables:
## isHighAmount: flag for transactions of a high amount.
## various flags showing if there is a mismatch in the addresses variables.
Untagged_Account_Features1_df$ishighamount = ifelse(Untagged_Account_Features1_df$transactionamountusd > 150, "1", "0")
Untagged_Account_Features1_df$acctbillingaddressmismatchflag = ifelse(Untagged_Account_Features1_df$paymentbillingaddress == Untagged_Account_Features1_df$accountaddress, "0", "1")
Untagged_Account_Features1_df$acctbillingpostalcodemismatchflag = ifelse(Untagged_Account_Features1_df$paymentbillingpostalcode == Untagged_Account_Features1_df$accountpostalcode, "0", "1")
Untagged_Account_Features1_df$acctbillingcountrymismatchflag = ifelse(Untagged_Account_Features1_df$paymentbillingcountrycode == Untagged_Account_Features1_df$accountcountry, "0", "1")
Untagged_Account_Features1_df$acctbillingnamemismatchflag= ifelse(Untagged_Account_Features1_df$paymentbillingname == Untagged_Account_Features1_df$accountownername, "0", "1")
Untagged_Account_Features1_df$acctshippingaddressmismatchflag = ifelse(Untagged_Account_Features1_df$shippingaddress == Untagged_Account_Features1_df$accountaddress, "0", "1")
Untagged_Account_Features1_df$shippingBillingAddressmismatchflag = ifelse(Untagged_Account_Features1_df$shippingaddress == Untagged_Account_Features1_df$paymentbillingaddress, "0", "1")
# Create an artificial target variable label. This is for rxPredict to work.
Untagged_Account_Features1_df$label <- sample(c("0", "1"), size = nrow(Untagged_Account_Features1_df), replace = TRUE)
############################################################################################################################################
## The block below will compute the aggregates.
############################################################################################################################################
# Function that computes the aggregates for a given accountID.
aggregates_account_level <- function(dt){
if(nrow(dt) == 1){ #if there is only 1 transaction in that account, no aggregation.
return(NULL)
} else{
      # Perform a cross-apply and filter: for each transactionID, z holds the other transactions on the same account that occurred in the previous 30 days.
z = merge(x = dt, y = dt[, c("transactionid", "transactiondatetime", "transactionamountusd")], by = NULL)
z = z[z$transactionid.x != z$transactionid.y & difftime(z$transactiondatetime.x , z$transactiondatetime.y, units = "days") > 0 & difftime(z$transactiondatetime.x , z$transactiondatetime.y, units = "days") < 30,]
# Keep the transactionIDs that occurred in the past 1 day and 30 days respectively.
z1day = z[difftime(z$transactiondatetime.x , z$transactiondatetime.y, units = "days") <= 1, ]
z30day = z[difftime(z$transactiondatetime.x , z$transactiondatetime.y, units = "days") <= 30, ]
      # Compute the number of transactions (sumPurchaseCount30dPerUser) and the total amount spent in the past 30 days (sumPurchaseAmount30dPerUser).
if(nrow(z30day) == 0){
return(NULL)
} else{
aggsum30day <- aggregate(z30day$transactionamountusd.y, by = list(z30day$transactionid.x), FUN = sum)
colnames(aggsum30day) <- c("transactionid", "sumpurchaseamount30dperuser")
aggcount30day <- aggregate(z30day$transactionamountusd.y, by = list(z30day$transactionid.x), FUN = NROW)
colnames(aggcount30day) <- c("transactionid", "sumpurchasecount30dperuser")
agg30day <- merge(x = aggsum30day, y = aggcount30day , by = "transactionid")
}
      # Compute the number of transactions (sumPurchaseCount1dPerUser) and the total amount spent in the past day (sumPurchaseAmount1dPerUser).
if(nrow(z1day) == 0){
agg30day$sumpurchaseamount1dperuser <- 0
agg30day$sumpurchasecount1dperuser <- 0
return(agg30day)
} else{
aggsum1day <- aggregate(z1day$transactionamountusd.y, by = list(z1day$transactionid.x), FUN = sum)
colnames(aggsum1day) <- c("transactionid", "sumpurchaseamount1dperuser")
aggcount1day <- aggregate(z1day$transactionamountusd.y, by = list(z1day$transactionid.x), FUN = NROW)
colnames(aggcount1day) <- c("transactionid", "sumpurchasecount1dperuser")
agg1day <- merge(x = aggsum1day, y = aggcount1day , by = "transactionid")
}
# Return the 4 new variables for each transactionID that had other transactions in the past 30 days.
agg <- merge(x = agg1day, y = agg30day, by = "transactionid", all = TRUE)
return(agg)
}
}
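  # Illustrative sketch (not from the original code): a hypothetical two-transaction account showing
  # what aggregates_account_level returns. The column names and GMT timestamps mirror the real data,
  # but the values are made up; the block is wrapped in if(FALSE) so it never runs during scoring.
  if (FALSE) {
    toy_account <- data.frame(transactionid = c("t1", "t2"),
                              transactiondatetime = as.POSIXct(c("2017-01-01 10:00:00",
                                                                 "2017-01-02 09:00:00"), tz = "GMT"),
                              transactionamountusd = c(100, 50),
                              stringsAsFactors = FALSE)
    # t1 happened within both 1 day and 30 days before t2, so for t2 we expect all four aggregates
    # to reflect that single earlier transaction: amounts of 100 and counts of 1.
    aggregates_account_level(toy_account)
  }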
# Split the data set by accountID.
Splits <- split(Untagged_Account_Features1_df, f = Untagged_Account_Features1_df$accountid)
# Compute the aggregations for each accountID with the user defined function aggregates_account_level.
Aggregations_list <- lapply(X = Splits, FUN = aggregates_account_level)
# Bind the results into 1 data frame.
Aggregations_df <- do.call("rbind", Aggregations_list)
# Add the new variables to the initial data:
## If there was 1 transaction per account ID for all accounts, we simply add the 4 aggregate variables with values of 0.
if(is.null(Aggregations_df)){
Untagged_Account_Features_df <- Untagged_Account_Features1_df
for(new_name in c("sumpurchasecount1dperuser", "sumpurchasecount30dperuser", "sumpurchaseamount1dperuser", "sumpurchaseamount30dperuser")){
Untagged_Account_Features_df[, new_name] <- 0
}
}else{
## Otherwise, add the new variables to the initial data with a left outer join.
Untagged_Account_Features_df <- merge(x = Untagged_Account_Features1_df, y = Aggregations_df, by = "transactionid", all.x = TRUE)
# The transactions that had no other transactions in the 30 day time frame have missing values. We convert them to 0.
for(new_name in c("sumpurchasecount1dperuser", "sumpurchasecount30dperuser", "sumpurchaseamount1dperuser", "sumpurchaseamount30dperuser")){
row_na <- which(is.na(Untagged_Account_Features_df[, new_name]))
Untagged_Account_Features_df[row_na, new_name] <- 0
}
}
############################################################################################################################################
## The block below will convert character to factors for the prediction step.
############################################################################################################################################
for (name in colnames(Untagged_Account_Features_df)){
if(class(Untagged_Account_Features_df[[name]])[1] == "character" & ! name %in% c("accountid", "transactionid", "transactiondatetime", "transactiondate.x", "transactiontime.x", "paymentbillingaddress",
"paymentbillingname", "accountaddress", "accountownername", "shippingaddress", risk_vars)){
Untagged_Account_Features_df[[name]] <- factor(Untagged_Account_Features_df[[name]])
}
}
Untagged_Account_Features_df$isproxyip <- as.factor(as.character( Untagged_Account_Features_df$isproxyip))
Untagged_Account_Features_df$isuserregistered <- as.factor(as.character( Untagged_Account_Features_df$isuserregistered))
############################################################################################################################################
## The block below will score the featurized data set.
############################################################################################################################################
# Make predictions.
Predictions <- rxPredict(boosted_fit,
type = c("prob"),
data = Untagged_Account_Features_df,
extraVarsToWrite = c("accountid", "transactionid", "transactiondate.x", "transactiontime.x", "transactionamountusd"))
# Change the names of the variables in the predictions table for clarity.
Predictions$transactiondatetime = as.character(as.POSIXct(paste(Predictions$transactiondate.x, sprintf("%06d", as.numeric(Predictions$transactiontime.x)), sep=""), format = "%Y%m%d %H%M%S", tz = "GMT"))
Predictions$transactiondate.x <- NULL
Predictions$transactiontime.x <- NULL
Predictions <- Predictions[, c(2, 3, 4,5,6,7)]
colnames(Predictions)[1] <- c("prob")
return(Predictions)
}
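# Hedged usage sketch (illustrative only; the paths and input files below are hypothetical and the
# block is wrapped in if(FALSE) so it never executes when this file is sourced). In the "Prod"
# stage the function expects LocalWorkDir to contain the "model" folder written during Development.
if (FALSE) {
  LocalWorkDir <- "/tmp/fraud_scoring"                                                         # hypothetical
  Untagged_Transactions_df <- read.csv("untagged_transactions.csv", stringsAsFactors = FALSE)  # hypothetical file
  Account_Info_df <- read.csv("account_info.csv", stringsAsFactors = FALSE)                    # hypothetical file
  Scores <- in_memory_scoring(Untagged_Transactions_df, Account_Info_df, Stage = "Prod")
  head(Scores)   # one row per transaction, with the fraud probability in the "prob" column
}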
# ---- end of source file: /RSparkCluster/in_memory_scoring.R (repo: microsoft/r-server-fraud-detection, license: permissive) ----
setwd("~/Desktop/ML GIT/web scraping/")
library(rvest)
mac <- read_html("https://en.wikipedia.org/wiki/MacOS")
p_tag_data <- mac %>% html_nodes("p") %>% html_text()
table.df <- mac %>% html_nodes("table") %>% .[[3]] %>% html_table()
table.df2 <- mac %>% html_nodes("table") %>% .[[4]] %>% html_table()
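# Illustrative check (an assumption, not part of the original script): the table positions 3 and 4
# depend on the current layout of the Wikipedia page, so it can help to see how many tables the
# page exposes before indexing into them. Wrapped in if(FALSE) so it does not run on source().
if (FALSE) {
  tables <- mac %>% html_nodes("table")
  length(tables)     # how many tables the page currently contains
  head(table.df)     # quick look at the extracted table
}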
# ---- end of source file: /2.R (repo: H-arshit/Web-Scraping-With-R, no license) ----
source("https://raw.githubusercontent.com/julbautista/Startup/master/julian_startup.R")
setwd("C:/Users/Julian Bautista/Documents/Portfolio/India Project")
df <- read.csv("https://raw.githubusercontent.com/julbautista/India-Project/master/India_Data.csv", header = T, skip = 2)
#Drop Andaman and Nicobar & Dadra NH & Daman and Diu & Lakshadweep & Chandigarh and 2003
df <- df[df$Year != 2003,]
df <- df[df$State != "Andaman & Nicobar Islands"
& df$State != "Dadra & Nagar Haveli"
& df$State != "Daman & Diu"
& df$State != "Lakshadweep"
& df$State != "Chandigarh",]
df$State <- as.character(df$State)
df$State[df$State == "Odisha"] <- "Orissa"
df$State[df$State == "Chhatisgarh"] <- "Chhattisgarh"
#Read in population data
pop <- read.csv("https://raw.githubusercontent.com/julbautista/India-Project/master/population.csv", header = TRUE)
pop <- pop[pop$State != "Andaman and Nicobar Islands"
& pop$State != "Dadra and Nagar Haveli"
& pop$State != "Daman and Diu"
& pop$State != "Lakshadweep"
& pop$State != "Chandigarh",]
#Merge population to dataframe
ind <- merge(df, pop)
#Function for projecting population up or down
pop.func <- function(year, pop2011){
pop <- NULL
pop <- ifelse(year > 2011,
pop2011*(1.0184^(year - 2011)),
ifelse(year < 2011,
pop2011*((1/1.0184)^(2011 - year)),
pop2011))
return(pop)
}
#Add column of projected population numbers
ind$population <- pop.func(ind$Year, ind$X2011pop)
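#Quick sanity check of the projection (illustrative; assumes the 1.84% annual growth rate used in
#pop.func and is wrapped in if(FALSE) so it does not affect the analysis). One year forward or back
#from the 2011 census figure should scale it by 1.0184 or 1/1.0184 respectively.
if (FALSE) {
  pop.func(2012, 1e6)   # 1e6 * 1.0184   = 1018400
  pop.func(2010, 1e6)   # 1e6 / 1.0184  ~=  981932
  pop.func(2011, 1e6)   # unchanged: 1e6
}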
#Remove old population numbers
ind <- ind[,-8]
rm(df,pop)
#Put all currency in Rupees
ind$exp.rup <- NULL
for(i in 1:length(ind[,1])){
ind$exp.rup[i] <- ifelse(ind$Units[i] == "Lakh",
ind$Health.Exp[i]*100000,
ind$Health.Exp[i]*1000000)
}
#Get spending per capita
ind$percap <- ind$exp.rup/ind$population
#Create ratio of female to male IMR
ind$fm.imr <- ind$Female/ind$Male
#Regress IMR diff on health exp
#plot(lm(fm.imr ~ percap, data = ind))
#Sort by year
ind <- ind[order(ind$Year),]
#Create state id
ind <- within(ind, state.id <- match(State, unique(State)))
#Create Year id
ind <- within(ind, year.id <- match(Year, unique(Year)))
#plot pooled values across time, naive look
pool <- ind %>% group_by(Year) %>% summarise(poolIMR = mean(Person), poolHexp = mean(percap))
ggplot(pool, aes(poolHexp, poolIMR, label = Year)) + public +
geom_smooth(alpha = 0.12, method = 'lm') +
geom_text_repel(segment.color = jbpal$green, point.padding = unit(1.5, "lines"), size = 5) +
geom_point(size = 2.5) + ggtitle("Pooled IMR and Health Exp") +
xlab("Health Expenditure") + ylab("Infant Mortality")
ggsave("naive.pdf", width = 17, height = 8.5)
rm(pool)
#Plot per cap health exp by state over time
#non uniform y limit
# healthexp <- list()
# for(i in 1:30){
# plotter <- ggplot(subset(ind,ind$state.id == i), aes(Year, percap)) + public + geom_line() +
# labs(x = 'Year', y = 'Health Exp', title = unique(ind$State[ind$state.id == i])) +
# scale_x_continuous(breaks = c(2005, 2012)) +
# scale_y_continuous(breaks = c(0, round(max(ind[ind$state.id == i,]$percap)))) +
# theme(axis.text.x = element_text(size = 5),
# axis.text.y = element_text(size = 9),
# plot.title = element_text(size = 13),
# axis.title.x = element_text(size = 7),
# axis.title.y = element_text(size = 7),
# panel.grid.major = element_blank())
# healthexp[[i]] <- plotter
# }
#
# ggsave("HexpTime.pdf", plot = grid.arrange(grobs = healthexp, nrow = 6, ncol = 5), width = 17, height = 8.5)
#uniform y limits healthexp
healthexp2 <- list()
for(i in 1:30){
healthexp2[[i]] <-
ggplot(subset(ind,ind$state.id == i), aes(Year, percap) ) + public + geom_line() +
geom_text(x = 2005 + 0.25, y = round(min(ind[ind$state.id == i & ind$Year == 2005,]$percap)) + 350, label = round(min(ind[ind$Year == 2005 & ind$state.id == i ,]$percap)), size = 2) +
geom_text(x = 2012 - 0.25, y = round(max(ind[ind$state.id == i & ind$Year == 2012,]$percap)) + 350, label = round(max(ind[ind$Year == 2012 & ind$state.id == i ,]$percap)), size = 2) +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, 3000 ), limits = c(0,3100) ) +
labs(x = 'Year', y = 'Health Exp', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 7),
axis.text.y = element_text(size = 7),
plot.title = element_text(size = 13),
axis.title.x = element_text(size = 9),
axis.title.y = element_text(size = 9),
panel.grid.major = element_blank())
}
ggsave("HexpTime.pdf", plot = grid.arrange(grobs = healthexp2, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Plot female and male IMR by state over time
genderIMR <- list()
for(i in 1:30){
#j and gap help create labels for the largest IMR gender gaps per state
j <- which.max(abs(ind[ind$state.id == i,]$Female - ind[ind$state.id == i,]$Male))
gap <- ind[ind$state.id == i,][j,]
genderIMR[[i]] <- ggplot(subset(ind,ind$state.id == i), aes(Year, Female)) + public +
geom_line(colour = jbpal$red) +
geom_line(aes(Year, Male), colour = jbpal$green) +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, round(max(ind[ind$state.id == i,]$Female))), limits = c(0, 85)) +
geom_text(x = gap$Year, y = gap$Female, label = gap$Female, size = 2, colour =jbpal$red) +
geom_text(x = gap$Year, y = gap$Male, label = gap$Male, size = 2, colour = jbpal$green) +
labs(x = 'Year', y = 'IMR', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("genderIMR.pdf", plot = grid.arrange(grobs = genderIMR, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Plot IMR by state over time
IMR <- list()
for(i in 1:30){
IMR[[i]] <-
ggplot(subset(ind,ind$state.id == i), aes(Year, Person)) + public + geom_line() +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0,80), limits = c(0,80) ) +
geom_text(x = 2012, y = round(max(ind[ind$state.id == i,]$Person)), label = round(min(ind[ind$state.id == i,]$Person)), size = 2) +
geom_text(x = 2005, y = round(min(ind[ind$state.id == i,]$Person)), label = round(max(ind[ind$state.id == i,]$Person)), size = 2) +
labs(x = 'Year', y = 'IMR', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("IMR.pdf", plot = grid.arrange(grobs = IMR, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Plot IMR RATIO by state over time
ratioIMR <- list()
for(i in 1:30){
ratioIMR[[i]] <-
ggplot(subset(ind,ind$state.id == i), aes(Year, fm.imr)) + public + geom_line() +
scale_x_continuous(breaks = c(2005, 2012)) +
    scale_y_continuous(breaks = c(0, round(min(ind[ind$state.id == i,]$fm.imr)), round(max(ind[ind$state.id == i,]$fm.imr)), 2.7 ), limits = c(0,2.7) ) +
geom_hline(yintercept = 1, linetype = 2, alpha = 0.2) +
labs(x = 'Year', y = 'IMR Ratio', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("IMRratio.pdf", plot = grid.arrange(grobs = ratioIMR, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Plot IMR in 2012 and 2005 by State
ind2 <- ind
ind2$State <- factor(ind2$State)
ind2$State <- factor(ind2$State, levels = ind2$State[order(ind2$Person[ind2$Year == 2012])])
IMR0512 <- ggplot(ind2[ind2$Year == 2012| ind2$Year == 2005,], aes(Person, State, colour = factor(Year), fill = factor(Year))) +
ggtitle("Infant Mortality Rate per State, 2012 and 2005") +
public +
geom_dotplot(binaxis = "y", dotsize = 0.35) +
labs(x = 'IMR', y = "") +
jbcol + jbfill +
theme(legend.title = element_blank(), panel.grid.major.x = element_blank(),
axis.line = element_blank(),
axis.text = element_text(colour = "#2f4f4f", family = "Open Sans", size = 14),
legend.text = element_text(colour = "#2f4f4f", family = "Open Sans", size = 20))
ggsave("IMR0512.pdf", plot = IMR0512, width = 17, height = 8.5)
rm(ind2)
#Stan model normal for females
year_id <- ind$year.id
state_id <- ind$state.id
imr <- (ind$Female)
percap <- ind$percap
N <- length(imr)
fit1 <- stan("normal.stan",
data = list("year_id", "state_id", "N", "imr","percap"),
iter = 2000, chains = 4)
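#The file "normal.stan" is not included in this script. From the data passed in (year_id, state_id,
#N, imr, percap), the parameters summarised in the print() call below (mu_alpha_s, tau_alpha_s,
#mu_alpha_t, tau_alpha_t, beta, alpha_t) and the y_pred draws extracted later, it is presumably a
#hierarchical normal regression with state and year intercepts and state-specific expenditure
#slopes. The string below is only a guess at that structure (kept inside if(FALSE)); the real
#model file may differ.
if (FALSE) {
  normal_stan_sketch <- "
  data {
    int<lower=1> N;
    int<lower=1> year_id[N];
    int<lower=1> state_id[N];
    vector[N] imr;
    vector[N] percap;
  }
  transformed data {
    int N_state = max(state_id);   // number of states
    int N_year  = max(year_id);    // number of years
  }
  parameters {
    vector[N_state] alpha_s;       // state intercepts
    vector[N_year]  alpha_t;       // year intercepts
    vector[N_state] beta;          // state-specific expenditure slopes
    real mu_alpha_s; real<lower=0> tau_alpha_s;
    real mu_alpha_t; real<lower=0> tau_alpha_t;
    real<lower=0> sigma;
  }
  model {
    alpha_s ~ normal(mu_alpha_s, tau_alpha_s);
    alpha_t ~ normal(mu_alpha_t, tau_alpha_t);
    for (n in 1:N)
      imr[n] ~ normal(alpha_s[state_id[n]] + alpha_t[year_id[n]] + beta[state_id[n]] * percap[n], sigma);
  }
  generated quantities {
    vector[N] y_pred;
    for (n in 1:N)
      y_pred[n] = normal_rng(alpha_s[state_id[n]] + alpha_t[year_id[n]] + beta[state_id[n]] * percap[n], sigma);
  }
  "
}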
print(fit1, pars = c("mu_alpha_s", "tau_alpha_s",
"mu_alpha_t", "tau_alpha_t",
"beta", "alpha_t"))
#extract quantiles
ind$y.upper <- NULL
ind$y.lower <- NULL
for(i in 1:240){
ind$y.lower[i] <- quantile(extract(fit1)$y_pred[,i], 0.25)
ind$y.upper[i] <- quantile(extract(fit1)$y_pred[,i], 0.75)
}
#plot predicted values by state for females
#green is actual; dashed lines show the 25th and 75th predictive quantiles
ind$pred <- (colMeans(extract(fit1)$y_pred))
pred <- list()
for(i in 1:30){
pred[[i]] <- ggplot(subset(ind,ind$state.id == i),
aes(Year, pred)) +
public + #geom_line() +
geom_line(aes(Year, Female), colour = jbpal$green) +
geom_line(aes(Year,y.lower), linetype = 'dashed', alpha = 0.3) +
geom_line(aes(Year,y.upper), linetype = 'dashed', alpha = 0.3) +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, 80), limits = c(0,80 ) ) +
geom_text(x = 2012, y = round(max(ind[ind$state.id == i,]$Female)), label = round(min(ind[ind$state.id == i,]$Female)), size = 2) +
geom_text(x = 2005, y = round(min(ind[ind$state.id == i,]$Female)), label = round(max(ind[ind$state.id == i,]$Female)), size = 2) +
labs(x = 'Year', y = 'IMR', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("predfem.pdf", plot = grid.arrange(grobs = pred, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Posterior Predictive Check
ggplot(ind, aes(pred, Female)) + public + geom_abline(slope = 1, intercept = 0) + geom_errorbarh(data = ind, aes(y = Female, x = pred, xmin = y.lower, xmax = y.upper)) + geom_point() + ggtitle("Posterior Predictive Check on Females") + xlim(c(0,80)) + ylim(c(0,80))
ggsave("postpredfem.pdf", width = 17, height = 8.5)
#Male pred model
year_id <- ind$year.id
state_id <- ind$state.id
imr <- (ind$Male)
percap <- ind$percap
N <- length(imr)
fitm <- stan("normal.stan",
data = list("year_id", "state_id", "N", "imr","percap"),
iter = 2000, chains = 4)
print(fitm, pars = c("mu_alpha_s", "tau_alpha_s",
"mu_alpha_t", "tau_alpha_t",
"beta", "alpha_t"))
#extract quantiles
ind$y.upperm <- NULL
ind$y.lowerm <- NULL
for(i in 1:240){
ind$y.lowerm[i] <- quantile(extract(fitm)$y_pred[,i], 0.25)
ind$y.upperm[i] <- quantile(extract(fitm)$y_pred[,i], 0.75)
}
#plot predicted values by state for males
#green is actual; dashed lines show the 25th and 75th predictive quantiles
ind$predm <- (colMeans(extract(fitm)$y_pred))
predm <- list()
for(i in 1:30){
predm[[i]] <- ggplot(subset(ind,ind$state.id == i),
aes(Year, predm)) +
public + #geom_line() +
geom_line(aes(Year, Male), colour = jbpal$green) +
geom_line(aes(Year,y.lowerm), linetype = 'dashed', alpha = 0.3) +
geom_line(aes(Year,y.upperm), linetype = 'dashed', alpha = 0.3) +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, 80), limits = c(0,80 ) ) +
geom_text(x = 2012, y = round(max(ind[ind$state.id == i,]$Male)), label = round(min(ind[ind$state.id == i,]$Male)), size = 2) +
geom_text(x = 2005, y = round(min(ind[ind$state.id == i,]$Male)), label = round(max(ind[ind$state.id == i,]$Male)), size = 2) +
labs(x = 'Year', y = 'IMR', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("predmal.pdf", plot = grid.arrange(grobs = predm, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Posterior Predictive Check
ggplot(ind, aes(predm, Male)) + public + geom_abline(slope = 1, intercept = 0) + geom_errorbarh(data = ind, aes(y = Male, x = predm, xmin = y.lowerm, xmax = y.upperm)) + geom_point() + ggtitle("Posterior Predictive Check on Males") + xlim(c(0,80)) + ylim(c(0,80))
ggsave("postpredmal.pdf", width = 17, height = 8.5)
#create lagged per cap variable
ind <- ind %>% group_by(state.id) %>% mutate(lag.exp = c(NA, percap[-length(percap)]))
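#The manual shift above is equivalent to dplyr::lag() within each state, relying on the data being
#sorted by Year (illustrative check of that assumption, wrapped in if(FALSE) so ind is untouched).
if (FALSE) {
  chk <- ind %>% group_by(state.id) %>% mutate(lag.exp2 = dplyr::lag(percap))
  all.equal(chk$lag.exp, chk$lag.exp2)   # should be TRUE
}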
#Get rid of 2005
ind1 <- ind[ind$Year != 2005,]
#Run the same model on per cap exp lagged
year_id <- ind1$year.id
state_id <- ind1$state.id
imr <- log(ind1$Female)
percap <- log(ind1$lag.exp)
N <- length(imr)
fit2 <- stan("normal.stan",
data = list("year_id", "state_id", "N", "imr","percap"),
iter = 2000, chains = 4)
print(fit2, pars = c("mu_alpha_s", "tau_alpha_s",
"mu_alpha_t", "tau_alpha_t",
"beta", "alpha_t"))
ind1$y.upper2 <- NULL
ind1$y.lower2 <- NULL
for(i in 1:N){
ind1$y.lower2[i] <- exp(quantile(extract(fit2)$y_pred[,i], 0.25))
ind1$y.upper2[i] <- exp(quantile(extract(fit2)$y_pred[,i], 0.75))
}
#plot predicted values by state
#green is actual; dashed lines show the 25th and 75th predictive quantiles
ind1$pred2 <- exp(colMeans(extract(fit2)$y_pred))
pred2 <- list()
for(i in 1:30){
pred2[[i]] <- ggplot(subset(ind1,ind1$state.id == i),
aes(Year, pred2)) +
public + #geom_line() +
geom_line(aes(Year, Female), colour = jbpal$green) +
geom_line(aes(Year,y.lower2), linetype = 'dashed', alpha = 0.3) +
geom_line(aes(Year,y.upper2), linetype = 'dashed', alpha = 0.3) +
scale_x_continuous(breaks = c(2006, 2012)) +
scale_y_continuous(breaks = c(0, 90), limits = c(0,90) ) +
labs(x = 'Year', y = 'IMR', title = unique(ind1$State[ind1$state.id == i])) +
geom_text(x = 2012, y = round(max(ind1[ind1$state.id == i,]$Female)), label = round(min(ind1[ind1$state.id == i,]$Female)), size = 2) +
geom_text(x = 2006, y = round(min(ind1[ind1$state.id == i,]$Female)), label = round(max(ind1[ind1$state.id == i,]$Female)), size = 2) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("lagpredfem.pdf", plot = grid.arrange(grobs = pred2, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Posterior Predictive Check
ggplot(ind1, aes(pred2, Female)) + public + geom_abline(slope = 1, intercept = 0) + geom_errorbarh(data = ind1, aes(y = Female, x = pred2, xmin = y.lower2, xmax = y.upper2)) + geom_point() + ggtitle("Posterior Predictive Check on Females") + xlim(c(0,90)) + ylim(c(0,90)) + xlab("Pred")
ggsave("lagpostpredfem.pdf", width = 17, height = 8.5)
#Run the same model on per cap exp lagged for boys
year_id <- ind1$year.id
state_id <- ind1$state.id
imr <- log(ind1$Male)
percap <- log(ind1$lag.exp)
N <- length(imr)
fit3 <- stan("normal.stan",
data = list("year_id", "state_id", "N", "imr","percap"),
iter = 2000, chains = 4)
print(fit3, pars = c("mu_alpha_s", "tau_alpha_s",
"mu_alpha_t", "tau_alpha_t",
"beta", "alpha_t"))
ind1$y.upper3 <- NULL
ind1$y.lower3 <- NULL
for(i in 1:N){
ind1$y.lower3[i] <- exp(quantile(extract(fit3)$y_pred[,i], 0.25))
ind1$y.upper3[i] <- exp(quantile(extract(fit3)$y_pred[,i], 0.75))
}
#plot predicted values by state
#green is actual; dashed lines show the 25th and 75th predictive quantiles
ind1$pred3 <- exp(colMeans(extract(fit3)$y_pred))
pred3 <- list()
for(i in 1:30){
pred3[[i]] <- ggplot(subset(ind1,ind1$state.id == i),
aes(Year, pred3)) +
public + #geom_line() +
geom_line(aes(Year, Male), colour = jbpal$green) +
geom_line(aes(Year,y.lower3), linetype = 'dashed', alpha = 0.3) +
geom_line(aes(Year,y.upper3), linetype = 'dashed', alpha = 0.3) +
scale_x_continuous(breaks = c(2006, 2012)) +
scale_y_continuous(breaks = c(0, 85 ), limits = c(0,85 ) ) +
geom_text(x = 2012, y = round(max(ind1[ind1$state.id == i,]$Male)), label = round(min(ind1[ind1$state.id == i,]$Male)), size = 2) +
geom_text(x = 2006, y = round(min(ind1[ind1$state.id == i,]$Male)), label = round(max(ind1[ind1$state.id == i,]$Male)), size = 2) +
labs(x = 'Year', y = 'IMR', title = unique(ind1$State[ind1$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("lagpredmal.pdf", plot = grid.arrange(grobs = pred3, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Posterior Predictive Check
ggplot(ind1, aes(pred3, Male)) + public + geom_abline(slope = 1, intercept = 0) + geom_errorbarh(data = ind1, aes(y = Male, x = pred3, xmin = y.lower3, xmax = y.upper3)) + geom_point() + ggtitle("Posterior Predictive Check on Males") + xlim(c(0,85)) + ylim(c(0,85)) + xlab("Pred")
ggsave("lagpostpredmal.pdf", width = 17, height = 8.5)
#Look at distribution of effects by state in lagged log model
#create beta data_frame
beta_male <- data.frame(extract(fit3)$beta)
beta_female <- data.frame(extract(fit2)$beta)
dense <- list()
for(i in 1:30){
dense[[i]] <- ggplot(beta_male,
aes_(x = beta_male[,i] )) +
public + geom_density() + xlim(-0.5,0.5) + ylim(0,9.5) +
geom_density(data = beta_female, aes_(x = beta_female[,i]), colour = jbpal$green) +
                           labs(x = 'beta', title = unique(ind1$State[ind1$state.id == i])) +
geom_vline(xintercept = 0, alpha = 0.4, linetype = 2) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_blank(),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_blank(),
axis.line.y = element_blank(),
panel.grid.major = element_blank())
}
ggsave("betas.pdf", plot = grid.arrange(grobs = dense, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Just look at the Mus
#plotting time intercepts (alpha_t) from fit1, the female model
alpha.per <- colMeans(extract(fit1)$alpha_t[,1:8])
alpha.per.upper <- NULL
alpha.per.lower <- NULL
for(i in 1:8){
  alpha.per.upper[i] <- quantile(extract(fit1)$alpha_t[,i], 0.75)
  alpha.per.lower[i] <- quantile(extract(fit1)$alpha_t[,i], 0.25)
}
alphas <- data.frame(year = c(2005:2012), alpha.per, alpha.per.upper, alpha.per.lower)
ggplot(alphas, aes(year, alpha.per)) + ylim(c(0,35)) + public +
geom_errorbar(ymin = alpha.per.lower, ymax = alpha.per.upper, colour = jbpal$green, alpha = 1, width = 0.1) + scale_x_continuous(breaks = c(2005:2012)) + theme(panel.grid.major = element_blank()) + ggtitle("Time Intercepts") + labs(x = "Year", y = "Intercept") + geom_point(size = 3)
ggsave("alphas.pdf", width = 17, height = 8.5)
# par(mfrow = c(1, 1))
# plot(c(2005:2012), alpha.per,
# xlim = c(2005, 2012),
# ylim = c(0, 40),
# pch = 16,
# cex = 0.6,
# xlab = "Years",
# ylab = "Time Intercept")
# abline(0, 1)
# arrows(x0 = c(2005:2012),
# y0 = alpha.per,
# x1 = c(2005:2012),
# y1 = alpha.per.upper,
# length = 0,
# col = "darkgrey")
# arrows(x0 = c(2005:2012),
# y0 = alpha.per,
# x1 = c(2005:2012),
# y1 = alpha.per.lower,
# length = 0,
# col = "darkgrey")
# points(c(2005:2012), alpha.per,
# pch = 16,
# cex = 0.6)
#plotting alpha for male and female
# alpha.female <- exp(colMeans(extract(fit2)$alpha_t[,1:8]))
# alpha.male <- exp(colMeans(extract(fit3)$alpha_t[,1:8]))
# alpha.female.upper <- NULL
# alpha.female.lower <- NULL
# alpha.male.upper <- NULL
# alpha.male.lower <- NULL
# for(i in 1:8){
# alpha.female.upper[i] <- exp(quantile(extract(fit2)$alpha_t[,i], 0.25))
# alpha.female.lower[i] <- exp(quantile(extract(fit2)$alpha_t[,i], 0.75))
# alpha.male.upper[i] <- exp(quantile(extract(fit3)$alpha_t[,i], 0.25))
# alpha.male.lower[i] <- exp(quantile(extract(fit3)$alpha_t[,i], 0.75))
# }
# par(mfrow = c(1, 2))
# plot(c(2005:2012), alpha.male,
# xlim = c(2005, 2012),
# ylim = c(-20, 300),
# pch = 16,
# cex = 0.6,
# main = "Male Time Intercept",
# xlab = "Years",
# ylab = "Time Intercept")
# abline(0, 1)
# arrows(x0 = c(2005:2012),
# y0 = alpha.male,
# x1 = c(2005:2012),
# y1 = alpha.male.upper,
# length = 0,
# col = "darkgrey")
# arrows(x0 = c(2005:2012),
# y0 = alpha.male,
# x1 = c(2005:2012),
# y1 = alpha.male.lower,
# length = 0,
# col = "darkgrey")
# points(c(2005:2012), alpha.male,
# pch = 16,
# cex = 0.6)
#
# plot(c(2005:2012), alpha.female,
# xlim = c(2005, 2012),
# ylim = c(-20, 300),
# pch = 16,
# cex = 0.6,
# main = "Female Time Intercept",
# xlab = "Years",
# ylab = "Time Intercept")
# abline(0, 1)
# arrows(x0 = c(2005:2012),
# y0 = alpha.female,
# x1 = c(2005:2012),
# y1 = alpha.female.upper,
# length = 0,
# col = "darkgrey")
# arrows(x0 = c(2005:2012),
# y0 = alpha.female,
# x1 = c(2005:2012),
# y1 = alpha.female.lower,
# length = 0,
# col = "darkgrey")
# points(c(2005:2012), alpha.female,
# pch = 16,
# cex = 0.6)
# ---- end of source file: /Data Plots and Models.R (repo: julbautista/India-Project, no license) ----
source("https://raw.githubusercontent.com/julbautista/Startup/master/julian_startup.R")
setwd("C:/Users/Julian Bautista/Documents/Portfolio/India Project")
df <- read.csv("https://raw.githubusercontent.com/julbautista/India-Project/master/India_Data.csv", header = T, skip = 2)
#Drop Andaman and Nicobar & Dadra NH & Daman and Diu & Lakshadweep & Chandigarh and 2003
df <- df[df$Year != 2003,]
df <- df[df$State != "Andaman & Nicobar Islands"
& df$State != "Dadra & Nagar Haveli"
& df$State != "Daman & Diu"
& df$State != "Lakshadweep"
& df$State != "Chandigarh",]
df$State <- as.character(df$State)
df$State[df$State == "Odisha"] <- "Orissa"
df$State[df$State == "Chhatisgarh"] <- "Chhattisgarh"
#Read in population data
pop <- read.csv("https://raw.githubusercontent.com/julbautista/India-Project/master/population.csv", header = TRUE)
pop <- pop[pop$State != "Andaman and Nicobar Islands"
& pop$State != "Dadra and Nagar Haveli"
& pop$State != "Daman and Diu"
& pop$State != "Lakshadweep"
& pop$State != "Chandigarh",]
#Merge population to dataframe
ind <- merge(df, pop)
#Function for projecting population up or down
pop.func <- function(year, pop2011){
pop <- NULL
pop <- ifelse(year > 2011,
pop2011*(1.0184^(year - 2011)),
ifelse(year < 2011,
pop2011*((1/1.0184)^(2011 - year)),
pop2011))
return(pop)
}
#Add column of projected population numbers
ind$population <- pop.func(ind$Year, ind$X2011pop)
#Remove old population numbers
ind <- ind[,-8]
rm(df,pop)
#Put all currency in Ruppee
ind$exp.rup <- NULL
for(i in 1:length(ind[,1])){
ind$exp.rup[i] <- ifelse(ind$Units[i] == "Lakh",
ind$Health.Exp[i]*100000,
ind$Health.Exp[i]*1000000)
}
#Get spending per capita
ind$percap <- ind$exp.rup/ind$population
#Create ratio of female to male IMR
ind$fm.imr <- ind$Female/ind$Male
#Regress IMR diff on health exp
#plot(lm(fm.imr ~ percap, data = ind))
#Sort by year
ind <- ind[order(ind$Year),]
#Create state id
ind <- within(ind, state.id <- match(State, unique(State)))
#Create Year id
ind <- within(ind, year.id <- match(Year, unique(Year)))
#plot pooled values across time, naive look
pool <- ind %>% group_by(Year) %>% summarise(poolIMR = mean(Person), poolHexp = mean(percap))
ggplot(pool, aes(poolHexp, poolIMR, label = Year)) + public +
geom_smooth(alpha = 0.12, method = 'lm') +
geom_text_repel(segment.color = jbpal$green, point.padding = unit(1.5, "lines"), size = 5) +
geom_point(size = 2.5) + ggtitle("Pooled IMR and Health Exp") +
xlab("Health Expenditure") + ylab("Infant Mortality")
ggsave("naive.pdf", width = 17, height = 8.5)
rm(pool)
#Plot per cap health exp by state over time
#non uniform y limit
# healthexp <- list()
# for(i in 1:30){
# plotter <- ggplot(subset(ind,ind$state.id == i), aes(Year, percap)) + public + geom_line() +
# labs(x = 'Year', y = 'Health Exp', title = unique(ind$State[ind$state.id == i])) +
# scale_x_continuous(breaks = c(2005, 2012)) +
# scale_y_continuous(breaks = c(0, round(max(ind[ind$state.id == i,]$percap)))) +
# theme(axis.text.x = element_text(size = 5),
# axis.text.y = element_text(size = 9),
# plot.title = element_text(size = 13),
# axis.title.x = element_text(size = 7),
# axis.title.y = element_text(size = 7),
# panel.grid.major = element_blank())
# healthexp[[i]] <- plotter
# }
#
# ggsave("HexpTime.pdf", plot = grid.arrange(grobs = healthexp, nrow = 6, ncol = 5), width = 17, height = 8.5)
#uniform y limits healthexp
healthexp2 <- list()
for(i in 1:30){
healthexp2[[i]] <-
ggplot(subset(ind,ind$state.id == i), aes(Year, percap) ) + public + geom_line() +
geom_text(x = 2005 + 0.25, y = round(min(ind[ind$state.id == i & ind$Year == 2005,]$percap)) + 350, label = round(min(ind[ind$Year == 2005 & ind$state.id == i ,]$percap)), size = 2) +
geom_text(x = 2012 - 0.25, y = round(max(ind[ind$state.id == i & ind$Year == 2012,]$percap)) + 350, label = round(max(ind[ind$Year == 2012 & ind$state.id == i ,]$percap)), size = 2) +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, 3000 ), limits = c(0,3100) ) +
labs(x = 'Year', y = 'Health Exp', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 7),
axis.text.y = element_text(size = 7),
plot.title = element_text(size = 13),
axis.title.x = element_text(size = 9),
axis.title.y = element_text(size = 9),
panel.grid.major = element_blank())
}
ggsave("HexpTime.pdf", plot = grid.arrange(grobs = healthexp2, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Plot female and male IMR by state over time
genderIMR <- list()
for(i in 1:30){
#j and gap help create labels for the largest IMR gender gaps per state
j <- which.max(abs(ind[ind$state.id == i,]$Female - ind[ind$state.id == i,]$Male))
gap <- ind[ind$state.id == i,][j,]
genderIMR[[i]] <- ggplot(subset(ind,ind$state.id == i), aes(Year, Female)) + public +
geom_line(colour = jbpal$red) +
geom_line(aes(Year, Male), colour = jbpal$green) +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, round(max(ind[ind$state.id == i,]$Female))), limits = c(0, 85)) +
geom_text(x = gap$Year, y = gap$Female, label = gap$Female, size = 2, colour =jbpal$red) +
geom_text(x = gap$Year, y = gap$Male, label = gap$Male, size = 2, colour = jbpal$green) +
labs(x = 'Year', y = 'IMR', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("genderIMR.pdf", plot = grid.arrange(grobs = genderIMR, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Plot IMR by state over time
IMR <- list()
for(i in 1:30){
IMR[[i]] <-
ggplot(subset(ind,ind$state.id == i), aes(Year, Person)) + public + geom_line() +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0,80), limits = c(0,80) ) +
geom_text(x = 2012, y = round(max(ind[ind$state.id == i,]$Person)), label = round(min(ind[ind$state.id == i,]$Person)), size = 2) +
geom_text(x = 2005, y = round(min(ind[ind$state.id == i,]$Person)), label = round(max(ind[ind$state.id == i,]$Person)), size = 2) +
labs(x = 'Year', y = 'IMR', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("IMR.pdf", plot = grid.arrange(grobs = IMR, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Plot IMR RATIO by state over time
ratioIMR <- list()
for(i in 1:30){
ratioIMR[[i]] <-
ggplot(subset(ind,ind$state.id == i), aes(Year, fm.imr)) + public + geom_line() +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, round(min(ind[ind$state.id == i,]$fm.imr)), round(max(ind[ind$state.id == i,]$Person)), 2.7 ), limits = c(0,2.7) ) +
geom_hline(yintercept = 1, linetype = 2, alpha = 0.2) +
labs(x = 'Year', y = 'IMR Ratio', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("IMRratio.pdf", plot = grid.arrange(grobs = ratioIMR, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Plot IMR in 2012 and 2005 by State
ind2 <- ind
ind2$State <- factor(ind2$State)
ind2$State <- factor(ind2$State, levels = ind2$State[order(ind2$Person[ind2$Year == 2012])])
IMR0512 <- ggplot(ind2[ind2$Year == 2012| ind2$Year == 2005,], aes(Person, State, colour = factor(Year), fill = factor(Year))) +
ggtitle("Infant Mortality Rate per State, 2012 and 2005") +
public +
geom_dotplot(binaxis = "y", dotsize = 0.35) +
labs(x = 'IMR', y = "") +
jbcol + jbfill +
theme(legend.title = element_blank(), panel.grid.major.x = element_blank(),
axis.line = element_blank(),
axis.text = element_text(colour = "#2f4f4f", family = "Open Sans", size = 14),
legend.text = element_text(colour = "#2f4f4f", family = "Open Sans", size = 20))
ggsave("IMR0512.pdf", plot = IMR0512, width = 17, height = 8.5)
rm(ind2)
#Stan model normal for females
year_id <- ind$year.id
state_id <- ind$state.id
imr <- (ind$Female)
percap <- ind$percap
N <- length(imr)
fit1 <- stan("normal.stan",
data = list("year_id", "state_id", "N", "imr","percap"),
iter = 2000, chains = 4)
print(fit1, pars = c("mu_alpha_s", "tau_alpha_s",
"mu_alpha_t", "tau_alpha_t",
"beta", "alpha_t"))
#extract quantiles
ind$y.upper <- NULL
ind$y.lower <- NULL
for(i in 1:240){
ind$y.lower[i] <- quantile(extract(fit1)$y_pred[,i], 0.25)
ind$y.upper[i] <- quantile(extract(fit1)$y_pred[,i], 0.75)
}
#plot predicted values by state for females
#blue is predicted green is actual
ind$pred <- (colMeans(extract(fit1)$y_pred))
pred <- list()
for(i in 1:30){
pred[[i]] <- ggplot(subset(ind,ind$state.id == i),
aes(Year, pred)) +
public + #geom_line() +
geom_line(aes(Year, Female), colour = jbpal$green) +
geom_line(aes(Year,y.lower), linetype = 'dashed', alpha = 0.3) +
geom_line(aes(Year,y.upper), linetype = 'dashed', alpha = 0.3) +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, 80), limits = c(0,80 ) ) +
geom_text(x = 2012, y = round(max(ind[ind$state.id == i,]$Female)), label = round(min(ind[ind$state.id == i,]$Female)), size = 2) +
geom_text(x = 2005, y = round(min(ind[ind$state.id == i,]$Female)), label = round(max(ind[ind$state.id == i,]$Female)), size = 2) +
labs(x = 'Year', y = 'IMR', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("predfem.pdf", plot = grid.arrange(grobs = pred, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Posterior Predictive Check
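#(points should lie near the 45-degree line if the model reproduces the data;
# the horizontal bars are 50% posterior intervals from the 0.25 and 0.75 quantiles)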
ggplot(ind, aes(pred, Female)) + public + geom_abline(slope = 1, intercept = 0) + geom_errorbarh(data = ind, aes(y = Female, x = pred, xmin = y.lower, xmax = y.upper)) + geom_point() + ggtitle("Posterior Predictive Check on Females") + xlim(c(0,80)) + ylim(c(0,80))
ggsave("postpredfem.pdf", width = 17, height = 8.5)
#Male pred model
year_id <- ind$year.id
state_id <- ind$state.id
imr <- (ind$Male)
percap <- ind$percap
N <- length(imr)
fitm <- stan("normal.stan",
data = list("year_id", "state_id", "N", "imr","percap"),
iter = 2000, chains = 4)
print(fitm, pars = c("mu_alpha_s", "tau_alpha_s",
"mu_alpha_t", "tau_alpha_t",
"beta", "alpha_t"))
#extract quantiles
ind$y.upperm <- NULL
ind$y.lowerm <- NULL
for(i in 1:240){
ind$y.lowerm[i] <- quantile(extract(fitm)$y_pred[,i], 0.25)
ind$y.upperm[i] <- quantile(extract(fitm)$y_pred[,i], 0.75)
}
#plot predicted values by state for males
#blue is predicted green is actual
ind$predm <- (colMeans(extract(fitm)$y_pred))
predm <- list()
for(i in 1:30){
predm[[i]] <- ggplot(subset(ind,ind$state.id == i),
aes(Year, predm)) +
public + #geom_line() +
geom_line(aes(Year, Male), colour = jbpal$green) +
geom_line(aes(Year,y.lowerm), linetype = 'dashed', alpha = 0.3) +
geom_line(aes(Year,y.upperm), linetype = 'dashed', alpha = 0.3) +
scale_x_continuous(breaks = c(2005, 2012)) +
scale_y_continuous(breaks = c(0, 80), limits = c(0,80 ) ) +
geom_text(x = 2012, y = round(max(ind[ind$state.id == i,]$Male)), label = round(min(ind[ind$state.id == i,]$Male)), size = 2) +
geom_text(x = 2005, y = round(min(ind[ind$state.id == i,]$Male)), label = round(max(ind[ind$state.id == i,]$Male)), size = 2) +
labs(x = 'Year', y = 'IMR', title = unique(ind$State[ind$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("predmal.pdf", plot = grid.arrange(grobs = predm, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Posterior Predictive Check
ggplot(ind, aes(predm, Male)) + public + geom_abline(slope = 1, intercept = 0) + geom_errorbarh(data = ind, aes(y = Male, x = predm, xmin = y.lowerm, xmax = y.upperm)) + geom_point() + ggtitle("Posterior Predictive Check on Males") + xlim(c(0,80)) + ylim(c(0,80))
ggsave("postpredmal.pdf", width = 17, height = 8.5)
#create lagged per cap variable
ind <- ind %>% group_by(state.id) %>% mutate(lag.exp = c(NA, percap[-length(percap)]))
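#lag.exp is the previous year's per-capita expenditure within each state
#(the first year has no lagged value, which is why 2005 is dropped below)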
#Get rid of 2005
ind1 <- ind[ind$Year != 2005,]
#Run the same model on per cap exp lagged
year_id <- ind1$year.id
state_id <- ind1$state.id
imr <- log(ind1$Female)
percap <- log(ind1$lag.exp)
N <- length(imr)
fit2 <- stan("normal.stan",
data = list("year_id", "state_id", "N", "imr","percap"),
iter = 2000, chains = 4)
print(fit2, pars = c("mu_alpha_s", "tau_alpha_s",
"mu_alpha_t", "tau_alpha_t",
"beta", "alpha_t"))
ind1$y.upper2 <- NULL
ind1$y.lower2 <- NULL
for(i in 1:N){
ind1$y.lower2[i] <- exp(quantile(extract(fit2)$y_pred[,i], 0.25))
ind1$y.upper2[i] <- exp(quantile(extract(fit2)$y_pred[,i], 0.75))
}
#plot predicted values by state
#blue is predicted green is actual
ind1$pred2 <- exp(colMeans(extract(fit2)$y_pred))
pred2 <- list()
for(i in 1:30){
pred2[[i]] <- ggplot(subset(ind1,ind1$state.id == i),
aes(Year, pred2)) +
public + #geom_line() +
geom_line(aes(Year, Female), colour = jbpal$green) +
geom_line(aes(Year,y.lower2), linetype = 'dashed', alpha = 0.3) +
geom_line(aes(Year,y.upper2), linetype = 'dashed', alpha = 0.3) +
scale_x_continuous(breaks = c(2006, 2012)) +
scale_y_continuous(breaks = c(0, 90), limits = c(0,90) ) +
labs(x = 'Year', y = 'IMR', title = unique(ind1$State[ind1$state.id == i])) +
geom_text(x = 2012, y = round(max(ind1[ind1$state.id == i,]$Female)), label = round(min(ind1[ind1$state.id == i,]$Female)), size = 2) +
geom_text(x = 2006, y = round(min(ind1[ind1$state.id == i,]$Female)), label = round(max(ind1[ind1$state.id == i,]$Female)), size = 2) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("lagpredfem.pdf", plot = grid.arrange(grobs = pred2, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Posterior Predictive Check
ggplot(ind1, aes(pred2, Female)) + public + geom_abline(slope = 1, intercept = 0) + geom_errorbarh(data = ind1, aes(y = Female, x = pred2, xmin = y.lower2, xmax = y.upper2)) + geom_point() + ggtitle("Posterior Predictive Check on Females") + xlim(c(0,90)) + ylim(c(0,90)) + xlab("Pred")
ggsave("lagpostpredfem.pdf", width = 17, height = 8.5)
#Run the same model on per cap exp lagged for boys
year_id <- ind1$year.id
state_id <- ind1$state.id
imr <- log(ind1$Male)
percap <- log(ind1$lag.exp)
N <- length(imr)
fit3 <- stan("normal.stan",
data = list("year_id", "state_id", "N", "imr","percap"),
iter = 2000, chains = 4)
print(fit3, pars = c("mu_alpha_s", "tau_alpha_s",
"mu_alpha_t", "tau_alpha_t",
"beta", "alpha_t"))
ind1$y.upper3 <- NULL
ind1$y.lower3 <- NULL
for(i in 1:N){
ind1$y.lower3[i] <- exp(quantile(extract(fit3)$y_pred[,i], 0.25))
ind1$y.upper3[i] <- exp(quantile(extract(fit3)$y_pred[,i], 0.75))
}
#plot predicted values by state
#blue is predicted green is actual
ind1$pred3 <- exp(colMeans(extract(fit3)$y_pred))
pred3 <- list()
for(i in 1:30){
pred3[[i]] <- ggplot(subset(ind1,ind1$state.id == i),
aes(Year, pred3)) +
public + #geom_line() +
geom_line(aes(Year, Male), colour = jbpal$green) +
geom_line(aes(Year,y.lower3), linetype = 'dashed', alpha = 0.3) +
geom_line(aes(Year,y.upper3), linetype = 'dashed', alpha = 0.3) +
scale_x_continuous(breaks = c(2006, 2012)) +
scale_y_continuous(breaks = c(0, 85 ), limits = c(0,85 ) ) +
geom_text(x = 2012, y = round(max(ind1[ind1$state.id == i,]$Male)), label = round(min(ind1[ind1$state.id == i,]$Male)), size = 2) +
geom_text(x = 2006, y = round(min(ind1[ind1$state.id == i,]$Male)), label = round(max(ind1[ind1$state.id == i,]$Male)), size = 2) +
labs(x = 'Year', y = 'IMR', title = unique(ind1$State[ind1$state.id == i])) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_text(size = 5),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_text(size = 7),
panel.grid.major = element_blank())
}
ggsave("lagpredmal.pdf", plot = grid.arrange(grobs = pred3, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Posterior Predictive Check
ggplot(ind1, aes(pred3, Male)) + public + geom_abline(slope = 1, intercept = 0) + geom_errorbarh(data = ind1, aes(y = Male, x = pred3, xmin = y.lower3, xmax = y.upper3)) + geom_point() + ggtitle("Posterior Predictive Check on Males") + xlim(c(0,85)) + ylim(c(0,85)) + xlab("Pred")
ggsave("lagpostpredmal.pdf", width = 17, height = 8.5)
#Look at distribution of effects by state in lagged log model
#create beta data_frame
beta_male <- data.frame(extract(fit3)$beta)
beta_female <- data.frame(extract(fit2)$beta)
dense <- list()
for(i in 1:30){
dense[[i]] <- ggplot(beta_male,
aes_(x = beta_male[,i] )) +
public + geom_density() + xlim(-0.5,0.5) + ylim(0,9.5) +
geom_density(data = beta_female, aes_(x = beta_female[,i]), colour = jbpal$green) +
labs(x = 'IMR', title = unique(ind1$State[ind1$state.id == i])) +
geom_vline(xintercept = 0, alpha = 0.4, linetype = 2) +
theme(axis.text.x = element_text(size = 5),
axis.text.y = element_blank(),
plot.title = element_text(size = 9),
axis.title.x = element_text(size = 7),
axis.title.y = element_blank(),
axis.line.y = element_blank(),
panel.grid.major = element_blank())
}
ggsave("betas.pdf", plot = grid.arrange(grobs = dense, nrow = 6, ncol = 5), width = 17, height = 8.5)
#Examine the time intercepts (alpha_t)
#plotting alpha_t from fit1 (the female model)
alpha.per <- colMeans(extract(fit1)$alpha_t[,1:8])
alpha.per.upper <- NULL
alpha.per.lower <- NULL
for(i in 1:8){
  alpha.per.upper[i] <- quantile(extract(fit1)$alpha_t[,i], 0.75)
  alpha.per.lower[i] <- quantile(extract(fit1)$alpha_t[,i], 0.25)
}
alphas <- data.frame(year = c(2005:2012), alpha.per, alpha.per.upper, alpha.per.lower)
ggplot(alphas, aes(year, alpha.per)) + ylim(c(0,35)) + public +
geom_errorbar(ymin = alpha.per.lower, ymax = alpha.per.upper, colour = jbpal$green, alpha = 1, width = 0.1) + scale_x_continuous(breaks = c(2005:2012)) + theme(panel.grid.major = element_blank()) + ggtitle("Time Intercepts") + labs(x = "Year", y = "Intercept") + geom_point(size = 3)
ggsave("alphas.pdf", width = 17, height = 8.5)
# par(mfrow = c(1, 1))
# plot(c(2005:2012), alpha.per,
# xlim = c(2005, 2012),
# ylim = c(0, 40),
# pch = 16,
# cex = 0.6,
# xlab = "Years",
# ylab = "Time Intercept")
# abline(0, 1)
# arrows(x0 = c(2005:2012),
# y0 = alpha.per,
# x1 = c(2005:2012),
# y1 = alpha.per.upper,
# length = 0,
# col = "darkgrey")
# arrows(x0 = c(2005:2012),
# y0 = alpha.per,
# x1 = c(2005:2012),
# y1 = alpha.per.lower,
# length = 0,
# col = "darkgrey")
# points(c(2005:2012), alpha.per,
# pch = 16,
# cex = 0.6)
#plotting alpha for male and female
# alpha.female <- exp(colMeans(extract(fit2)$alpha_t[,1:8]))
# alpha.male <- exp(colMeans(extract(fit3)$alpha_t[,1:8]))
# alpha.female.upper <- NULL
# alpha.female.lower <- NULL
# alpha.male.upper <- NULL
# alpha.male.lower <- NULL
# for(i in 1:8){
# alpha.female.upper[i] <- exp(quantile(extract(fit2)$alpha_t[,i], 0.25))
# alpha.female.lower[i] <- exp(quantile(extract(fit2)$alpha_t[,i], 0.75))
# alpha.male.upper[i] <- exp(quantile(extract(fit3)$alpha_t[,i], 0.25))
# alpha.male.lower[i] <- exp(quantile(extract(fit3)$alpha_t[,i], 0.75))
# }
# par(mfrow = c(1, 2))
# plot(c(2005:2012), alpha.male,
# xlim = c(2005, 2012),
# ylim = c(-20, 300),
# pch = 16,
# cex = 0.6,
# main = "Male Time Intercept",
# xlab = "Years",
# ylab = "Time Intercept")
# abline(0, 1)
# arrows(x0 = c(2005:2012),
# y0 = alpha.male,
# x1 = c(2005:2012),
# y1 = alpha.male.upper,
# length = 0,
# col = "darkgrey")
# arrows(x0 = c(2005:2012),
# y0 = alpha.male,
# x1 = c(2005:2012),
# y1 = alpha.male.lower,
# length = 0,
# col = "darkgrey")
# points(c(2005:2012), alpha.male,
# pch = 16,
# cex = 0.6)
#
# plot(c(2005:2012), alpha.female,
# xlim = c(2005, 2012),
# ylim = c(-20, 300),
# pch = 16,
# cex = 0.6,
# main = "Female Time Intercept",
# xlab = "Years",
# ylab = "Time Intercept")
# abline(0, 1)
# arrows(x0 = c(2005:2012),
# y0 = alpha.female,
# x1 = c(2005:2012),
# y1 = alpha.female.upper,
# length = 0,
# col = "darkgrey")
# arrows(x0 = c(2005:2012),
# y0 = alpha.female,
# x1 = c(2005:2012),
# y1 = alpha.female.lower,
# length = 0,
# col = "darkgrey")
# points(c(2005:2012), alpha.female,
# pch = 16,
# cex = 0.6)
|
data <- read.csv2('household_power_consumption.txt',stringsAsFactors=FALSE)
dd=data[data$Date=='1/2/2007'|data$Date=='2/2/2007',]
t<-strptime(paste(dd$Date,dd$Time),'%d/%m/%Y %H:%M:%S')
png("plot4.png",
    width = 480,
    height = 480,
    units = "px")
par(mfrow=c(2,2))
plot(as.POSIXct(t),dd$Global_active_power,type='l',xlab = '',ylab = 'Global Active Power (kilowatts)')
plot(as.POSIXct(t),dd$Voltage,type='l', xlab = 'datetime',ylab = 'Voltage')
plot(as.POSIXct(t),dd$Sub_metering_1,type='l',xlab = '',ylab = 'Energy sub metering')
lines(as.POSIXct(t),dd$Sub_metering_2,col="red")
lines(as.POSIXct(t),dd$Sub_metering_3,col="blue")
legend('topright',legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),col=c("black","red","blue"),bty ='n', lty=c(1,1,1))
plot(as.POSIXct(t),dd$Global_reactive_power, type ='l', xlab = 'datetime',ylab = 'Global Reactive Power')
dev.off()
|
/plot4.R
|
no_license
|
wenfeiwang/ExData_Plotting1
|
R
| false | false | 936 |
r
|
temp=rep(0,10000)
n=5000
for (i in 1:10000)temp[i]=mean(rexp(n,rate=2))
hist(temp,main=paste("Simulation of 10000 Means with Rate=2, n=",n),nclass=40,freq=FALSE)
x=seq(min(temp),max(temp),length=10001)
lines(x,dnorm(x,mean=0.5,sd=0.5/sqrt(n)),col=2)
for (i in 1:10000)temp[i]=1/mean(rexp(n,rate=2))
hist(temp,main=paste("Simulation of 10000 Estimates with Rate=2, n=",n),nclass=40,freq=FALSE)
x=seq(min(temp),max(temp),length=10001)
lines(x,dnorm(x,mean=2,sd=2/sqrt(n)),col=2)
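#Added note: by the delta method, Xbar ~ approx N(0.5, (0.5/sqrt(n))^2) for Exp(rate=2),
#and 1/Xbar has approximate sd |d(1/x)/dx at x=0.5| * 0.5/sqrt(n) = 4*0.5/sqrt(n) = 2/sqrt(n),
#which is why the overlaid normal curves use sd 0.5/sqrt(n) and 2/sqrt(n) respectively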
|
/STAT_630/R Scripts/exponentialestimates.R
|
no_license
|
mauliasavana/Statistics-Masters
|
R
| false | false | 482 |
r
|
x<-c(1,2,3,4,5)
mean(x) #calculates the mean
var(x) #calculates the sample variance
var(x)*4/5 #population variance: var(myVector) * (n - 1) / n, where n is the length of the vector (here n = 5)
sd(x) #standard deviation
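sqrt(var(x)*4/5) #population standard deviation (added for completeness)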
|
/Deviations.R
|
no_license
|
Anubhav-chauhan/DAR-at-NRTI
|
R
| false | false | 227 |
r
|
## Script to assess niche overlap directly in environmental space
## June 2018
## packages
library(raster)
library(rgdal)   # for readOGR (assumed; not loaded in the original script)
library(dismo)   # for randomPoints (assumed; not loaded in the original script)
library(ade4)    # for dudi.pca (assumed; not loaded in the original script)
library(ecospat)
## defining variables and parameters - Notebook
projectFolder = "/home/anderson/Projetos/Invasao_Omobranchus_punctatus" #project folder
envVarFolder = "/home/anderson/Projetos/Invasao_Omobranchus_punctatus/variaveis_ambientais" #folder with the environmental variables
predictors = stack(list.files(path=envVarFolder, full.names=TRUE, pattern='.asc')) #raster stack with all variables
predictors = predictors[[grep(pattern=paste(c('Chlorophyll','Phytoplankton','Silicate','*.Mean$','*.Max$','*.Min$'),collapse='|'), names(predictors), value=FALSE,invert=TRUE)]]
spData = read.csv(file.path(projectFolder,'spOcc.csv'),header=TRUE) #occurrence data for the native range
names(spData) = c('lon','lat')
wrld = readOGR('/home/anderson/shapefiles/ne_50m_ocean/ne_50m_ocean.shp') #world map (ocean shapefile)
## splitting the native and invaded ranges
#plot(wrld) #draw the map
#points(spData,col='red') #plot the species occurrence points
#drawExtent() #interactively delimit an area on the map
natAreaExtent = extent(19.53604, 184.1086, -58.39874, 53.9756) #extent of the native range
invAreaExtent = extent(-92.2889 , 18.83274, -88.59934, 24.47734) #extent of the invaded range
## checking the invaded and native ranges
areaNat = crop(wrld,natAreaExtent) #clipping the native range
areaInv = crop(wrld,invAreaExtent) #clipping the invaded range
plot(wrld) #plot the world map
plot(areaNat,add=TRUE,col='blue') #overlay to check
plot(areaInv,add=TRUE,col='red') #overlay to check
## subsetting the occurrence points to the native range
spNat = spData[which(spData$lon > natAreaExtent[1] &
spData$lon < natAreaExtent[2] &
spData$lat > natAreaExtent[3] &
spData$lat < natAreaExtent[4]),]
## subsetting the occurrence points to the invaded range
spInv = spData[which(spData$lon > invAreaExtent[1] &
spData$lon < invAreaExtent[2] &
spData$lat > invAreaExtent[3] &
spData$lat < invAreaExtent[4]),]
## loading the environmental variables
predAreaNat = stack(list.files(path=paste(projectFolder,'/variaveis_ambientais',sep=''),pattern='predAreaNat',full.names=TRUE))
predAreaInv = stack(list.files(path=paste(projectFolder,'/variaveis_ambientais',sep=''),pattern='predAreaInv',full.names=TRUE))
#predAreaInv = predAreaInv[[gsub(pattern='predictors_',replacement='',x=names(predAreaNat))]]
## creating background points
bgAreaNat = randomPoints(mask=predAreaNat[[1]], n=1000, p=spNat, prob=FALSE)
bgAreaNat = data.frame(bgAreaNat)
names(bgAreaNat) = names(spNat)
bgAreaInv = dismo::randomPoints(mask=predAreaInv[[1]], n=1000, p=spInv, prob=FALSE)
bgAreaInv = data.frame(bgAreaInv)
names(bgAreaInv) = names(spInv)
## building presence/background data
spNatData = data.frame(rbind(spNat,bgAreaNat),occ=c(rep(1,nrow(spNat)),rep(0,nrow(bgAreaNat))))
spInvData = data.frame(rbind(spInv,bgAreaInv),occ=c(rep(1,nrow(spInv)),rep(0,nrow(bgAreaInv))))
## extracting the environmental variable values
spNatDataEnv = extract(predAreaNat, spNatData[,c('lon','lat')],method='bilinear',na.romove=TRUE)
spInvDataEnv = extract(predAreaInv, spInvData[,c('lon','lat')],method='bilinear',na.romove=TRUE)
## combining occurrence data and environmental data
spNatData = data.frame(spNatData,spNatDataEnv)
spInvData = data.frame(spInvData,spInvDataEnv)
## keeping complete cases only
spNatData = spNatData[complete.cases(spNatData),]
spInvData = spInvData[complete.cases(spInvData),]
## making the column names identical (required by the method)
names(spNatData) = gsub(pattern='predAreaNat_', replacement='', x=names(spNatData))
names(spInvData) = gsub(pattern='predAreaInv_', replacement='', x=names(spInvData))
## The PCA is calibrated on all the sites of the study area
pca.env <- dudi.pca(rbind(spNatData,spInvData)[,grep(pattern='Present.Benthic.Mean.Depth.', x=names(spNatData), value=TRUE)],scannf=F,nf=2)
## ecospat.plot.contrib(contrib=pca.env$co, eigen=pca.env$eig) #plot of variable contributions
## PCA scores for the whole study area
scores.globclim <- pca.env$li
## PCA scores for the species native distribution
scores.sp.nat <- suprow(pca.env,spNatData[which(spNatData[,'occ']==1),grep(pattern='Present.Benthic.Mean.Depth.', x=names(spNatData), value=TRUE)])$li
## PCA scores for the species invasive distribution
scores.sp.inv <- suprow(pca.env,spInvData[which(spInvData[,'occ']==1),grep(pattern='Present.Benthic.Mean.Depth.', x=names(spInvData), value=TRUE)])$li
## PCA scores for the whole native study area
scores.clim.nat <-suprow(pca.env,spNatData[,grep(pattern='Present.Benthic.Mean.Depth.', x=names(spNatData), value=TRUE)])$li
## PCA scores for the whole invaded study area
scores.clim.inv <- suprow(pca.env,spInvData[,grep(pattern='Present.Benthic.Mean.Depth.', x=names(spInvData), value=TRUE)])$li
## gridding the native niche
grid.clim.nat <-ecospat.grid.clim.dyn(glob=scores.globclim, glob1=scores.clim.nat, sp=scores.sp.nat, R=100, th.sp=0)
## gridding the invasive niche
grid.clim.inv <- ecospat.grid.clim.dyn(glob=scores.globclim, glob1=scores.clim.inv, sp=scores.sp.inv, R=100, th.sp=0)
## niche equivalency test
equi.test <- ecospat.niche.equivalency.test(grid.clim.nat, grid.clim.inv, rep=100, alternative="greater")
## niche similarity test
simi.test <- ecospat.niche.similarity.test(grid.clim.nat, grid.clim.inv, rep=100, alternative="greater")
## results table
nicheOverlapTab = data.frame(teste=c('similaridade','equivalencia'),
Schoener=c(simi.test$obs$D,equi.test$obs$D),
Schoener_p_value=c(simi.test$p.D,equi.test$p.D),
Hellinger=c(simi.test$obs$I,equi.test$obs$I),
Hellinger_p_value=c(simi.test$p.I,equi.test$p.I))
write.csv(nicheOverlapTab, paste(projectFolder,'/nicheOverlapTab.csv',sep=''), row.names=FALSE)
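## Added note: Schoener's D and the Hellinger-based I both range from 0 (no overlap)
## to 1 (identical niches); the p-values compare the observed overlap with the null
## distributions generated by the equivalency and similarity tests above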
|
/nicheOverlapAnalysis.R
|
no_license
|
AndersonEduardo/R-Scripts
|
R
| false | false | 5,912 |
r
|
#!/usr/bin/Rscript
library(ggplot2)
library(grid)
source("/srv/nfs4/medoid-home/NKI/a.schlicker/cagepR/utils.r")
source("/srv/nfs4/medoid-home/NKI/a.schlicker/cagepR/compare.r")
source("/srv/nfs4/medoid-home/NKI/a.schlicker/cagepR/plotting.r")
OPTIONS = getOptionList(c("config", "prefix", "result1", "result2"))
# Is there a configuration file?
args = commandArgs(TRUE)
if (length(args) == 0) {
args = c("--help")
}
config = parseOptions(OPTIONS, args)
if (!is.null(config$config)) {
config = parseConfigFile(config$config)
} else {
config = c()
}
# Parse options from configuration file and command line arguments
configOptions = parseOptions(OPTIONS, c(config, commandArgs(TRUE)))
# Load prioritization results 1
load(configOptions$result1)
results1 = results
# Load prioritization results 2
load(configOptions$result2)
results2 = results
for (n in names(results1)) {
commonGenes = intersect(rownames(results1[[n]]$prioritize.combined), rownames(results2[[n]]$prioritize.combined))
results1[[n]]$prioritize.combined = results1[[n]]$prioritize.combined[commonGenes, ]
results2[[n]]$prioritize.combined = results2[[n]]$prioritize.combined[commonGenes, ]
}
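# The loop above restricts both result sets to the genes present in both runs,
# so the element-wise score differences computed next are well defined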
results.diff = diffMat(results1, results2)
result.df = heatmapDataframe(results.diff)
parameters = list()
## TS heatmaps
gene.order = getOrder(result.df, "TS", "gene")
diffGenes = names(alteredGenes(result.df, "TS"))
diffGenes = c(diffGenes[1:50], diffGenes[(length(diffGenes)-50+1):length(diffGenes)])
cancer.order = getOrder(result.df, "TS", "cancer")
parameters[["ts3"]] = list(data=result.df, prefix=configOptions$prefix, filename="ts_heatmap.png", gene.order=gene.order,
topgenes=diffGenes, cancer.order=cancer.order,
yaxis=theme(axis.text.y=element_text(color="grey50", face="bold")), score.type="TS",
color.low="#034b87", color.mid="gray98", color.high="#880000",
ylab="", title="Difference in tumor suppressor score")
parameters[["ts4"]] = list(data=result.df, prefix=configOptions$prefix, filename="ts_affected_heatmap.png", gene.order=gene.order,
topgenes=diffGenes, cancer.order=cancer.order,
yaxis=theme(axis.text.y=element_text(color="grey50", face="bold")), score.type="TS.affected",
color.low="#034b87", color.mid="gray98", color.high="#880000",
ylab="", title="Difference in % affected samples TS")
## OG heatmaps
gene.order = getOrder(result.df, "OG", "gene")
diffGenes = names(alteredGenes(result.df, "OG"))
diffGenes = c(diffGenes[1:50], diffGenes[(length(diffGenes)-50+1):length(diffGenes)])
cancer.order=getOrder(result.df, "OG", "cancer")
parameters[["og3"]] = list(data=result.df, prefix=configOptions$prefix, filename="og_heatmap_topgenes.png", gene.order=gene.order,
topgenes=diffGenes, cancer.order=cancer.order,
yaxis=theme(axis.text.y=element_text(color="grey50", face="bold")), score.type="OG",
color.low="#034b87", color.mid="gray98", color.high="#880000",
ylab="", title="Difference in oncogene score")
parameters[["og4"]] = list(data=result.df, prefix=configOptions$prefix, filename="og_affected_heatmap.png", gene.order=gene.order,
topgenes=diffGenes, cancer.order=cancer.order,
yaxis=theme(axis.text.y=element_text(color="grey50", face="bold")), score.type="OG.affected",
color.low="#034b87", color.mid="gray98", color.high="#880000",
ylab="", title="Difference in % affected samples OG")
## Combined score heatmaps
gene.order = getOrder(result.df, "Combined", "gene")
diffGenes = names(alteredGenes(result.df, "Combined"))
diffGenes = c(diffGenes[1:50], diffGenes[(length(diffGenes)-50+1):length(diffGenes)])
cancer.order=getOrder(result.df, "Combined", "cancer")
parameters[["cs3"]] = list(data=result.df, prefix=configOptions$prefix, filename="combined_heatmap.png", gene.order=gene.order,
topgenes=diffGenes, cancer.order=cancer.order,
yaxis=theme(axis.text.y=element_text(color="grey50", face="bold")), score.type="Combined",
color.low="#034b87", color.mid="gray98", color.high="#880000",
ylab="", title="Difference in combined score")
parameters[["cs4"]] = list(data=result.df, prefix=configOptions$prefix, filename="combined_affected_heatmap.png", gene.order=gene.order,
topgenes=diffGenes, cancer.order=cancer.order,
yaxis=theme(axis.text.y=element_text(color="grey50", face="bold")), score.type="Combined.affected",
color.low="#034b87", color.mid="gray98", color.high="#880000",
ylab="", title="Difference in % affected samples Combined score")
# And plot the heatmaps
invisible(lapply(parameters, plotHeatmap))
## Score distributions
result.df[, "gene"] = factor(result.df[, "gene"], levels=gene.order)
result.df[, "cancer"] = factor(result.df[, "cancer"], levels=cancer.order)
png(paste(configOptions$prefix, "score_histogram_by_cancer.png", sep="_"), width=4000, height=3000, res=300)
print(getDistPlot(subset(result.df, score.type %in% c("OG", "TS", "Combined")), facets="~ cancer",
plot.type="histogram", ncol=3,
title="Score distribution by cancer", x="Score", y="Number of genes"))
invisible(dev.off())
png(paste(configOptions$prefix, "score_density_by_cancer.png", sep="_"), width=4000, height=3000, res=300)
print(getDistPlot(subset(result.df, score.type %in% c("OG", "TS", "Combined")), facets="~ cancer",
plot.type="density", ncol=3,
title="Score distribution by cancer", x="Score", y="Density"))
invisible(dev.off())
scores = c("Copy number", "Methylation", "Mutation", "Achilles", "Expression")
names(scores) = c("cna", "methylation", "mutations", "achilles", "exprs")
score.colors = c("CNA"="#999999", "Expr"="#E69F00", "Meth"="#56B4E9", "Mut"="#009E73", "shRNA"="#F0E442", "others"="#CC79A7")
score.colors2 = score.colors
names(score.colors2) = c("Copy number", "Expression", "Methylation", "Mutation", "Achilles", "others")
cancer.score.df = data.frame()
for (cancType in names(results.diff)) {
for (score in names(scores)) {
for (st in c("og", "ts")) {
cancer.score.df = rbind(cancer.score.df,
data.frame(Cancer=cancType,
Gene=rownames(results.diff[[cancType]]$prioritize.combined),
Category=scores[score],
Score.category=toupper(st),
Score=results.diff[[cancType]]$prioritize.combined[, paste(st, score, sep=".")]))
}
}
}
png(paste(configOptions$prefix, "category_distribution.png", sep="_"), width=3000, height=3000, res=300)
ggplot(cancer.score.df, aes(x=Score, fill=Category)) +
geom_histogram(binwidth=0.4, position="dodge") +
facet_grid(Cancer ~ Score.category) +
scale_x_continuous(breaks=-1:1) +
scale_fill_manual(values=score.colors2) +
labs(title="Score distribution by gene", y="Number of genes", x="Score") +
theme(axis.ticks=element_blank(),
axis.text.x=element_text(colour="grey50", face="bold"),
axis.text.y=element_text(colour="grey50", face="bold"))
invisible(dev.off())
# Plot confusion heatmaps and barplots for single scores
for (m in c("combined.score", "og.score", "og.cna", "og.methylation", "og.mutations", "og.exprs",
"ts.score", "ts.cna", "ts.methylation", "ts.mutations", "ts.exprs")) {
notEqual = sapply(names(results1), function(n) { any(results1[[n]]$prioritize.combined[, m] -
results2[[n]]$prioritize.combined[, m] != 0) })
if (any(notEqual, na.rm=TRUE)) {
plots = compareScore(results1, results2, list(OG=m))
if (!(m %in% c("combined.score", "og.score", "ts.score"))) {
png(paste(configOptions$prefix, "_comparison_confusion_", m, ".png", sep=""), width=4000, height=4000, res=300)
print(plots[[1]])
dev.off()
}
png(paste(configOptions$prefix, "_comparison_confusion_", m, "_sum.png", sep=""), width=4000, height=4000, res=300)
print(plots[[3]])
dev.off()
png(paste(configOptions$prefix, "_comparison_barplot_", m, ".png", sep=""), width=4000, height=4000, res=300)
print(plots[[2]])
dev.off()
png(paste(configOptions$prefix, "_comparison_barplot_", m, "_sum.png", sep=""), width=4000, height=4000, res=300)
print(plots[[4]])
dev.off()
}
}
|
/TCGA_PAN/SCRIPTS/plot_prioritize_diff.r
|
permissive
|
thulsen/OncoScape
|
R
| false | false | 8,299 |
r
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
mutate(agegrps = cut(AGELAST,
breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
mutate(agegrps_v2X = cut(AGELAST,
breaks = c(-1, 17.5 ,64.5, Inf),
labels = c("Under 18","18-64","65+"))) %>%
mutate(agegrps_v3X = cut(AGELAST,
breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
"30-34", "35-44", "45-54", "55-64", "65+")))
# Race / ethnicity
# Starting in 2012, RACETHX replaced RACEX;
if(year >= 2012){
FYC <- FYC %>%
mutate(white_oth=F,
hisp = (RACETHX == 1),
white = (RACETHX == 2),
black = (RACETHX == 3),
native = (RACETHX > 3 & RACEV1X %in% c(3,6)),
asian = (RACETHX > 3 & RACEV1X %in% c(4,5)))
}else if(year >= 2002){
FYC <- FYC %>%
mutate(white_oth=0,
hisp = (RACETHNX == 1),
white = (RACETHNX == 4 & RACEX == 1),
black = (RACETHNX == 2),
native = (RACETHNX >= 3 & RACEX %in% c(3,6)),
asian = (RACETHNX >= 3 & RACEX %in% c(4,5)))
}else{
FYC <- FYC %>%
mutate(
hisp = (RACETHNX == 1),
black = (RACETHNX == 2),
white_oth = (RACETHNX == 3),
white = 0,native=0,asian=0)
}
FYC <- FYC %>% mutate(
race = 1*hisp + 2*white + 3*black + 4*native + 5*asian + 9*white_oth,
race = recode_factor(race, .default = "Missing", .missing = "Missing",
"1" = "Hispanic",
"2" = "White",
"3" = "Black",
"4" = "Amer. Indian, AK Native, or mult. races",
"5" = "Asian, Hawaiian, or Pacific Islander",
"9" = "White and other"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
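# Survey-weighted mean of total expenditures (TOTEXP) among persons with any expense,
# by race/ethnicity and age group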
results <- svyby(~TOTEXP.yy., FUN = svymean, by = ~race + agegrps, design = subset(FYCdsgn, TOTEXP.yy. > 0))
print(results)
|
/mepstrends/hc_use/json/code/r/meanEXP__race__agegrps__.r
|
permissive
|
RandomCriticalAnalysis/MEPS-summary-tables
|
R
| false | false | 2,911 |
r
|
uik=function(x,y){
#Output for UIK method as defined theoretically in:
# Christopoulos, Demetris T., Introducing Unit Invariant Knee (UIK) As an Objective Choice
# for Elbow Point in Multivariate Data Analysis Techniques (March 1, 2016).
# Available at SSRN: https://ssrn.com/abstract=3043076 or http://dx.doi.org/10.2139/ssrn.3043076
#Contact Emails: dchristop@econ.uoa.gr or dem.christop@gmail.com
if(length(x)<=3){stop('Method is not applicable for such a small vector. Please give at least a 5 numbers vector')}
# Check convexity or at least leading convexity:
cxv=check_curve(x,y)
knee=x[ede(x,y,cxv$index)[1]]
return(knee)
}
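## Minimal usage sketch (added; assumes check_curve() and ede() from this package are
## available on the search path):
## x=seq(0,10,by=0.1); y=5/(1+exp(-2*(x-3))) # sigmoid-like curve with an elbow
## uik(x,y) # returns the x value of the estimated knee point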
|
/R/uik.R
|
no_license
|
cran/inflection
|
R
| false | false | 698 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/removeBadVars.R
\name{removeBadVars}
\alias{removeBadVars}
\title{removeBadVars}
\usage{
removeBadVars(data, catlist, predlist, nLevelsCutoff = 5, minCount = 1)
}
\arguments{
\item{data}{input data.frame}
\item{catlist}{vector of categorical variable names to use in multLogistic()}
\item{predlist}{vector of all variable names to use in multLogistic()}
\item{nLevelsCutoff}{Maximum number of levels a categorical variable can have and still be included in the analysis.}
\item{minCount}{Minimum total number of non-NA entries a variable can have and still be included in the analyses.}
}
\value{
a list of varibale name vectors to include in a later analysis.
}
\description{
removeBadVars checks the variables in a dataframe, and removes those that do not meet the given parameters. To be used in conjunction with the multLogistic() function.
}
\examples{
#NULL
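# Hypothetical sketch (not from the package authors); data and cutoffs are illustrative:
# df <- data.frame(sex = factor(c("m","f","m","f")),
#                  site = factor(letters[1:4]), # more levels than nLevelsCutoff below
#                  age = c(34, 51, NA, 42))
# removeBadVars(df, catlist = c("sex","site"),
#               predlist = c("sex","site","age"),
#               nLevelsCutoff = 3, minCount = 2)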
}
|
/man/removeBadVars.Rd
|
no_license
|
TaylorAndrew/atPrepAnalyze
|
R
| false | true | 949 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/removeBadVars.R
\name{removeBadVars}
\alias{removeBadVars}
\title{removeBadVars}
\usage{
removeBadVars(data, catlist, predlist, nLevelsCutoff = 5, minCount = 1)
}
\arguments{
\item{data}{input data.frame}
\item{catlist}{vector of categorical variable names to use in multLogistic()}
\item{predlist}{vector of all variable names to use in multLogistic()}
\item{nLevelsCutoff}{Maximum number of levels a categorical variable can have and still be included in the analysis.}
\item{minCount}{Minimum total number of non-NA entries a variable can have and still be included in the analyses.}
}
\value{
a list of varibale name vectors to include in a later analysis.
}
\description{
removeBadVars checks the variables in a dataframe, and removes those that do not meet the given parameters. To be used in conjunction with the multLogistic() function.
}
\examples{
#NULL
}
|
\name{atus80ord}
\alias{atus80ord}
\docType{data}
\title{
Ordinal Events Subset of the American Time Use Survey
}
\description{
Event histories from respondents over the age of 80 in the pooled 2003--2008 American Time Use Survey.
}
\usage{data(atus80ord)}
\format{
A data frame with 62,352 observations on the following 3 variables.
\describe{
\item{\code{Activities}}{Type of activity spell. See details.}
\item{\code{TUCASEID}}{Respondent unique identification number.}
\item{\code{SEX}}{Sex of respondent. 1=Males.}
}
}
\details{
Each activity was recoded into a general activity class with 14 possible values (including ``missing'' as NA).
}
\source{
Bureau of Labor Statistics. The American Time Use Survey. Available online at: \url{http://www.bls.gov/tus/}.
}
\examples{
data(atus80ord)
#Activity Spell Frequencies by Sex
table(atus80ord$Activities,atus80ord$SEX)
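#Within-sex proportions of each activity class (added example; columns sum to 1)
prop.table(table(atus80ord$Activities,atus80ord$SEX), margin=2)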
}
\keyword{datasets}
|
/man/atus80ord.Rd
|
no_license
|
cran/informR
|
R
| false | false | 911 |
rd
|
\name{dfToMatrix}
\alias{dfToMatrix}
\title{data frame to matrix}
\description{
This function takes a matrix that is in data frame format and transforms it into a matrix. Other packages that allows you to obtain an additive relationship matrix from a pedigree is the `pedigreemm` package.
}
\usage{
dfToMatrix(x, row="Row",column="Column",
value="Ainverse", returnInverse=FALSE,
bend=1e-6)
}
\arguments{
\item{x}{ginv element, output from the Ainverse function.}
\item{row}{name of the column in x that indicates the row in the original relationship matrix.}
\item{column}{name of the column in x that indicates the column in the original relationship matrix.}
\item{value}{name of the column in x that indicates the value for a given row and column in the original relationship matrix.}
\item{returnInverse}{a TRUE/FALSE value indicating if the inverse of the x matrix should be computed once the data frame x is converted into a matrix.}
\item{bend}{a numeric value to add to the diagonal matrix in case matrix is singular for inversion.}
}
\value{
\item{K}{ pedigree transformed in a relationship matrix.}
\item{Kinv}{ inverse of the pedigree transformed in a relationship matrix.}
}
\references{
Covarrubias-Pazaran G (2016) Genome assisted prediction of quantitative traits using the R package sommer. PLoS ONE 11(6): doi:10.1371/journal.pone.0156744
}
\author{
Giovanny Covarrubias-Pazaran
}
\examples{
library(Matrix)
m <- matrix(1:9,3,3)
m <- tcrossprod(m)
mdf <- as.data.frame(as.table(m))
mdf
dfToMatrix(mdf, row = "Var1", column = "Var2",
value = "Freq",returnInverse=FALSE )
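# Hypothetical follow-up (not run): with returnInverse=TRUE the returned list should also
# contain Kinv; 'bend' is added to the diagonal since this example matrix is singular.
# dfToMatrix(mdf, row = "Var1", column = "Var2", value = "Freq", returnInverse = TRUE)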
}
\seealso{The core functions of the package \code{\link{mmer}} }
|
/man/dfToMatrix.Rd
|
no_license
|
covaruber/sommer
|
R
| false | false | 1,740 |
rd
|
# ################################################################# #
#### LOAD LIBRARY AND DEFINE CORE SETTINGS ####
# ################################################################# #
#install.packages("apollo")
### Clear memory
rm(list = ls())
workingDirectory="/Users/williz/Desktop/ModelosED/2. Articulo 2/1. Scripts/Modelos de Prueba/Regret"
setwd(workingDirectory)
### Load Apollo library
library(apollo)
### Initialise code
apollo_initialise()
## Set the main controls
apollo_control = list(
modelName = "Regret Simple",
modelDescr = "Modelos Regret en Eleccion de Ruta",
indivID = "ViajeId"
)
# ################################################################# #
#### LOAD DATA AND APPLY ANY TRANSFORMATION                        ####
# ################################################################# #
database = read.csv("/Users/williz/Desktop/ModelosED/2. Articulo 2/2. Database/DBMuestra_ModeloLogitVL.csv",sep="\t", dec=".",header=TRUE)
# Normalization of the trips
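# Each time and distance attribute is min-max normalized across the four alternatives of a
# trip: x_norm = (x - (min - eps)) / (max - (min - eps)), with eps = 0.05 for times and
# eps = 0.15 for distances, so the best alternative maps to a small positive value and the
# worst to 1 (the offset keeps the minimum strictly above zero).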
for (i in 1:nrow(database)){
# Normalization of the time variables
database$T_Alt_1[i] = (database$TIEMPOAlt1[i]- (min(c(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05)))/(max(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-(min(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05))
database$T_Alt_2[i] = (database$TIEMPOAlt2[i]- (min(c(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05)))/(max(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-(min(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05))
database$T_Alt_3[i] = (database$TIEMPOAlt3[i]- (min(c(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05)))/(max(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-(min(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05))
database$T_Alt_4[i] = (database$TIEMPOEC[i]- (min(c(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05)))/(max(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-(min(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05))
# Normalization of the distance variable
database$D_Alt_1[i] = (database$DISTAlt1[i]- (min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))/(max(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i]))-(min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))
database$D_Alt_2[i] = (database$DISTAlt2[i]- (min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))/(max(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i]))-(min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))
database$D_Alt_3[i] = (database$DISTAlt3[i]- (min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))/(max(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i]))-(min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))
database$D_Alt_4[i] = (database$DISTEC[i]- (min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))/(max(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i]))-(min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))
}
#summary(database)
# ################################################################# #
#### DEFINE MODEL PARAMETERS                                       ####
# ################################################################# #
### Vector of parameters, including any that are kept fixed during estimation
apollo_beta=c(asc_ruta1 = 0, asc_ruta2 = 0, asc_ruta3 = 0, asc_ruta4 = 0,
b_tt = 0,
b_dt = 0,
b_Sem = 0,
b_CongAB = 0, b_CongCD = 0, b_CongEF = 0,
b_ACC_0 = 0, b_ACC_1 = 0, b_ACC_2 = 0,
b_NO_CAMFD = 0, b_SI_CAMFD = 0,
b_NO_PANEL = 0, b_SI_PANEL = 0,
b_NO_ZER = 0, b_SI_ZER = 0,
b_No_MTRP = 0, b_Si_MTRP = 0)
### Vector with the names (in quotes) of the parameters to be kept fixed at their starting value in apollo_beta; use apollo_fixed = c() if none
apollo_fixed = c("asc_ruta3", "b_CongAB", "b_ACC_0", "b_NO_CAMFD", "b_No_MTRP", "b_NO_PANEL", "b_NO_ZER")
# ################################################################# #
#### GROUP AND VALIDATE INPUTS ####
# ################################################################# #
apollo_inputs = apollo_validateInputs()
# ################################################################# #
#### DEFINE MODEL AND LIKELIHOOD FUNCTION ####
# ################################################################# #
apollo_probabilities=function(apollo_beta, apollo_inputs, functionality="estimate"){
### Attach inputs and detach after function exit
apollo_attach(apollo_beta, apollo_inputs)
on.exit(apollo_detach(apollo_beta, apollo_inputs))
### Create a list of probabilities P
P = list()
### Prepare regret components for the categorical variables.
RSem_ruta1 = SEM_A1_km
RSem_ruta2 = SEM_A2_km
RSem_ruta3 = SEM_A3_km
RSem_ruta4 = SEM_EC_km
RCong_ruta1 = b_CongAB*CONG_AB_A1 + b_CongCD*CONG_CD_A1 + b_CongEF*CONG_EF_A1
RCong_ruta2 = b_CongAB*CONG_AB_A2 + b_CongCD*CONG_CD_A2 + b_CongEF*CONG_EF_A2
RCong_ruta3 = b_CongAB*CONG_AB_A3 + b_CongCD*CONG_CD_A3 + b_CongEF*CONG_EF_A3
RCong_ruta4 = b_CongAB*CONG_AB_EC + b_CongCD*CONG_CD_EC + b_CongEF*CONG_EF_EC
RAcc_ruta1 = b_ACC_0*ACC_A1_0 + b_ACC_1*ACC_A1_1 + b_ACC_2*ACC_A1_2
RAcc_ruta2 = b_ACC_0*ACC_A2_0 + b_ACC_1*ACC_A2_1 + b_ACC_2*ACC_A2_2
RAcc_ruta3 = b_ACC_0*ACC_A3_0 + b_ACC_1*ACC_A3_1 + b_ACC_2*ACC_A3_2
RAcc_ruta4 = b_ACC_0*ACC_EC_0 + b_ACC_1*ACC_EC_1 + b_ACC_2*ACC_EC_2
RCamFd_ruta1 = b_NO_CAMFD*NO_CAMFD_A1 + b_SI_CAMFD * SI_CAMFD_A1
RCamFd_ruta2 = b_NO_CAMFD*NO_CAMFD_A2 + b_SI_CAMFD * SI_CAMFD_A2
RCamFd_ruta3 = b_NO_CAMFD*NO_CAMFD_A3 + b_SI_CAMFD * SI_CAMFD_A3
RCamFd_ruta4 = b_NO_CAMFD*NO_CAMFD_EC + b_SI_CAMFD * SI_CAMFD_EC
RPanel_ruta1 = b_NO_PANEL*NO_PANEL_A1 + b_SI_PANEL*SI_PANEL_A1
RPanel_ruta2 = b_NO_PANEL*NO_PANEL_A2 + b_SI_PANEL*SI_PANEL_A2
RPanel_ruta3 = b_NO_PANEL*NO_PANEL_A3 + b_SI_PANEL*SI_PANEL_A3
RPanel_ruta4 = b_NO_PANEL*NO_PANEL_EC + b_SI_PANEL*SI_PANEL_EC
RZer_r1 = b_NO_ZER* NO_ZER_A1 + b_SI_ZER *SI_ZER_A1
RZer_r2 = b_NO_ZER* NO_ZER_A2 + b_SI_ZER *SI_ZER_A2
RZer_r3 = b_NO_ZER* NO_ZER_A3 + b_SI_ZER *SI_ZER_A3
RZer_r4 = b_NO_ZER* NO_ZER_EC + b_SI_ZER *SI_ZER_EC
RMtrp_r1 = b_No_MTRP *NO_MTRP_A1 + b_Si_MTRP * SI_MTRP_A1
RMtrp_r2 = b_No_MTRP *NO_MTRP_A2 + b_Si_MTRP * SI_MTRP_A2
RMtrp_r3 = b_No_MTRP *NO_MTRP_A3 + b_Si_MTRP * SI_MTRP_A3
RMtrp_r4 = b_No_MTRP *NO_MTRP_EC + b_Si_MTRP * SI_MTRP_EC
### List of regret functions: these must use the same names as in mnl_settings, order is irrelevant
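# Sketch of the classical RRM regret that these sums implement for alternative i:
#   R_i = sum over j != i, sum over attributes m, of ln(1 + exp(beta_m * (x_jm - x_im)))
# i.e. regret accumulates whenever a competing route j outperforms route i on attribute m.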
R = list()
R[['ruta1']] = asc_ruta1 +
log(1+exp(b_tt*(T_Alt_2 - T_Alt_1))) +
log(1+exp(b_tt*(T_Alt_3 - T_Alt_1))) +
log(1+exp(b_tt*(T_Alt_4 - T_Alt_1))) +
log(1+exp(b_dt*(D_Alt_2 - D_Alt_1))) +
log(1+exp(b_dt*(D_Alt_3 - D_Alt_1))) +
log(1+exp(b_dt*(D_Alt_4 - D_Alt_1))) +
log(1+exp(b_Sem*(RSem_ruta2 - RSem_ruta1))) +
log(1+exp(b_Sem*(RSem_ruta3 - RSem_ruta1))) +
log(1+exp(b_Sem*(RSem_ruta4 - RSem_ruta1))) +
log(1+exp(RCong_ruta2 - RCong_ruta1)) +
log(1+exp(RCong_ruta3 - RCong_ruta1)) +
log(1+exp(RCong_ruta4 - RCong_ruta1)) +
log(1+exp(RAcc_ruta2 - RAcc_ruta1)) +
log(1+exp(RAcc_ruta3 - RAcc_ruta1)) +
log(1+exp(RAcc_ruta4 - RAcc_ruta1)) +
log(1+exp(RCamFd_ruta2 - RCamFd_ruta1)) +
log(1+exp(RCamFd_ruta3 - RCamFd_ruta1)) +
log(1+exp(RCamFd_ruta4 - RCamFd_ruta1)) +
log(1+exp(RPanel_ruta2 - RPanel_ruta1)) +
log(1+exp(RPanel_ruta3 - RPanel_ruta1)) +
log(1+exp(RPanel_ruta4 - RPanel_ruta1)) +
log(1+exp(RZer_r2 - RZer_r1)) +
log(1+exp(RZer_r3 - RZer_r1)) +
log(1+exp(RZer_r4 - RZer_r1)) +
log(1+exp(RMtrp_r2 - RMtrp_r1)) +
log(1+exp(RMtrp_r3 - RMtrp_r1)) +
log(1+exp(RMtrp_r4 - RMtrp_r1))
R[['ruta2']] = asc_ruta2 +
log(1+exp(b_tt*(T_Alt_1 - T_Alt_2))) +
log(1+exp(b_tt*(T_Alt_3 - T_Alt_2))) +
log(1+exp(b_tt*(T_Alt_4 - T_Alt_2))) +
log(1+exp(b_dt*(D_Alt_1 - D_Alt_2))) +
log(1+exp(b_dt*(D_Alt_3 - D_Alt_2))) +
log(1+exp(b_dt*(D_Alt_4 - D_Alt_2))) +
log(1+exp(b_Sem*(RSem_ruta1 - RSem_ruta2))) +
log(1+exp(b_Sem*(RSem_ruta3 - RSem_ruta2))) +
log(1+exp(b_Sem*(RSem_ruta4 - RSem_ruta2))) +
log(1+exp(RCong_ruta1 - RCong_ruta2)) +
log(1+exp(RCong_ruta3 - RCong_ruta2)) +
log(1+exp(RCong_ruta4 - RCong_ruta2)) +
log(1+exp(RAcc_ruta1 - RAcc_ruta2)) +
log(1+exp(RAcc_ruta3 - RAcc_ruta2)) +
log(1+exp(RAcc_ruta4 - RAcc_ruta2)) +
log(1+exp(RCamFd_ruta1 - RCamFd_ruta2)) +
log(1+exp(RCamFd_ruta3 - RCamFd_ruta2)) +
log(1+exp(RCamFd_ruta4 - RCamFd_ruta2)) +
log(1+exp(RPanel_ruta1 - RPanel_ruta2)) +
log(1+exp(RPanel_ruta3 - RPanel_ruta2)) +
log(1+exp(RPanel_ruta4 - RPanel_ruta2)) +
log(1+exp(RZer_r1 - RZer_r2)) +
log(1+exp(RZer_r3 - RZer_r2)) +
log(1+exp(RZer_r4 - RZer_r2)) +
log(1+exp(RMtrp_r1 - RMtrp_r2)) +
log(1+exp(RMtrp_r3 - RMtrp_r2)) +
log(1+exp(RMtrp_r4 - RMtrp_r2))
R[['ruta3']] = asc_ruta3 +
log(1+exp(b_tt*(T_Alt_1 - T_Alt_3))) +
log(1+exp(b_tt*(T_Alt_2 - T_Alt_3))) +
log(1+exp(b_tt*(T_Alt_4 - T_Alt_3))) +
log(1+exp(b_dt*(D_Alt_1 - D_Alt_3))) +
log(1+exp(b_dt*(D_Alt_2 - D_Alt_3))) +
log(1+exp(b_dt*(D_Alt_4 - D_Alt_3))) +
log(1+exp(b_Sem*(RSem_ruta1 - RSem_ruta3))) +
log(1+exp(b_Sem*(RSem_ruta2 - RSem_ruta3))) +
log(1+exp(b_Sem*(RSem_ruta4 - RSem_ruta3))) +
log(1+exp(RCong_ruta2 - RCong_ruta3)) +
log(1+exp(RCong_ruta1 - RCong_ruta3)) +
log(1+exp(RCong_ruta4 - RCong_ruta3)) +
log(1+exp(RAcc_ruta1 - RAcc_ruta3)) +
log(1+exp(RAcc_ruta2 - RAcc_ruta3)) +
log(1+exp(RAcc_ruta4 - RAcc_ruta3)) +
log(1+exp(RCamFd_ruta1 - RCamFd_ruta3)) +
log(1+exp(RCamFd_ruta2 - RCamFd_ruta3)) +
log(1+exp(RCamFd_ruta4 - RCamFd_ruta3)) +
log(1+exp(RPanel_ruta1 - RPanel_ruta3)) +
log(1+exp(RPanel_ruta2 - RPanel_ruta3)) +
log(1+exp(RPanel_ruta4 - RPanel_ruta3)) +
log(1+exp(RZer_r1 - RZer_r3)) +
log(1+exp(RZer_r2 - RZer_r3)) +
log(1+exp(RZer_r4 - RZer_r3)) +
log(1+exp(RMtrp_r1 - RMtrp_r3)) +
log(1+exp(RMtrp_r2 - RMtrp_r3)) +
log(1+exp(RMtrp_r4 - RMtrp_r3))
R[['ruta4']] = asc_ruta4 +
log(1+exp(b_tt*(T_Alt_1 - T_Alt_4))) +
log(1+exp(b_tt*(T_Alt_2 - T_Alt_4))) +
log(1+exp(b_tt*(T_Alt_3 - T_Alt_4))) +
log(1+exp(b_dt*(D_Alt_1 - D_Alt_4))) +
log(1+exp(b_dt*(D_Alt_2 - D_Alt_4))) +
log(1+exp(b_dt*(D_Alt_3 - D_Alt_4))) +
log(1+exp(b_Sem*(RSem_ruta1 - RSem_ruta4))) +
log(1+exp(b_Sem*(RSem_ruta2 - RSem_ruta4))) +
log(1+exp(b_Sem*(RSem_ruta3 - RSem_ruta4))) +
log(1+exp(RCong_ruta1 - RCong_ruta4)) +
log(1+exp(RCong_ruta2 - RCong_ruta4)) +
log(1+exp(RCong_ruta3 - RCong_ruta4)) +
log(1+exp(RAcc_ruta1 - RAcc_ruta4)) +
log(1+exp(RAcc_ruta2 - RAcc_ruta4)) +
log(1+exp(RAcc_ruta3 - RAcc_ruta4)) +
log(1+exp(RCamFd_ruta1 - RCamFd_ruta4)) +
log(1+exp(RCamFd_ruta2 - RCamFd_ruta4)) +
log(1+exp(RCamFd_ruta3 - RCamFd_ruta4)) +
log(1+exp(RPanel_ruta1 - RPanel_ruta4)) +
log(1+exp(RPanel_ruta2 - RPanel_ruta4)) +
log(1+exp(RPanel_ruta3 - RPanel_ruta4)) +
log(1+exp(RZer_r1 - RZer_r4)) +
log(1+exp(RZer_r2 - RZer_r4)) +
log(1+exp(RZer_r3 - RZer_r4)) +
log(1+exp(RMtrp_r1 - RMtrp_r4)) +
log(1+exp(RMtrp_r2 - RMtrp_r4)) +
log(1+exp(RMtrp_r3 - RMtrp_r4))
### Define settings for RRM model, which is MNL with negative regret as utility
mnl_settings <- list(
alternatives = c(ruta1=1, ruta2=2, ruta3=3, ruta4=4),
avail = list(ruta1=1, ruta2=1, ruta3=1, ruta4=1),
choiceVar = CHOICE,
V = lapply(R, "*", -1)
)
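# With V = -R, apollo_mnl returns P(i) = exp(-R_i) / sum_j exp(-R_j),
# the standard choice rule for random regret minimization.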
### Compute probabilities using MNL model
P[['model']] = apollo_mnl(mnl_settings, functionality)
### Take product across observation for same individual
#P = apollo_panelProd(P, apollo_inputs, functionality)
### Prepare and return outputs of function
P = apollo_prepareProb(P, apollo_inputs, functionality)
return(P)
}
# ################################################################# #
#### MODEL ESTIMATION ####
# ################################################################# #
model = apollo_estimate(apollo_beta, apollo_fixed, apollo_probabilities, apollo_inputs)
# ################################################################# #
#### MODEL OUTPUTS ####
# ################################################################# #
# ----------------------------------------------------------------- #
#---- FORMATTED OUTPUT (TO SCREEN) ----
# ----------------------------------------------------------------- #
apollo_modelOutput(model, modelOutput_settings=list(printPVal=TRUE) )
# ----------------------------------------------------------------- #
#---- FORMATTED OUTPUT (TO FILE, using model name) ----
# ----------------------------------------------------------------- #
apollo_saveOutput(model, saveOutput_settings=list(printPVal=TRUE) )
|
/Borrador Articulo 2/1. Scripts/Modelos de Prueba/Modelo Regret.R
|
no_license
|
williz519/ModelosED
|
R
| false | false | 14,228 |
r
|
# ################################################################# #
#### LOAD LIBRARY AND DEFINE CORE SETTINGS ####
# ################################################################# #
#install.packages("apollo")
### Clear memory
rm(list = ls())
workingDirectory="/Users/williz/Desktop/ModelosED/2. Articulo 2/1. Scripts/Modelos de Prueba/Regret"
setwd(workingDirectory)
### Load Apollo library
library(apollo)
### Initialise code
apollo_initialise()
## Set the main controls
apollo_control = list(
modelName = "Regret Simple",
modelDescr = "Modelos Regret en Eleccion de Ruta",
indivID = "ViajeId"
)
# ################################################################# #
#### LOAD DATA AND APPLY ANY TRANSFORMATION                        ####
# ################################################################# #
database = read.csv("/Users/williz/Desktop/ModelosED/2. Articulo 2/2. Database/DBMuestra_ModeloLogitVL.csv",sep="\t", dec=".",header=TRUE)
# Normalization of the trips
for (i in 1:nrow(database)){
# Normalization of the time variables
database$T_Alt_1[i] = (database$TIEMPOAlt1[i]- (min(c(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05)))/(max(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-(min(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05))
database$T_Alt_2[i] = (database$TIEMPOAlt2[i]- (min(c(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05)))/(max(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-(min(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05))
database$T_Alt_3[i] = (database$TIEMPOAlt3[i]- (min(c(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05)))/(max(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-(min(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05))
database$T_Alt_4[i] = (database$TIEMPOEC[i]- (min(c(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05)))/(max(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-(min(database$TIEMPOAlt1[i],database$TIEMPOAlt2[i],database$TIEMPOAlt3[i],database$TIEMPOEC[i])-0.05))
# Normalization of the distance variable
database$D_Alt_1[i] = (database$DISTAlt1[i]- (min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))/(max(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i]))-(min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))
database$D_Alt_2[i] = (database$DISTAlt2[i]- (min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))/(max(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i]))-(min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))
database$D_Alt_3[i] = (database$DISTAlt3[i]- (min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))/(max(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i]))-(min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))
database$D_Alt_4[i] = (database$DISTEC[i]- (min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))/(max(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i]))-(min(c(database$DISTAlt1[i],database$DISTAlt2[i],database$DISTAlt3[i],database$DISTEC[i])-0.15)))
}
#summary(database)
# ################################################################# #
#### DEFINE MODEL PARAMETERS                                       ####
# ################################################################# #
### Vector of parameters, including any that are kept fixed during estimation
apollo_beta=c(asc_ruta1 = 0, asc_ruta2 = 0, asc_ruta3 = 0, asc_ruta4 = 0,
b_tt = 0,
b_dt = 0,
b_Sem = 0,
b_CongAB = 0, b_CongCD = 0, b_CongEF = 0,
b_ACC_0 = 0, b_ACC_1 = 0, b_ACC_2 = 0,
b_NO_CAMFD = 0, b_SI_CAMFD = 0,
b_NO_PANEL = 0, b_SI_PANEL = 0,
b_NO_ZER = 0, b_SI_ZER = 0,
b_No_MTRP = 0, b_Si_MTRP = 0)
### Vector with the names (in quotes) of the parameters to be kept fixed at their starting value in apollo_beta; use apollo_fixed = c() if none
apollo_fixed = c("asc_ruta3", "b_CongAB", "b_ACC_0", "b_NO_CAMFD", "b_No_MTRP", "b_NO_PANEL", "b_NO_ZER")
# ################################################################# #
#### GROUP AND VALIDATE INPUTS ####
# ################################################################# #
apollo_inputs = apollo_validateInputs()
# ################################################################# #
#### DEFINE MODEL AND LIKELIHOOD FUNCTION ####
# ################################################################# #
apollo_probabilities=function(apollo_beta, apollo_inputs, functionality="estimate"){
### Attach inputs and detach after function exit
apollo_attach(apollo_beta, apollo_inputs)
on.exit(apollo_detach(apollo_beta, apollo_inputs))
### Create a list of probabilities P
P = list()
### Prepare regret components for the categorical variables.
RSem_ruta1 = SEM_A1_km
RSem_ruta2 = SEM_A2_km
RSem_ruta3 = SEM_A3_km
RSem_ruta4 = SEM_EC_km
RCong_ruta1 = b_CongAB*CONG_AB_A1 + b_CongCD*CONG_CD_A1 + b_CongEF*CONG_EF_A1
RCong_ruta2 = b_CongAB*CONG_AB_A2 + b_CongCD*CONG_CD_A2 + b_CongEF*CONG_EF_A2
RCong_ruta3 = b_CongAB*CONG_AB_A3 + b_CongCD*CONG_CD_A3 + b_CongEF*CONG_EF_A3
RCong_ruta4 = b_CongAB*CONG_AB_EC + b_CongCD*CONG_CD_EC + b_CongEF*CONG_EF_EC
RAcc_ruta1 = b_ACC_0*ACC_A1_0 + b_ACC_1*ACC_A1_1 + b_ACC_2*ACC_A1_2
RAcc_ruta2 = b_ACC_0*ACC_A2_0 + b_ACC_1*ACC_A2_1 + b_ACC_2*ACC_A2_2
RAcc_ruta3 = b_ACC_0*ACC_A3_0 + b_ACC_1*ACC_A3_1 + b_ACC_2*ACC_A3_2
RAcc_ruta4 = b_ACC_0*ACC_EC_0 + b_ACC_1*ACC_EC_1 + b_ACC_2*ACC_EC_2
RCamFd_ruta1 = b_NO_CAMFD*NO_CAMFD_A1 + b_SI_CAMFD * SI_CAMFD_A1
RCamFd_ruta2 = b_NO_CAMFD*NO_CAMFD_A2 + b_SI_CAMFD * SI_CAMFD_A2
RCamFd_ruta3 = b_NO_CAMFD*NO_CAMFD_A3 + b_SI_CAMFD * SI_CAMFD_A3
RCamFd_ruta4 = b_NO_CAMFD*NO_CAMFD_EC + b_SI_CAMFD * SI_CAMFD_EC
RPanel_ruta1 = b_NO_PANEL*NO_PANEL_A1 + b_SI_PANEL*SI_PANEL_A1
RPanel_ruta2 = b_NO_PANEL*NO_PANEL_A2 + b_SI_PANEL*SI_PANEL_A2
RPanel_ruta3 = b_NO_PANEL*NO_PANEL_A3 + b_SI_PANEL*SI_PANEL_A3
RPanel_ruta4 = b_NO_PANEL*NO_PANEL_EC + b_SI_PANEL*SI_PANEL_EC
RZer_r1 = b_NO_ZER* NO_ZER_A1 + b_SI_ZER *SI_ZER_A1
RZer_r2 = b_NO_ZER* NO_ZER_A2 + b_SI_ZER *SI_ZER_A2
RZer_r3 = b_NO_ZER* NO_ZER_A3 + b_SI_ZER *SI_ZER_A3
RZer_r4 = b_NO_ZER* NO_ZER_EC + b_SI_ZER *SI_ZER_EC
RMtrp_r1 = b_No_MTRP *NO_MTRP_A1 + b_Si_MTRP * SI_MTRP_A1
RMtrp_r2 = b_No_MTRP *NO_MTRP_A2 + b_Si_MTRP * SI_MTRP_A2
RMtrp_r3 = b_No_MTRP *NO_MTRP_A3 + b_Si_MTRP * SI_MTRP_A3
RMtrp_r4 = b_No_MTRP *NO_MTRP_EC + b_Si_MTRP * SI_MTRP_EC
### List of regret functions: these must use the same names as in mnl_settings, order is irrelevant
R = list()
R[['ruta1']] = asc_ruta1 +
log(1+exp(b_tt*(T_Alt_2 - T_Alt_1))) +
log(1+exp(b_tt*(T_Alt_3 - T_Alt_1))) +
log(1+exp(b_tt*(T_Alt_4 - T_Alt_1))) +
log(1+exp(b_dt*(D_Alt_2 - D_Alt_1))) +
log(1+exp(b_dt*(D_Alt_3 - D_Alt_1))) +
log(1+exp(b_dt*(D_Alt_4 - D_Alt_1))) +
log(1+exp(b_Sem*(RSem_ruta2 - RSem_ruta1))) +
log(1+exp(b_Sem*(RSem_ruta3 - RSem_ruta1))) +
log(1+exp(b_Sem*(RSem_ruta4 - RSem_ruta1))) +
log(1+exp(RCong_ruta2 - RCong_ruta1)) +
log(1+exp(RCong_ruta3 - RCong_ruta1)) +
log(1+exp(RCong_ruta4 - RCong_ruta1)) +
log(1+exp(RAcc_ruta2 - RAcc_ruta1)) +
log(1+exp(RAcc_ruta3 - RAcc_ruta1)) +
log(1+exp(RAcc_ruta4 - RAcc_ruta1)) +
log(1+exp(RCamFd_ruta2 - RCamFd_ruta1)) +
log(1+exp(RCamFd_ruta3 - RCamFd_ruta1)) +
log(1+exp(RCamFd_ruta4 - RCamFd_ruta1)) +
log(1+exp(RPanel_ruta2 - RPanel_ruta1)) +
log(1+exp(RPanel_ruta3 - RPanel_ruta1)) +
log(1+exp(RPanel_ruta4 - RPanel_ruta1)) +
log(1+exp(RZer_r2 - RZer_r1)) +
log(1+exp(RZer_r3 - RZer_r1)) +
log(1+exp(RZer_r4 - RZer_r1)) +
log(1+exp(RMtrp_r2 - RMtrp_r1)) +
log(1+exp(RMtrp_r3 - RMtrp_r1)) +
log(1+exp(RMtrp_r4 - RMtrp_r1))
R[['ruta2']] = asc_ruta2 +
log(1+exp(b_tt*(T_Alt_1 - T_Alt_2))) +
log(1+exp(b_tt*(T_Alt_3 - T_Alt_2))) +
log(1+exp(b_tt*(T_Alt_4 - T_Alt_2))) +
log(1+exp(b_dt*(D_Alt_1 - D_Alt_2))) +
log(1+exp(b_dt*(D_Alt_3 - D_Alt_2))) +
log(1+exp(b_dt*(D_Alt_4 - D_Alt_2))) +
log(1+exp(b_Sem*(RSem_ruta1 - RSem_ruta2))) +
log(1+exp(b_Sem*(RSem_ruta3 - RSem_ruta2))) +
log(1+exp(b_Sem*(RSem_ruta4 - RSem_ruta2))) +
log(1+exp(RCong_ruta1 - RCong_ruta2)) +
log(1+exp(RCong_ruta3 - RCong_ruta2)) +
log(1+exp(RCong_ruta4 - RCong_ruta2)) +
log(1+exp(RAcc_ruta1 - RAcc_ruta2)) +
log(1+exp(RAcc_ruta3 - RAcc_ruta2)) +
log(1+exp(RAcc_ruta4 - RAcc_ruta2)) +
log(1+exp(RCamFd_ruta1 - RCamFd_ruta2)) +
log(1+exp(RCamFd_ruta3 - RCamFd_ruta2)) +
log(1+exp(RCamFd_ruta4 - RCamFd_ruta2)) +
log(1+exp(RPanel_ruta1 - RPanel_ruta2)) +
log(1+exp(RPanel_ruta3 - RPanel_ruta2)) +
log(1+exp(RPanel_ruta4 - RPanel_ruta2)) +
log(1+exp(RZer_r1 - RZer_r2)) +
log(1+exp(RZer_r3 - RZer_r2)) +
log(1+exp(RZer_r4 - RZer_r2)) +
log(1+exp(RMtrp_r1 - RMtrp_r2)) +
log(1+exp(RMtrp_r3 - RMtrp_r2)) +
log(1+exp(RMtrp_r4 - RMtrp_r2))
R[['ruta3']] = asc_ruta3 +
log(1+exp(b_tt*(T_Alt_1 - T_Alt_3))) +
log(1+exp(b_tt*(T_Alt_2 - T_Alt_3))) +
log(1+exp(b_tt*(T_Alt_4 - T_Alt_3))) +
log(1+exp(b_dt*(D_Alt_1 - D_Alt_3))) +
log(1+exp(b_dt*(D_Alt_2 - D_Alt_3))) +
log(1+exp(b_dt*(D_Alt_4 - D_Alt_3))) +
log(1+exp(b_Sem*(RSem_ruta1 - RSem_ruta3))) +
log(1+exp(b_Sem*(RSem_ruta2 - RSem_ruta3))) +
log(1+exp(b_Sem*(RSem_ruta4 - RSem_ruta3))) +
log(1+exp(RCong_ruta2 - RCong_ruta3)) +
log(1+exp(RCong_ruta1 - RCong_ruta3)) +
log(1+exp(RCong_ruta4 - RCong_ruta3)) +
log(1+exp(RAcc_ruta1 - RAcc_ruta3)) +
log(1+exp(RAcc_ruta2 - RAcc_ruta3)) +
log(1+exp(RAcc_ruta4 - RAcc_ruta3)) +
log(1+exp(RCamFd_ruta1 - RCamFd_ruta3)) +
log(1+exp(RCamFd_ruta2 - RCamFd_ruta3)) +
log(1+exp(RCamFd_ruta4 - RCamFd_ruta3)) +
log(1+exp(RPanel_ruta1 - RPanel_ruta3)) +
log(1+exp(RPanel_ruta2 - RPanel_ruta3)) +
log(1+exp(RPanel_ruta4 - RPanel_ruta3)) +
log(1+exp(RZer_r1 - RZer_r3)) +
log(1+exp(RZer_r2 - RZer_r3)) +
log(1+exp(RZer_r4 - RZer_r3)) +
log(1+exp(RMtrp_r1 - RMtrp_r3)) +
log(1+exp(RMtrp_r2 - RMtrp_r3)) +
log(1+exp(RMtrp_r4 - RMtrp_r3))
R[['ruta4']] = asc_ruta4 +
log(1+exp(b_tt*(T_Alt_1 - T_Alt_4))) +
log(1+exp(b_tt*(T_Alt_2 - T_Alt_4))) +
log(1+exp(b_tt*(T_Alt_3 - T_Alt_4))) +
log(1+exp(b_dt*(D_Alt_1 - D_Alt_4))) +
log(1+exp(b_dt*(D_Alt_2 - D_Alt_4))) +
log(1+exp(b_dt*(D_Alt_3 - D_Alt_4))) +
log(1+exp(b_Sem*(RSem_ruta1 - RSem_ruta4))) +
log(1+exp(b_Sem*(RSem_ruta2 - RSem_ruta4))) +
log(1+exp(b_Sem*(RSem_ruta3 - RSem_ruta4))) +
log(1+exp(RCong_ruta1 - RCong_ruta4)) +
log(1+exp(RCong_ruta2 - RCong_ruta4)) +
log(1+exp(RCong_ruta3 - RCong_ruta4)) +
log(1+exp(RAcc_ruta1 - RAcc_ruta4)) +
log(1+exp(RAcc_ruta2 - RAcc_ruta4)) +
log(1+exp(RAcc_ruta3 - RAcc_ruta4)) +
log(1+exp(RCamFd_ruta1 - RCamFd_ruta4)) +
log(1+exp(RCamFd_ruta2 - RCamFd_ruta4)) +
log(1+exp(RCamFd_ruta3 - RCamFd_ruta4)) +
log(1+exp(RPanel_ruta1 - RPanel_ruta4)) +
log(1+exp(RPanel_ruta2 - RPanel_ruta4)) +
log(1+exp(RPanel_ruta3 - RPanel_ruta4)) +
log(1+exp(RZer_r1 - RZer_r4)) +
log(1+exp(RZer_r2 - RZer_r4)) +
log(1+exp(RZer_r3 - RZer_r4)) +
log(1+exp(RMtrp_r1 - RMtrp_r4)) +
log(1+exp(RMtrp_r2 - RMtrp_r4)) +
log(1+exp(RMtrp_r3 - RMtrp_r4))
### Define settings for RRM model, which is MNL with negative regret as utility
mnl_settings <- list(
alternatives = c(ruta1=1, ruta2=2, ruta3=3, ruta4=4),
avail = list(ruta1=1, ruta2=1, ruta3=1, ruta4=1),
choiceVar = CHOICE,
V = lapply(R, "*", -1)
)
### Compute probabilities using MNL model
P[['model']] = apollo_mnl(mnl_settings, functionality)
### Take product across observation for same individual
#P = apollo_panelProd(P, apollo_inputs, functionality)
### Prepare and return outputs of function
P = apollo_prepareProb(P, apollo_inputs, functionality)
return(P)
}
# ################################################################# #
#### MODEL ESTIMATION ####
# ################################################################# #
model = apollo_estimate(apollo_beta, apollo_fixed, apollo_probabilities, apollo_inputs)
# ################################################################# #
#### MODEL OUTPUTS ####
# ################################################################# #
# ----------------------------------------------------------------- #
#---- FORMATTED OUTPUT (TO SCREEN) ----
# ----------------------------------------------------------------- #
apollo_modelOutput(model, modelOutput_settings=list(printPVal=TRUE) )
# ----------------------------------------------------------------- #
#---- FORMATTED OUTPUT (TO FILE, using model name) ----
# ----------------------------------------------------------------- #
apollo_saveOutput(model, saveOutput_settings=list(printPVal=TRUE) )
|
\name{BX}
\alias{BX}
\alias{BXBooks}
\docType{data}
\title{Book-Crossing Dataset (BX)}
\description{
The data set contains a 4-week crawl (August / September 2004) from the
Book-Crossing community. It contains 278,858 users (anonymized)
providing 1,149,780 ratings (explicit / implicit)
about 271,379 books.
}
\usage{
data(BX)
data(BXBooks)
}
\format{
The format is:
BX: Formal class 'realRatingMatrix' [package "recommenderlab"]
BXBooks: data.frame with book meta information (title, author, year, publisher)
}
\details{
44778 x 204680 rating matrix of class 'realRatingMatrix' with 493813 ratings
between 0 and 10.
}
%\source{
%}
\references{
Cai-Nicolas Ziegler, Sean M. McNee, Joseph A. Konstan, Georg Lausen,
"Improving Recommendation Lists Through Topic Diversification,"
Proceedings of the 14th International World Wide Web Conference (WWW '05), May 10-14, 2005, Chiba, Japan.
}
\examples{
data(BX)
BX
hist(getRatings(BX), main="Distribution of ratings", breaks=10)
}
\keyword{datasets}
|
/Work/data/recommenderlabBX/man/BX.Rd
|
no_license
|
tedconf/recommenderlab
|
R
| false | false | 1,018 |
rd
|
\name{BX}
\alias{BX}
\alias{BXBooks}
\docType{data}
\title{Book-Crossing Dataset (BX)}
\description{
The data set contains a 4-week crawl (August / September 2004) from the
Book-Crossing community. It contains 278,858 users (anonymized)
providing 1,149,780 ratings (explicit / implicit)
about 271,379 books.
}
\usage{
data(BX)
data(BXBooks)
}
\format{
The format is:
BX: Formal class 'realRatingMatrix' [package "recommenderlab"]
BXBooks: data.frame with book meta information (title, author, year, publisher)
}
\details{
44778 x 204680 rating matrix of class 'realRatingMatrix' with 493813 ratings
between 0 and 10.
}
%\source{
%}
\references{
Cai-Nicolas Ziegler, Sean M. McNee, Joseph A. Konstan, Georg Lausen,
"Improving Recommendation Lists Through Topic Diversification,"
Proceedings of the 14th International World Wide Web Conference (WWW '05), May 10-14, 2005, Chiba, Japan.
}
\examples{
data(BX)
BX
hist(getRatings(BX), main="Distribution of ratings", breaks=10)
}
\keyword{datasets}
|
# PREAMBLE
library(tidyverse)
library(ordinal)
library(bootstrap)
library(rwebppl)
library(jsonlite)
library(lme4)
library(lmerTest)
library(lsmeans)
setwd("~/Documents/GitHub/alts/results/Experiment3")
# HELPER SCRIPTS
theta <- function(x,xdata,na.rm=T) {mean(xdata[x],na.rm=na.rm)}
ci.low <- function(x,na.rm=T) {
mean(x,na.rm=na.rm) - quantile(bootstrap(1:length(x),1000,theta,x,na.rm=na.rm)$thetastar,.025,na.rm=na.rm)}
ci.high <- function(x,na.rm=T) {
quantile(bootstrap(1:length(x),1000,theta,x,na.rm=na.rm)$thetastar,.975,na.rm=na.rm) - mean(x,na.rm=na.rm)}
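# ci.low / ci.high return the distance from the sample mean to the 2.5% and 97.5% quantiles of
# 1000 bootstrap resamples of the mean, so error bars below are drawn as [Mean - CILow, Mean + CIHigh].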
# LOAD DATA
d <- read.csv("results.csv")
# RUN EXCLUSIONS
# EXCLUDE NON-NATIVE ENGLISH SPEAKERS
levels(d$language) # KEEP DATA IF LANGUAGE = SOME SPELLING VARIATION OF 'ENGLISH' (OR NA)
d <- d %>%
filter(!(language == "Russian"))
# EXCLUDE PEOPLE WHO FAIL AT LEAST ONE EXCLUSION TRIAL
to_exclude <- d %>%
group_by(workerid, type, selection) %>%
filter((type == "left" && selection == "right") || (type == "right" && selection == "left")) %>%
group_by(workerid) %>%
summarize(n_mistakes = n()) %>%
filter(n_mistakes > 0)
d <- d %>%
filter(!(workerid %in% to_exclude$workerid))
# PROPORTION OF COMPETITOR PICTURES CHOSEN
sel_counts <- d %>%
# ONLY INTERESTED IN "LOOKS LIKE" TRIALS...
filter(type == "looks like") %>%
# ... THAT THE CONDITIONS SHARE IN COMMON
filter(id %in% (d %>% filter(type == "looks like" & condition == "symmetric"))$id) %>%
group_by(workerid,condition) %>%
summarize(n = n(), ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
pcompetitor = ncompetitor / (ncompetitor + ntarget))
sel_counts_learning <- d %>%
# ONLY INTERESTED IN "LOOKS LIKE" TRIALS...
filter(type == "looks like") %>%
# ... THAT THE CONDITIONS SHARE IN COMMON
filter(id %in% (d %>% filter(type == "looks like" & condition == "symmetric"))$id) %>%
mutate(half = ifelse(order < 13, 1, 2)) %>%
group_by(workerid,condition,half) %>%
summarize(n = n(), ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
pcompetitor = ncompetitor / (ncompetitor + ntarget))
# LOGISTIC REGRESSION TO INVESTIGATE EFFECT OF CONDITION (MAX RANEF STRUCTURE)
d$selection <- relevel(d$selection, ref = "target")
d$condition <- relevel(d$condition, ref = "control")
d_filtered <- d %>%
# ONLY INTERESTED IN "LOOKS LIKE" TRIALS...
filter(type == "looks like") %>%
# ... THAT THE CONDITIONS SHARE IN COMMON
filter(id %in% (d %>% filter(type == "looks like" & condition == "symmetric"))$id)
# EXAMPLE: BINARY LOGISTIC REGRESSION BETWEEN CONTROL AND TARGET CONDITIONS
m <- glmer(selection ~ condition + (1|workerid) + (1 + condition|id), family = "binomial", data = d_filtered)
lsmeans(m, revpairwise~condition)
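# revpairwise~condition gives all pairwise contrasts between conditions on the log-odds scale,
# with p-values adjusted for multiple comparisons by lsmeans' default method.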
# EXPLORATORY ANALYSIS: LOOKING FOR ORDER EFFECTS
m2 <- glmer(selection ~ condition + order + (1 + order|workerid) + (1 + order + condition|id), family = "binomial", data = d_filtered)
summary(m2)
# BAYESIAN DATA ANALYSIS
# LOAD & TRANSFORM DATA FROM NORMING STUDIES
d_noprompt <- read.csv("results_noprompt.csv")
levels(d_noprompt$language) # KEEP DATA IF LANGUAGE = SOME SPELLING VARIATION OF 'ENGLISH' (OR NA)
noprompt_to_exclude <- d %>%
group_by(workerid, type, selection) %>%
filter((type == "left" && selection == "right") || (type == "right" && selection == "left")) %>%
group_by(workerid) %>%
summarize(n_mistakes = n()) %>%
filter(n_mistakes > 0)
d_noprompt <- d_noprompt %>%
filter(!(workerid %in% noprompt_to_exclude$workerid))
d_noprompt <- d_noprompt %>%
filter(kind == "critical") %>%
group_by(id) %>%
summarize(ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
competitor_prior = ncompetitor / (ncompetitor + ntarget))
d_naming <- read.csv("results_naming.csv")
d_naming <- d_naming %>%
group_by(id) %>%
summarize(nknowtarget = sum(type == "target" & know == "True"),
nknowcompetitor = sum(type == "competitor" & know == "True"),
target_nameability = nknowtarget / sum(type == "target"),
competitor_nameability = nknowcompetitor / sum(type == "competitor"))
# INFER PARAMS GLOBALLY
bda <- read_file("bda_inferglobal_6cost.txt")
# bda <- read_file("bda_inferglobal_3cost.txt")
d_total <- d %>%
filter(kind == "critical" & type == "looks like") %>%
group_by(id, condition) %>% summarize(ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
observed_competitor = ncompetitor / (ncompetitor + ntarget))
d_total <- merge(d_total, d_naming, by = "id")
d_total <- merge(d_total, d_noprompt, by = "id")
d_total <- d_total %>%
select(id,observed_competitor,competitor_prior,target_nameability,competitor_nameability)
d_total_JSON <- toJSON(d_total)
full_bda <- webppl(paste("var itemData = ", d_total_JSON, "\n", bda))
full_posteriors <- (full_bda$posteriors)$support
full_maxap <- (full_bda$maxap)
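# full_maxap holds the maximum a posteriori parameter values from the global fit;
# its alpha is passed in and held fixed for the by-condition inferences below.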
# write.csv(full_posteriors, file = "posteriors/posteriors_fulldata.csv")
# INFER PARAMS BY CONDITION
# bda_bycondition <- read_file("bda_bycondition_6cost.txt")
bda_bycondition <- read_file("bda_bycondition_3cost.txt")
# GET BY-ITEM DATA BY CONDITION
data_byitem <- function(cond, data) {
output <- data %>%
filter(condition == cond & kind == "critical" & type == "looks like") %>%
group_by(id) %>%
summarize(ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
observed_competitor = ncompetitor / (ncompetitor + ntarget))
output <- merge(output, d_naming, by = "id")
output <- merge(output, d_noprompt, by = "id")
output <- output %>%
select(id,observed_competitor,competitor_prior,target_nameability,competitor_nameability)
}
symmetric_byitem <-data_byitem("symmetric",d)
control_byitem <-data_byitem("control",d)
target_byitem <-data_byitem("target",d)
nottarget_byitem <-data_byitem("nottarget",d)
# INFER PARAMETERS: CONTROL CONDITION
control_byitem_json <- toJSON(control_byitem)
control_bda <- webppl(paste("var itemData = ", control_byitem_json,
"\n var alpha =", full_maxap$alpha,
"\n", bda_bycondition))
posteriors_control <- (control_bda$posteriors)$support
# write.csv(posteriors_control, file = "posteriors/posteriors_control.csv")
# INFER PARAMETERS: TARGET CONDITION
target_byitem_json <- toJSON(target_byitem)
target_bda <- webppl(paste("var itemData = ", target_byitem_json,
"\n var alpha =", full_maxap$alpha,
"\n", bda_bycondition))
posteriors_target <- (target_bda$posteriors)$support
# write.csv(posteriors_target, file = "posteriors/posteriors_target.csv")
# INFER PARAMETERS: NOT-TARGET CONDITION
nottarget_byitem_json <- toJSON(nottarget_byitem)
nottarget_bda <- webppl(paste("var itemData = ", nottarget_byitem_json,
"\n var alpha =", full_maxap$alpha,
"\n", bda_bycondition))
posteriors_nottarget <- (nottarget_bda$posteriors)$support
# write.csv(posteriors_nottarget, file = "posteriors/posteriors_nottarget.csv")
# INFER PARAMETERS: SYMMETRIC CONDITION
symmetric_byitem_json <- toJSON(symmetric_byitem)
symmetric_bda <- webppl(paste("var itemData = ", symmetric_byitem_json,
"\n var alpha =", full_maxap$alpha,
"\n", bda_bycondition))
posteriors_symmetric <- (symmetric_bda$posteriors)$support
# write.csv(posteriors_symmetric, file = "posteriors/posteriors_symmetric.csv")
# NEW VISUALIZATIONS
# BY-CONDITION VISUALIZATIONS
dodge = position_dodge(.9)
toplot <- function (data) {
output <- data %>%
# group_by(condition,half) %>%
group_by(condition) %>%
summarize(Mean = mean(pcompetitor),CILow=ci.low(pcompetitor),CIHigh =ci.high(pcompetitor)) %>%
ungroup() %>%
mutate(Ymin=Mean-CILow,Ymax=Mean+CIHigh)
return(output)
}
plot_means <- function (toplot) {
ggplot(toplot, aes(x=condition,y=Mean)) +
# facet_wrap(~half) +
geom_bar(stat="identity",position = "dodge") +
theme(axis.text.x=element_text(angle=20,hjust=1,vjust=1)) +
geom_errorbar(aes(ymin=Ymin,ymax=Ymax),width=.25, position = dodge) +
labs(x = "Condition", y = "Proportion") # +
# ggtitle("Proportion of a competitor image chosen on 'looks like' trials\n(Experiment 3)")
}
plot_means(toplot(sel_counts))
# BY-ITEM VISUALIZATIONS
byitem <- d %>%
filter(kind == "critical" & type == "looks like") %>%
# ... THAT THE CONDITIONS SHARE IN COMMON
filter(id %in% (d %>% filter(type == "looks like" & condition == "symmetric"))$id) %>%
group_by(id,condition) %>%
summarize(ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
observed_competitor = ncompetitor / (ncompetitor + ntarget))
byitem <- merge(byitem, d_naming, by = "id")
byitem <- merge(byitem, d_noprompt, by = "id")
byitem <- byitem %>%
mutate(nameability_index = target_nameability / competitor_nameability)
table(d$id, d$condition, d$type)
ggplot(byitem, aes(x=condition, y=observed_competitor)) +
facet_wrap(~id)+
geom_point() +
labs(x = "Condition", y = "Proportion") +
# ggtitle("Proportion of a competitor image chosen on 'looks like' trials\n(By item, Experiment 3)") +
theme(axis.text.x=element_text(angle=20,hjust=1,vjust=1))
# ALPHA PARAMETER GLOBAL DIST
ggplot(full_posteriors, aes(alpha)) + geom_density(alpha = 0.2) +
labs(x = "Inferred alpha parameter value", y = "Density") # +
# ggtitle("Posterior distribution of alpha parameter values\n(Experiment 3)")
# VISUALIZE POSTERIOR PARAMS BY CONDITION
vizparams_bycondition <- function(posteriors) {
toplot <- posteriors %>%
select(cost_is,cost_not,cost_lookslike) %>%
# select(cost_istarget,cost_looksliketarget,cost_nottarget) %>%
gather(key = "parameter", value = "cost")
ggplot(toplot, aes(cost, fill = parameter)) + geom_density(alpha = 0.2) +
labs(x = "Inferred value", y = "Density")
}
vizparams_bycondition(posteriors_control)
vizparams_bycondition(posteriors_target)
vizparams_bycondition(posteriors_nottarget)
vizparams_bycondition(posteriors_symmetric)
# VISUALIZE POSTERIOR PREDICTIVES BY CONDITION
target_predictives <- target_bda$predictions
nottarget_predictives <- nottarget_bda$predictions
symmetric_predictives <- symmetric_bda$predictions
control_predictives <- control_bda$predictions %>%
filter(id %in% symmetric_predictives$id)
target_byitem$condition <- "target"
nottarget_byitem$condition <- "nottarget"
symmetric_byitem$condition <- "symmetric"
control_byitem$condition <- "control"
target_predictive_toplot <- cbind(target_predictives,target_byitem$observed_competitor,target_byitem$condition)
nottarget_predictive_toplot <- cbind(nottarget_predictives,nottarget_byitem$observed_competitor,nottarget_byitem$condition)
symmetric_predictive_toplot <- cbind(symmetric_predictives,symmetric_byitem$observed_competitor,symmetric_byitem$condition)
control_predictive_toplot <- cbind(control_predictives,(control_byitem %>% filter (id %in% control_predictives$id))$observed_competitor,
(control_byitem %>% filter (id %in% control_predictives$id))$condition)
colnames(target_predictive_toplot) <- c("id","prediction","observation","condition")
colnames(nottarget_predictive_toplot) <- c("id","prediction","observation","condition")
colnames(control_predictive_toplot) <- c("id","prediction","observation","condition")
colnames(symmetric_predictive_toplot) <- c("id","prediction","observation","condition")
toplot_predictive <- rbind(target_predictive_toplot, nottarget_predictive_toplot,
symmetric_predictive_toplot, control_predictive_toplot)
ggplot(toplot_predictive, aes(x=prediction, y=observation, color = condition)) + geom_point() +
labs(x = "Predicted proportion of competitor chosen", y = "Observed proportion of competitor chosen") +
expand_limits(x = c(0,0.75), y = c(0,0.75)) +
geom_abline()
summary(lmer(observation ~ prediction + (prediction|id), data = toplot_predictive))
# NAMEABILITY PLOTS
toplot_nameability <- rbind(target_byitem,nottarget_byitem,symmetric_byitem,control_byitem)
competitor_nameability_plot <- ggplot(toplot_nameability, aes(x=competitor_nameability, y = observed_competitor)) +
facet_wrap(~condition) +
labs(x = "Competitor nameability", y = "Observed proportion of competitor chosen") +
geom_point() +
geom_smooth(method='lm',formula=y~x)
d_filtered_total <- merge(d_filtered, d_naming, by = "id")
m_nameability <- glm (selection ~ condition * competitor_nameability, family = "binomial", data = d_filtered_total)
summary(glmer(selection ~ condition * competitor_nameability + (1|workerid) + (1 + condition|id), family = "binomial", data = d_filtered_total))
write.csv(symmetric_bda$predictions,"predictives/symmetric_predictives.csv" )
write.csv(control_bda$predictions,"predictives/control_predictives.csv" )
write.csv(target_bda$predictions,"predictives/target_predictives.csv" )
write.csv(nottarget_bda$predictions,"predictives/nottarget_predictives.csv" )
full_posteriors <- read.csv("posteriors/posteriors_fulldata.csv")
posteriors_control <- read.csv("posteriors/posteriors_control.csv")
posteriors_target <- read.csv("posteriors/posteriors_target.csv")
posteriors_nottarget <- read.csv("posteriors/posteriors_nottarget.csv")
posteriors_symmetric <- read.csv("posteriors/posteriors_symmetric.csv")
|
/results/Experiment3/analysis.R
|
no_license
|
bwaldon/alts
|
R
| false | false | 13,726 |
r
|
# PREAMBLE
library(tidyverse)
library(ordinal)
library(bootstrap)
library(rwebppl)
library(jsonlite)
library(lme4)
library(lmerTest)
library(lsmeans)
setwd("~/Documents/GitHub/alts/results/Experiment3")
# HELPER SCRIPTS
theta <- function(x,xdata,na.rm=T) {mean(xdata[x],na.rm=na.rm)}
ci.low <- function(x,na.rm=T) {
mean(x,na.rm=na.rm) - quantile(bootstrap(1:length(x),1000,theta,x,na.rm=na.rm)$thetastar,.025,na.rm=na.rm)}
ci.high <- function(x,na.rm=T) {
quantile(bootstrap(1:length(x),1000,theta,x,na.rm=na.rm)$thetastar,.975,na.rm=na.rm) - mean(x,na.rm=na.rm)}
# LOAD DATA
d <- read.csv("results.csv")
# RUN EXCLUSIONS
# EXCLUDE NON-NATIVE ENGLISH SPEAKERS
levels(d$language) # KEEP DATA IF LANGUAGE = SOME SPELLING VARIATION OF 'ENGLISH' (OR NA)
d <- d %>%
filter(!(language == "Russian"))
# EXCLUDE PEOPLE WHO FAIL AT LEAST ONE EXCLUSION TRIAL
to_exclude <- d %>%
group_by(workerid, type, selection) %>%
filter((type == "left" && selection == "right") || (type == "right" && selection == "left")) %>%
group_by(workerid) %>%
summarize(n_mistakes = n()) %>%
filter(n_mistakes > 0)
d <- d %>%
filter(!(workerid %in% to_exclude$workerid))
# PROPORTION OF COMPETITOR PICTURES CHOSEN
sel_counts <- d %>%
# ONLY INTERESTED IN "LOOKS LIKE" TRIALS...
filter(type == "looks like") %>%
# ... THAT THE CONDITIONS SHARE IN COMMON
filter(id %in% (d %>% filter(type == "looks like" & condition == "symmetric"))$id) %>%
group_by(workerid,condition) %>%
summarize(n = n(), ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
pcompetitor = ncompetitor / (ncompetitor + ntarget))
sel_counts_learning <- d %>%
# ONLY INTERESTED IN "LOOKS LIKE" TRIALS...
filter(type == "looks like") %>%
# ... THAT THE CONDITIONS SHARE IN COMMON
filter(id %in% (d %>% filter(type == "looks like" & condition == "symmetric"))$id) %>%
mutate(half = ifelse(order < 13, 1, 2)) %>%
group_by(workerid,condition,half) %>%
summarize(n = n(), ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
pcompetitor = ncompetitor / (ncompetitor + ntarget))
# LOGISTIC REGRESSION TO INVESTIGATE EFFECT OF CONDITION (MAX RANEF STRUCTURE)
d$selection <- relevel(d$selection, ref = "target")
d$condition <- relevel(d$condition, ref = "control")
d_filtered <- d %>%
# ONLY INTERESTED IN "LOOKS LIKE" TRIALS...
filter(type == "looks like") %>%
# ... THAT THE CONDITIONS SHARE IN COMMON
filter(id %in% (d %>% filter(type == "looks like" & condition == "symmetric"))$id)
# EXAMPLE: BINARY LOGISTIC REGRESSION BETWEEN CONTROL AND TARGET CONDITIONS
m <- glmer(selection ~ condition + (1|workerid) + (1 + condition|id), family = "binomial", data = d_filtered)
lsmeans(m, revpairwise~condition)
# EXPLORATORY ANALYSIS: LOOKING FOR ORDER EFFECTS
m2 <- glmer(selection ~ condition + order + (1 + order|workerid) + (1 + order + condition|id), family = "binomial", data = d_filtered)
summary(m2)
# BAYESIAN DATA ANALYSIS
# LOAD & TRANSFORM DATA FROM NORMING STUDIES
d_noprompt <- read.csv("results_noprompt.csv")
levels(d_noprompt$language) # KEEP DATA IF LANGUAGE = SOME SPELLING VARIATION OF 'ENGLISH' (OR NA)
noprompt_to_exclude <- d %>%
group_by(workerid, type, selection) %>%
filter((type == "left" && selection == "right") || (type == "right" && selection == "left")) %>%
group_by(workerid) %>%
summarize(n_mistakes = n()) %>%
filter(n_mistakes > 0)
d_noprompt <- d_noprompt %>%
filter(!(workerid %in% noprompt_to_exclude$workerid))
d_noprompt <- d_noprompt %>%
filter(kind == "critical") %>%
group_by(id) %>%
summarize(ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
competitor_prior = ncompetitor / (ncompetitor + ntarget))
d_naming <- read.csv("results_naming.csv")
d_naming <- d_naming %>%
group_by(id) %>%
summarize(nknowtarget = sum(type == "target" & know == "True"),
nknowcompetitor = sum(type == "competitor" & know == "True"),
target_nameability = nknowtarget / sum(type == "target"),
competitor_nameability = nknowcompetitor / sum(type == "competitor"))
# INFER PARAMS GLOBALLY
bda <- read_file("bda_inferglobal_6cost.txt")
# bda <- read_file("bda_inferglobal_3cost.txt")
d_total <- d %>%
filter(kind == "critical" & type == "looks like") %>%
group_by(id, condition) %>% summarize(ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
observed_competitor = ncompetitor / (ncompetitor + ntarget))
d_total <- merge(d_total, d_naming, by = "id")
d_total <- merge(d_total, d_noprompt, by = "id")
d_total <- d_total %>%
select(id,observed_competitor,competitor_prior,target_nameability,competitor_nameability)
d_total_JSON <- toJSON(d_total)
full_bda <- webppl(paste("var itemData = ", d_total_JSON, "\n", bda))
full_posteriors <- (full_bda$posteriors)$support
full_maxap <- (full_bda$maxap)
# write.csv(full_posteriors, file = "posteriors/posteriors_fulldata.csv")
# INFER PARAMS BY CONDITION
# bda_bycondition <- read_file("bda_bycondition_6cost.txt")
bda_bycondition <- read_file("bda_bycondition_3cost.txt")
# GET BY-ITEM DATA BY CONDITION
data_byitem <- function(cond, data) {
output <- data %>%
filter(condition == cond & kind == "critical" & type == "looks like") %>%
group_by(id) %>%
summarize(ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
observed_competitor = ncompetitor / (ncompetitor + ntarget))
output <- merge(output, d_naming, by = "id")
output <- merge(output, d_noprompt, by = "id")
output <- output %>%
select(id,observed_competitor,competitor_prior,target_nameability,competitor_nameability)
}
symmetric_byitem <-data_byitem("symmetric",d)
control_byitem <-data_byitem("control",d)
target_byitem <-data_byitem("target",d)
nottarget_byitem <-data_byitem("nottarget",d)
# INFER PARAMETERS: CONTROL CONDITION
control_byitem_json <- toJSON(control_byitem)
control_bda <- webppl(paste("var itemData = ", control_byitem_json,
"\n var alpha =", full_maxap$alpha,
"\n", bda_bycondition))
posteriors_control <- (control_bda$posteriors)$support
# write.csv(posteriors_control, file = "posteriors/posteriors_control.csv")
# INFER PARAMETERS: TARGET CONDITION
target_byitem_json <- toJSON(target_byitem)
target_bda <- webppl(paste("var itemData = ", target_byitem_json,
"\n var alpha =", full_maxap$alpha,
"\n", bda_bycondition))
posteriors_target <- (target_bda$posteriors)$support
# write.csv(posteriors_target, file = "posteriors/posteriors_target.csv")
# INFER PARAMETERS: NOT-TARGET CONDITION
nottarget_byitem_json <- toJSON(nottarget_byitem)
nottarget_bda <- webppl(paste("var itemData = ", nottarget_byitem_json,
"\n var alpha =", full_maxap$alpha,
"\n", bda_bycondition))
posteriors_nottarget <- (nottarget_bda$posteriors)$support
# write.csv(posteriors_nottarget, file = "posteriors/posteriors_nottarget.csv")
# INFER PARAMETERS: SYMMETRIC CONDITION
symmetric_byitem_json <- toJSON(symmetric_byitem)
symmetric_bda <- webppl(paste("var itemData = ", symmetric_byitem_json,
"\n var alpha =", full_maxap$alpha,
"\n", bda_bycondition))
posteriors_symmetric <- (symmetric_bda$posteriors)$support
# write.csv(posteriors_symmetric, file = "posteriors/posteriors_symmetric.csv")
# NEW VISUALIZATIONS
# BY-CONDITION VISUALIZATIONS
dodge = position_dodge(.9)
toplot <- function (data) {
output <- data %>%
# group_by(condition,half) %>%
group_by(condition) %>%
summarize(Mean = mean(pcompetitor),CILow=ci.low(pcompetitor),CIHigh =ci.high(pcompetitor)) %>%
ungroup() %>%
mutate(Ymin=Mean-CILow,Ymax=Mean+CIHigh)
return(output)
}
plot_means <- function (toplot) {
ggplot(toplot, aes(x=condition,y=Mean)) +
# facet_wrap(~half) +
geom_bar(stat="identity",position = "dodge") +
theme(axis.text.x=element_text(angle=20,hjust=1,vjust=1)) +
geom_errorbar(aes(ymin=Ymin,ymax=Ymax),width=.25, position = dodge) +
labs(x = "Condition", y = "Proportion") # +
# ggtitle("Proportion of a competitor image chosen on 'looks like' trials\n(Experiment 3)")
}
plot_means(toplot(sel_counts))
# BY-ITEM VISUALIZATIONS
byitem <- d %>%
filter(kind == "critical" & type == "looks like") %>%
# ... THAT THE CONDITIONS SHARE IN COMMON
filter(id %in% (d %>% filter(type == "looks like" & condition == "symmetric"))$id) %>%
group_by(id,condition) %>%
summarize(ntarget = sum(selection == "target"),
ncompetitor = sum(selection == "competitor"),
observed_competitor = ncompetitor / (ncompetitor + ntarget))
byitem <- merge(byitem, d_naming, by = "id")
byitem <- merge(byitem, d_noprompt, by = "id")
byitem <- byitem %>%
mutate(nameability_index = target_nameability / competitor_nameability)
table(d$id, d$condition, d$type)
ggplot(byitem, aes(x=condition, y=observed_competitor)) +
facet_wrap(~id)+
geom_point() +
labs(x = "Condition", y = "Proportion") +
# ggtitle("Proportion of a competitor image chosen on 'looks like' trials\n(By item, Experiment 3)") +
theme(axis.text.x=element_text(angle=20,hjust=1,vjust=1))
# ALPHA PARAMETER GLOBAL DIST
ggplot(full_posteriors, aes(alpha)) + geom_density(alpha = 0.2) +
labs(x = "Inferred alpha parameter value", y = "Density") # +
# ggtitle("Posterior distribution of alpha parameter values\n(Experiment 3)")
# VISUALIZE POSTERIOR PARAMS BY CONDITION
vizparams_bycondition <- function(posteriors) {
toplot <- posteriors %>%
select(cost_is,cost_not,cost_lookslike) %>%
# select(cost_istarget,cost_looksliketarget,cost_nottarget) %>%
gather(key = "parameter", value = "cost")
ggplot(toplot, aes(cost, fill = parameter)) + geom_density(alpha = 0.2) +
labs(x = "Inferred value", y = "Density")
}
vizparams_bycondition(posteriors_control)
vizparams_bycondition(posteriors_target)
vizparams_bycondition(posteriors_nottarget)
vizparams_bycondition(posteriors_symmetric)
# VISUALIZE POSTERIOR PREDICTIVES BY CONDITION
target_predictives <- target_bda$predictions
nottarget_predictives <- nottarget_bda$predictions
symmetric_predictives <- symmetric_bda$predictions
control_predictives <- control_bda$predictions %>%
filter(id %in% symmetric_predictives$id)
target_byitem$condition <- "target"
nottarget_byitem$condition <- "nottarget"
symmetric_byitem$condition <- "symmetric"
control_byitem$condition <- "control"
target_predictive_toplot <- cbind(target_predictives,target_byitem$observed_competitor,target_byitem$condition)
nottarget_predictive_toplot <- cbind(nottarget_predictives,nottarget_byitem$observed_competitor,nottarget_byitem$condition)
symmetric_predictive_toplot <- cbind(symmetric_predictives,symmetric_byitem$observed_competitor,symmetric_byitem$condition)
control_predictive_toplot <- cbind(control_predictives,(control_byitem %>% filter (id %in% control_predictives$id))$observed_competitor,
(control_byitem %>% filter (id %in% control_predictives$id))$condition)
colnames(target_predictive_toplot) <- c("id","prediction","observation","condition")
colnames(nottarget_predictive_toplot) <- c("id","prediction","observation","condition")
colnames(control_predictive_toplot) <- c("id","prediction","observation","condition")
colnames(symmetric_predictive_toplot) <- c("id","prediction","observation","condition")
toplot_predictive <- rbind(target_predictive_toplot, nottarget_predictive_toplot,
symmetric_predictive_toplot, control_predictive_toplot)
ggplot(toplot_predictive, aes(x=prediction, y=observation, color = condition)) + geom_point() +
labs(x = "Predicted proportion of competitor chosen", y = "Observed proportion of competitor chosen") +
expand_limits(x = c(0,0.75), y = c(0,0.75)) +
geom_abline()
summary(lmer(observation ~ prediction + (prediction|id), data = toplot_predictive))
# NAMEABILITY PLOTS
toplot_nameability <- rbind(target_byitem,nottarget_byitem,symmetric_byitem,control_byitem)
competitor_nameability_plot <- ggplot(toplot_nameability, aes(x=competitor_nameability, y = observed_competitor)) +
facet_wrap(~condition) +
labs(x = "Competitor nameability", y = "Observed proportion of competitor chosen") +
geom_point() +
geom_smooth(method='lm',formula=y~x)
d_filtered_total <- merge(d_filtered, d_naming, by = "id")
m_nameability <- glm (selection ~ condition * competitor_nameability, family = "binomial", data = d_filtered_total)
summary(glmer(selection ~ condition * competitor_nameability + (1|workerid) + (1 + condition|id), family = "binomial", data = d_filtered_total))
write.csv(symmetric_bda$predictions,"predictives/symmetric_predictives.csv" )
write.csv(control_bda$predictions,"predictives/control_predictives.csv" )
write.csv(target_bda$predictions,"predictives/target_predictives.csv" )
write.csv(nottarget_bda$predictions,"predictives/nottarget_predictives.csv" )
full_posteriors <- read.csv("posteriors/posteriors_fulldata.csv")
posteriors_control <- read.csv("posteriors/posteriors_control.csv")
posteriors_target <- read.csv("posteriors/posteriors_target.csv")
posteriors_nottarget <- read.csv("posteriors/posteriors_nottarget.csv")
posteriors_symmetric <- read.csv("posteriors/posteriors_symmetric.csv")
|
\name{ibdPlot}
\alias{ibdPlot}
\alias{ibdAreasDraw}
\alias{ibdAssignRelatedness}
\alias{ibdAssignRelatednessKing}
\title{
Plot theoretical and observed identity by descent values and assign relationships
}
\description{\code{ibdPlot} produces an IBD plot showing observed
identity by descent values color coded by expected relationship.
Theoretical boundaries for full-sibling, second-degree, and
third-degree relatives are plotted in orange.
\code{ibdAreasDraw} overlays relationship areas for IBD analysis on
the plot.
\code{ibdAssignRelatedness} identifies observed relatives.
\code{ibdAssignRelatednessKing} identifies observed relatives using the kinship coefficients and IBS0 estimates from the KING model.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usage{
ibdPlot(k0, k1, alpha=0.05, relation=NULL, color=NULL,
rel.lwd=2, rel.draw=c("FS", "Deg2", "Deg3"), ...)
ibdAreasDraw(alpha=0.05, m=0.04, po.w=0.1, po.h=0.1,
dup.w=0.1, dup.h=0.1, un.w=0.25, un.h=0.25, rel.lwd=2,
xcol=c("cyan","red","blue","lightgreen","magenta","black"))
ibdAssignRelatedness(k0, k1, alpha=0.05, m=0.04, po.w=0.1, po.h=0.1,
dup.w=0.1, dup.h=0.1, un.w=0.25, un.h=0.25)
ibdAssignRelatednessKing(ibs0, kc, cut.kc.dup=1/(2^(3/2)),
cut.kc.fs=1/(2^(5/2)), cut.kc.deg2=1/(2^(7/2)),
cut.kc.deg3=1/(2^(9/2)), cut.ibs0.err=0.003)
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\arguments{
\item{k0}{A vector of k0 values.}
\item{k1}{A vector of k1 values.}
\item{kc}{A vector of kinship coefficient values (KING model).}
\item{ibs0}{A vector of IBS0 values (KING model).}
\item{alpha}{significance level - finds 100(1-alpha)\% prediction intervals
for second and third degree relatives and 100(1-alpha)\% prediction
ellipse for full siblings.
}
\item{relation}{A vector of relationships. Recognized values are "PO"=parent/offspring, "FS"=full siblings, "HS"=half siblings, "Av"=avuncular, "GpGc"=grandparent-grandchild, "Deg2"=any
second-degree, "FC"=first
cousins, "HAv"=half-avuncular, "Deg3"=any third degree,
"U"=unrelated, and "Q"=unknown.}
\item{color}{A vector of colors for (k0,k1) points.}
\item{rel.lwd}{Line width for theoretical full-sib, Deg2, and Deg3 boundaries.}
\item{rel.draw}{Which theoretical boundaries to plot: one or more of
"FS" (full-sib), "Deg2" (second-degree), "Deg3" (third-degree). If
\code{NULL}, no boundaries are drawn.}
\item{\dots}{Other graphical parameters to pass to \code{\link{plot}} and
\code{\link{points}}.}
\item{m}{width of rectangle along diagonal line}
\item{po.w}{width of parent-offspring rectangle}
\item{po.h}{height of parent-offspring rectangle}
\item{dup.w}{width of duplicate rectangle}
\item{dup.h}{height of duplicate rectangle}
\item{un.w}{width of unrelated rectangle}
\item{un.h}{height of unrelated rectangle}
\item{xcol}{colors for parent-offspring, full-sib, Deg2, Deg3, dup & unrelated areas}
\item{cut.kc.dup}{Kinship coefficient threshold for dividing duplicates and first degree relatives.}
\item{cut.kc.fs}{Kinship coefficient threshold for dividing full siblings and second degree relatives.}
\item{cut.kc.deg2}{Kinship coefficient threshold for dividing second and third degree relatives.}
\item{cut.kc.deg3}{Kinship coefficient threshold for dividing third degree relatives and unrelated.}
\item{cut.ibs0.err}{IBS0 threshold for dividing parent-offsprings pairs from other relatives. Should be 0, but is usually slightly higher due to genotyping errors.}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\details{
\code{ibdPlot} produces an IBD plot showing observed identity by descent
values color coded by expected relationship, typically as deduced from
pedigree data. Points are plotted according to their corresponding
value in the \code{color} vector, and the \code{relation} vector is used
to make the plot legend. In addition to the relationships listed above,
any relationships output from \code{\link{pedigreePairwiseRelatedness}}
will be recognized.
The theoretical boundary for full siblings is indicated by an ellipse, and
the boundaries for the second- and third-degree intervals are drawn in
orange. For full siblings, the 100(1-alpha)\% prediction ellipse assumes a
bivariate normal distribution with known mean and covariance matrix. For
second-degree (half siblings, avuncular, grandparent-grandchild) and
third-degree (first cousins) relatives, the 100(1-alpha)\% prediction
intervals for k1 assume a normal distribution with known mean and
variance, computed as in Hill and Weir (2011).
\code{ibdAreasDraw} overlays relationship areas on the plot to help
with analyzing observed relationships.
\code{ibdAssignRelatedness} identifies relatives based on their (k0, k1) coordinates.
\code{ibdAssignRelatednessKing} identifies relatives based on their (ibs0, kc) coordinates (KING model).
}
\value{
\code{ibdAssignRelatedness} and \code{ibdAssignRelatednessKing} return a vector of relationships with
values "Dup"=duplicate, "PO"=parent-offspring, "FS"=full sibling,
"Deg2"=second degree, "Deg3"=third degree, "U"=unrelated, and
"Q"=unknown.
}
\references{
Hill, W.G. and B.S. Weir, Variation in actual relationship as a consequence of mendelian sampling and linkage, Genet. Res. Camb. (2011), 93, 47-64.
Manichaikul, A., Mychaleckyj J.C., Rich S.S., Daly K., Sale M., and Chen W.M., Robust relationship inference in genome-wide association studies, Bioinformatics (2010), 26(22), 2867-2873.
}
\author{Cathy Laurie, Cecelia Laurie, and Adrienne Stilp}
\seealso{
\code{\link{relationsMeanVar}},
\code{\link{pedigreePairwiseRelatedness}}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\examples{
k0 <- c(0, 0, 0.25, 0.5, 0.75, 1)
k1 <- c(0, 1, 0.5, 0.5, 0.25, 0)
exp.rel <- c("Dup", "PO", "FS", "HS", "FC", "U")
ibdPlot(k0, k1, relation=exp.rel)
ibdAreasDraw()
obs.rel <- ibdAssignRelatedness(k0, k1)
kc <- c(0.5, 0.25, 0.25, 0.125, 0.063, 0)
ibs0 <- c(0, 0, 0.25, 0.5, 0.75, 1)
obs.rel.king <- ibdAssignRelatednessKing(ibs0, kc)
}
\keyword{hplot}
\keyword{manip}
|
/man/ibdPlot.Rd
|
no_license
|
shengfengwang/GWASTools
|
R
| false | false | 6,450 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_mcmoopts.R
\name{get_mcmoopts}
\alias{get_mcmoopts}
\title{Gets options for MCMO questions}
\usage{
get_mcmoopts(item, delim = ",", omit = c(), custom.respopts = c(),
escape = TRUE)
}
\arguments{
\item{item}{Data vector from MCMO question}
\item{delim}{Delimiter for multiple selections, default is a comma}
\item{omit}{Vector of options which should be ignored}
\item{escape}{Escapes R special characters, default is TRUE}
}
\examples{
# Get options from MCMO question "Q3" in "survey" data.frame object
get_mcmoopts(survey$Q3)
# Omit "Computer science" option
get_mcmoopts(survey$Q3, omit="Computer science")
}
\description{
Parses data from an MCMO question and returns a vector of possible options.
}
|
/man/get_mcmoopts.Rd
|
no_license
|
rblissett/pubpub
|
R
| false | true | 780 |
rd
|
|
context("apartment()")
test_that("apartment()", {
#-----------------------------------------------------------------------------------
# test for apartment()
#-----------------------------------------------------------------------------------
  expect_true(exists("apartment"),
              info = "Error: apartment() is missing")
  expect_true(exists("print.apartment"),
              info = "Error: print.apartment() is missing")
  checkmate::expect_function(apartment, nargs = 2)
  expect_function_arguments(apartment, c("rooms", "m2"))
  test1<-apartment(1,29)
  expect_true(object = is.list(test1),
              info="apartment() does not return an object with a list structure")
  expect_equal(object = length(test1),expected = 2,
               info="The list does not have two elements")
  expect_true(object = all(sapply(test1,FUN = function(x){is.numeric(x)|is.integer(x)})),
              info="The elements of the list are not numeric")
  expect_class(x = test1, classes = "apartment",
               info="Error: The function does not return an apartment object.")
  expect_error(object = apartment(0,29),
               info="Error: the function does not stop when the number of rooms is < 0")
  expect_error(object = apartment(-3,29),
               info="Error: the function does not stop when the number of rooms is < 0")
  expect_error(object = apartment(3,0),
               info="Error: the function does not stop when the number of square meters is < 0")
  expect_error(object = apartment(5,-10),
               info="Error: the function does not stop when the number of square meters is < 0")
  expect_error(object = apartment(-5,-10),
               info="Error: the function does not stop when both the number of square meters and the number of rooms are less than zero")
#-----------------------------------------------------------------------------------
# test for print.apartment()
#-----------------------------------------------------------------------------------
  test2<-apartment(1,28)
  expect_output(print(test2), regexp = "studio",
                info="Error: the print method does not work for the case apartment(1,28)")
  expect_output(print(test2), regexp = "28",
                info="Error: the print method does not work for the case apartment(1,28)")
  test3<-apartment(1,45)
  expect_output(print.apartment(test3), regexp = "studio",
                info="Error: print.apartment() does not work for the case apartment(1,45)")
  expect_output(print.apartment(test3), regexp = "45",
                info="Error: print.apartment() does not work for the case apartment(1,45)")
  test4<-apartment(3,88)
  expect_output(print(test4), regexp = "apartment",
                info="Error: the print method does not work for the case apartment(3,88)")
  expect_output(print(test4), regexp = "88",
                info="Error: the print method does not work for the case apartment(3,88)")
  test5<-apartment(10,230)
  expect_output(print(test5), regexp = "apartment",
                info="Error: the print method does not work for the case apartment(10,230)")
  expect_output(print(test5), regexp = "230",
                info="Error: the print method does not work for the case apartment(10,230)")
})
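# -----------------------------------------------------------------------------------
# Added sketch (not part of the original test file): a minimal apartment() and
# print.apartment() consistent with the assertions above. The argument names
# `rooms` and `m2` come from the tests; the list element names are an assumption.
# Kept commented out so that sourcing this test file does not mask student code.
# -----------------------------------------------------------------------------------
# apartment <- function(rooms, m2) {
#   stopifnot(rooms > 0, m2 > 0)  # abort on non-positive rooms or square meters
#   structure(list(rooms = rooms, m2 = m2), class = "apartment")
# }
# print.apartment <- function(x, ...) {
#   type <- if (x$rooms == 1) "studio" else "apartment"
#   cat("A", type, "of", x$m2, "m2 with", x$rooms, "room(s)\n")
#   invisible(x)
# }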
|
/Labs/Tests/Tasks/apartment_tests.R
|
no_license
|
marrycv/KursRprgm
|
R
| false | false | 3,154 |
r
|
|
library(tidyverse)
library(lubridate)
library(Quandl)
#-------------------------------------------------------------------------------
# Solutions to Lec14.R Exercises
#-------------------------------------------------------------------------------
# Reload bitcoin
bitcoin <- Quandl("BAVERAGE/USD") %>%
tbl_df() %>%
rename(
Avg = `24h Average`,
Total_Volume = `Total Volume`
)
# EXERCISE Q3: Using the interval() and %within% commands, plot the time series
# of the weekly average price of bitcoin for dates in 2013 and on.
bitcoin %>%
filter(Date %within% interval(ymd("2013-01-01"), ymd("2016-12-31"))) %>%
mutate(weekly = floor_date(Date, "week")) %>%
group_by(weekly) %>%
summarise(Avg=mean(Avg)) %>%
ggplot(data=., aes(x=weekly, y=Avg)) +
geom_line()
# EXERCISE Q4: Recreate the plot from Exercise 1 above, but with
# -Dates pre 2015-01-01 in one color, and post in another
# -A SINGLE black smoother line. A tough one!
bitcoin %>%
  mutate(`Post 2015` = Date %within% interval(ymd("2015-01-01"), ymd("2016-12-31"))) %>%
  ggplot(data=., aes(x=Date, y=Avg)) +
  geom_line(aes(col=`Post 2015`)) +
geom_smooth(se=FALSE, size=0.5, col="black")
# Whoa, what's this...
library(plotly)
ggplotly()
|
/Lec14 More Dates & Times/Lec14_solutions.R
|
no_license
|
2016-09-Middlebury-Data-Science/Topics
|
R
| false | false | 1,253 |
r
|
|
x <- c(0.18, -1.54, 0.42, 0.95)
w <- c(2, 1, 3, 1)
mew <- c(1.077, 0.300, 0.0025, 0.1471)
for (each in mew){
total <- 0
#print(each)
#print(x[i])
#print(w[i])
for (i in 1:4){
total = total + w[i]*(x[i]-each)**2
}
print(total)
}
# vectorized equivalent of the loop above (the original line `w(x-mew)**2` was not valid R)
sapply(mew, function(m) sum(w * (x - m)^2))
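# Added check (not in the original quiz file): the value of mu that minimizes
# sum(w * (x - mu)^2) is the weighted mean, so it should match the best candidate (~0.1471).
sum(w * x) / sum(w)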
|
/7 - Regression Models/Quiz 1/Question 1.R
|
no_license
|
xush65/Data-Scientist-MOOC
|
R
| false | false | 262 |
r
|
|
# Packages used below; `root` is assumed to be set by the calling script.
library(tidyverse)
g<-read_tsv(file=file.path( root,"gadget","summary_Gadget.txt"),guess_max = 100000)
g<-filter(g,Species !='cod')
rec<-filter(g,Age==1 & Quarter==1) %>% mutate(value=Number/1000,source="Gadget",var='Recruits') %>% select(Year,Species,value,var,source) # there are no stock number for age 0?
g$one<-1
Fbar<- filter(g, (Species=='her' & Age %in% (3:6)) | (Species=='spr' & Age %in% (3:5))) %>%
group_by(Year,Species) %>% summarise(Fbar=sum(F_q),no=sum(one)) %>% mutate(value=Fbar/no*4,no=NULL,Fbar=NULL,source="Gadget",var='Fbar')
SSB<-filter(g,Quarter==1) %>% group_by(Year,Species) %>% summarise(value=sum(SSB,na.rm=TRUE)/1000) %>% mutate(source="Gadget",var='SSB')
gadget<-bind_rows(bind_rows(rec,Fbar),SSB)
g<-read_delim(file=file.path( root,"gadget","summary_SMS.out"),delim=' ',guess_max = 100000)
g<-mutate(g,Species=c('cod','her','spr')[Species.n])
rec<-filter(g,Age==1 & Quarter==1) %>% mutate(value=N,source="SMS",var='Recruits') %>% select(Year,Species,value,var,source) # there are no stock number for age 0?
g$one<-1
Fbar<- filter(g, (Species=='her' & Age %in% (3:6)) | (Species=='spr' & Age %in% (3:5))) %>%
group_by(Year,Species) %>% summarise(Fbar=sum(F),no=sum(one)) %>% mutate(value=Fbar/no*4,no=NULL,Fbar=NULL,source="SMS",var='Fbar')
SSB<-filter(g,Quarter==1) %>% group_by(Year,Species) %>% summarise(value=sum(SSB,na.rm=TRUE)) %>% mutate(source="SMS",var='SSB')
sms<-bind_rows(bind_rows(rec,Fbar),SSB)
g<-read_tsv(file=file.path( root,"gadget","summary_table_ICES_ASSES.out"),guess_max = 100000)
g<-mutate(g,Species=c('cod','her','spr')[Species.n])
rec<-g %>% mutate(value=Rec,source="ICES",var='Recruits') %>% select(Year,Species,value,var,source)
Fbar<-g %>% mutate(value=mean.F,source="ICES",var='Fbar') %>% select(Year,Species,value,var,source)
SSB<-g %>% mutate(value=SSB,source="ICES",var='SSB') %>% select(Year,Species,value,var,source)
ICES<-bind_rows(bind_rows(rec,Fbar),SSB)
a<-bind_rows(gadget,sms)
a<-bind_rows(a,ICES) %>% filter(Year<=2018)
X11(h=8,w=10)
b<-filter(a,Species=='her')
print(ggplot(b, aes(x=Year, y=value, group=source)) +
geom_line(aes(color=source),size=1)+
#geom_point(aes(color=source))+
theme(legend.position="right")+
facet_grid(rows=vars(var),scales='free_y') +
labs(x="Year", y="value ",title="Herring")
)
X11(h=8,w=10)
b<-filter(a,Species=='spr')
print(ggplot(b, aes(x=Year, y=value, group=source)) +
geom_line(aes(color=source),size=1)+
#geom_point(aes(color=source))+
theme(legend.position="right")+
facet_grid(rows=vars(var),scales='free_y') +
labs(x="Year", y="value ",title="Sprat")
)
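# Optional sketch (not in the original script): the comparison figures can also be
# written to disk with ggplot2::ggsave(); the file name here is only a placeholder.
# ggsave(file.path(root, "gadget", "compare_sprat.png"), width = 10, height = 8)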
|
/SMS_r_prog/gadget_sms_compare_summary.R
|
permissive
|
ices-eg/wg_WGSAM
|
R
| false | false | 2,645 |
r
|
|
# Assumed context (not stated in this file): tidyverse and rstanarm are attached, and
# asv.count.all.enviro and PREDERROR() come from earlier scripts in the project.
tempdataset <- asv.count.all.enviro %>%
separate(sample, into = c("Site", "Month", "Year", "z")) %>%
mutate(Season = ifelse(Month %in% c("10","11","12","1","2","3"), "Winter", "Summer")) %>%
mutate(presence = ifelse(nReads > 0, 1, 0)) %>%
filter(pH > 7.5) %>%
filter(Taxon %in% c("Karlodinium_8ed", "Ditylum_a31", "Thalassiosira_6b3", "Poteriospumella_b57")) %>%
mutate(TempStd = (Temperature - mean(Temperature))/sd(Temperature),
pHStd = (pH - mean(pH))/sd(pH),
SalinityStd = (Salinity - mean(Salinity))/sd(Salinity)) %>%
pivot_wider(id_cols = c(TempStd, pHStd, SalinityStd, Season),
names_from = Taxon,
values_from = presence)
# MODIFIED model from previous attempts
Karlodinium8ed_envir_model <- stan_glmer(Karlodinium_8ed ~ 0 + (0 + TempStd | Season),
data = tempdataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0, 1),
iter = 1000,
chains = 4)
Karlodinium8ed_combined_model1 <- stan_glmer(Karlodinium_8ed ~ 0 + (0 + TempStd | Season) + (1 | Ditylum_a31),
data = tempdataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
# Karlodinium8ed_combined_model2 <- stan_glmer(Karlodinium_8ed ~ 0 + (0 + TempStd | Season) + (1 | Ditylum_a31) + (1 | Thalassiosira_6b3),
# data = tempdataset,
# family = "binomial",
# prior_intercept = normal(0, 1),
# prior = normal(0,1),
# iter = 1000,
# chains = 4)
# Karlodinium8ed_combined_model3 <- stan_glmer(Karlodinium_8ed ~ 0 + (0 + TempStd | Season) + (1 | Ditylum_a31) + (1 | Thalassiosira_6b3) + (1 | Poteriospumella_b57),
# data = tempdataset,
# family = "binomial",
# prior_intercept = normal(0, 1),
# prior = normal(0,1),
# iter = 1000,
# chains = 4)
# Karlodinium8ed_ecol <- stan_glmer(Karlodinium_8ed ~ (1 | Ditylum_a31) + (1 | Thalassiosira_6b3) + (1 | Poteriospumella_b57),
# data = tempdataset,
# family = "binomial",
# prior_intercept = normal(0, 1),
# prior = normal(0,1),
# iter = 1000,
# chains = 4)
# Karlodinium8ed_min <- stan_glmer(Karlodinium_8ed ~ (1 | Ditylum_a31) + (1 | Thalassiosira_6b3),
# data = tempdataset,
# family = "binomial",
# prior_intercept = normal(0, 1),
# prior = normal(0,1),
# iter = 1000,
# chains = 4)
#adding these observations improves the model a lot!
loo_compare(waic(Karlodinium8ed_envir_model),
waic(Karlodinium8ed_combined_model1)
# waic(Karlodinium8ed_combined_model2),
# waic(Karlodinium8ed_combined_model3),
# waic(Karlodinium8ed_ecol),
# waic(Karlodinium8ed_min)
) #just other species associations (w no enviro data) is actually a better model than most of the others.
#adding biology improves the pres/abs predictions
PREDERROR(Karlodinium8ed_envir_model, tempdataset, outcomename = "Karlodinium_8ed")
# PREDERROR(Karlodinium8ed_combined_model1, tempdataset, outcomename = "Karlodinium_8ed")
#PREDERROR(Karlodinium8ed_combined_model2, tempdataset, outcomename = "Karlodinium_8ed")
PREDERROR(Karlodinium8ed_combined_model1, tempdataset, outcomename = "Karlodinium_8ed")
# PREDERROR(Karlodinium8ed_ecol, tempdataset, outcomename = "Karlodinium_8ed")
#plot(Karlodinium8ed_combined_model3)
# xtabs(~ Karlodinium_8ed + Thalassiosira_6b3, data = tempdataset)
#
#
# tempdataset %>%
# add_fitted_draws(Karlodinium8ed_combined_model3, n = 1000) %>%
# ggplot(aes(x = TempStd, y = Karlodinium_8ed)) +
# geom_point() +
# facet_grid(Season ~ Ditylum_a31, scales = "free_x") +
# stat_lineribbon(aes(y = .value), .width = c(.95, .5)) +
# scale_fill_brewer()
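# Added sketch (not part of the original analysis): the retained model's season-specific
# temperature slopes and their uncertainty can be inspected with standard rstanarm accessors.
# ranef(Karlodinium8ed_combined_model1)
# posterior_interval(Karlodinium8ed_combined_model1, prob = 0.9)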
saveRDS(Karlodinium8ed_envir_model, "../BayesianLogisticModels_EcolEnviroCombined/Karlodinium_8ed_envir_model.RDS")
saveRDS(Karlodinium8ed_combined_model1, "../BayesianLogisticModels_EcolEnviroCombined/Karlodinium_8ed_combined_model.RDS")
saveRDS(tempdataset, "../BayesianLogisticModels_EcolEnviroCombined/Karlodinium_8ed_tempdataset.RDS")
|
/Manuscript/BayesianLogisticModels_EcolEnviroCombined/Karlodinium_8ed_combinedModels.R
|
no_license
|
ramongallego/Harmful.Algae.eDNA
|
R
| false | false | 5,418 |
r
|
|
####################################################
#### Calculate RMSE (Root mean square error)
rmse <- function(error){
sqrt(mean(error^2))
}
####################################################
#### Calculate AIC from residual sum
AIC_RSS <- function(n, k, error){
AIC_fit <- (n * log(mean(error^2)) + 2*k)
}
#### Calculate RSE (Residual standard error):
#### sqrt(SSE/(n-p)), i.e., sqrt(RSS/df), where n = sample size,
#### p = number of parameters, df = degrees of freedom
rse <- function(error, n, p){
  sqrt(sum(error^2)/(n-p))
}
####################################################
#### Calculate basal area from diameter D in cm (the factor 10000 converts m2 to cm2)
BAfun <- function(D){0.00007854*D^2 * 10000}
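#### Added usage sketch (not in the original file): quick checks of the helpers above.
# err <- c(-1.2, 0.4, 2.1, -0.6)
# rmse(err)                                      # root mean square error of the residuals
# AIC_RSS(n = length(err), k = 2, error = err)   # AIC computed from the residual sum of squares
# BAfun(30)                                      # basal area in cm2 for a 30 cm diameter stem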
####################################################
#heightRegression <- function(tag, diameter, actual_height, sub_dat, E_Value){
# should be a dataframe
# tagAndDiameterWithHeightsFrame (tag, diameter, measure height)
# unheightedDiameterFrame (tag, diameter)
# single e_value for the plot
## returns - frame with tags, diameter, computed heights and algorithms
heightRegression <- function(tagAndDiameterWithHeightsFrame, unheightedDiameterFrame, E_Value)
{
  if (missing(tagAndDiameterWithHeightsFrame) || missing(unheightedDiameterFrame) || missing(E_Value)) {
    stop("missing parameters")
  }
  ## column names `diameter` and `height` in the input frame are assumptions
  D <- as.numeric(tagAndDiameterWithHeightsFrame$diameter)
  Ht_act <- as.numeric(tagAndDiameterWithHeightsFrame$height)
  n <- length(Ht_act)
  E_Value <- as.numeric(E_Value)
##### Calculating RSD for Uncertainty & Organizing Output
results.fun <- function (fit, equation, Ht_act, D, error, PredHt, coef){
AIC <- AIC_RSS(n = length(Ht_act), length(fit$par), error)
RMSE <- rmse(error)
RSD <- sqrt(sum(error^2)/(length(Ht_act)-2))
Xstand <- (D - mean(D))^2
SEM <- RSD * sqrt(1/length(Ht_act) + Xstand/sum(Xstand))
res.df <- cbind(D, Ht_act, PredHt, Residuals = error, SEM = SEM)
return(list(equation = equation, RSD = RSD, vars = c(coef, AIC = AIC, RMSE = RMSE), res = res.df))
}
##### Below are all the different H-D models to be compared
##### EQ1: Linear model: H = a + b*D, where D = DBH
startvals1 <- list(a = 2, b = 1)
Fit_nls1 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (a + b* D)
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq1 <- optim(par=startvals1, fn = Fit_nls1, hessian = FALSE, control = list(parscale = unlist(startvals1)))
a1 = Eq1$par[1]
b1 = Eq1$par[2]
PredHt <- a1 + b1 * D
error <- (Ht_act-PredHt)
RMSE_Eq1 <- rmse(error)
AIC_Eq1 <- AIC_RSS(n, length(startvals1), error)
equation <- as.formula(y ~ a + b * D)
res1 <- results.fun(fit = Eq1, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a1, b1))
##### EQ2: Quadratic model, Eq. 27 in Table 2 of page 128 (Huang et al., 2000): H = a + b*D + c*D^2, where D = DBH
startvals2 <- list(a = -5, b = 1, c = 0.01)
Fit_nls2 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (a + b * D + c * (D^2))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq2 <- optim(par=startvals2, fn = Fit_nls2, hessian = FALSE, control = list(parscale = unlist(startvals2)))
a2 = Eq2$par[1]
b2 = Eq2$par[2]
c2 = Eq2$par[3]
PredHt <- (a2 + b2 * D + c2 * (D^2))
error<- (Ht_act-PredHt)
RMSE_Eq2 <- rmse(error)
AIC_Eq2 <- AIC_RSS(n, length(startvals2), error)
equation <- as.formula(y ~ a + b * D + c * (D^2))
res2 <- results.fun(fit = Eq2, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a2, b2, c2))
##### EQ3: Linear models on log scale (Log~Log): H=exp(a+b*ln(D)), where D = DBH (Eq2 of page 8617 (Djomo et al., 2010; Molto et al., 2013))
startvals3 <- list(a = 1, b =1)
Fit_nls3 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- exp(a + b*log(D))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq3 <- optim(par=startvals3, fn = Fit_nls3, hessian = FALSE, control = list(parscale = unlist(startvals3)))
a3 = Eq3$par[1]
b3 = Eq3$par[2]
PredHt <- exp(a3 + b3*log(D))
error<- (Ht_act-PredHt)
RMSE_Eq3 <- rmse(error)
AIC_Eq3 <- AIC_RSS(n, length(startvals3), error)
equation <- as.formula(y ~ exp(a + b*log(D)))
res3 <- results.fun(fit = Eq3, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a3, b3))
##### EQ4: Quadratic model on log scale: Ln(H) = a + b * ln(D) + c * ln(D^2) (Djomo et al., 2010)
startvals4 <- list(a = -5, b = 1, c = 0.01)
Fit_nls4 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (a + b * log(D) + c * log(D^2))
t1 <- sum((Ht_act-exp(t1))^2)
return(t1)
}
Eq4 <- optim(par=startvals4, fn = Fit_nls4, hessian = FALSE, control = list(parscale = unlist(startvals4)))
a4 = Eq4$par[1]
b4 = Eq4$par[2]
c4 = Eq4$par[3]
PredHt <- a4 + b4 * log(D) + c4 * log(D^2)
error<- (Ht_act-PredHt)
RMSE_Eq4 <- rmse(error)
AIC_Eq4 <- AIC_RSS(n, length(startvals4), error)
equation <- as.formula(y ~ a + b * log(D) + c * log(D^2))
res4 <- results.fun(fit = Eq4, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a4, b4, c4))
##### EQ5: Non-linear model: H = a + b * ln(D), where D = DBH (Fang and Bailey, 1998; Djomo et al., 2010; Molto et al., 2013)
##### Log-linear eq.1 in page 8617 (Fang and Bailey, 1998; Molto et al., 2013)
startvals5 <- list(a = 1, b =2)
Fit_nls5 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (a + b*log(D)) #Log-linear eq.1 in page 8617 (Fang and Bailey, 1998; Molto et al., 2013)
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq5 <- optim(par=startvals5, fn = Fit_nls5, hessian = FALSE, control = list(parscale = unlist(startvals5)))
a5 = Eq5$par[1]
b5 = Eq5$par[2]
PredHt <- a5 + b5*log(D)
error<- (Ht_act-PredHt)
RMSE_Eq5 <- rmse(error)
AIC_Eq5 <- AIC_RSS(n, length(startvals5), error)
equation <- as.formula(y ~ a + b*log(D))
res5 <- results.fun(fit = Eq5, equation = equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a5, b5))
##### EQ6: Simplified (two-parameter) Weibull equation: H = a*(1-exp(-D/b)), where D=DBH, page 8617 (Molto et al., 2013)
startvals6 <- list(a = 30, b = 5)
Fit_nls6 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (a * (1-exp(I(-D/b))))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq6 <- optim(par=startvals6, fn = Fit_nls6, hessian = FALSE, control = list(parscale = unlist(startvals6)))
a6 = Eq6$par[1]
b6 = Eq6$par[2]
PredHt <- a6 * (1-exp(I(-D/b6)))
error<- (Ht_act-PredHt)
RMSE_Eq6 <- rmse(error)
AIC_Eq6 <- AIC_RSS(n, length(startvals6), error)
equation <- as.formula(y ~ a * (1-exp(I(-D/b))))
res6 <- results.fun(fit = Eq6, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a6, b6))
##### EQ7: Three-parameter Weibull equation: H = a * (1-exp(-b*D^c)), where D = DBH
##### Equation 13 of Huang et al., 1992; Lewis et al., 2009; Feldpausch et al., 2012; Mitchard et al., 2014)
startvals7 <- list(a = 54.01, b = -0.053, c = 0.0759)
Fit_nls7 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (a*(1-exp(-b*D^c)))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq7 <- optim(par=startvals7, fn = Fit_nls7, hessian = FALSE, control = list(parscale = unlist(startvals7)))
a7 = Eq7$par[1]
b7 = Eq7$par[2]
c7 = Eq7$par[3]
PredHt <- a7*(1-exp(-b7*D^c7))
error<- (Ht_act-PredHt)
RMSE_Eq7 <- rmse(error)
AIC_Eq7 <- AIC_RSS(n, length(startvals7), error)
equation <- as.formula(y ~ a*(1-exp(-b*D^c)))
res7 <- results.fun(fit = Eq7, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a7, b7, c7))
##### EQ8: Michaelis-Menten equation: H = a*D /(b+D), in page 8618 (Molto et al., 2013; Equation 3 in Huang et al. (1992))
startvals8 <- list(a = 10, b = 2) #Start points were estimated from Figure 1 in page 8632 (Molto et al., 2013)
Fit_nls8 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (a * I(D)/(b + I(D)))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq8 <- optim(par=startvals8, fn = Fit_nls8, hessian = FALSE, control = list(parscale = unlist(startvals8)))
a8 = Eq8$par[1]
b8 = Eq8$par[2]
PredHt <- a8 * I(D)/(b8 + I(D))
error<- (Ht_act-PredHt)
RMSE_Eq8 <- rmse(error)
AIC_Eq8 <- AIC_RSS(n, length(startvals8), error)
equation <- as.formula(y ~ (a * I(D)/(b + I(D))))
res8 <- results.fun(fit = Eq8, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a8, b8))
##### EQ9: Power-law equation: H=a*D^b (Eq 1 of Huang et al., 1992; Chave et al., 2005; Feldpausch et al., 2012; Ngomanda et al., 2014)
##### Start points were estimated from Ngomanda et al.(2014)
startvals9 <- list(a = 26.6049, b = -9.4854)
Fit_nls9 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- ( a* D^b )
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq9 <- optim(par=startvals9, fn = Fit_nls9, hessian = FALSE, control = list(parscale = unlist(startvals9)))
a9 = Eq9$par[1]
b9 = Eq9$par[2]
PredHt <- a9* D^b9
error<- (Ht_act-PredHt)
RMSE_Eq9 <- rmse(error)
AIC_Eq9 <- AIC_RSS(n, length(startvals9), error)
equation <- as.formula(y ~ a* D^b)
res9 <- results.fun(fit = Eq9, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a9, b9))
##### EQ10:Model 3: Ln(H) = a + b/D or H = a * exp(b/D), where D=DBH (Djomo et al., 2010; Equation 6 in Huang et al. (1992))
startvals10 <- list(a = 26.6049, b = -9.4854)
Fit_nls10 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (exp(a + b/D))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq10 <- optim(par=startvals10, fn = Fit_nls10, hessian = FALSE, control = list(parscale = unlist(startvals10)))
a10 = Eq10$par[1]
b10 = Eq10$par[2]
PredHt <- exp(a10 + b10/D)
error<- (Ht_act-PredHt)
RMSE_Eq10 <- rmse(error)
AIC_Eq10 <- AIC_RSS(n, length(startvals10), error)
equation <- as.formula(y ~ exp(a + b/D))
res10 <- results.fun(fit = Eq10, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a10, b10))
##### EQ11: H = a + b*(1-exp(-c*(D-Dmin))), where D=DBH (Fang and Bailey, 1998)
##### Start points were for Balsam poplar of Equation 12 in page 1289 (Huang et al., 1992)
startvals11 <- list(a = 44.7, b =42.3, c = 0.026)
Fit_nls11 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- a + b*(1-exp(-c*(D-min(D))))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq11 <- optim(par=startvals11, fn = Fit_nls11, hessian = FALSE, control = list(parscale = unlist(startvals11)))
a11 = Eq11$par[1]
b11 = Eq11$par[2]
c11 = Eq11$par[3]
PredHt <- a11 + b11 * (1-exp(-c11 * (D - min(D))))
error<- (Ht_act-PredHt)
RMSE_Eq11 <- rmse(error)
AIC_Eq11 <- AIC_RSS(n, length(startvals11), error)
equation <- as.formula(y ~ a + b*(1-exp(-c*(D-min(D)))))
res11 <- results.fun(fit = Eq11, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a11, b11, c11))
##### EQ12:H = a + (a-1.3)*b/(D+b), where D=DBH (Fang and Bailey, 1998)
##### Start points were for Balsam poplar of Equation 12 in page 1289 (Huang et al., 1992)
startvals12 <- list(a = 33.62, b =20.88)
Fit_nls12 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (a + (a - 1.3) * b/(D + b))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq12 <- optim(par=startvals12, fn = Fit_nls12, hessian = FALSE, control = list(parscale = unlist(startvals12)))
a12 = Eq12$par[1]
b12 = Eq12$par[2]
PredHt <- a12 + (a12 - 1.3) * b12/(D + b12)
error<- (Ht_act-PredHt)
RMSE_Eq12 <- rmse(error)
AIC_Eq12 <- AIC_RSS(n, length(startvals12), error)
equation <- as.formula(y ~ (a + (a - 1.3) * b/(D + b)))
res12 <- results.fun(fit = Eq12, equation, Ht_act = Ht_act, D = D, error = error,
                   PredHt = PredHt, coef = c(a12, b12))
##### EQ13:H = 1.3 + exp(a + b*D^c), where D=DBH (Eq 10 of Huang et al. (1992, 2000); Colbert et al., 2002)
##### Start points were for Black williows in page 173 (Colbert et al., 2002)
startvals13 <- list(a = 4.5535, b =-3.7529, c = 1)
Fit_nls13 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (1.3 + exp(a + b * D^c))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq13 <- optim(par=startvals13, fn = Fit_nls13, hessian = FALSE, control = list(parscale = unlist(startvals13)))
a13 = Eq13$par[1]
b13 = Eq13$par[2]
c13 = Eq13$par[3]
PredHt <- 1.3 + exp(a13 + b13 * D^c13)
error<- (Ht_act-PredHt)
RMSE_Eq13 <- rmse(error)
AIC_Eq13 <- AIC_RSS(n, length(startvals13), error)
equation <- as.formula(y ~ 1.3 + exp(a + b * D^c))
res13 <- results.fun(fit = Eq13, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a13, b13, c13))
##### EQ14:H = 1.3 + a*(1-exp(-b*D))^c, where D=DBH (Equation 12 in Huang et al. (1992, 2000); Equation 2 in Sharma and Parton (2007))
##### Start points were for Balsam poplar of Equation 12 in page 1289 (Huang et al., 1992)
startvals14 <- list(a = 1.0462, b = 1.0464, c=0.9465)
Fit_nls14 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (1.3 + a*(1-exp(-b*D))^c)
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq14 <- optim(par=startvals14, fn = Fit_nls14, hessian = FALSE, control = list(parscale = unlist(startvals14)))
a14 = Eq14$par[1]
b14 = Eq14$par[2]
c14 = Eq14$par[3]
PredHt <- 1.3 + a14 *(1-exp(-b14 * D))^c14
error<- (Ht_act-PredHt)
RMSE_Eq14 <- rmse(error)
AIC_Eq14 <- AIC_RSS(n, length(startvals14), error)
equation <- as.formula(y ~ 1.3 + a*(1-exp(-b*D))^c)
res14 <- results.fun(fit = Eq14, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a14, b14, c14))
##### Following equations from Table 3 in Huang et al. (1992) and Table 2 in Huang et al. (2000).
##### Parameter estimations of Balsam poplar (broad leaf) in Tables 4 and 5 (Huang et al., 1992)
##### were used as start values in the new fitting for Gabonese rainforests.
##### EQ15: Equation 4 in Huang et al.(1992 and 2000): H = 1.3 + a * (1 - exp(-bD)), where D = DBH
startvals15 <- list(a = 25.3302, b = 0.0512)
Fit_nls15 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (a * (1 - exp(-b * D)) )
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq15 <- optim(par=startvals15, fn = Fit_nls15, hessian = FALSE, control = list(parscale = unlist(startvals15)))
a15 = Eq15$par[1]
b15 = Eq15$par[2]
PredHt <- a15 * (1 - exp(-b15 * D))
error<- (Ht_act-PredHt)
RMSE_Eq15 <- rmse(error)
AIC_Eq15 <- AIC_RSS(n, length(startvals15), error)
equation <- as.formula(y ~ a * (1 - exp(-b * D)))
res15 <- results.fun(fit = Eq15, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a15, b15))
##### EQ16:Equation 5 in Huang et al. (1992 and 2000): H = 1.3 + D^2 /(a + bD)^2, where D = DBH
startvals16 <- list(a = 1.3209, b = 0.1813)
Fit_nls16 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (1.3 + D^2 /((a + b * D)^2) )
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq16 <- optim(par=startvals16, fn = Fit_nls16, hessian = FALSE, control = list(parscale = unlist(startvals16)))
a16 = Eq16$par[1]
b16 = Eq16$par[2]
PredHt <- 1.3 + D^2 /((a16 + b16 * D)^2)
error<- (Ht_act-PredHt)
RMSE_Eq16 <- rmse(error)
AIC_Eq16 <- AIC_RSS(n, length(startvals16), error)
equation <- as.formula(y ~ 1.3 + D^2 /((a + b * D)^2))
res16 <- results.fun(fit = Eq16, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a16, b16))
##### EQ17: Equation 7 in Huang et al. (1992 and 2000): H = 1.3 + 10^a*(D^b), where D = DBH
startvals17 <- list(a = 0.4305, b = 0.5871)
Fit_nls17 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (1.3 + (10^a) * (D^b) )
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq17 <- optim(par=startvals17, fn = Fit_nls17, hessian = FALSE, control = list(parscale = unlist(startvals17)))
a17 = Eq17$par[1]
b17 = Eq17$par[2]
PredHt <- 1.3 + (10^a17) * (D^b17)
error<- (Ht_act-PredHt)
RMSE_Eq17 <- rmse(error)
AIC_Eq17 <- AIC_RSS(n, length(startvals17), error)
equation <- as.formula(y ~ 1.3 + (10^a) * (D^b))
res17 <- results.fun(fit = Eq17, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a17, b17))
##### EQ18:Equation 8 in Huang et al. (1992 and 2000): H = 1.3 + (a*D)/(D+1) + b*D, where D = DBH
startvals18 <- list(a = 6.5487, b = 0.4507)
Fit_nls18 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (1.3 + a*D/(D+1) + b*D )
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq18 <- optim(par=startvals18, fn = Fit_nls18, hessian = FALSE, control = list(parscale = unlist(startvals18)))
a18 = Eq18$par[1]
b18 = Eq18$par[2]
PredHt <- 1.3 + a18 * D/(D+1) + b18*D
error<- (Ht_act-PredHt)
RMSE_Eq18 <- rmse(error)
AIC_Eq18 <- AIC_RSS(n, length(startvals18), error)
equation <- as.formula(y ~ 1.3 + a*D/(D+1) + b*D)
res18 <- results.fun(fit = Eq18, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a18, b18))
##### EQ19:Equation 9 in Huang et al. (1992 and 2000): H = 1.3 + a*(D/(D+1))^b, where D = DBH
startvals19 <- list(a = 27.1752, b = 10.1979)
Fit_nls19 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (1.3 + a*(D/(1 + D))^b )
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq19 <- optim(par=startvals19, fn = Fit_nls19, hessian = FALSE, control = list(parscale = unlist(startvals19)))
a19 = Eq19$par[1]
b19 = Eq19$par[2]
PredHt <- 1.3 + a19*(D/(1 + D))^b19
error<- (Ht_act-PredHt)
RMSE_Eq19 <- rmse(error)
AIC_Eq19 <- AIC_RSS(n, length(startvals19), error)
equation <- as.formula(y ~ 1.3 + a*(D/(1 + D))^b)
res19 <- results.fun(fit = Eq19, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a19, b19))
##### EQ20:Equation 10 in Huang et al. (1992 and 2000): H = 1.3 + exp(a + b*D^c), where D = DBH
startvals20 <- list(a = 18.804146, b = -16.760486, c=-0.025197)
Fit_nls20 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- 1.3 + exp(a + b * (D^c))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq20 <- optim(par=startvals20, fn = Fit_nls20, hessian = FALSE, control = list(parscale = unlist(startvals20)))
a20 = Eq20$par[1]
b20 = Eq20$par[2]
c20 = Eq20$par[3]
PredHt <- 1.3 + exp(a20 + b20 * (D^c20))
error<- (Ht_act-PredHt)
RMSE_Eq20 <- rmse(error)
AIC_Eq20 <- AIC_RSS(n, length(startvals20), error)
equation <- as.formula(y ~ 1.3 + exp(a + b * (D^c)))
res20 <- results.fun(fit = Eq20, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a20, b20, c20))
##### EQ21: Equation 11 in Huang et al. (1992 and 2000): H = 1.3 + a/(1+b*exp(-c*D)), where D = DBH
startvals21 <- list(a = 2.5241, b = 0.0012, c=0.1404)
Fit_nls21 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- 1.3 + a/(1 + b*exp(-c*D)) #Equation 11 in Huang et al. (1992)
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq21 <- optim(par=startvals21, fn = Fit_nls21, hessian = FALSE, control = list(parscale = unlist(startvals21)))
a21 = Eq21$par[1]
b21 = Eq21$par[2]
c21 = Eq21$par[3]
PredHt <- 1.3 + a21 /(1 + b21 *exp(-c21 * D))
error <- (Ht_act - PredHt)
RMSE_Eq21 <- rmse(error)
AIC_Eq21 <- AIC_RSS(n, length(startvals21), error)
equation <- as.formula(y ~ 1.3 + a/(1 + b*exp(-c*D)))
res21 <- results.fun(fit = Eq21, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a21, b21, c21))
##### EQ22: Equation 14 in Huang et al. (1992 and 2000): H = 1.3 + a*exp(-b*exp(-c*D)), where D = DBH (For Eqs. 12 and 13 in Huang et al. (1992 and 2002), please see the above eqs 14 and 7 in the file.)
startvals22 <- list(a = 1.6368, b = 2.1570, c=1.0951)
Fit_nls22 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (1.3 + a*exp(-b*exp(-c*D)))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq22 <- optim(par=startvals22, fn = Fit_nls22, hessian = FALSE, control = list(parscale = unlist(startvals22)))
a22 = Eq22$par[1]
b22 = Eq22$par[2]
c22 = Eq22$par[3]
PredHt <- 1.3 + a22 * exp(-b22 * exp(-c22 * D))
error <- (Ht_act - PredHt)
RMSE_Eq22 <- rmse(error)
AIC_Eq22 <- AIC_RSS(n, length(startvals22), error)
equation <- as.formula(y ~ 1.3 + a*exp(-b*exp(-c*D)))
res22 <- results.fun(fit = Eq22, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a22, b22, c22))
##### EQ23: Equation 15 in Huang et al. (1992) and Eq. 16 in Huang et al. (2000): H = (1.3^b + (c^b-1.3^b)*(1-exp(-a*D))/(1-exp(-a*100)))^(1/b)
startvals23 <- list(a = 0.0464, b = 1.0716, c=27.0745)
Fit_nls23 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- ((1.3^b + (c^b-1.3^b)*(1-exp(-a*D))/(1-exp(-a*100)))^(1/b))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq23 <- optim(par=startvals23, fn = Fit_nls23, hessian = FALSE, control = list(parscale = unlist(startvals23)))
a23 = Eq23$par[1]
b23 = Eq23$par[2]
c23 = Eq23$par[3]
PredHt <- (1.3^b23 + (c23^b23-1.3^b23)*(1-exp(-a23*D))/(1-exp(-a23*100)))^(1/b23)
error <- (Ht_act - PredHt)
RMSE_Eq23 <- rmse(error)
AIC_Eq23 <- AIC_RSS(n, length(startvals23), error)
equation <- as.formula(y ~ (1.3^b + (c^b-1.3^b)*(1-exp(-a*D))/(1-exp(-a*100)))^(1/b))
res23 <- results.fun(fit = Eq23, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a23, b23, c23))
##### EQ24: Equation 16 in Huang et al. (1992) and Eq. 17 in Huang et al. (2000): H =1.3 + D^2/(a + b*D + c*D^2)
##### Start points were for Balsam poplar of Equation 12 in page 1289 (Huang et al., 1992)
startvals24 <- list(a = 0.0038, b = 0.7027, c = 0.0270)
Fit_nls24 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (1.3 + D^2 /(a + b * D + c * D^2))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq24 <- optim(par=startvals24, fn = Fit_nls24, hessian = FALSE, control = list(parscale = unlist(startvals24)))
a24 = Eq24$par[1]
b24 = Eq24$par[2]
c24 = Eq24$par[3]
PredHt <- 1.3 + D^2 /(a24 + b24 * D + c24 * D^2)
error <- (Ht_act - PredHt)
RMSE_Eq24 <- rmse(error)
AIC_Eq24 <- AIC_RSS(n, length(startvals24), error)
equation <- as.formula(y ~ 1.3 + D^2 /(a + b * D + c * D^2))
res24 <- results.fun(fit = Eq24, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a24, b24, c24))
##### EQ25: Equation 17 in Huang et al. (1992) and Eq. 18 in Huang et al. (2000): H = 1.3 + a *D^(b*D^-c), where D = DBH
startvals25 <- list(a = 22.9433, b = -20.9985, c=0.7680)
Fit_nls25 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (1.3 + a*D^(b*D^-c))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq25 <- optim(par=startvals25, fn = Fit_nls25, hessian = FALSE, control = list(parscale = unlist(startvals25)))
a25 = Eq25$par[1]
b25 = Eq25$par[2]
c25 = Eq25$par[3]
PredHt <- 1.3 + a25 * D^(b25 * D^-c25)
error <- (Ht_act - PredHt)
RMSE_Eq25 <- rmse(error)
AIC_Eq25 <- AIC_RSS(n, length(startvals25), error)
equation <- as.formula(y ~ 1.3 + a*D^(b*D^-c))
res25 <- results.fun(fit = Eq25, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a25, b25, c25))
##### EQ26: Equation 18 in Huang et al. (1992) and Eq. 20 in Huang et al. (2000): H = 1.3 + a*exp(b/(D+c)), where D = DBH
startvals26 <- list(a = 3.2971, b = -0.4014, c = 5.5088)
Fit_nls26 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (1.3 + a * exp(b /(D+c)))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq26 <- optim(par=startvals26, fn = Fit_nls26, hessian = FALSE, control = list(parscale = unlist(startvals26)))
a26 = Eq26$par[1]
b26 = Eq26$par[2]
c26 = Eq26$par[3]
PredHt <- 1.3 + a26 * exp(b26 /(D + c26))
error <- (Ht_act - PredHt)
RMSE_Eq26 <- rmse(error)
AIC_Eq26 <- AIC_RSS(n, length(startvals26), error)
equation <- as.formula(y ~ 1.3 + a * exp(b /(D+c)))
res26 <- results.fun(fit = Eq26, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a26, b26, c26))
##### EQ27: Equation 19 in Huang et al.(1992) and Eq.19 in Huang et al. (2000): H = 1.3 + a/(1 + b^-1 * D^-c), where D = DBH
startvals27 <- list(a = 34.4682, b = 0.0369, c = 1.0589)
Fit_nls27 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (1.3 + a/(1+1/(b*D^c)))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq27 <- optim(par=startvals27, fn = Fit_nls27, hessian = FALSE, control = list(parscale = unlist(startvals27)))
a27 = Eq27$par[1]
b27 = Eq27$par[2]
c27 = Eq27$par[3]
PredHt <- 1.3+a27 /(1+1/(b27 * D^c27))
error <- (Ht_act - PredHt)
RMSE_Eq27 <- rmse(error)
AIC_Eq27 <- AIC_RSS(n, length(startvals27), error)
equation <- as.formula(y ~ 1.3 + a/(1+1/(b*D^c)))
res27 <- results.fun(fit = Eq27, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a27, b27, c27))
##### EQ28: Equation 20 in Huang et al. (1992) and Eq. 21 in Huang et al. (2000): H = 1.3 + a*(1 - b*exp(-c*D))^d, where D = DBH
startvals28 <- list(a = 15.2716, b = 0.9574, c = 0.0530, d = 1.1025)
Fit_nls28 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
d <- x[4]
t1 <- (1.3 + a*(1-b*exp(-c*D))^d)
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq28 <- optim(par=startvals28, fn = Fit_nls28, hessian = FALSE, control = list(parscale = unlist(startvals28)))
a28 = Eq28$par[1]
b28 = Eq28$par[2]
c28 = Eq28$par[3]
d28 = Eq28$par[4]
PredHt<- 1.3 + a28 *(1- b28 * exp(-c28 * D))^d28
error <- (Ht_act - PredHt)
RMSE_Eq28 <- rmse(error)
AIC_Eq28 <- AIC_RSS(n, length(startvals28), error)
equation <- as.formula(y ~ 1.3 + a*(1-b*exp(-c*D))^d)
res28 <- results.fun(fit = Eq28, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a28, b28, c28, d28))
##### EQ29: Equation 23 in Huang et al. (2000): H = 1.3 + a*D*exp(-b*D), where D = DBH.
##### Start values were for white spruce (Table 3 in page 131 of Huang et al. (2000))
startvals29 <- list(a = 1.0264, b = 0.01157)
Fit_nls29 <- function(x) {
a <- x[1]
b <- x[2]
t1 <- (1.3 + a * D * exp(-b*D))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq29 <- optim(par=startvals29, fn = Fit_nls29, hessian = FALSE, control = list(parscale = unlist(startvals29)))
a29 = Eq29$par[1]
b29 = Eq29$par[2]
PredHt <- 1.3 + a29 * D * exp(-b29 * D)
error <- (Ht_act - PredHt)
RMSE_Eq29 <- rmse(error)
AIC_Eq29 <- AIC_RSS(n, length(startvals29), error)
equation <- as.formula(y ~ 1.3 + a * D * exp(-b*D))
res29 <- results.fun(fit = Eq29, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a29, b29))
##### EQ30: Equation 24 in Huang et al. (2000): H = 1.3 + a * D^b * exp(-c*D), where D = DBH. Start values were for white spruce (Table 3 in page 131 of Huang et al. (2000))
startvals30 <- list(a = 0.687824, b = 1.175135, c = 0.017699)
Fit_nls30 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (1.3 + a * D^b * exp(-c*D))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq30 <- optim(par=startvals30, fn = Fit_nls30, hessian = FALSE, control = list(parscale = unlist(startvals30)))
a30 = Eq30$par[1]
b30 = Eq30$par[2]
c30 = Eq30$par[3]
PredHt <- 1.3 + a30 * D^b30 * exp(-c30*D)
error <- (Ht_act - PredHt)
RMSE_Eq30 <- rmse(error)
AIC_Eq30 <- AIC_RSS(n, length(startvals30), error)
equation <- as.formula(y ~ 1.3 + a * D^b * exp(-c*D))
res30 <- results.fun(fit = Eq30, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a30, b30, c30))
##### EQ31: Pantropical DBH-H model (Chave et al., 2014. GCB 20: 3183): Ln(H) = a - E + b*Ln(D)+ c*(Ln(D))^2 , where D = DBH
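##### Note: the objective below compares Ht_act with the linear predictor directly (no exp()),
##### i.e. it fits H rather than Ln(H); a, b and c are therefore re-estimated on the height scale
##### and are not directly comparable to the published log-scale coefficients, which are used only as start values.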
startvals31 <- list(a = 0.893, b = 0.760, c = -0.034)
Fit_nls31 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (a - E_Value + b * log(D) + c * (log(I(D)))^2)
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
if(!is.na(E_Value)){
Eq31 <- optim(par=startvals31, fn = Fit_nls31, hessian = FALSE, control = list(parscale = unlist(startvals31)))
a31 = Eq31$par[1]
b31 = Eq31$par[2]
c31 = Eq31$par[3]
PredHt <- a31 - E_Value + b31 * log(D) + c31 * (log(I(D)))^2
error <- (Ht_act - PredHt)
RMSE_Eq31 <- rmse(error)
AIC_Eq31 <- AIC_RSS(n, length(startvals31), error)
equation <- as.formula(y ~ a - E_Value + b * log(D) + c * (log(I(D)))^2)
res31 <- results.fun(fit = Eq31, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a31, b31, c31))
}else{AIC_Eq31 <- 9999999}
##### EQ32: The Mitscherlisch model (Ngomanda et al., 2014): H = a - b * exp(-cD), where D = DBH
startvals32 <- list(a = 44.7, b = 42.3, c = 0.026)
Fit_nls32 <- function(x) {
a <- x[1]
b <- x[2]
c <- x[3]
t1 <- (a - b * exp(-c * D))
t1 <- sum((Ht_act-t1)^2)
return(t1)
}
Eq32 <- optim(par=startvals32, fn = Fit_nls32, hessian = FALSE, control = list(parscale = unlist(startvals32)))
a32 = Eq32$par[1]
b32 = Eq32$par[2]
c32 = Eq32$par[3]
PredHt <- a32 - b32 * exp(-c32 * D)
error <- (Ht_act - PredHt)
RMSE_Eq32 <- rmse(error)
AIC_Eq32 <- AIC_RSS(n, length(startvals32), error)
equation <- as.formula(y ~ a - b * exp(-c * D))
res32 <- results.fun(fit = Eq32, equation, Ht_act = Ht_act, D = D, error = error,
PredHt = PredHt, coef = c(a32, b32, c32))
##### Model comparison
AIC_best.fit <- paste("Eq", 1, sep="")
AIC_min_temp = AIC_Eq1
AIC.df <- matrix(nrow = 32, ncol = 2, dimnames = list(c(), c("Equ", "AIC")))
for (k in 1: 32){
AIC_min <- eval(parse(text=paste("AIC_Eq", k, sep="")))
AIC.df[k,] <- c(Equ = paste("Eq", k, sep=""), AIC = round(AIC_min, 1))
AIC_min <- min(AIC_min, AIC_min_temp)
if (AIC_min < AIC_min_temp) {AIC_best.fit <- paste("Eq", k, sep="")}
AIC_min_temp = AIC_min
}
AIC.df <- data.frame(AIC.df)
AIC.df$AIC <- as.numeric(as.character(AIC.df$AIC))
best.equ <- eval(parse(text=AIC_best.fit))
rnum <- paste("res", gsub("[^[:digit:]]", "", AIC_best.fit), sep="")
res_fin <- get(rnum)
predht.fun <- function(res, D){
a <- res$vars["a"]
b <- res$vars["b"]
c <- res$vars["c"]
d <- res$vars["d"]
equ.fun <- function(D) {
eval(parse(text = res$equ))
}
ph <- equ.fun(D)
}
predht <- predht.fun(res = res_fin, D)
full.list <- res_fin
full.list$predht <- predht
full.list
}
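
##### Optional sanity check (illustrative sketch only; the object names and simulated data below are
##### assumptions, not part of this script): any single equation fitted above by minimising the
##### residual sum of squares with optim() can be cross-checked against nls() on the same data.
# set.seed(1)
# D.sim <- runif(200, 5, 120)
# H.sim <- 45 - 40 * exp(-0.026 * D.sim) + rnorm(200, 0, 1.5)   # Mitscherlich-type data (EQ32 form)
# fit.optim <- optim(par = c(a = 44.7, b = 42.3, c = 0.026),
#                    fn = function(x) sum((H.sim - (x[1] - x[2] * exp(-x[3] * D.sim)))^2),
#                    control = list(parscale = c(44.7, 42.3, 0.026)))
# fit.nls <- nls(H.sim ~ a - b * exp(-c * D.sim), start = list(a = 44.7, b = 42.3, c = 0.026))
# rbind(optim = fit.optim$par, nls = coef(fit.nls))   # the two fits should agree closely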
/R/DHModel.R | gabonNRI/gabontreedata | permissive | R
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R_functions.R
\name{boot.dist.1d}
\alias{boot.dist.1d}
\title{Generate a bootstrap distribution for a statistic computed on a vector.}
\usage{
boot.dist.1d(x, B, FUN = mean, ...)
}
\arguments{
\item{x}{A numeric vector of data.}
\item{B}{The number of bootstrap samples to draw.}
\item{FUN}{A function that computes the statistic of interest. FUN must have a vector of data as its first argument and must return a numeric vector of length 1 (i.e. a scalar).}
\item{...}{Additional arguments passed to FUN.}
}
\value{
A numeric vector containing the bootstrap distribution of the statistic specified by FUN.
}
\description{
Given data x, generates a bootstrap distribution with B bootstrap samples, computing a statistic specified by FUN each time.
}
\examples{
boot.dist.1d(rnorm(50, 0, 1), 1000, median)
}
\keyword{Bootstrap}
\keyword{Bootstrapping}
\keyword{distribution}
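
A minimal R sketch consistent with the interface documented above (illustrative only; the actual
implementation in mdedge/stfspack may differ):

boot.dist.1d <- function(x, B, FUN = mean, ...) {
  n <- length(x)
  out <- numeric(B)
  for (b in seq_len(B)) {
    # resample x with replacement and apply the statistic to each bootstrap sample
    out[b] <- FUN(x[sample.int(n, n, replace = TRUE)], ...)
  }
  out
}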
/man/boot.dist.1d.Rd | mdedge/stfspack | no_license | R
source("R/human_functions.22.11.27.R")
# makes sup figure 5
# Runs permutational T test
# this should take around 20 minutes
num.perm = 9
meta.stool <- read.csv("data/HumanStoolUnpaired_biomassnorm_metalabels.csv", row.names = 1)
meta.serum <- read.csv("data/HumanSerumUnpaired_biomassnorm_metalabels.csv", row.names = 1)
map.stool <- read.csv("data/Map_human_metabolome_stool.22.11.21.csv")
map.serum <- read.csv("data/Map_human_metabolome_serum.22.11.21.csv")
map.stool$Collection.date <- as.Date(map.stool$Collection.date, format = "%m/%d/%y")
map.serum$Collection.date <- as.Date(map.serum$Collection.date, format = "%m/%d/%y")
map.serum$col <- "#56B4E9" # assign colors by disease state
map.stool$col <- "#56B4E9" # assign colors by disease state
map.serum$col[map.serum$Disease.state == "Patient"] <- "#E69F00"
map.stool$col[map.stool$Disease.state == "Patient"] <- "#E69F00"
map.stool <- map.stool[match(row.names(meta.stool), map.stool$sample.id) , ]
map.serum <- map.serum[match(row.names(meta.serum), map.serum$sample.id) , ]
map.stool <- map.stool[!is.na(map.stool$Collection.date) , ]
filter <- NULL
for(i in 1 : nrow(map.stool)){
# remove samples that can't be paired
map.i <- map.stool[i,]
fam.i <- map.stool[map.stool$Family.ID == map.i$Family.ID,]
pairs.pos <- fam.i[fam.i$Disease.state != map.i$Disease.state , ] #possible pairs
if(nrow(pairs.pos) > 0){
closest.date <- min(abs(pairs.pos$Collection.date - map.i$Collection.date))
filter[i] <- closest.date < 90
}else{
filter[i] <- FALSE
}
}
map.stool <- map.stool[filter,]
meta.stool <- meta.stool[match(map.stool$sample.id, row.names(meta.stool)) , ]
all(map.stool$sample.id == row.names(meta.stool))
pats <- unique(map.stool$DonorID[map.stool$Disease.state == "Patient"])
#meta.stool$Label <- NULL
meta.stool$Ethanol <- NULL # stored in EtOH
meta.stool$X <- NULL
stool.t.perm <- matrix(ncol = ncol(meta.stool) -1, nrow = num.perm)
colnames(stool.t.perm) <- colnames(meta.stool[-1])
stool.t.hi.perm <- stool.t.lo.perm <- stool.t.t.perm <- stool.t.e.perm <- stool.t.perm.no <- stool.t.perm
fold <- matrix(ncol = ncol(meta.stool) - 1, nrow = num.perm * length(pats))
d <- length(pats)
permutation <- NULL
comps <- NULL
comps.list <- list()
map.stool$Collection.date <- as.Date(map.stool$Collection.date, "%m/%d/%y")
for(j in 1 : num.perm){
print(j)
map.i <- paired.map(map = map.stool, pats)
## DON'T RESAMPLE Individuals
duple.samps <- all(!duplicated(map.i$sample.id))
while ( !duple.samps ) {
map.i <- paired.map(map = map.stool, pats)
    duple.samps <- all(!duplicated(map.i$sample.id))
    print("resampling because one or more samples were used more than once")
}
p <- match(map.i$sample.id, row.names(meta.stool))
meta.i <- meta.stool[p,]
  map.i.ds <- map.stool$sample.id[p] # if a family has 2 patients they may be paired to the same relative
samps.pick <- map.stool$sample.id[p]
meta.stool.norm <- norm.human(samples.pick = samps.pick, metabolites = meta.stool)
meta.stool.norm$Label <- NULL
meta.i <- meta.stool.norm[match(map.i$sample.id, row.names(meta.stool.norm)) , ]
for(i in 1 : ncol(meta.stool.norm)){
s <- meta.i[,i]
res <- t.test(s[map.i$Disease.state == "Patient"],
s[map.i$Disease.state != "Patient"], paired = T)
stool.t.perm[j,i] <- res$p.value
stool.t.e.perm[j,i] <- res$estimate
stool.t.t.perm[j,i] <- res$statistic
stool.t.lo.perm[j,i] <- res$conf.int[1]
    stool.t.hi.perm[j,i] <- res$conf.int[2]
here <- (((j - 1) * d) + 1) : (j * d)
fold[ here , i] <- s[map.i$Disease.state == "Patient"] /s[map.i$Disease.state != "Patient"]
permutation[here] <- rep(j, d)
comp <- paste(map.i$sample.id[map.i$Disease.state == "Patient"],
"to",
map.i$sample.id[map.i$Disease.state != "Patient"])
comps[here] <- comp
comps.list[[j]] <- comp
}
}
stool.fold <- cbind(comps, permutation, fold)
stool.t.e.perm <- stool.t.e.perm[, order(colMedians(stool.t.perm)) ]
stool.t.perm <- stool.t.perm[, order(colMedians(stool.t.perm)) ]
stool.t.fdr <- stool.t.perm
for(i in 1 : nrow(stool.t.perm)){
stool.t.fdr[i,] <- p.adjust(stool.t.perm[i,], method = "fdr")
}
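##### Note on the correction above: method = "fdr" is the Benjamini-Hochberg step-up procedure,
##### e.g. p.adjust(c(0.001, 0.01, 0.02, 0.04, 0.20), method = "fdr") gives approximately
##### 0.005, 0.025, 0.033, 0.050, 0.200 (raw p * n / rank, made monotone); it is applied here
##### separately to each permutation's row of p-values.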
#postscript("stool_p_t.test.eps", width = 16.0, height = 6.0)
par(mar=c(12.1,4.1,4.1,2.1))
boxplot(stool.t.fdr, las = 2, ylab = "p after FDR correction",
main = paste("stool METABOLITES", num.perm, "PERMUTATIONS"))
abline(h = 0.05, col = 2)
#dev.off()
aa <- colnames(stool.t.e.perm)
bb <- substr(aa, 1, 1)
cc <- substr(aa, 2, 2)
bb[bb == "X" & grepl("[0-9]", cc)] <- ""
dd <- paste0(bb, substr(aa, 2, 90))
ee <- gsub("[.]", "-", dd)
ee <- gsub("--", "-", ee)
ee <- gsub("-acid", " acid", ee)
colnames(stool.t.e.perm) <- ee
plot.effect(stool.t.e.perm, name = "stool_human_individual_metabolites",
print = F)
`mean p value` <- apply(stool.t.perm, 2, mean)
`mean p after FDR` <- apply(stool.t.fdr, 2, mean)
`mean effect size` <- apply(stool.t.e.perm, 2, mean)
`mean CI 95% lo` <- apply(stool.t.lo.perm, 2, mean)
`mean CI 95% high` <- apply(stool.t.hi.perm, 2, mean)
`mean df` <- res$parameter
`mean t stat` <- apply(stool.t.t.perm, 2, mean)
stool.res <- cbind(`mean p value`,
`mean p after FDR`,
`mean effect size` ,
`mean CI 95% lo` ,
`mean CI 95% high` ,
`mean df`,
`mean t stat`)
write.table(stool.res, "Statistical_summaries/Sup_5a_tests.test")
########## compile source data file ###########################
permutation <- 1 : num.perm
source_data_5a <- cbind(permutation, stool.t.e.perm)
source_data_5a <- cbind("", "", source_data_5a)
source_data_5a[1,1] <- "figure 5b"
source_data_5a[1,2] <- "mean difference between paired samples"
source_data_5a <- rbind(source_data_5a, "", "")
write.csv(source_data_5a,"source_data_5a.csv", row.names = F)
#####################################################################
################################################################################
## ##
## SERUM ##
## ##
## ##
################################################################################
filter <- NULL
for(i in 1 : nrow(map.serum)){
# remove samples that can't be paired
map.i <- map.serum[i,]
fam.i <- map.serum[map.serum$Family.ID == map.i$Family.ID,]
pairs.pos <- fam.i[fam.i$Disease.state != map.i$Disease.state , ] #possible pairs
if(nrow(pairs.pos) > 0){
closest.date <- min(abs(pairs.pos$Collection.date - map.i$Collection.date))
filter[i] <- closest.date < 90
}else{
filter[i] <- FALSE
}
}
map.serum <- map.serum[filter,]
meta.serum <- meta.serum[match(map.serum$sample.id, row.names(meta.serum)) , ]
all(map.serum$sample.id == row.names(meta.serum))
pats <- unique(map.serum$DonorID[map.serum$Disease.state == "Patient"])
#meta.serum$Label <- NULL
meta.serum$Ethanol <- NULL # stored in EtOH
meta.serum$X <- NULL
meta.serum$X4.Pyridoxate <- NULL
serum.t.perm <-matrix(ncol = ncol(meta.serum) -1, nrow = num.perm)
colnames(serum.t.perm) <- colnames(meta.serum[-1])
serum.t.hi.perm <- serum.t.lo.perm <- serum.t.t.perm <- serum.t.e.perm <- serum.t.perm.no <- serum.t.perm
#eff <- array(dim = c(ncol(meta.serum), length(pats), num.perm))
fold <- matrix(ncol = ncol(meta.serum) - 1, nrow = num.perm * length(pats))
dim(fold)
d <- length(pats)
permutation <- NULL
comps <- NULL
comps.list <- list()
map.serum$Collection.date <- as.Date(map.serum$Collection.date, "%m/%d/%y")
for(j in 1 : num.perm){
print(j)
map.i <- paired.map(map = map.serum, pats)
## DON'T RESAMPLE Individuals
duple.samps <- all(!duplicated(map.i$sample.id))
while ( !duple.samps ) {
map.i <- paired.map(map = map.serum, pats)
    duple.samps <- all(!duplicated(map.i$sample.id))
    print("resampling because one or more samples were used more than once")
}
p <- match(map.i$sample.id, row.names(meta.serum))
meta.i <- meta.serum[p,]
  map.i.ds <- map.serum$sample.id[p] # if a family has 2 patients they may be paired to the same relative
samps.pick <- map.serum$sample.id[p]
meta.serum.norm <- norm.human(samples.pick = samps.pick, metabolites = meta.serum)
meta.serum.norm$Label <- NULL
meta.i <- meta.serum.norm[match(map.i$sample.id, row.names(meta.serum.norm)) , ]
for(i in 1 : ncol(meta.serum.norm)){
s <- meta.i[,i]
res <- t.test(s[map.i$Disease.state == "Patient"],
s[map.i$Disease.state != "Patient"], paired = T)
serum.t.perm[j,i] <- res$p.value
    serum.t.e.perm[j,i] <- res$estimate
serum.t.t.perm[j,i] <- res$statistic
serum.t.lo.perm[j,i] <- res$conf.int[1]
    serum.t.hi.perm[j,i] <- res$conf.int[2]
here <- (((j - 1) * d) + 1) : (j * d)
fold[ here , i] <- s[map.i$Disease.state == "Patient"] /s[map.i$Disease.state != "Patient"]
permutation[here] <- rep(j, d)
comp <- paste(map.i$sample.id[map.i$Disease.state == "Patient"],
"to",
map.i$sample.id[map.i$Disease.state != "Patient"])
comps[here] <- comp
comps.list[[j]] <- comp
}
}
fold <- cbind(comps, permutation, fold)
serum.t.e.perm <- serum.t.e.perm[, order(colMedians(serum.t.perm)) ]
serum.t.perm <- serum.t.perm[, order(colMedians(serum.t.perm)) ]
serum.t.fdr <- serum.t.perm
for(i in 1 : nrow(serum.t.perm)){
serum.t.fdr[i,] <- p.adjust(serum.t.perm[i,], method = "fdr")
}
serum.t.fdr <- serum.t.fdr[, order(colMedians(serum.t.fdr)) ]
par(mar=c(12.1,4.1,4.1,2.1))
boxplot(serum.t.fdr, las = 2, ylab = "p after BH correction",
main = paste("SERUM METABOLITES", num.perm, "PERMUTATIONS"))
abline(h = 0.05, col = 2)
aa <- colnames(serum.t.e.perm)
bb <- substr(aa, 1, 1)
cc <- substr(aa, 2, 2)
bb[bb == "X" & grepl("[0-9]", cc)] <- ""
dd <- paste0(bb, substr(aa, 2, 90))
ee <- gsub("[.]", "-", dd)
ee <- gsub("--", "-", ee)
ee <- gsub("-acid", " acid", ee)
colnames(serum.t.e.perm) <- ee
plot.effect(mat = serum.t.e.perm, name = "Serum_human_individual_metabolites",
print = F, type = "SERUM")
`mean p value` <- apply(serum.t.perm, 2, mean)
`mean p after FDR` <- apply(serum.t.fdr, 2, mean)
`mean effect size` <- apply(serum.t.e.perm, 2, mean)
`mean CI 95% lo` <- apply(serum.t.lo.perm, 2, mean)
`mean CI 95% high` <- apply(serum.t.hi.perm, 2, mean)
`mean df` <- res$parameter
`mean t stat` <- apply(serum.t.t.perm, 2, mean)
serum.res <- cbind(`mean p value`,
`mean p after FDR`,
`mean effect size` ,
`mean CI 95% lo` ,
`mean CI 95% high` ,
`mean df`,
`mean t stat`)
write.table(serum.res,
"Statistical_summaries/Sup_Fig_5b_tests.txt",
quote = F)
########## compile source data file ###########################
permutation <- 1 : num.perm
source_data_5b <- cbind(permutation, serum.t.e.perm)
source_data_5b <- cbind("", "", source_data_5b)
source_data_5b[1,1] <- "figure 5a"
source_data_5b[1,2] <- "mean difference between paired samples"
write.csv(source_data_5b,"source_data_5b.csv", row.names = F)
#####################################################################
system("cat source_data_5a.csv source_data_5b.csv > source_data/source_data_5.csv")
system("rm source_data_5a.csv")
system("rm source_data_5b.csv")
/Supplemental/Sup_figure_5.22.12.01.R | nvpinkham/Dysautonomia | no_license | R
\name{delta.ea.single}
\alias{delta.ea.single}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Calculates the early treatment effect estimate in Study A
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Calculates the early treatment effect estimate in Study B, generally not called directly by the user
}
\usage{
delta.ea.single(Axzero, Adeltazero, Aszero, Bxzero, Bdeltazero,
Bszero, Bxone, Bdeltaone, Bsone, t, landmark, weightA = NULL,
weightB = NULL, weight.both = NULL, extrapolate)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Axzero}{
%% ~~Describe \code{Axzero} here~~
observed event times in the control group in Study A
}
\item{Adeltazero}{
%% ~~Describe \code{Adeltazero} here~~
event/censoring indicators in the control group in Study A
}
\item{Aszero}{
%% ~~Describe \code{Aszero} here~~
surrogate marker values in the control group in Study A, NA for individuals not observable at the time the surrogate marker was measured
}
\item{Bxzero}{
%% ~~Describe \code{Bxzero} here~~
observed event times in the control group in Study B
}
\item{Bdeltazero}{
%% ~~Describe \code{Bdeltazero} here~~
event/censoring indicators in the control group in Study B
}
\item{Bszero}{
%% ~~Describe \code{Bszero} here~~
surrogate marker values in the control group in Study B, NA for individuals not observable at the time the surrogate marker was measured
}
\item{Bxone}{
%% ~~Describe \code{Bxone} here~~
observed event times in the treatment group in Study B
}
\item{Bdeltaone}{
%% ~~Describe \code{Bdeltaone} here~~
event/censoring indicators in the treatment group in Study B
}
\item{Bsone}{
%% ~~Describe \code{Bsone} here~~
surrogate marker values in the treatment group in Study B, NA for individuals not observable at the time the surrogate marker was measured
}
\item{t}{
%% ~~Describe \code{t} here~~
time of interest
}
\item{landmark}{
%% ~~Describe \code{landmark} here~~
landmark time of interest, t0
}
\item{weightA}{
%% ~~Describe \code{weightA} here~~
Study A weights used for perturbation resampling
}
\item{weightB}{
%% ~~Describe \code{weightB} here~~
Study B weights used for perturbation resampling
}
\item{weight.both}{
%% ~~Describe \code{weight.both} here~~
Study A (replicated) weights used for perturbation resampling
}
\item{extrapolate}{
%% ~~Describe \code{extrapolate} here~~
TRUE or FALSE; indicates whether local constant extrapolation should be used
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
Details are included in the documentation for design.study and recover.B
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
early treatment effect estimate
}
\author{
%% ~~who you are~~
Layla Parast
}
\examples{
data(dataA)
delta.ea.single(Axzero = dataA$x0, Adeltazero = dataA$delta0,
Aszero = dataA$s0, Bxzero = dataA$x0, Bdeltazero = dataA$delta0,
Bszero = dataA$s0, Bxone = dataA$x1, Bdeltaone = dataA$delta1,
Bsone = dataA$s1, t=1, landmark=0.5, extrapolate = TRUE)
}
\keyword{survival}
\keyword{nonparametric}% __ONLY ONE__ keyword per line
\keyword{internal}
|
/man/delta.ea.single.Rd
|
no_license
|
cran/SurrogateTest
|
R
| false | false | 3,363 |
rd
|
#' @rdname DownsideDeviation
#' @export
SemiDeviation <-
function (R)
{ # @author Peter Carl
# DESCRIPTION:
# This function is just a wrapper of DownsideDeviation with
# MAR = mean(x)
# see below
# FUNCTION:
if (is.vector(R)) {
R = na.omit(R)
return(DownsideDeviation(R, MAR=mean(R), method="full"))
}
else {
R = checkData(R, method = "matrix")
result = apply(R, 2, SemiDeviation)
result = matrix(result, nrow=1)
colnames(result) = colnames(R)
rownames(result) = "Semi-Deviation"
return(result)
}
}
#' @rdname DownsideDeviation
#' @export
SemiVariance <-
function (R)
{
if (is.vector(R)) {
R = na.omit(R)
return(DownsideDeviation(R, MAR=mean(R), method="subset"))
}
else {
R = checkData(R, method = "matrix")
result = apply(R, 2, SemiVariance)
dim(result) = c(1,NCOL(R))
colnames(result) = colnames(R)
rownames(result) = "Semi-Variance"
return(result)
}
}
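# Illustrative usage sketch (assumes the edhec demo returns shipped with the
# package are available via data(edhec)):
#   data(edhec)
#   SemiDeviation(edhec[, 1:3])  # one semi-deviation per column of returns
#   SemiVariance(edhec[, 1])     # subset-based variant for a single series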
###############################################################################
# R (http://r-project.org/) Econometrics for Performance and Risk Analysis
#
# Copyright (c) 2004-2012 Peter Carl and Brian G. Peterson
#
# This R package is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id: SemiDeviation.R 2163 2012-07-16 00:30:19Z braverock $
#
###############################################################################
|
/R/SemiDeviation.R
|
no_license
|
sanjivkv/PerformanceAnalytics
|
R
| false | false | 1,523 |
r
|
context("SQL safety")
test_that("Quotes are handled", {
  # without escapes, the "cow', 'pig" string is interpreted as two values
expect_equivalent(name2taxid("cow', 'pig"), NA_character_)
expect_equivalent(length(classification("cow', 'pig")), 1)
expect_equivalent(length(downstream("cow', 'pig")), 1)
expect_equivalent(length(children("cow', 'pig")), 1)
})
test_that("taxid-based functions fail on non-integers", {
expect_error(taxid2name("cow"))
})
|
/tests/testthat/test-safety.R
|
permissive
|
arendsee/taxizedb
|
R
| false | false | 463 |
r
|
context("SQL safety")
test_that("Quotes are handled", {
# without escapes, the "cow', 'pig" string is interpreted a two values
expect_equivalent(name2taxid("cow', 'pig"), NA_character_)
expect_equivalent(length(classification("cow', 'pig")), 1)
expect_equivalent(length(downstream("cow', 'pig")), 1)
expect_equivalent(length(children("cow', 'pig")), 1)
})
test_that("taxid-based functions fail on non-integers", {
expect_error(taxid2name("cow"))
})
|
numPerPatch4416 <- c(2426,2574)
|
/NatureEE-data-archive/Run203071/JAFSdata/JAFSnumPerPatch4416.R
|
no_license
|
flaxmans/NatureEE2017
|
R
| false | false | 32 |
r
|
#' BEA Regional state personal income data, annual.
#'
#' State personal income and population, annual, various years.
#'
#' @source Bureau of Economic Analysis
#' @format Data frame with 1 row per year.
#' \describe{
#' \item{stabbr}{state abbreviation, factor}
#' \item{year}{Calendar year, integer}
#' \item{pcpi}{Per capita personal income, dollars, numeric}
#' \item{pop}{State population, thousands, numeric}
#' \item{spi}{State personal income, nominal, $ billions, numeric}
#' }
#' @examples
#' spi.a
"spi.a"
"comment(spi.a)"
|
/R/spi.a.r
|
no_license
|
donboyd5/BEAData
|
R
| false | false | 537 |
r
|
context("3 - Missing data imputation")
test_that("se2msn throws error without valid input", {
expect_error(se2msn("test_se"))
})
test_that("se2msn returns a MSnSet object", {
expect_is(se2msn(test_se), "MSnSet")
})
test_that("manual_impute throws error without valid input", {
expect_error(manual_impute("test_vsn", 0.3, 1.8))
expect_error(manual_impute(test_vsn, "0.3", 1.8))
expect_error(manual_impute(test_vsn, 0.3, "1.8"))
NAs <- apply(SummarizedExperiment::assay(test_vsn), 1, function(x) any(is.na(x)))
no_NAs <- test_vsn[!NAs,]
expect_error(manual_impute(no_NAs, 0.3, 1.8))
})
test_that("manual_impute returns a MSnSet object", {
expect_is(manual_impute(test_vsn), "SummarizedExperiment")
})
test_that("manual_impute returns an object without missing values", {
result <- SummarizedExperiment::assay(manual_impute(test_vsn))
expect_true(all(!is.na(result)))
})
test_that("impute throws error without valid input", {
expect_error(impute("test_vsn", "QRILC"))
expect_error(impute(test_vsn, QRILC))
expect_error(impute(test_vsn, "FOO"))
test_vsn_error <- test_vsn
SummarizedExperiment::rowData(test_vsn_error) <- SummarizedExperiment::rowData(test_vsn_error)[,-(24:25)]
expect_error(impute(test_vsn_error, "QRILC"))
NAs <- apply(SummarizedExperiment::assay(test_vsn), 1, function(x) any(is.na(x)))
no_NAs <- test_vsn[!NAs,]
expect_warning(impute(no_NAs, "QRILC"))
})
test_that("impute returns a MSnSet object", {
expect_is(impute(test_vsn, "QRILC"), "SummarizedExperiment")
expect_is(impute(test_vsn, "man"), "SummarizedExperiment")
})
test_that("impute returns an object without missing values", {
result <- SummarizedExperiment::assay(impute(test_vsn, "QRILC"))
expect_true(all(!is.na(result)))
})
|
/tests/testthat/test_3_imputation.R
|
no_license
|
squirrelandr/DEP
|
R
| false | false | 1,763 |
r
|
context("3 - Missing data imputation")
test_that("se2msn throws error without valid input", {
expect_error(se2msn("test_se"))
})
test_that("se2msn returns a MSnSet object", {
expect_is(se2msn(test_se), "MSnSet")
})
test_that("manual_impute throws error without valid input", {
expect_error(manual_impute("test_vsn", 0.3, 1.8))
expect_error(manual_impute(test_vsn, "0.3", 1.8))
expect_error(manual_impute(test_vsn, 0.3, "1.8"))
NAs <- apply(SummarizedExperiment::assay(test_vsn), 1, function(x) any(is.na(x)))
no_NAs <- test_vsn[!NAs,]
expect_error(manual_impute(no_NAs, 0.3, 1.8))
})
test_that("manual_impute returns a MSnSet object", {
expect_is(manual_impute(test_vsn), "SummarizedExperiment")
})
test_that("manual_impute returns an object without missing values", {
result <- SummarizedExperiment::assay(manual_impute(test_vsn))
expect_true(all(!is.na(result)))
})
test_that("impute throws error without valid input", {
expect_error(impute("test_vsn", "QRILC"))
expect_error(impute(test_vsn, QRILC))
expect_error(impute(test_vsn, "FOO"))
test_vsn_error <- test_vsn
SummarizedExperiment::rowData(test_vsn_error) <- SummarizedExperiment::rowData(test_vsn_error)[,-(24:25)]
expect_error(impute(test_vsn_error, "QRILC"))
NAs <- apply(SummarizedExperiment::assay(test_vsn), 1, function(x) any(is.na(x)))
no_NAs <- test_vsn[!NAs,]
expect_warning(impute(no_NAs, "QRILC"))
})
test_that("impute returns a MSnSet object", {
expect_is(impute(test_vsn, "QRILC"), "SummarizedExperiment")
expect_is(impute(test_vsn, "man"), "SummarizedExperiment")
})
test_that("impute returns an object without missing values", {
result <- SummarizedExperiment::assay(impute(test_vsn, "QRILC"))
expect_true(all(!is.na(result)))
})
|
library(mkde)
### Name: condor
### Title: California condor locations
### Aliases: condor
### Keywords: datasets
### ** Examples
data(condor)
head(condor, 30)
|
/data/genthat_extracted_code/mkde/examples/condor.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 166 |
r
|
/functions/gene_annotation.R
|
no_license
|
RubenSanchezF/TFM
|
R
| false | false | 1,764 |
r
| ||
#' ---
#' title: "Analyze Complete Cases Models"
#' author: "Martin Skarzynski"
#' date: "`r Sys.Date()`"
#' ---
library(readr)
library(here)
library(dplyr)
library(ggplot2)
library(tidyr)
library(stringr)
library(forcats)
library(plotly)
#library(Cairo)
#devtools::install_github('hadley/ggplot2')
#read in datasets created by scripts 4 & 6
dat_quad1 <- read_rds("dat/6-model-diff-sizes.rds")
dat_quad2 <- read_rds("dat/4-model-first-run.rds")
dat_quad3 <- read_rds("dat/5-model-second-run.rds")
dat_quad4 <- read_rds("dat/6-model-third-run.rds")
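# Group models by concordance and AIC relative to their medians:
# 1 = high concordance / low AIC, 2 = high concordance / high AIC,
# 3 = low concordance / low AIC, 4 = low concordance / high AIC;
# runs 2 and 3 (quad 5 and 6 below) are kept as their own groups.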
dat_quad1 <- dat_quad1 %>%
rename(con=concordance,
n_vars=size,
hr_ci_upper = HR_CI_upper,
hr_ci_lower = HR_CI_lower) %>%
mutate(run = rep(1, nrow(.)),
quad = case_when(con > median(con) &
aic <= median(aic) ~ 1,
con > median(con) &
aic > median(aic) ~ 2,
con <= median(con) &
aic <= median(aic) ~ 3,
con <= median(con) &
aic > median(aic) ~ 4
)
)
dat_quad2 <- dat_quad2 %>%
drop_na() %>%
rename(con=concordance) %>%
mutate(type = str_replace(type, "cox", "coxph"),
type = str_replace(type, "rid", "ridge"),
run = rep(1, nrow(.)),
quad = case_when(con > median(con) &
aic <= median(aic) ~ 1,
con > median(con) &
aic > median(aic) ~ 2,
con <= median(con) &
aic <= median(aic) ~ 3,
con <= median(con) &
aic > median(aic) ~ 4
)
)
dat_quad3 <- dat_quad3 %>%
drop_na() %>%
rename(con=concordance) %>%
mutate(type = str_replace(type, "cox", "coxph"),
type = str_replace(type, "rid", "ridge"),
run = rep(2, nrow(.)),
quad = rep(5, nrow(.)))
dat_quad4 <- dat_quad4 %>%
drop_na() %>%
rename(con=concordance) %>%
mutate(type = str_replace(type, "cox", "coxph"),
type = str_replace(type, "rid", "ridge"),
run = rep(3, nrow(.)),
quad = rep(6, nrow(.)))
dat_quad <- bind_rows(dat_quad1,
dat_quad2,
dat_quad3,
dat_quad4) %>%
mutate(quad = as.factor(quad)) %>%
mutate(quad = fct_recode(quad,
"1A" = '1',
"1B" = '2',
"1C" = '3',
"1D" = '4',
"2" = '5',
"3" = '6'))
#dat_quad <- dat_quad %>%
# mutate(quadrun = interaction(quad, run))
glimpse(dat_quad)
# Figure 1
dq <- dat_quad %>%
mutate(type = str_replace(type, "coxph", "Cox"),
type = str_replace(type, "ridge", "Ridge")) %>%
ggplot(aes(x = aic,
y = con,
size = n_vars,
colour = quad)) +
geom_point(aes(shape = factor(type)),
alpha = 0.75,
stroke = 1) +
scale_shape(solid = FALSE) +
theme_minimal() +
labs(
x = 'Akaike Information Criterion',
y = 'Concordance'
#size = "Model Size",
#shape = "Model Type",
#colour = "Group"
) +
geom_hline(yintercept = 83.5) +
scale_color_manual(breaks = c("3", "2",
"1A","1B",
"1C","1D"),
values = c("forestgreen",
"darkturquoise",
"blue",
"darkviolet",
"darkorange",
"red")) +
scale_size(breaks = c(50, 25, 10, 5, 1))
pltly1 <- ggplotly(dq)
write_rds(pltly1, path = here("plotlyFig1.rds"))
plt1 <- read_rds(path = here("plotlyFig1.rds"))
plt1
# Unpack names using one of the list columns
namevec <- names(unlist(dat_quad$hr_ci_lower))
df_coef <- dat_quad %>%
unnest() %>%
mutate(name = namevec) %>%
filter(n_vars > 1)
#remove ridge from variable name
df_coef$name <- gsub("ridge\\(|\\)", "", df_coef$name)
nrow(df_coef)
# Figure 2
dc <- df_coef %>%
filter(!between(hazard_ratio, .99, 1.01),
name!="HSAITMOR",
name!="age_strat",
name!="age_strat2",
name!="age_strat3",
name!="age_strat4",
name!="HAJ0",
name!="HAA3",
name!="HAG1",
name!="HAQ7",
name!="HAN9",
name!="",
name!="HAT29",
name!="DMPSTAT",
name!="WTPXRP2") %>%
mutate(type = str_replace(type, "coxph", "Cox"),
type = str_replace(type, "ridge", "Ridge")) %>%
mutate(coef_pvalue = if_else(near(coef_pvalue, 0),
coef_pvalue+0.1^17,
coef_pvalue)) %>%
ggplot(aes(x = log2(hazard_ratio),
y = -log10(coef_pvalue),
colour = quad,
shape=type)) +
labs(colour = "Group",
x = 'log2 Hazard Ratio',
y = '-log10 p-value',
shape = "Model Type"
) +
geom_point(alpha = 0.5,
size = 1,
stroke = 1) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) +
geom_text(aes(label=name),
alpha = 0.75,
vjust = 1.2,
show.legend = FALSE,
check_overlap = TRUE) +
theme_minimal() +
theme(plot.margin = margin(t = -15)) +
geom_hline(yintercept = 10) +
scale_color_manual(breaks = c("3", "2",
"1A","1B",
"1C","1D"),
values = c("forestgreen",
"darkturquoise",
"blue",
"darkviolet",
"darkorange",
"red"))
write_rds(dc, here("volc.rds"))
pltly2 <- ggplotly(dc)
pltly2
write_rds(pltly2, path = here("plotlyFig2.rds"))
ggsave(here("img/2-volcano-final.pdf"))
ggsave(here("img/2-volcano-final300dpi.png"))
ggsave(here("img/2-volcano-final200dpi.png"), dpi = 200)
ggsave(here("img/2-volcano-final100dpi.png"), dpi = 100)
#filter out p-values greater than .1^10
df_sig <- df_coef %>%
filter(coef_pvalue<.1^10)
df_sig %>% glimpse
#obtain the order by count for name
ord <- df_sig %>%
count(name) %>%
arrange(n) %>%
filter(n>80) %>%
select(name)
#create name factor variable with levels ordered by count
df_sig$ord_name <- factor(df_sig$name, levels=ord$name)
#df_sig %>%
# count(name) %>%
# mutate(name = fct_reorder(name, n)) %>%
# ggplot(aes(x = name, y = n)) +
# geom_col() +
# coord_flip()
# Figure 3
ds <- df_sig %>%
drop_na() %>%
filter(name!="HSAITMOR",
name!="age_strat",
name!="age_strat2",
name!="age_strat3",
name!="age_strat4",
name!="HAJ0",
name!="HAA3",
name!="HAG1",
name!="HAQ7",
name!="HAN9",
name!="",
name!="HAT29",
name!="DMPSTAT",
name!="WTPXRP2") %>%
mutate_if(is.integer, as.factor) %>%
ggplot(aes(ord_name,fill=quad)) +
geom_bar(position = position_stack(reverse = TRUE)) +
scale_y_continuous(expand = c(0,0)) +
coord_flip() +
theme_minimal() +
theme(legend.position = "top") +
labs(fill = "Group",
x = 'Variable Name',
y = 'Count') +
scale_fill_manual(breaks = c( "1A","1B",
"1C","1D",
"2", "3"),
values = c("forestgreen",
"darkturquoise",
"blue",
"darkviolet",
"darkorange",
"red")) #+
#theme(plot.margin=unit(c(0,1,0,0),"cm"))
plt3 <- ggplotly(ds)
write_rds(ds, here("varbar.rds"))
write_rds(plt3, here("plotlyFig3.rds"))
ggsave(here("img/3-varbar-final300dpi.png"))
ggsave(here("img/3-varbar-final200dpi.png"), dpi = 200)
ggsave(here("img/3-varbar-final100dpi.png"), dpi = 100)
ggsave(here("img/3-varbar-final.pdf"))
# Table 1
df_sig %>%
drop_na() %>%
filter(name!="HSAITMOR",
name!="age_strat",
name!="age_strat2",
name!="age_strat3",
name!="age_strat4",
name!="HAJ0",
name!="HAA3",
name!="HAG1",
name!="HAQ7",
name!="HAN9",
name!="",
name!="HAT29",
name!="DMPSTAT",
name!="WTPXRP2") %>%
group_by(quad) %>%
rename(Name = name) %>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
knitr::kable()
# Table 2
df_sig %>%
drop_na() %>%
filter(name!="HSAITMOR",
name!="age_strat",
name!="age_strat2",
name!="age_strat3",
name!="age_strat4",
name!="HAJ0",
name!="HAA3",
name!="HAG1",
name!="HAQ7",
name!="HAN9",
name!="",
name!="HAT29",
name!="DMPSTAT",
name!="WTPXRP2") %>%
group_by(name) %>%
rename(Name = name) %>%
summarise(medianHR = round(median(hazard_ratio), 2),
n = n()) %>%
arrange(desc(n)) %>%
head(n=10) %>%
knitr::kable() %>% write_rds(here("table1.rds"))
|
/8-interactive-plots.R
|
no_license
|
marskar/nhanes
|
R
| false | false | 10,064 |
r
|
\name{backward}
\alias{backward}
\title{
Inference-based model selection for IRT models.
}
\description{
Select the optimal number of variables by means of LRT or Wald tests.
}
\usage{
backward(mod, level = 0.05, test = 'LRT', cov_type = 'hessian')
}
\arguments{
\item{mod}{
A fitted rasch model.
}
\item{level}{
Significance level to be used.
}
\item{test}{
A character string, matching one of \emph{LRT} or \emph{Wald}.
}
\item{cov_type}{
A character indicating which covariance matrix estimation is to be used. Possible choices are 'hessian', 'opg' and 'sandwich'.
}
}
\details{
\code{backward} uses \code{\link{drop1}} repeatedly until all explanatory variables in the model are significant at a specified level.
}
\value{
\code{backward} returns an object of class \code{rasch} containing the optimal subset of variables.
}
\author{
Fernando Massa, \email{fmassa@iesta.edu.uy}
}
\seealso{
\code{\link{drop1}}
}
|
/man/backward.Rd
|
no_license
|
nando11235813/raschreg
|
R
| false | false | 949 |
rd
|
################################################################################
# Introduction to R
# Brian Vegetabile
# 2016 UCI Statistics Bootcamp
################################################################################
setwd('~/Dropbox/StatGradBootcamp_2019/')
# Data Structures --------------------------------------------------------------
### Vectors
# Vectors are the most basic things that we will work with in R. They are very
# useful for storing information, but they must be chosen correctly to meet the
# needs of the analysis (mixed data, computational considerations, etc.)
# An Atomic Vector is a one dimensional data structure which contains elements of
# the same type.
# - Integers
# - Doubles
# - Characters
# - Logical
# Some Examples of Good Vectors
ints <- c(1L, 2L)
a <- 1
b <- c(1, 2)
words <- c('yes', 'no', 'maybe')
truths <- c(TRUE, FALSE, TRUE)
weird.vector <- c(0,1,c(1,2,4), 5) # This gets unpacked into one vector
# We can find the types of vectors
typeof(ints)
typeof(a)
typeof(b)
typeof(words)
typeof(truths)
# As well as test the types
is.double(a)
is.character(b)
# An example of misspecifying a vector
bad.vector <- c(1, 'no')
good.list <- list(1, "no")
# We see that R coerces the vector to a type `character' without informing you
# Always be careful when creating vectors that the type is what you intended
# Indexing of Vectors
# - vector[0] returns an empty vector of the same type
# - vector[loc] returns the value at the specified location, if there is no
# value at the location it returns NA
bad.vector[50]
# We also are able to quickly summarize data in a vector
numbers <- c(1,3,2,5,2,6,2,6)
length(numbers) # Number of Elements in a vector
mean(numbers) # Mean of the Numbers
summary(numbers) # Five-Number Summary of the Values
# What happens if we do summary on bad.vector?
mean(bad.vector)
summary(bad.vector)
# -> Notice that mean() on a vector of characters returns NA with a warning,
#    while summary() still summarizes it as character data.
### Lists
# Lists are more complicated than vectors and can store any type of data,
# including other lists. These will be useful for returning data in function
# or for passing information around in a compact fashion. Those of you that
# are familiar with python, you can think of these as dict objects in a sense.
good.list <- list(1 , 'no')
named.list <- list('five' = 1, 'six' = 2, 3,4,5)
list.of.list <- list(list(c(1,2,4,6)), list(c(4,5,2,6)))
list.of.list[[1]][[1]]
unlist(list.of.list[[1]])
unlist(list.of.list[1])
lapply(list.of.list[[1]], mean)
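# A few more access patterns on the lists defined above (illustrative):
named.list$five                                    # access a named element with $
named.list[['six']]                                # ...or by name with [[ ]]
sapply(list.of.list, function(l) mean(unlist(l)))  # mean of each sub-list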
numVec <- 1:10
##############################################################
# Baseball data examples
playerinfo <- read.csv("https://raw.githubusercontent.com/dspluta/Stats-Bootcamp/master/rData/playerInfo.csv")
bball <- read.csv('../rData/baseball.dat.txt')
bball$PlayerID <- 1:nrow(bball)
PlayerInfo <- data.frame(PlayerID=bball$PlayerID,
Name = bball$Name,
Salary = bball$Salary)
head(PlayerInfo)
HitData <- bball[,c(19, 2:13)]
head(HitData)
write.csv(PlayerInfo, file = '../rData/playerInfo.csv', row.names = F)
set.seed(1515)
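# Randomly mark ~5% of players as missing from the performance file so the
# merge below has mismatches to reconcile.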
observed.hit.info <- sample(c(TRUE, FALSE), nrow(HitData), prob=c(0.95, 0.05), replace = T)
write.csv(HitData[observed.hit.info,], file = '../rData/performanceInfo.csv', row.names = F)
dat1 <- read.csv('../rData/playerInfo.csv')
dat2 <- read.csv('../rData/performanceInfo.csv')
head(merge(dat1, dat2, by = 1, all = T))
pairs(dat2)
|
/Code/Rtutorial.R
|
no_license
|
dspluta/StatsGradBootcamp2019
|
R
| false | false | 3,585 |
r
|
#' Model specification error
#'
#' Test for model specification error.
#'
#' @param model An object of class \code{glm}.
#'
#' @return The summary of the fitted auxiliary \code{glm}.
#'
#' @references
#' Pregibon, D. 1979. Data analytic methods for generalized linear models. PhD diss., University of Toronto.
#'
#' Pregibon, D. 1980. Goodness of link tests for generalized linear models. Applied Statistics 29: 15–24.
#'
#' Tukey, J. W. 1949. One degree of freedom for non-additivity. Biometrics 5: 232–242
#'
#' @examples
#'
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_linktest(model)
#'
#' @export
#'
blr_linktest <- function(model) {
blr_check_model(model)
dat <-
model %>%
use_series(call) %>%
use_series(data) %>%
eval_tidy()
fit <- predict.glm(model, newdata = dat)
fit2 <- fit ^ 2
resp <- model$y
newdat <- tibble(fit = fit, fit2 = fit2, resp = resp)
glm(resp ~ fit + fit2, data = newdat, family = binomial(link = "logit")) %>%
summary()
}
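# Reading the output above (using the hsb2 example from the roxygen block): the
# linear predictor `fit` should come out significant, while a significant
# squared term `fit2` suggests possible specification error in the model.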
|
/R/blr-linktest.R
|
permissive
|
guhjy/blorr
|
R
| false | false | 1,088 |
r
|
# [Problem 1] Put the values 1,3,5,7,9 into the variable x and 1,2,3,4,5 into the variable y.
x <- c(1,3,5,7,9)
y <- c(1,2,3,4,5)
# [Problem 2] Combine x and y into a single vector with no duplicates and store it in u.
u <- sort(union(x,y), decreasing = FALSE)
u
# [Problem 3] Extract only the values shared by x and y and store them in i.
i <- intersect(x,y)
# [Problem 4] Extract only the values that appear in x but not in y and store them in m.
m <- setdiff(x, y)
m
# [Problem 5] Print TRUE if the values of x and y match, otherwise FALSE.
identical(x, y)
# [Problem 6] Multiply the values in x by 10 and assign the result back to x.
x <- x * 10
# [Problem 7] Change the 50 in x to 5.
x
x[3] <- 5
# [Problem 8] Turn the values 10 30 5 70 90 in x back into the original 1,3,5,7,9, using only union, integer division, and sort.
x <- c(10, 30, 5, 70, 90)
x <- sort(union((x[-3]%/%10), x[3]))
x[which(5 == x)] <- 50
x <- x%/%10
x
# [Problem 9] Add the number 11 at the very end of x, using append and length.
append(x, 11, after=length(x))
# [Problem 10] Change the last value in x to NA, using length.
x[length(x)] <- NA
# [Problem 11] Put name = 'king', height = 180, weight = 70 into the variable lst.
lst <- list(name = 'king', height = 180, weight = 70)
lst
# [Problem 12] Add blood = 'A' to lst.
lst$blood <- 'A'
lst
# [Problem 13] Change the value of name in lst to 'scott'.
lst$name <- 'scott'
# [Problem 14] Print only the value at index 2 of lst.
lst[[2]]
# [Problem 15] Rename the blood element of lst to blood type.
names(lst) <- c('name', 'height', 'weight', 'blood type')
names(lst)[4] <- 'blood type'
lst
'scott'%in%lst
which(lst=='scott')
# Assignment operators for variables (<-, <<-, =)
x <- 1
x
print(x)
y <<- 2
y
print(y)
z = 3
z
print(z)
x+y+z
sum(x <- c(1,2,3,4,5)) #<- : global assignment; c(): one-dimensional array
x
sum(y = c(1,2,3,4,5)) # = : local (argument) assignment
y
sum(d = c(1,2,3,4,5))
d
x <- 2
print(x)
class(x)
y <- 2L #L: represent the number as an integer
print(y)
class(y)
Z <- x+y #an operation on a double and an integer returns a double
print(z)
class(z)
str(z) #shows both the type and the value
is.numeric(z)
is.integer(y)
#character
s1 <- 'hello'
s1
class(s1)
s2 <- "안녕하세요"
class(s2)
is.character(s2)
#boolean
# AND : &
# OR : |
TRUE & TRUE
TRUE & FALSE
TRUE | TRUE
TRUE | FALSE
T & T
T & F
T | T
T | F
T <- TRUE #use T as a variable name
T
class(T)
is.logical(T) #check whether the variable is logical (boolean)
is.logical(z)
F <- FALSE
F
#NA (Not Available): missing value, e.g. when a value was accidentally left out during data entry
a <- 100; b <- 90; c <- NA;
a
b
c
a+b+c #an operation involving NA returns NA
is.na(c) #check whether the value is NA
is.na(a)
#NULL: used when a variable has not been initialized; represents an undefined value
x <- NULL
x
is.na(x)
is.null(x)
y <- 100
x + y #an operation involving NULL returns an empty result of that type
z <- NA
z + y #an operation involving NA returns NA
#Arithmetic operators
100 - 99
99 - 100
2*3
100/2
100/3
100%/%3
100%%3
10^2
10**2
#Comparison operators
10 > 5
10 < 5
10 >= 5
10 <= 5
10 == 5
10 != 5
10 > 9 & 10 >= 10
10 < 9 | 10 >= 10
#Exponential (scientific) notation
1e2 #1*10^2
5e-1 #5*10^-1
100000 #1e+5
#Data structures
#1. vector
# - a one-dimensional array whose elements all share the same data type (R's basic data structure)
# - c() : combine values
# - cannot be nested
# - single data type
# - coercion rule: integer < double < character
x <- c(1,2,3,4,5)
x
mode(x)
class(x)
str(x)
x <- c(1,2,3,4,'5')
x
mode(x)
class(x)
str(x)
x <- c(1,2,3.14,4,5)
x
mode(x)
class(x)
str(x)
x <- c(1,2,3,c(4,5))
s1 <- c('서울', '대구', '광주', '부산')
mode(s1)
class(s1)
str(s1)
x <- c(1,2,3, sum=c(4,5))
y <- c(1,2,3,sum(c(4,5)))
x
y
x <- c('국어'=90, '수학'=c(95, 90), '과학'=100)
x
z <- c('과목'=c(80, 90, 96))
z
names(z) <- c('국어', '영어', '수학') #names(): set the element names
z
names(z) <- NULL
names(z) <- NA
y<-c(1,2,3,4,5)
names(y) <- c('하나', '둘', '셋', '넷', '다섯')
y
length(y)
NROW(y)
y[1]
y[1:3]
y[-1] #excluding element 1
y[c(-1,-3)] #excluding elements 1 and 3
y['하나'] #look up by name
y[c('하나', '다섯')]
y[-1:-4]
x <- 1:100
x
x <- c(1:1000)
x
#sequence: generate a run of numbers automatically
#seq(start, end, increment)
seq(1, 5, 1)
seq(0, 1000, 5)
10:1
x <- c(2,4,6,8,10)
x
seq_along(x)
rep(1:5, times=2)
rep(1:5, each=2)
rep(1:5, each=2, times=2)
#Modifying values in a vector
x <- c(1:5)
x[2] <- 8
x
x[3:5] <- c(30, 40, 50)
x
#Adding values to a vector
x[6] <- 60
x[8] <- 80
x
x[7] <- 70
x
append(x, 90, after=8)
x
x <- append(x, 90, after=8)
x
x <- append(x, 50, after=5)
x
#Vector arithmetic
x <- c(1:5)
x + 10
x * 10
x / 2
x %/% 2
x %% 2
#Comparing vectors
x <- c(1,2,3)
y <- c(1,2,3)
z <- c(1,2,4)
x == y
x == z
identical(x, y)
identical(x, z)
w <- c(1:5)
x == w
identical(x,w)
x <- c(1,2,3,4)
y <- c(1,4,6)
x == y
identical(x, y) #TRUE when the two vectors hold identical values
setequal(x, y) #TRUE when the two vectors are equal as sets
union(x, y) #union
intersect(x, y) #intersection
setdiff(x, y) #set difference
1 %in% x
5 %in% x
x <- c('b', 'a', 'd', 'a', NA)
x
'a' %in% x
x == 'a'
x[x=='a']
which('a' == x) #return the indices of 'a' in x
x[which('a' == x)]
is.na(x)
which(NA == x)
which(is.na(x))
x <- c(1:5)
y <- c(1,2,3,4,5)
setequal(x, y)
identical(x, y) #FALSE because x is integer and y is double
y <- as.integer(y)
identical(x, y)
help(identical)
?identical
#list
#- a structure that can store vectors of different data types, or other lists
#- list(key = value, key = value, ...)
x <- list(name = '홍길동', addr = '서울시', pn = '010-1111-1234')
x
str(x)
class(x)
mode(x)
x$name
x$addr
x[1]
x[1:3]
#Adding a list element
x$sal <- 10000
x
#Removing a list element
x$sal <- NULL
x
#Modifying a list element
x$pn <- '010-1234-1004'
x
y <- list(a=list(val=c(1,2,3)), b=list(val=c(1,2,3,4)))
y
y$a
y$b
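# A few extra lookups on the nested list above (illustrative):
y$a$val        # numeric vector stored inside element a
y$b$val[2]     # second value of element b
y[['a']]       # [[ ]] also works with element names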
|
/180723.R
|
no_license
|
RainyHeaven/DA_Academy
|
R
| false | false | 6,182 |
r
|
\name{SearchKEGG}
\alias{SearchKEGG}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Function to find compound information using its id by looking in a database that contains KEGG pathways
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
SearchKEGG(id, type)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{id}{
%% ~~Describe \code{id} here~~
}
\item{type}{
%% ~~Describe \code{type} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
1. Creek DJ, Jankevics A, Burgess KE, Breitling R, Barrett MP. IDEOM: an Excel
interface for analysis of LC-MS-based metabolomics data. Bioinformatics. 2012 Apr
1;28(7):1048-9.
}
\author{Sarah Cherkaoui}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (id, type)
{
position <- vector()
if (type == "name") {
position <- Mmatch(id, DBKEGG[, 3])
}
if (type == "pubchem") {
position <- Mmatch(id, DBKEGG[, 9])
}
if (type == "hmdb") {
position <- Mmatch(id, DBKEGG[, 11])
}
if (type == "kegg") {
        position <- Mmatch(id, DBKEGG[, 8])
}
if (type == "biocyc") {
position <- Mmatch(id, DBKEGG[, 12])
}
if (type == "inchikey") {
position <- Mmatch(id, DBKEGG[, 15])
}
if (length(position) > 0) {
Name <- fixlc(DBKEGG[position, 3])
Pubchem <- fixlc(DBKEGG[position, 9])
HMDB <- fixlc(DBKEGG[position, 11])
KeggID <- fixlc(DBKEGG[position, 8])
BioCyc <- fixlc(DBKEGG[position, 12])
InchiKey <- fixlc(DBKEGG[position, 15])
Map <- fixlc(DBKEGG[position, 5])
KeggPathway <- fixlc(strsplit(DBKEGG[position, 6], "__"))
KeggPathID <- fixlc(DBSMPDB[position, 7])
return(list(Name = Name, Pubchem = Pubchem, HMDB = HMDB,
KeggID = KeggID, BioCyc = BioCyc, InchiKey = InchiKey,
Map = Map, KeggPathway = KeggPathway, KeggPathID = KeggPathID))
}
else (return("error"))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/SearchKEGG.Rd
|
no_license
|
cherkaos/MetabToPathways
|
R
| false | false | 2,703 |
rd
|
#' Calculate the size of an object.
#'
#' `obj_size()` computes the size of an object or set of objects;
#' `obj_sizes()` breaks down the individual contribution of multiple objects
#' to the total size.
#'
#' @section Compared to `object.size()`:
#' Compared to [object.size()], `obj_size()`:
#'
#' * Accounts for all types of shared values, not just strings in
#' the global string pool.
#'
#' * Includes the size of environments (up to `env`)
#'
#' * Accurately measures the size of ALTREP objects.
#'
#' @section Environments:
#' `obj_size()` attempts to take into account the size of the
#' environments associated with an object. This is particularly important
#' for closures and formulas, since otherwise you may not realise that you've
#' accidentally captured a large object. However, it's easy to over count:
#' you don't want to include the size of every object in every environment
#' leading back to the [emptyenv()]. `obj_size()` takes
#' a heuristic approach: it never counts the size of the global environment,
#' the base environment, the empty environment, or any namespace.
#'
#' Additionally, the `env` argument allows you to specify another
#' environment at which to stop. This defaults to the environment from which
#' `obj_size()` is called to prevent double-counting of objects created
#' elsewhere.
#'
#' @export
#' @param ... Set of objects to compute size.
#' @param env Environment in which to terminate search. This defaults to the
#' current environment so that you don't include the size of objects that
#' are already stored elsewhere.
#'
#' Regardless of the value here, `obj_size()` never looks past the
#' global or base environments.
#'
#' @return An estimate of the size of the object, in bytes.
#' @examples
#' # obj_size correctly accounts for shared references
#' x <- runif(1e4)
#' obj_size(x)
#'
#' z <- list(a = x, b = x, c = x)
#' obj_size(z)
#'
#' # this means that object size is not transitive
#' obj_size(x)
#' obj_size(z)
#' obj_size(x, z)
#'
#' # use obj_size() to see the unique contribution of each component
#' obj_sizes(x, z)
#' obj_sizes(z, x)
#' obj_sizes(!!!z)
#'
#' # obj_size() also includes the size of environments
#' f <- function() {
#' x <- 1:1e4
#' a ~ b
#' }
#' obj_size(f())
#'
#' # In R 3.5 and greater, `:` creates a special "ALTREP" object that only
#' # stores the first and last elements. This will make some vectors much
#' # smaller than you'd otherwise expect
#' obj_size(1:1e6)
obj_size <- function(..., env = parent.frame()) {
dots <- list2(...)
size <- obj_size_(dots, env, size_node(), size_vector())
new_bytes(size)
}
#' @rdname obj_size
#' @export
obj_sizes <- function(..., env = parent.frame()) {
dots <- list2(...)
size <- obj_csize_(dots, env, size_node(), size_vector())
names(size) <- names(dots)
new_bytes(size)
}
size_node <- function(x) as.vector(utils::object.size(quote(expr = )))
size_vector <- function(x) as.vector(utils::object.size(logical()))
new_bytes <- function(x) {
structure(x, class = "lobstr_bytes")
}
#' @export
print.lobstr_bytes <- function(x, digits = 3, ...) {
fx <- format(x, big.mark = ",", scientific = FALSE)
if (length(x) == 1) {
cat_line(fx, " B")
} else {
if (!is.null(names(x))) {
cat_line(format(names(x)), ": ", fx, " B")
} else {
cat_line("* ", fx, " B")
}
}
}
#' @export
c.lobstr_bytes <- function(...) {
new_bytes(NextMethod())
}
#' @export
`[.lobstr_bytes` <- function(...) {
new_bytes(NextMethod())
}
# Helpers for interactive exploration -------------------------------------
comp <- function(x) {
base <- utils::object.size(x)
lobstr <- obj_size(x)
c(base = base, lobstr = lobstr, diff = base - lobstr)
}
insp <- function(x) {
eval(quote(.Internal(inspect(x))))
}
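# Illustrative use of the interactive helpers above (assumes the package is
# built so that obj_size_() and friends are available):
#   x <- list(runif(1e4), runif(1e4))
#   comp(x)   # named vector: base = object.size(), lobstr = obj_size(), diff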
|
/lobstr/R/size.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | false | 3,783 |
r
|
#' Calculate the size of an object.
#'
#' `obj_size()` computes the size of an object or set of objects;
#' `obj_sizes()` breaks down the individual contribution of multiple objects
#' to the total size.
#'
#' @section Compared to `object.size()`:
#' Compared to [object.size()], `obj_size()`:
#'
#' * Accounts for all types of shared values, not just strings in
#' the global string pool.
#'
#' * Includes the size of environments (up to `env`)
#'
#' * Accurately measures the size of ALTREP objects.
#'
#' @section Environments:
#' `obj_size()` attempts to take into account the size of the
#' environments associated with an object. This is particularly important
#' for closures and formulas, since otherwise you may not realise that you've
#' accidentally captured a large object. However, it's easy to over count:
#' you don't want to include the size of every object in every environment
#' leading back to the [emptyenv()]. `obj_size()` takes
#' a heuristic approach: it never counts the size of the global environment,
#' the base environment, the empty environment, or any namespace.
#'
#' Additionally, the `env` argument allows you to specify another
#' environment at which to stop. This defaults to the environment from which
#' `obj_size()` is called to prevent double-counting of objects created
#' elsewhere.
#'
#' @export
#' @param ... Set of objects to compute size.
#' @param env Environment in which to terminate search. This defaults to the
#' current environment so that you don't include the size of objects that
#' are already stored elsewhere.
#'
#' Regardless of the value here, `obj_size()` never looks past the
#' global or base environments.
#'
#' @return An estimate of the size of the object, in bytes.
#' @examples
#' # obj_size correctly accounts for shared references
#' x <- runif(1e4)
#' obj_size(x)
#'
#' z <- list(a = x, b = x, c = x)
#' obj_size(z)
#'
#' # this means that object size is not transitive
#' obj_size(x)
#' obj_size(z)
#' obj_size(x, z)
#'
#' # use obj_size() to see the unique contribution of each component
#' obj_sizes(x, z)
#' obj_sizes(z, x)
#' obj_sizes(!!!z)
#'
#' # obj_size() also includes the size of environments
#' f <- function() {
#' x <- 1:1e4
#' a ~ b
#' }
#' obj_size(f())
#'
#' # In R 3.5 and greater, `:` creates a special "ALTREP" object that only
#' # stores the first and last elements. This will make some vectors much
#' # smaller than you'd otherwise expect
#' obj_size(1:1e6)
obj_size <- function(..., env = parent.frame()) {
dots <- list2(...)
size <- obj_size_(dots, env, size_node(), size_vector())
new_bytes(size)
}
#' @rdname obj_size
#' @export
obj_sizes <- function(..., env = parent.frame()) {
dots <- list2(...)
size <- obj_csize_(dots, env, size_node(), size_vector())
names(size) <- names(dots)
new_bytes(size)
}
size_node <- function(x) as.vector(utils::object.size(quote(expr = )))
size_vector <- function(x) as.vector(utils::object.size(logical()))
new_bytes <- function(x) {
structure(x, class = "lobstr_bytes")
}
#' @export
print.lobstr_bytes <- function(x, digits = 3, ...) {
fx <- format(x, big.mark = ",", scientific = FALSE)
if (length(x) == 1) {
cat_line(fx, " B")
} else {
if (!is.null(names(x))) {
cat_line(format(names(x)), ": ", fx, " B")
} else {
cat_line("* ", fx, " B")
}
}
}
#' @export
c.lobstr_bytes <- function(...) {
new_bytes(NextMethod())
}
#' @export
`[.lobstr_bytes` <- function(...) {
new_bytes(NextMethod())
}
# Helpers for interactive exploration -------------------------------------
comp <- function(x) {
base <- utils::object.size(x)
lobstr <- obj_size(x)
c(base = base, lobstr = lobstr, diff = base - lobstr)
}
insp <- function(x) {
eval(quote(.Internal(inspect(x))))
}
|
# Must be executed BEFORE rgl is loaded on headless devices.
options(rgl.useNULL=TRUE)
library(shiny)
library(shinyRGL)
library(rgl)
library(RColorBrewer)
d2 <- read.csv("d2.k29.csv", sep = " ")
shinyServer(function(input, output) {
#zoom <- 1
#userMatrix <- matrix(c(1, 0, 0, 0, 0, 0.3420201, 0.9396926, 0, 0, -0.9396926, 0.3420201, 0, 0, 0, 0, 1), ncol=4,nrow=4)
#windowRect <-c(0, 0, 256, 256)
output$myPlotRGLT <- renderWebGL({
bintext <-{}
d2_unique_cluster <- unique(d2$db_cluster[d2$db_cluster != 0])
bkxPts <- d2$PC2
bkyPts <- d2$PC1
bkzPts <- d2$PC3
d2subset <- d2[d2$db_cluster != 0 & d2$db_cluster %in% d2_unique_cluster[1:input$tbins],]
fgxPts <- d2subset$PC2
fgyPts <- d2subset$PC1
fgzPts <- d2subset$PC3
fgcol <- d2_unique_cluster[d2subset$db_cluster]
# Add a label in the center of each extracted bin
dbspan <- length(d2_unique_cluster)
gbr<-colorRampPalette(c("green","blue","orange","red"))
palette(adjustcolor(gbr(dbspan+1), alpha.f = 1))
for (i in 1:length(d2_unique_cluster)) {
bintext <- rbind(bintext, c(as.character(d2_unique_cluster[i]),
mean(d2$PC2[d2$db_cluster == d2_unique_cluster[i]]),
mean(d2$PC1[d2$db_cluster == d2_unique_cluster[i]]),
mean(d2$PC3[d2$db_cluster == d2_unique_cluster[i]])))
}
#view in 3d
#open3d(zoom = zoom, userMatrix = userMatrix, windowRect=windowRect)
#par3d(cex=.6)
plot3d(bkxPts[1:input$tpts],
bkyPts[1:input$tpts],
bkzPts[1:input$tpts],
col=rgb(0,0,0),
size=3,
type='p',
alpha=0.3,
xlab="PC1",
ylab="PC2",
zlab="PC3")
points3d(fgxPts,
fgyPts,
fgzPts,
col=fgcol+1,
size=3,
alpha=.75)
if (input$textflag == TRUE) {
text3d(x=bintext[,2], y=bintext[,3], z=bintext[,4],text = bintext[,1])
}
axes3d()
zoom<-par3d()$zoom
userMatrix<-par3d()$userMatrix
windowRect<-par3d()$windowRect
})
})
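# Editorial note: the plot reacts to three UI inputs assumed to be defined in
# the accompanying ui.R -- input$tbins (number of clusters drawn), input$tpts
# (number of background points), and input$textflag (toggle for bin labels).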
|
/plot3d/plot3d2/server.R
|
permissive
|
leejz/meta-omics-scripts
|
R
| false | false | 2,127 |
r
|
# Must be executed BEFORE rgl is loaded on headless devices.
options(rgl.useNULL=TRUE)
library(shiny)
library(shinyRGL)
library(rgl)
library(RColorBrewer)
d2 <- read.csv("d2.k29.csv", sep = " ")
shinyServer(function(input, output) {
#zoom <- 1
#userMatrix <- matrix(c(1, 0, 0, 0, 0, 0.3420201, 0.9396926, 0, 0, -0.9396926, 0.3420201, 0, 0, 0, 0, 1), ncol=4,nrow=4)
#windowRect <-c(0, 0, 256, 256)
output$myPlotRGLT <- renderWebGL({
bintext <-{}
d2_unique_cluster <- unique(d2$db_cluster[d2$db_cluster != 0])
bkxPts <- d2$PC2
bkyPts <- d2$PC1
bkzPts <- d2$PC3
d2subset <- d2[d2$db_cluster != 0 & d2$db_cluster %in% d2_unique_cluster[1:input$tbins],]
fgxPts <- d2subset$PC2
fgyPts <- d2subset$PC1
fgzPts <- d2subset$PC3
fgcol <- d2_unique_cluster[d2subset$db_cluster]
# Add a label in the center of each extracted bin
dbspan <- length(d2_unique_cluster)
gbr<-colorRampPalette(c("green","blue","orange","red"))
palette(adjustcolor(gbr(dbspan+1), alpha.f = 1))
for (i in 1:length(d2_unique_cluster)) {
bintext <- rbind(bintext, c(as.character(d2_unique_cluster[i]),
mean(d2$PC2[d2$db_cluster == d2_unique_cluster[i]]),
mean(d2$PC1[d2$db_cluster == d2_unique_cluster[i]]),
mean(d2$PC3[d2$db_cluster == d2_unique_cluster[i]])))
}
#view in 3d
#open3d(zoom = zoom, userMatrix = userMatrix, windowRect=windowRect)
#par3d(cex=.6)
plot3d(bkxPts[1:input$tpts],
bkyPts[1:input$tpts],
bkzPts[1:input$tpts],
col=rgb(0,0,0),
size=3,
type='p',
alpha=0.3,
xlab="PC1",
ylab="PC2",
zlab="PC3")
points3d(fgxPts,
fgyPts,
fgzPts,
col=fgcol+1,
size=3,
alpha=.75)
if (input$textflag == TRUE) {
text3d(x=bintext[,2], y=bintext[,3], z=bintext[,4],text = bintext[,1])
}
axes3d()
zoom<-par3d()$zoom
userMatrix<-par3d()$userMatrix
windowRect<-par3d()$windowRect
})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{baltimore}
\alias{baltimore}
\title{Baltimore neighborhood demographics}
\format{A data frame with 278 rows and 44 variables:
\describe{
\item{OBJECTID}{identifier for each neighborhood}
\item{Name}{name of the neighborhood}
\item{Population}{size of the population}
\item{White}{number of Caucasians}
\item{Blk_AfAm}{number of African Americans}
\item{...}{lots more}
}}
\usage{
baltimore
}
\description{
A dataset with demographic information for each of Baltimore's neighborhoods.
The data is available from the Baltimore City Open GIS Data \url{http://gis-baltimore.opendata.arcgis.com/}.
}
\examples{
data(baltimore_map)
data(baltimore)
baltimore_map <- dplyr::left_join(baltimore_map, baltimore, by="OBJECTID")
ggplot(baltimore_map, aes(x = long, y = lat, group = group)) +
geom_polygon(aes(fill = Population))
ggplot(baltimore_map, aes(x = long, y = lat, group = group)) +
geom_polygon(aes(fill = Blk_AfAm/Population*100))
}
\keyword{datasets}
|
/man/baltimore.Rd
|
no_license
|
heike/cityshapes
|
R
| false | true | 1,059 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{baltimore}
\alias{baltimore}
\title{Baltimore neighborhood demographics}
\format{A data frame with 278 rows and 44 variables:
\describe{
\item{OBJECTID}{identifier for each neighborhood}
\item{Name}{name of the neighborhood}
\item{Population}{size of the population}
\item{White}{number of Caucasians}
\item{Blk_AfAm}{number of African Americans}
\item{...}{lots more}
}}
\usage{
baltimore
}
\description{
A dataset with demographic information for each of Baltimore's neighborhoods.
The data is available from the Baltimore City Open GIS Data \url{http://gis-baltimore.opendata.arcgis.com/}.
}
\examples{
data(baltimore_map)
data(baltimore)
baltimore_map <- dplyr::left_join(baltimore_map, baltimore, by="OBJECTID")
ggplot(baltimore_map, aes(x = long, y = lat, group = group)) +
geom_polygon(aes(fill = Population))
ggplot(baltimore_map, aes(x = long, y = lat, group = group)) +
geom_polygon(aes(fill = Blk_AfAm/Population*100))
}
\keyword{datasets}
|
# Add columns of logged data to data series, appends cols to existing dataframe,
# returns df to be designated as a named object
## Simon Dedman 2018.10.10
AddLogs <- function(x, # dataframe with columns you want to log within it
toLog){ # single or vector of column names within x to log
for (i in toLog){ #loop through variable columns for each log variable
x <- cbind(x, log1p(x[i])) #bind that as the final column of x
colno <- length(colnames(x)) #get that col number
colnames(x)[colno] <- paste0(i, "_log") #name new col name_log
} #close i, all variables logged
return(x) #output updated dataframe
} #close function
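## Illustrative usage (editorial sketch; the data frame and column names below
## are hypothetical, not part of this project's data):
# dat <- data.frame(site = c("A", "B", "C"), biomass = c(12.5, 310.2, 7.1))
# dat <- AddLogs(dat, "biomass")
# names(dat)  # "site" "biomass" "biomass_log"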
|
/R/AddLogs.R
|
no_license
|
SimonDedman/FarallonInstitute
|
R
| false | false | 676 |
r
|
# Add columns of logged data to data series, appends cols to existing dataframe,
# returns df to be designated as a named object
## Simon Dedman 2018.10.10
AddLogs <- function(x, # dataframe with columns you want to log within it
toLog){ # single or vector of column names within x to log
for (i in toLog){ #loop through variable columns for each log variable
x <- cbind(x, log1p(x[i])) #bind that as the final column of x
colno <- length(colnames(x)) #get that col number
colnames(x)[colno] <- paste0(i, "_log") #name new col name_log
} #close i, all variables logged
return(x) #output updated dataframe
} #close function
|
PM25Emissions <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#merge dataset
md <- merge(SCC,PM25Emissions,by.x="SCC", by.y="SCC")
library(dplyr)
library(ggplot2)
# PLOT 5
PM25Emissions_Baltimore <- PM25Emissions[PM25Emissions$fips == "24510",]
motor_v <- grepl("vehicle", SCC$EI.Sector, ignore.case=TRUE)
SCC_motor_v <- SCC[motor_v, ]
PM25Emissions_Baltimore_SCC_vehicle <- merge(PM25Emissions_Baltimore, SCC_motor_v, by="SCC")
PM25Emissions_Baltimore_SCC_vehicle_year <- group_by(PM25Emissions_Baltimore_SCC_vehicle, year)
PM25Emissions_Baltimore_SCC_vehicle_year_sum <- summarise(PM25Emissions_Baltimore_SCC_vehicle_year, total_year = sum(Emissions))
plot.new()
png(filename="Plot5.png", width = 480, height = 280, units = "px")
plot5 <- qplot(year,total_year,data=PM25Emissions_Baltimore_SCC_vehicle_year_sum, ylab = "Total Emissions", main = "Emissions from motor vehicle sources in Baltimore")
print(plot5)
dev.off()
|
/Exploratory_Data_Analysis/Plot5.R
|
no_license
|
crittersik/JH_Data_Projects_Coursera
|
R
| false | false | 963 |
r
|
PM25Emissions <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#merge dataset
md <- merge(SCC,PM25Emissions,by.x="SCC", by.y="SCC")
library(dplyr)
library(ggplot2)
# PLOT 5
PM25Emissions_Baltimore <- PM25Emissions[PM25Emissions$fips == "24510",]
motor_v <- grepl("vehicle", SCC$EI.Sector, ignore.case=TRUE)
SCC_motor_v <- SCC[motor_v, ]
PM25Emissions_Baltimore_SCC_vehicle <- merge(PM25Emissions_Baltimore, SCC_motor_v, by="SCC")
PM25Emissions_Baltimore_SCC_vehicle_year <- group_by(PM25Emissions_Baltimore_SCC_vehicle, year)
PM25Emissions_Baltimore_SCC_vehicle_year_sum <- summarise(PM25Emissions_Baltimore_SCC_vehicle_year, total_year = sum(Emissions))
plot.new()
png(filename="Plot5.png", width = 480, height = 280, units = "px")
plot5 <- qplot(year,total_year,data=PM25Emissions_Baltimore_SCC_vehicle_year_sum, ylab = "Total Emissions", main = "Emissions from motor vehicle sources in Baltimore")
print(plot5)
dev.off()
|
test_that("residual_transform works", {
n_genes <- 100
n_cells <- 500
beta0 <- rnorm(n = n_genes, mean = 2, sd = 0.3)
sf <- rchisq(n = n_cells, df = 100)
sf <- sf / mean(sf)
Mu <- exp( beta0 %*% t(log(sf)) )
Y <- matrix(rnbinom(n = n_genes * n_cells, mu = Mu, size = 0.1), nrow = n_genes, ncol = n_cells)
summary(MatrixGenerics::colMeans2(Y))
summary(MatrixGenerics::rowMeans2(Y))
resids <- residual_transform(Y, verbose = FALSE)
res2 <- residual_transform(Y, offset_model = FALSE, verbose = FALSE, return_fit = TRUE)
expect_true(all(abs(res2$fit$Beta[,2] - 1) < 0.1))
})
test_that("different input types work", {
n_genes <- 100
n_cells <- 500
beta0 <- rnorm(n = n_genes, mean = 2, sd = 0.3)
sf <- rchisq(n = n_cells, df = 100)
sf <- sf / mean(sf)
Mu <- exp( beta0 %*% t(log(sf)) )
Y <- matrix(rnbinom(n = n_genes * n_cells, mu = Mu, size = 0.1), nrow = n_genes, ncol = n_cells)
# matrix
res <- residual_transform(Y, verbose = FALSE, return_fit = TRUE, residual_type = "pearson")
# glmGamPoi
res2 <- residual_transform(res$fit, residual_type = "pearson")
# SummarizedExperiment
res3 <- residual_transform(res$fit$data, residual_type = "pearson")
expect_equal(res$Residuals, res2)
expect_equal(res$Residuals, res3)
})
test_that("overdisperion = 'global' works", {
set.seed(1)
n_genes <- 100
n_cells <- 500
beta0 <- rnorm(n = n_genes, mean = 2, sd = 0.3)
sf <- rchisq(n = n_cells, df = 100)
sf <- sf / mean(sf)
Mu <- exp( beta0 %*% t(log(sf)) )
Y <- matrix(rnbinom(n = n_genes * n_cells, mu = Mu, size = 1/0.1), nrow = n_genes, ncol = n_cells)
tmp <- transformGamPoi(Y, "rand", overdispersion = "global", verbose = FALSE, on_disk = FALSE, return_fit = TRUE)
expect_equal(tmp$fit$overdispersions, rep(0.1, n_genes), tolerance = 0.1)
})
test_that("transformGamPoi errors if 'residual_type' is specified", {
set.seed(1)
Y <- matrix(rnbinom(n = 24, mu = 3, size = 1/0.1), nrow = 3, ncol = 8)
expect_error(transformGamPoi(Y, residual_type = "pearson"))
})
|
/tests/testthat/test-transformGamPoi.R
|
no_license
|
anita-termeg/transformGamPoi
|
R
| false | false | 2,134 |
r
|
test_that("residual_transform works", {
n_genes <- 100
n_cells <- 500
beta0 <- rnorm(n = n_genes, mean = 2, sd = 0.3)
sf <- rchisq(n = n_cells, df = 100)
sf <- sf / mean(sf)
Mu <- exp( beta0 %*% t(log(sf)) )
Y <- matrix(rnbinom(n = n_genes * n_cells, mu = Mu, size = 0.1), nrow = n_genes, ncol = n_cells)
summary(MatrixGenerics::colMeans2(Y))
summary(MatrixGenerics::rowMeans2(Y))
resids <- residual_transform(Y, verbose = FALSE)
res2 <- residual_transform(Y, offset_model = FALSE, verbose = FALSE, return_fit = TRUE)
expect_true(all(abs(res2$fit$Beta[,2] - 1) < 0.1))
})
test_that("different input types work", {
n_genes <- 100
n_cells <- 500
beta0 <- rnorm(n = n_genes, mean = 2, sd = 0.3)
sf <- rchisq(n = n_cells, df = 100)
sf <- sf / mean(sf)
Mu <- exp( beta0 %*% t(log(sf)) )
Y <- matrix(rnbinom(n = n_genes * n_cells, mu = Mu, size = 0.1), nrow = n_genes, ncol = n_cells)
# matrix
res <- residual_transform(Y, verbose = FALSE, return_fit = TRUE, residual_type = "pearson")
# glmGamPoi
res2 <- residual_transform(res$fit, residual_type = "pearson")
# SummarizedExperiment
res3 <- residual_transform(res$fit$data, residual_type = "pearson")
expect_equal(res$Residuals, res2)
expect_equal(res$Residuals, res3)
})
test_that("overdisperion = 'global' works", {
set.seed(1)
n_genes <- 100
n_cells <- 500
beta0 <- rnorm(n = n_genes, mean = 2, sd = 0.3)
sf <- rchisq(n = n_cells, df = 100)
sf <- sf / mean(sf)
Mu <- exp( beta0 %*% t(log(sf)) )
Y <- matrix(rnbinom(n = n_genes * n_cells, mu = Mu, size = 1/0.1), nrow = n_genes, ncol = n_cells)
tmp <- transformGamPoi(Y, "rand", overdispersion = "global", verbose = FALSE, on_disk = FALSE, return_fit = TRUE)
expect_equal(tmp$fit$overdispersions, rep(0.1, n_genes), tolerance = 0.1)
})
test_that("transformGamPoi errors if 'residual_type' is specified", {
set.seed(1)
Y <- matrix(rnbinom(n = 24, mu = 3, size = 1/0.1), nrow = 3, ncol = 8)
expect_error(transformGamPoi(Y, residual_type = "pearson"))
})
|
setGeneric(name = ".checkFSetAndPropensityModels",
def = function(txObj, moPropen, ...){
standardGeneric(f = ".checkFSetAndPropensityModels")
})
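# Editorial note: the methods below check that the feasible treatment sets
# defined through fSet are consistent with the propensity model specification.
# Subsets omitted from the propensity regression must offer exactly one
# feasible tx, and every patient in such a subset must have received that tx;
# subsets that are modeled must not contain received tx outside their feasible
# sets.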
setMethod(f = ".checkFSetAndPropensityModels",
signature = c(txObj = "TxInfoNoSubsets",
moPropen = "ANY"),
definition = function(txObj, moPropen, ..., data) {
return( NULL )
})
setMethod(f = ".checkFSetAndPropensityModels",
signature = c(txObj = "TxInfoWithSubsets",
moPropen = "ModelObjSubset"),
definition = function(txObj, moPropen, ..., data) {
# subsets identified in fSet and its tx options
subsets <- .getSubsets(object = txObj)
# subset to which each patient belongs
ptsSubset <- .getPtsSubset(object = txObj)
# tx received by patients
txReceived <- data[,.getTxName(object = txObj)]
if (is.factor(txReceived)) {
txReceived <- levels(txReceived)[txReceived]
}
# subsets to be included in fitting model(s)
subsetsModeled <- .extractModelNames(modelObj = moPropen)
# identify which patients fall into these subsets
tst <- ptsSubset %in% subsetsModeled
if (any(!tst)) {
# if any patients are not included, ensure that they have only
# one tx option and received only 1 tx
subsetsNotModeled <- sort(x = unique(x = ptsSubset[!tst]))
ssList <- NULL
for (i in subsetsNotModeled) {
if (length(x = subsets[[ i ]]) == 1L) {
# if 1 feasible tx option, identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (length(x = txr) != 1L) {
# if more than 1 tx received stop
stop("patients in subset ", i,
" received tx not in accordance with feasible tx sets;",
" subset must be modeled", call. = FALSE)
} else {
ssList <- c(ssList, i)
}
} else {
stop("subset ", i, " has > 1 tx; subset must be modeled")
}
}
if (!is.null(x = ssList)) {
message("subset(s) ", paste(ssList, collapse = ", "),
" excluded from propensity regression")
}
}
if (any(tst)) {
ssList <- NULL
# ensure that tx received agree with feasible tx sets
for (i in subsetsModeled) {
# identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (any(!{txr %in% subsets[[ i ]]})) ssList <- c(ssList, i)
}
if (!is.null(x = ssList)) {
message("NOTE: subset(s) ", paste(ssList, collapse = ", "),
" received tx not in accordance with specified feasible tx sets")
}
}
return( NULL )
})
setMethod(f = ".checkFSetAndPropensityModels",
signature = c(txObj = "TxInfoWithSubsets",
moPropen = "modelObj"),
definition = function(txObj, moPropen, ..., data) {
# subsets identified in fSet and its tx options
subsets <- .getSubsets(object = txObj)
# subset to which each patient belongs
ptsSubset <- .getPtsSubset(object = txObj)
# tx received by patients
txReceived <- data[,.getTxName(object = txObj)]
# ensure that if any subsets have only one tx option, pts received only 1 tx
for (i in names(x = subsets)) {
if (length(x = subsets[[ i ]]) == 1L) {
# if 1 feasible tx option, identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (length(x = txr) != 1L) {
# if more than 1 tx received stop
stop("patients in subset ", i,
" received tx not in accordance with feasible tx sets;",
" subset must be modeled", call. = FALSE)
} else {
message("subset ", i, " excluded from propensity regression")
}
}
}
return( NULL )
})
setMethod(f = ".checkFSetAndPropensityModels",
signature = c(txObj = "TxInfoWithSubsets",
moPropen = "ModelObj_SubsetList"),
definition = function(txObj, moPropen, ..., data) {
# subsets identified in fSet and its tx options
subsets <- .getSubsets(object = txObj)
# subset to which each patient belongs
ptsSubset <- .getPtsSubset(object = txObj)
# tx received by patients
txReceived <- data[,.getTxName(object = txObj)]
# subsets to be included in fitting model(s)
subsetsModeled <- .extractModelNames(modelObj = moPropen)
# identify which patients fall into these subsets
tst <- ptsSubset %in% subsetsModeled
if (any(!tst)) {
subsetsNotModeled <- sort(x = unique(x = ptsSubset[!tst]))
# if any patients are not included, ensure that they have only
# one tx option and received only 1 tx
ssList <- NULL
for (i in subsetsNotModeled) {
if (length(x = subsets[[ i ]]) == 1L) {
# if 1 feasible tx option, identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (length(x = txr) != 1L) {
# if more than 1 tx received stop
stop("patients in subset ", i,
" received tx not in accordance with feasible tx sets;",
" subset must be modeled", call. = FALSE)
} else {
ssList <- c(ssList, i)
}
} else {
stop("subset ", i, " has > 1 tx; subset must be modeled")
}
}
if (!is.null(x = ssList)) {
message("subset(s) ", paste(ssList, collapse = ", "),
" excluded from propensity regression")
}
}
if (any(tst)) {
# ensure that treatments received agree with feasible tx sets
ssList <- NULL
for (i in subsetsModeled) {
# identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (any(!{txr %in% subsets[[ i ]]})) {
ssList <- c(ssList, i)
}
}
if (!is.null(x = ssList)) {
message("NOTE: subset(s) ", paste(ssList, collapse = ", "),
" received tx not in accordance with specified feasible tx sets")
}
}
return( NULL )
})
|
/R/checkFSetAndPropensityModels.R
|
no_license
|
cran/DynTxRegime
|
R
| false | false | 7,527 |
r
|
setGeneric(name = ".checkFSetAndPropensityModels",
def = function(txObj, moPropen, ...){
standardGeneric(f = ".checkFSetAndPropensityModels")
})
setMethod(f = ".checkFSetAndPropensityModels",
signature = c(txObj = "TxInfoNoSubsets",
moPropen = "ANY"),
definition = function(txObj, moPropen, ..., data) {
return( NULL )
})
setMethod(f = ".checkFSetAndPropensityModels",
signature = c(txObj = "TxInfoWithSubsets",
moPropen = "ModelObjSubset"),
definition = function(txObj, moPropen, ..., data) {
# subsets identified in fSet and its tx options
subsets <- .getSubsets(object = txObj)
# subset to which each patient belongs
ptsSubset <- .getPtsSubset(object = txObj)
# tx received by patients
txReceived <- data[,.getTxName(object = txObj)]
if (is.factor(txReceived)) {
txReceived <- levels(txReceived)[txReceived]
}
# subsets to be included in fitting model(s)
subsetsModeled <- .extractModelNames(modelObj = moPropen)
# identify which patients fall into these subsets
tst <- ptsSubset %in% subsetsModeled
if (any(!tst)) {
# if any patients are not included, ensure that they have only
# one tx option and received only 1 tx
subsetsNotModeled <- sort(x = unique(x = ptsSubset[!tst]))
ssList <- NULL
for (i in subsetsNotModeled) {
if (length(x = subsets[[ i ]]) == 1L) {
# if 1 feasible tx option, identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (length(x = txr) != 1L) {
# if more than 1 tx received stop
stop("patients in subset ", i,
" received tx not in accordance with feasible tx sets;",
" subset must be modeled", call. = FALSE)
} else {
ssList <- c(ssList, i)
}
} else {
stop("subset ", i, " has > 1 tx; subset must be modeled")
}
}
if (!is.null(x = ssList)) {
message("subset(s) ", paste(ssList, collapse = ", "),
" excluded from propensity regression")
}
}
if (any(tst)) {
ssList <- NULL
# ensure that tx received agree with feasible tx sets
for (i in subsetsModeled) {
# identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (any(!{txr %in% subsets[[ i ]]})) ssList <- c(ssList, i)
}
if (!is.null(x = ssList)) {
message("NOTE: subset(s) ", paste(ssList, collapse = ", "),
" received tx not in accordance with specified feasible tx sets")
}
}
return( NULL )
})
setMethod(f = ".checkFSetAndPropensityModels",
signature = c(txObj = "TxInfoWithSubsets",
moPropen = "modelObj"),
definition = function(txObj, moPropen, ..., data) {
# subsets identified in fSet and its tx options
subsets <- .getSubsets(object = txObj)
# subset to which each patient belongs
ptsSubset <- .getPtsSubset(object = txObj)
# tx received by patients
txReceived <- data[,.getTxName(object = txObj)]
# ensure that if any subsets have only one tx option, pts received only 1 tx
for (i in names(x = subsets)) {
if (length(x = subsets[[ i ]]) == 1L) {
# if 1 feasible tx option, identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (length(x = txr) != 1L) {
# if more than 1 tx received stop
stop("patients in subset ", i,
" received tx not in accordance with feasible tx sets;",
" subset must be modeled", call. = FALSE)
} else {
message("subset ", i, " excluded from propensity regression")
}
}
}
return( NULL )
})
setMethod(f = ".checkFSetAndPropensityModels",
signature = c(txObj = "TxInfoWithSubsets",
moPropen = "ModelObj_SubsetList"),
definition = function(txObj, moPropen, ..., data) {
# subsets identified in fSet and its tx options
subsets <- .getSubsets(object = txObj)
# subset to which each patient belongs
ptsSubset <- .getPtsSubset(object = txObj)
# tx received by patients
txReceived <- data[,.getTxName(object = txObj)]
# subsets to be included in fitting model(s)
subsetsModeled <- .extractModelNames(modelObj = moPropen)
# identify which patients fall into these subsets
tst <- ptsSubset %in% subsetsModeled
if (any(!tst)) {
subsetsNotModeled <- sort(x = unique(x = ptsSubset[!tst]))
# if any patients are not included, ensure that they have only
# one tx option and received only 1 tx
ssList <- NULL
for (i in subsetsNotModeled) {
if (length(x = subsets[[ i ]]) == 1L) {
# if 1 feasible tx option, identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (length(x = txr) != 1L) {
# if more than 1 tx received stop
stop("patients in subset ", i,
" received tx not in accordance with feasible tx sets;",
" subset must be modeled", call. = FALSE)
} else {
ssList <- c(ssList, i)
}
} else {
stop("subset ", i, " has > 1 tx; subset must be modeled")
}
}
if (!is.null(x = ssList)) {
message("subset(s) ", paste(ssList, collapse = ", "),
" excluded from propensity regression")
}
}
if (any(tst)) {
# ensure that treatments received agree with feasible tx sets
ssList <- NULL
for (i in subsetsModeled) {
# identify tx received
txr <- unique(x = txReceived[ptsSubset == i])
if (any(!{txr %in% subsets[[ i ]]})) {
ssList <- c(ssList, i)
}
}
if (!is.null(x = ssList)) {
message("NOTE: subset(s) ", paste(ssList, collapse = ", "),
" received tx not in accordance with specified feasible tx sets")
}
}
return( NULL )
})
|
#' Download geologic timescale
#'
#' Downloads a geologic timescale from the Macrostrat.org database.
#'
#' @param Timescale character string; a recognized timescale in the Macrostrat.org database
#'
#' @details Downloads a recognized timescale from the Macrostrat.org database. For each interval in the timescale this includes the name, minimum age, maximum age, midpoint age, and, where applicable, the official International Commission on Stratigraphy color hexcode. Go to https://macrostrat.org/api/defs/timescales?all for a list of recognized timescales.
#'
#' @return A data frame
#'
#' @author Andrew A. Zaffos
#'
#' @examples
#'
#' # Download the ICS recognized periods timescale
#' Timescale<-downloadTime(Timescale="international periods")
#'
#' @rdname downloadTime
#' @export
# Download timescales from Macrostrat
downloadTime<-function(Timescale="interational epochs") {
Timescale<-gsub(" ","%20",Timescale)
URL<-paste0("https://macrostrat.org/api/v2/defs/intervals?format=csv×cale=",Timescale)
Intervals<-utils::read.csv(URL,header=TRUE)
Midpoint<-apply(Intervals[,c("t_age","b_age")],1,stats::median)
Intervals<-cbind(Intervals,Midpoint)
rownames(Intervals)<-Intervals[,"name"]
return(Intervals)
}
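# Editorial example (commented; assumes the Macrostrat service recognizes the
# "international epochs" timescale named in the default above):
# Epochs <- downloadTime(Timescale = "international epochs")
# head(Epochs[, c("name", "b_age", "t_age", "Midpoint")])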
|
/velociraptr/R/downloadTime.R
|
no_license
|
paleobiodb/analysis_tools
|
R
| false | false | 1,217 |
r
|
#' Download geologic timescale
#'
#' Downloads a geologic timescale from the Macrostrat.org database.
#'
#' @param Timescale character string; a recognized timescale in the Macrostrat.org database
#'
#' @details Downloads a recognized timescale from the Macrostrat.org database. For each interval in the timescale this includes the name, minimum age, maximum age, midpoint age, and, where applicable, the official International Commission on Stratigraphy color hexcode. Go to https://macrostrat.org/api/defs/timescales?all for a list of recognized timescales.
#'
#' @return A data frame
#'
#' @author Andrew A. Zaffos
#'
#' @examples
#'
#' # Download the ICS recognized periods timescale
#' Timescale<-downloadTime(Timescale="international periods")
#'
#' @rdname downloadTime
#' @export
# Download timescales from Macrostrat
downloadTime<-function(Timescale="interational epochs") {
Timescale<-gsub(" ","%20",Timescale)
URL<-paste0("https://macrostrat.org/api/v2/defs/intervals?format=csv×cale=",Timescale)
Intervals<-utils::read.csv(URL,header=TRUE)
Midpoint<-apply(Intervals[,c("t_age","b_age")],1,stats::median)
Intervals<-cbind(Intervals,Midpoint)
rownames(Intervals)<-Intervals[,"name"]
return(Intervals)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/results_all.R
\name{results_all}
\alias{results_all}
\title{Extract and annotate all results from a DESeq analysis}
\usage{
results_all(object, biomart, vs = "all", vs2 = TRUE, alpha = 0.05,
add_columns, other_columns, simplify = TRUE, ...)
}
\arguments{
\item{object}{a DESeqDataSet}
\item{biomart}{annotations from \code{read_biomart} with column 1 matching row names in results}
\item{vs}{either compare all vs. all (default) or a specific treatment vs. all, or see note.}
\item{vs2}{position of specific treatment in contrast vector, set FALSE for specific treatment vs all}
\item{alpha}{the significance cutoff for the adjusted p-value (FDR)}
\item{add_columns}{a vector of biomart columns to add to result table, default
gene_name, biotype, chromosome, start and description}
\item{other_columns}{a vector of additional columns in biomart table}
\item{simplify}{return a tibble if only 1 contrast present}
\item{\dots}{additional options passed to \code{results}}
}
\value{
A list of tibbles for each contrast
}
\description{
Extract all possible contrasts and annotate result tables from a DESeq object.
Currently supports simple designs with a single variable.
}
\note{
If you combine factors of interest into a single group following section 3.3 in the DESeq2 vignette,
you can set vs = "combined" to limit the comparisons.
For example, if you combine 3 cell types and 2 treatment, then the default
returns 18 contrasts while "combined" returns 3 contrasts comparing treatment within cell types (first group).
Factors should be separated by space, dash or underscore in the combined treatment group for parsing.
}
\examples{
\dontrun{
data(pasilla)
data(fly)
res <- results_all(pasilla$dds, fly)
res
# Set factor levels in the DESeq object to change contrast order
# since results_all uses combn on levels
dds$trt <- factor(dds$trt, levels=c("heart", "lung", "control"))
apply( combn(c("heart", "lung", "control"), 2), 2, paste, collapse= " vs. ")
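# A hedged sketch of the "combined" option described in the note above
# (the `cell` column and combined `group` factor are hypothetical):
dds$group <- factor(paste(dds$cell, dds$trt, sep = "_"))
design(dds) <- ~ group
res_combined <- results_all(DESeq2::DESeq(dds), fly, vs = "combined")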
}
}
\author{
Chris Stubben
}
|
/man/results_all.Rd
|
no_license
|
topherconley/hciR
|
R
| false | true | 2,085 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/results_all.R
\name{results_all}
\alias{results_all}
\title{Extract and annotate all results from a DESeq analysis}
\usage{
results_all(object, biomart, vs = "all", vs2 = TRUE, alpha = 0.05,
add_columns, other_columns, simplify = TRUE, ...)
}
\arguments{
\item{object}{a DESeqDataSet}
\item{biomart}{annotations from \code{read_biomart} with column 1 matching row names in results}
\item{vs}{either compare all vs. all (default) or a specific treatment vs. all, or see note.}
\item{vs2}{position of specific treatment in contrast vector, set FALSE for specific treatment vs all}
\item{alpha}{the significance cutoff for the adjusted p-value (FDR)}
\item{add_columns}{a vector of biomart columns to add to result table, default
gene_name, biotype, chromosome, start and description}
\item{other_columns}{a vector of additional columns in biomart table}
\item{simplify}{return a tibble if only 1 contrast present}
\item{\dots}{additional options passed to \code{results}}
}
\value{
A list of tibbles for each contrast
}
\description{
Extract all possible contrasts and annotate result tables from a DESeq object.
Currently supports simple designs with a single variable.
}
\note{
If you combine factors of interest into a single group following section 3.3 in the DESeq2 vignette,
you can set vs = "combined" to limit the comparisons.
For example, if you combine 3 cell types and 2 treatment, then the default
returns 18 contrasts while "combined" returns 3 contrasts comparing treatment within cell types (first group).
Factors should be separated by space, dash or underscore in the combined treatment group for parsing.
}
\examples{
\dontrun{
data(pasilla)
data(fly)
res <- results_all(pasilla$dds, fly)
res
# Set factor levels in the DESeq object to change contrast order
# since results_all uses combn on levels
dds$trt <- factor(dds$trt, levels=c("heart", "lung", "control"))
apply( combn(c("heart", "lung", "control"), 2), 2, paste, collapse= " vs. ")
}
}
\author{
Chris Stubben
}
|
\name{maxdist}
\alias{maxdist}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Auxiliary function used when executing Bay's algorithm for outlier detection}
\description{
This function is used by the function baysout in this package, to find the largest value of a
distance vector. Returns the value and the index number of the largest distance.
}
\usage{
maxdist(dneighbors)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dneighbors}{A vector of distances to neighbors; the largest value and its index number are returned}
}
\author{Caroline Rodriguez}
\seealso{\code{\link{baysout}}}
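\examples{
## Editorial sketch of the behaviour described above, using base R only;
## this illustrates the idea and is not the package's internal implementation.
d <- c(0.2, 1.7, 0.9)
c(value = max(d), index = which.max(d))
}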
\keyword{ math }
|
/man/maxdist.Rd
|
no_license
|
neslon/dprep
|
R
| false | false | 651 |
rd
|
\name{maxdist}
\alias{maxdist}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Auxiliary function used when executing Bay's algorithm for outlier detection}
\description{
This function is used by the function baysout in this package, to find the largest value of a
distance vector. Returns the value and the index number of the largest distance.
}
\usage{
maxdist(dneighbors)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dneighbors}{A vector of distances to neighbors; the largest value and its index number are returned}
}
\author{Caroline Rodriguez}
\seealso{\code{\link{baysout}}}
\keyword{ math }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/type-fam_positive_pnpp.R
\name{define_params.fam_positive_pnpp}
\alias{define_params.fam_positive_pnpp}
\title{Define plate type parameters for FAM-positive PNPP}
\usage{
\method{define_params}{fam_positive_pnpp}(plate)
}
\arguments{
\item{plate}{A ddPCR plate}
}
\description{
Define plate type parameters for FAM-positive PNPP
}
\keyword{internal}
|
/man/define_params.fam_positive_pnpp.Rd
|
no_license
|
cran/ddpcr
|
R
| false | true | 443 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/type-fam_positive_pnpp.R
\name{define_params.fam_positive_pnpp}
\alias{define_params.fam_positive_pnpp}
\title{Define plate type parameters for FAM-positive PNPP}
\usage{
\method{define_params}{fam_positive_pnpp}(plate)
}
\arguments{
\item{plate}{A ddPCR plate}
}
\description{
Define plate type parameters for FAM-positive PNPP
}
\keyword{internal}
|
library(dplyr)
library(MCMCvis)
source("../../RUtilityFunctions/bayesianTools.R")
source("../loadTHK99.R")
toFile = T
# Load Data and Scenarios -------------------------------------------------
loadTHK99data(local=T, regions="ALL")
params = c("bWH","bRSLR","bTR")
restoreParams = c("bBW", "bVP", "bHA", "bMC")
predPost_orig = getCI("58-Any-10y-Y-od")
rcp3_2100_orig = getCI("58-Any-10y-Y-RCP3_2100-od")
rcp3_2300_orig = getCI("58-Any-10y-Y-RCP3_2300-od")
rcp85_2100_orig = getCI("58-Any-10y-Y-RCP85_2100-od")
rcp85_2300_orig = getCI("58-Any-10y-Y-RCP85_2300-od")
predPost_orig_pred = MCMCsummary(predPost_orig$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
rcp3_2100_orig_pred = MCMCsummary(rcp3_2100_orig$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
rcp3_2300_orig_pred = MCMCsummary(rcp3_2300_orig$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
rcp85_2100_orig_pred = MCMCsummary(rcp85_2100_orig$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
rcp85_2300_orig_pred = MCMCsummary(rcp85_2300_orig$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
# Figure 1 - DAG ----------------------------------------------------------
# In PPTX file
# Figure 2 - Restoration Posteriors ---------------------------------------
if (toFile){ png("Figures/Figure2.png", width = 4, height = 4, units = "in", res = 300) }
labs = c(paste0(rep(params, each=2), c("[W]", "[E]")), restoreParams, "bYEAR")
MCMCplot(predPost_orig$samples, params = c(params, restoreParams, "bYEAR"), ref_ovl = T, labels=labs)
if (toFile){ dev.off() }
# Figure X - Model Prediction Comparison ----------------------------------
if (toFile){ png("Figures/FigureX.png", width = 4, height = 4, units = "in", res = 300) }
plot(predPost_orig_pred$`50%`~predPost_orig$data$logWET,
xlim=range(predPost_orig$data$logWET)+c(-1,1), ylim=range(predPost_orig$data$logWET)+c(-1,1),
xlab="Observed WL Loss (log hectare)", ylab="Predicted WL Loss (log hectare)")
ciRange = data.frame(x=numeric(0), y2.5=numeric(0), y97.5=numeric(0))
j=1
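# Editorial note: this loop records, for each unique observed loss value, the
# largest lower (2.5%) and upper (97.5%) posterior quantiles among predictions
# sharing that observed value; these bounds are drawn below as the shaded band.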
for (i in 1:length(predPost_orig$data$logWET))
{
x = predPost_orig$data$logWET[i]
y2.5 = predPost_orig_pred$`2.5%`[i]
y97.5 = predPost_orig_pred$`97.5%`[i]
row = ciRange[ciRange$x == x,]
if (nrow(row) > 0) {
if (y2.5 > row$y2.5) {
ciRange[ciRange$x == x,]$y2.5 = y2.5
}
if (y97.5 > row$y97.5) {
ciRange[ciRange$x == x,]$y97.5 = y97.5
}
} else {
ciRange[j,] = c(x, y2.5, y97.5)
j = j + 1
}
}
xs = unique(predPost_orig$data$logWET)[order(ciRange$x)]
pred1=ciRange$y2.5[order(ciRange$x)]
pred2=ciRange$y97.5[order(ciRange$x)]
polygon(c(rev(xs), xs), c(rev(pred1), pred2), col = 'grey80', border = NA)
lines(ciRange$y2.5[order(ciRange$x)]~unique(predPost_orig$data$logWET)[order(ciRange$x)], lty = 2)
lines(ciRange$y97.5[order(ciRange$x)]~unique(predPost_orig$data$logWET)[order(ciRange$x)], lty = 2)
abline(0,1)
#arrows(predPost_orig$data$logWET, predPost_orig_pred$`2.5%`, predPost_orig$data$logWET, predPost_orig_pred$`97.5%`, length=0.05, angle=90, code=3)
points(predPost_orig_pred$`50%`~predPost_orig$data$logWET, pch=21, col="black", bg="white")
if (toFile){ dev.off() }
# #Residuals
# plot((predPost_orig$data$logWET-predPost_orig_pred$`50%`)~predPost_orig$data$logWET,
# xlim=range(predPost_orig$data$logWET)+c(-1,1), ylim=range(predPost_orig$data$logWET)+c(-1,1),
# xlab="Wetland Loss (log hectare)", ylab="Residuals")
# abline(0,0)
# Figure 3 - RSLR Restoration Correlation ---------------------------------
# plot(rcp85_2300_orig_pred$mean~rcp85_2300_orig$data$RSLR, xlab="RSLR", ylab="Predicted Loss")
# points(rcp85_2300_orig_pred$mean[rcp85_2300_orig$data$RESTORE == 1]~rcp85_2300_orig$data$RSLR[rcp85_2300_orig$data$RESTORE == 1], xlab="RSLR", ylab="Predicted Loss", pch=19, col="blue")
# points(rcp85_2300_orig_pred$mean[rcp85_2300_orig$data$HA == 1]~rcp85_2300_orig$data$RSLR[rcp85_2300_orig$data$HA == 1], xlab="RSLR", ylab="Predicted Loss", pch=19, col="green")
# points(rcp85_2300_orig_pred$mean[rcp85_2300_orig$data$RESTORE == 0]~rcp85_2300_orig$data$RSLR[rcp85_2300_orig$data$RESTORE == 0], xlab="RSLR", ylab="Predicted Loss", pch=19, col="red")
if (toFile){ png("Figures/Figure3.png", width = 4, height = 4, units = "in", res = 300) }
library(sm)
sm.density.compare(predPost_orig$data$logWET, predPost_orig$data$RESTORE, col=c("black","black"), lty=c(1,2), xlab = "Wetland Loss (log hectares)")
legend(2.5,0.27,c("Restoration", "No Restoration"),lty=c(1,2), cex=if(toFile) 0.6 else 1)
if (toFile){ dev.off() }
# Figure 4 - RCP3 vs RCP8.5 -----------------------------------------------
if (toFile){ png("Figures/Figure4.png", width = 8, height = 4, units = "in", res = 300) }
op=par(mfrow=c(1,2))
plot(density(predPost_orig_pred$mean), ylim=c(0,1), xlim=c(min(predPost_orig_pred$mean), 5),
main = "RCP3",lty=0, xlab = "Wetland Loss (log hectares)")
lines(density(predPost_orig_pred$mean), col="black", lty=3)
lines(density(rcp3_2100_orig_pred$mean), col="blue", lty=1)
lines(density(rcp3_2300_orig_pred$mean), col="red", lty=1)
legend(2.2, 0.8, c("Base", "2100", "2300"), col=c("black","blue","red"), lty=c(3,1,1), cex=0.75)
plot(density(predPost_orig_pred$mean), ylim=c(0,1), xlim=c(min(predPost_orig_pred$mean), 5),
main = "RCP8.5",lty=0, xlab = "Wetland Loss (log hectares)")
lines(density(predPost_orig_pred$mean), col="black", lty=3)
lines(density(rcp85_2100_orig_pred$mean), col="blue", lty=1)
lines(density(rcp85_2300_orig_pred$mean), col="red", lty=1)
legend(2.2, 0.8, c("Base", "2100", "2300"), col=c("black","blue","red"), lty=c(3,1,1), cex=0.75)
par(op)
if (toFile){ dev.off() }
# Figure 5 - (Restore v No Restore) v RSLR --------------------------------
predPost_NR = getCI("58-Any-10y-Y-NR")
rcp3_2100_NR = getCI("58-Any-10y-Y-RCP3_2100-NR")
rcp3_2300_NR = getCI("58-Any-10y-Y-RCP3_2300-NR")
rcp85_2100_NR = getCI("58-Any-10y-Y-RCP85_2100-NR")
rcp85_2300_NR = getCI("58-Any-10y-Y-RCP85_2300-NR")
predPost_NR_pred = MCMCsummary(predPost_NR$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
rcp3_2100_NR_pred = MCMCsummary(rcp3_2100_NR$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
rcp3_2300_NR_pred = MCMCsummary(rcp3_2300_NR$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
rcp85_2100_NR_pred = MCMCsummary(rcp85_2100_NR$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
rcp85_2300_NR_pred = MCMCsummary(rcp85_2300_NR$samples) %>% as.data.frame() %>% cbind(param=row.names(.), .) %>% mutate(param=as.character(param)) %>% dplyr::filter(grepl("logWET.p", param))
if (toFile){ png("Figures/Figure5.png", width = 8, height = 4, units = "in", res = 300) }
op=par(mfrow=c(1,2))
plot(density(predPost_orig_pred$`50%`), lwd=1,
xlim=c(-1,7), ylim=c(0,1.05), xlab="Wetland Loss (log hectares)", main="")
lines(density(predPost_NR_pred$`50%`), lwd=2)
lines(density(rcp3_2300_orig_pred$`50%`), lwd=1, col="blue")
lines(density(rcp3_2300_NR_pred$`50%`), lwd=2, col="blue")
legend(3.8,0.9,c("Current (Base)", "RCP3 2300", "Restoration", "No Restoration"),
col=c("black","blue","black","black"), lwd=c(1,1,1,2), cex=if(toFile) 0.6 else 1)
title("a)", adj=0, line=0.25, cex.main=0.85)
plot(density(predPost_orig_pred$`50%`), lwd=1,
xlim=c(-1,7), ylim=c(0,1.05), xlab="Wetland Loss (log hectares)", main="")
lines(density(predPost_NR_pred$`50%`), lwd=2)
lines(density(rcp85_2300_orig_pred$`50%`), lwd=1, col="red")
lines(density(rcp85_2300_NR_pred$`50%`), lwd=2, col="red")
legend(3.8,0.9,c("Current (Base)", "RCP8.5 2300", "Restoration", "No Restoration"),
col=c("black","red","black","black"), lwd=c(1,1,1,2), cex=if(toFile) 0.6 else 1)
title("b)", adj=0, line=0.25, cex.main=0.85)
par(op)
if (toFile){ dev.off() }
# Table 1 -----------------------------------------------------------------
compareNullRestores = function(base,null){
c(baseSum=sum(exp(base$`50%`)), nullSum=sum(exp(null$`50%`)),
diff=sum(exp(null$`50%`))-sum(exp(base$`50%`)), ratio=sum(exp(null$`50%`)) / sum(exp(base$`50%`)))
}
# Increase in wetland loss when removing restoration from current
compPredPost = compareNullRestores(predPost_orig_pred, predPost_NR_pred)
# Increase in wetland loss when removing restoration under RCP3 2100+2300
compRCP3_2100 = compareNullRestores(rcp3_2100_orig_pred, rcp3_2100_NR_pred)
compRCP3_2300 = compareNullRestores(rcp3_2300_orig_pred, rcp3_2300_NR_pred)
# Increase in wetland loss when removing restoration under RCP8.5 2100+2300
compRPC85_2100 = compareNullRestores(rcp85_2100_orig_pred, rcp85_2100_NR_pred)
compRPC85_2300 = compareNullRestores(rcp85_2300_orig_pred, rcp85_2300_NR_pred)
nullRestoTable = rbind(compPredPost, compRCP3_2100, compRCP3_2300, compRPC85_2100, compRPC85_2300)
write.table(nullRestoTable, "Figures/Table1.txt", sep="\t", quote=F)
# Density Difference from Baseline ----------------------------------------
plot(density(rcp85_2300_orig_pred$mean-predPost_orig_pred$mean), xlim=c(0,1.2))
lines(density(rcp85_2100_orig_pred$mean-predPost_orig_pred$mean), col="red")
lines(density(rcp3_2100_orig_pred$mean-predPost_orig_pred$mean), col="green")
lines(density(rcp3_2300_orig_pred$mean-predPost_orig_pred$mean), col="blue")
c(1:nrow(rcp85_2300_orig_pred))[rcp85_2300_orig_pred$mean-predPost_orig_pred$mean > 1]
# RSLR and HA Correlation -------------------------------------------------
plot(logWET~RSLR, data=thk99buff)
points(logWET~RSLR, data=thk99buff[rcp85_2300_orig_pred$mean-predPost_orig_pred$mean > 1,], col="red", pch=19)
plot(logWET~RSLR, data=thk99buff)
points(logWET~RSLR, data=thk99buff[rcp85_2300_orig$data$RESTORE == 1,], pch=19, col="blue")
points(logWET~RSLR, data=thk99buff[rcp85_2300_orig$data$HA == 1,], pch=19, col="green")
plot(rcp85_2300_orig_pred$mean~rcp85_2300_orig$data$RSLR, xlab="RSLR", ylab="Predicted Loss")
points(rcp85_2300_orig_pred$mean[rcp85_2300_orig$data$RESTORE == 1]~rcp85_2300_orig$data$RSLR[rcp85_2300_orig$data$RESTORE == 1], xlab="RSLR", ylab="Predicted Loss", pch=19, col="blue")
points(rcp85_2300_orig_pred$mean[rcp85_2300_orig$data$HA == 1]~rcp85_2300_orig$data$RSLR[rcp85_2300_orig$data$HA == 1], xlab="RSLR", ylab="Predicted Loss", pch=19, col="green")
points(rcp85_2300_orig_pred$mean[rcp85_2300_orig$data$RESTORE == 0]~rcp85_2300_orig$data$RSLR[rcp85_2300_orig$data$RESTORE == 0], xlab="RSLR", ylab="Predicted Loss", pch=19, col="red")
plot(rcp85_2300_orig_pred$mean~rcp85_2300_orig$data$RSLR, xlab="RSLR", ylab="Predicted Loss")
points(rcp85_2300_orig_pred$mean[rcp85_2300_orig$data$region == 1]~rcp85_2300_orig$data$RSLR[rcp85_2300_orig$data$region == 1], xlab="RSLR", ylab="Predicted Loss", pch=19, col="blue")
points(rcp85_2300_orig_pred$mean[rcp85_2300_orig$data$region == 2]~rcp85_2300_orig$data$RSLR[rcp85_2300_orig$data$region == 2], xlab="RSLR", ylab="Predicted Loss", pch=19, col="red")
plot(thk99buff[thk99buff$ORIG_FID %in% rcp85_2100_orig$data$ORIG_FID,])
plot(thk99buff[thk99buff$ORIG_FID %in% rcp85_2100_orig$data$ORIG_FID[rcp85_2100_orig$data$region == 1],], add=T, col="green", border="green")
plot(thk99buff[thk99buff$ORIG_FID %in% data.frame(rcp85_2100_orig$data)[rcp85_2300_orig_pred$mean-predPost_orig_pred$mean > 1,]$ORIG_FID,], add=T, col="red", border="RED")
# readOGR() comes from rgdal and crop()/extent() from raster; they may already
# be attached via loadTHK99.R, but are loaded here so this section runs on its own.
library(rgdal)
library(raster)
coastlines = readOGR("C:/DATA/General Maps/Coastlines/GSHHS_2.3.5_04-2016/USCoast_h_L1_Line.shp", "USCoast_h_L1_Line")
coastlines = spTransform(coastlines, proj4string(thk99buff))
stateMap = readOGR("C:/DATA/General Maps/Gulf States/US_GulfStates.shp", "US_GulfStates")
HUC4 = readOGR("C:/DATA/HUC/HUC_shapes/WBDHU4.shp", "WBDHU4")
HUC4 = spTransform(HUC4, proj4string(thk99buff))
HUC4 = HUC4[HUC4$HUC4 %in% crop(HUC4, extent(thk99buff[thk99buff$region == 9 | thk99buff$region == 10,]))$HUC4,]
plot(thk99buff[thk99buff$region == 9 | thk99buff$region == 10,])
plot(stateMap,add=T,lty=2)
plot(HUC4,add=T)
plot(thk99buff[thk99buff$region == 9 | thk99buff$region == 10,],add=T)
plot(thk99buff[thk99buff$ORIG_FID %in% data.frame(rcp85_2100_orig$data)[rcp85_2300_orig_pred$mean-predPost_orig_pred$mean > 1,]$ORIG_FID,], add=T, col="GREEN", border="RED")
# Deprecated Plots --------------------------------------------------------
# W vs E LA RCP8.5
# if (toFile){ png("Figures/Figure3.png", width = 8, height = 4, units = "in", res = 300) }
# op=par(mfrow=c(1,2))
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2100",lty=0)
# lines(density(rcp85_2100_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$region == 1,]$mean), col="blue")
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$region == 2,]$mean), col="red")
# legend(2, 0.8, c("Combined", "W LA", "E LA"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
#
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2300",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$region == 1,]$mean), col="blue")
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$region == 2,]$mean), col="red")
# legend(2, 0.8, c("Combined", "W LA", "E LA"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
# par(op)
# if (toFile){ dev.off() }
#
#
# Restore vs Non-Restore RCP8.5
# if (toFile){ png("Figures/Figure4.png", width = 8, height = 4, units = "in", res = 300) }
# op=par(mfrow=c(1,2))
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2100",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$RESTORE == 1,]$mean), col="blue")
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$RESTORE == 0,]$mean), col="red")
# #lines(density(rcp85_2100_pred$mean), col="green")
# legend(2, 0.8, c("Combined", "RESTORE", "NONE"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
#
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2300",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$RESTORE == 1,]$mean), col="blue")
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$RESTORE == 0,]$mean), col="red")
# #lines(density(rcp85_2300_pred$mean), col="green")
# legend(2, 0.8, c("Combined", "RESTORE", "NONE"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
# par(op)
# if (toFile){ dev.off() }
# Compare Restoration Types RCP 8.5
# if (toFile){ png("Figures/Figure5.png", width = 8, height = 8, units = "in", res = 300) }
# op=par(mfrow=c(2,2))
# # op=par(mfrow=c(1,2))
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2100",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$HA == 1,]$mean), col="blue")
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$HA == 0,]$mean), col="red")
# #lines(density(rcp85_2100_pred$mean), col="green")
# legend(2, 0.8, c("Combined", "HA", "NONE"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
#
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2300",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$HA == 1,]$mean), col="blue")
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$HA == 0,]$mean), col="red")
# #lines(density(rcp85_2300_pred$mean), col="green")
# legend(2, 0.8, c("Combined", "HA", "NONE"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
# # par(op)
# #
# #
# # op=par(mfrow=c(1,2))
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2100",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$BW == 1,]$mean), col="blue")
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$BW == 0,]$mean), col="red")
# #lines(density(rcp85_2100_pred$mean), col="green")
# legend(2, 0.8, c("Combined", "BW", "NONE"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
#
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2300",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$BW == 1,]$mean), col="blue")
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$BW == 0,]$mean), col="red")
# #lines(density(rcp85_2300_pred$mean), col="green")
# legend(2, 0.8, c("Combined", "BW", "NONE"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
# par(op)
# if (toFile){ dev.off() }
#
#
#
#
#
#
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2100",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$BW == 1,]$mean), col="blue")
# lines(density(rcp85_2100_orig_pred[rcp85_2100_orig$data$BW == 0,]$mean), col="red")
# #lines(density(rcp85_2100_pred$mean), col="green")
# legend(2, 0.8, c("Combined", "BW", "NONE"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
#
# plot(density(thk99buff_n$logWET), ylim=c(0,0.9), main = "RCP8.5 2300",lty=0)
# lines(density(predPost_orig_pred$mean), col="black",lty=3)
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$BW == 1,]$mean), col="blue")
# lines(density(rcp85_2300_orig_pred[rcp85_2300_orig$data$BW == 0,]$mean), col="red")
# #lines(density(rcp85_2300_pred$mean), col="green")
# legend(2, 0.8, c("Combined", "BW", "NONE"), col=c("black","blue","red"), lty=c(3,1,1,1), cex=0.75)
# par(op)
library(ggplot2)
# Regional Stacked Density ------------------------------------------------
post_regional = predPost_orig_pred %>%
mutate(region = sapply(predPost_orig$data$region, FUN=function(x) { if (x == 1) return("West LA") else return("East LA") }))
names(post_regional)[4:6] = c("q2.5", "median", "q97.5")
r3_2100_regional = rcp3_2100_orig_pred %>%
mutate(region = sapply(predPost_orig$data$region, FUN=function(x) { if (x == 1) return("West LA") else return("East LA") }))
names(r3_2100_regional)[4:6] = c("q2.5", "median", "q97.5")
r3_2300_regional = rcp3_2300_orig_pred %>%
mutate(region = sapply(predPost_orig$data$region, FUN=function(x) { if (x == 1) return("West LA") else return("East LA") }))
names(r3_2300_regional)[4:6] = c("q2.5", "median", "q97.5")
r85_2100_regional = rcp85_2100_orig_pred %>%
mutate(region = sapply(predPost_orig$data$region, FUN=function(x) { if (x == 1) return("West LA") else return("East LA") }))
names(r85_2100_regional)[4:6] = c("q2.5", "median", "q97.5")
r85_2300_regional = rcp85_2300_orig_pred %>%
mutate(region = sapply(predPost_orig$data$region, FUN=function(x) { if (x == 1) return("West LA") else return("East LA") }))
names(r85_2300_regional)[4:6] = c("q2.5", "median", "q97.5")
# Base
p1 = ggplot(data=post_regional, aes(x=median, group=region, fill=region)) +
geom_density(adjust=1.5, position="fill") +
theme(legend.position = "none", panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_2300_regional$median)), expand = c(0,0)) +
scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual(values = c("darkgray","lightgray")) +
xlab("Wetland Loss") +
ylab("% Total Loss in W LA") +
geom_vline(xintercept=max(post_regional$median), linetype = "dashed")
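# Editorial note: multiplot() is not defined in this script or in ggplot2; it is
# assumed to come from a sourced helper (e.g. Rmisc::multiplot or the Cookbook
# for R multiplot function).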
multiplot(p1+ggtitle("a) RCP3 2100"),
p1+ggtitle("c) RCP8.5 2100"),
p1+ggtitle("b) RCP3 2300"),
p1+ggtitle("d) RCP8.5 2300"), cols=2)
# RCP3
p2 = ggplot(data=r3_2100_regional, aes(x=median, group=region, fill=region)) +
geom_density(adjust=1.5, position="fill", size=1.25) +
theme(legend.position = "none", panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_2300_regional$median)), expand = c(0,0)) +
scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual( values = c("white","white")) +
xlab("Wetland Loss") +
ylab("% Total Loss in W LA")
p3 = ggplot(data=r3_2300_regional, aes(x=median, group=region, fill=region)) +
geom_density(adjust=1.5, position="fill", size=1.25) +
theme(legend.position = "none", panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_2300_regional$median)), expand = c(0,0)) +
scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual( values = c("white","white")) +
xlab("Wetland Loss") +
ylab("% Total Loss in W LA")
# RCP 8.5
p4 = ggplot(data=r85_2100_regional, aes(x=median, group=region, fill=region)) +
geom_density(adjust=1.5, position="fill", size=1.5) +
theme(legend.position = "none", panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_2300_regional$median)), expand = c(0,0)) +
scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual( values = c("white","white")) +
xlab("Wetland Loss") +
ylab("% Total Loss in W LA")
p5 = ggplot(data=r85_2300_regional, aes(x=median, group=region, fill=region)) +
geom_density(adjust=1.5, position="fill", size=1.5) +
theme(legend.position = "none", panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_2300_regional$median)), expand = c(0,0)) +
scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual( values = c("white","white")) +
xlab("Wetland Loss") +
ylab("% Total Loss in W LA")
multiplot(p2+ggtitle("a) RCP3 2100"),
p4+ggtitle("c) RCP8.5 2100"),
p3+ggtitle("b) RCP3 2300"),
p5+ggtitle("d) RCP8.5 2300"),cols=2)
# Regional Stacked Density 2 ----------------------------------------------
ggplot(data=post_regional, aes(x=median, group=region, fill=region)) +
geom_density(adjust=1.5, position="stack") +
theme(legend.position = "none", panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_2300_regional$median)), expand = c(0,0)) +
# scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual(values = c("darkgray","lightgray")) #+
# xlab("Wetland Loss") +
# ylab("% Total Loss in W LA")
ggplot(data=r85_2100_regional, aes(x=median, group=region, fill=region)) +
geom_density(adjust=1.5, position="stack") +
theme(legend.position = "none", panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_2300_regional$median)), expand = c(0,0)) +
# scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual(values = c("darkgray","lightgray")) #+
# xlab("Wetland Loss") +
# ylab("% Total Loss in W LA")
ggplot(data=r85_2300_regional, aes(x=median, group=region, fill=region)) +
geom_density(adjust=1.5, position="stack") +
  theme(legend.position = "none", panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_2300_regional$median)), expand = c(0,0)) +
# scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual(values = c("darkgray","lightgray")) #+
# xlab("Wetland Loss") +
# ylab("% Total Loss in W LA")
# Restorational Stacked Density -------------------------------------------
post_restorational = predPost_orig_pred
post_restorational$restore = rep(NA, nrow(post_restorational))
post_restorational$hasRestore = rep(NA, nrow(post_restorational))
df = data.frame(rcp85_2300_orig$data)
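# The loop below collapses the restoration indicator columns (BW, VP, HA, MC; restoreParams
# with the leading "b" dropped) into a single label per record: "None" when no project type
# is flagged, otherwise the flagged type, with "HA" (hydrological alteration) taking
# precedence when more than one type applies; hasRestore marks whether any type is present.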
for (i in 1:nrow(post_restorational)) {
dfrow = df[i,]
type = "None"
for (j in 1:length(restoreParams))
{
rParam = substring(restoreParams,2,4)[j]
if (dfrow[rParam] == 1)
{
if (type != "None")
{
if (type == "HA" || rParam == "HA")
{
type = "HA"
}
} else {
type = rParam
}
}
}
post_restorational[i,]$restore = type
post_restorational[i,]$hasRestore = (type != "None")
}
names(post_restorational)[4:6] = c("q2.5", "median", "q97.5")
r85_restorational = rcp85_2300_orig_pred
r85_restorational$restore = rep(NA, nrow(r85_restorational))
r85_restorational$hasRestore = rep(NA, nrow(r85_restorational))
df = data.frame(rcp85_2300_orig$data)
for (i in 1:nrow(r85_restorational)) {
dfrow = df[i,]
type = "None"
for (j in 1:length(restoreParams))
{
rParam = substring(restoreParams,2,4)[j]
if (dfrow[rParam] == 1)
{
if (type != "None")
{
if (type == "HA" || rParam == "HA")
{
type = "HA"
}
} else {
type = rParam
}
}
}
r85_restorational[i,]$restore = type
r85_restorational[i,]$hasRestore = (type != "None")
}
names(r85_restorational)[4:6] = c("q2.5", "median", "q97.5")
source("../../RUtilityFunctions/multiplot.R")
p1=ggplot(data=post_restorational[post_restorational$restore != "VP",], aes(x=median, group=hasRestore, fill=hasRestore)) +
geom_density(adjust=1, position="stack") +
theme(legend.position = c(0.75,0.75), legend.title = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,5), expand = c(0,0)) +
scale_y_continuous(limits = c(0,2.75), expand = c(0,0)) +
scale_fill_manual(guide=guide_legend(reverse=TRUE), values = c("darkgray","palegreen"), labels=c("None", "Restoration")) +
xlab("") +
ylab("") +
ggtitle("a)")
p2=ggplot(data=post_restorational[post_restorational$restore != "VP",], aes(x=median, group=restore, fill=restore)) +
geom_density(adjust=1, position="stack") +
theme(legend.position = c(0.75,0.75), legend.title = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,5), expand = c(0,0)) +
scale_y_continuous(limits = c(0,2.75), expand = c(0,0)) +
scale_fill_manual(values = c("lightgoldenrod1","steelblue3","darkgray"), labels=c("Breakwaters", "Hydrological Alteration", "None")) +
xlab("") +
ylab("Prob. Density Total Loss") +
ggtitle("b)")
p3=ggplot(data=r85_restorational[r85_restorational$restore != "VP",], aes(x=median, group=restore, fill=restore)) +
geom_density(adjust=1, position="stack") +
theme(legend.position = c(0.75,0.75), legend.title = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,5), expand = c(0,0)) +
scale_y_continuous(limits = c(0,2.75), expand = c(0,0)) +
scale_fill_manual(values = c("lightgoldenrod1","steelblue3","darkgray"), labels=c("Breakwaters", "Hydrological Alteration", "None")) +
xlab("Wetland Loss") +
ylab("") +
ggtitle("c)")
multiplot(p1,p2,p3)
# Restorational Stacked Density 2 -----------------------------------------
ggplot(data=post_restorational[post_restorational$restore != "VP",], aes(x=median, group=restore, fill=restore)) +
geom_density(adjust=1, position="fill") +
theme(legend.position = c(0.75,0.75), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_restorational$median)), expand = c(0,0)) +
scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual(values = c("darkgray","steelblue3","lightgoldenrod1", "Black")) +
# xlab("Wetland Loss") +
# ylab("% Total Loss in W LA") +
# geom_vline(xintercept=max(post_restorational$median), linetype = "dashed") +
geom_rect(mapping=aes(xmin=max(post_restorational$median), ymin=0, xmax=max(r85_restorational$median), ymax=1), color="gray33", fill="gray33")
ggplot(data=r85_restorational[r85_restorational$restore != "VP",], aes(x=median, group=restore, fill=restore)) +
geom_density(adjust=1, position="fill") +
theme(legend.position = c(0.75,0.75), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black")) +
scale_x_continuous(limits = c(0,max(r85_restorational$median)), expand = c(0,0)) +
scale_y_continuous(limits = c(0,1), expand = c(0,0)) +
scale_fill_manual(values = c("darkgray","steelblue3","lightgoldenrod1", "Black")) #+
# scale_fill_manual( values = c("white","white")) +
# xlab("Wetland Loss") +
# ylab("% Total Loss in W LA")
|
/WetlandModel/SLR/figures.R
|
no_license
|
ecospatial/NAS_2016
|
R
| false | false | 29,936 |
r
|
|
#' Doubles an integer using go
#'
#' @param x an integer
#' @useDynLib rigopractice
#' @export
godouble <- function(x) {
.Call("godouble", x, PACKAGE = "rigopractice")
}
#' Sum of Fibonacci Sequence
#'
#' @param x an integer
#' @useDynLib rigopractice
#' @export
gofib <- function(x) {
.Call("gofib", x, PACKAGE = "rigopractice")
}
#' Fast Ver. Sum of Fibonacci Sequence
#'
#' @param x an integer
#' @useDynLib rigopractice
#' @export
gofib_fast <- function(x) {
.Call("gofib_fast", x, PACKAGE = "rigopractice")
}
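# Illustrative usage (assumes the package's compiled Go shared library is loaded via useDynLib):
#   godouble(21L)    # expected to return 42L
#   gofib(10L)       # sum over a Fibonacci sequence of length 10; the exact indexing is
#   gofib_fast(10L)  # determined by the underlying Go implementation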
|
/R/rgo.R
|
no_license
|
ymattu/rigopractice
|
R
| false | false | 520 |
r
|
|
pars.fitMOSnormal <-
function(fit, ensembleData, dates=NULL, ...)
{
if(!is.null(dates)) warning("dates ignored")
M <- matchEnsembleMembers(fit,ensembleData)
nForecasts <- ensembleSize(ensembleData)
if (!all(M == 1:nForecasts)) ensembleData <- ensembleData[,M]
# remove instances missing all forecasts or obs
M <- apply(ensembleForecasts(ensembleData), 1, function(z) all(is.na(z)))
M <- M | is.na(ensembleVerifObs(ensembleData))
ensembleData <- ensembleData[!M,]
if (is.null(obs <- ensembleVerifObs(ensembleData)))
stop("verification observations required")
nObs <- ensembleNobs(ensembleData)
obsLabels <- ensembleObsLabels(ensembleData)
nForecasts <- ensembleSize(ensembleData)
MEAN <- STD <- rep(NA, nObs)
ensembleData <- ensembleForecasts(ensembleData)
B <- fit$B
if (!all(Bmiss <- is.na(B))) {
A <- fit$a
C <- fit$c
D <- fit$d
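    # Predictive distribution for each case follows the normal EMOS/NGR form used below:
    # mean = a + B'f (intercept plus member weights on the ensemble forecasts f) and
    # sd = sqrt(c + d * S^2), with S^2 the ensemble sample variance. Stated as a reading
    # aid; the coefficient values themselves come from the fitted object.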
for (i in 1:nObs) {
f <- ensembleData[i,]
S.sq <- var(f)
f <- c(1,f)
MEAN[i] <- c(A,B)%*%f
STD[i] <- sqrt(C + D*S.sq)
}
}
parValues <- cbind(mean = MEAN, sd = STD)
row.names(parValues) <- obsLabels
parValues
}
|
/ensembleMOS/R/pars.fitMOSnormal.R
|
no_license
|
Three-Poles/Reprocessing
|
R
| false | false | 1,141 |
r
|
|
cr <- corr(".")
cr <- sort(cr)
set.seed(868)
out <- round(cr[sample(length(cr), 5)], 4)
print(out)
|
/Assignment01/q2-08.R
|
no_license
|
benicovdw/ProgramWithR
|
R
| false | false | 99 |
r
|
cr <- corr(".")
cr <- sort(cr)
set.seed(868)
out <- round(cr[sample(length(cr), 5)], 4)
print(out)
|
require(RColorBrewer)
require(wordcloud)
require('curl')
require('httr')
require(stringi)
if (!require('openfda') ) {
devtools::install_github("ropenhealth/openfda")
library(openfda)
print('loaded open FDA')
}
#Temporary fix for peer certificate error
#httr::set_config( config( ssl_verifypeer = 0L ) )
#require(GOsummaries)
SOURCEDIR <- '../sharedscripts/'
if (!file.exists( paste0( SOURCEDIR, 'key.r') ))
{
SOURCEDIR <- 'sharedscripts/'
} else {
#Temporary fix for peer certificate error
httr::set_config( config( ssl_verifypeer = 0L ) )
# print(SOURCEDIR)
}
source( paste0( SOURCEDIR, 'key.r') )
getcloud_try <- function(mydf, scale1=9, name=1, freq=2, title='Terms') {
# mydf[, name] <- abbreviate(mydf[,name], 20)
# layout(matrix(c(1,2), 2, 1, byrow = FALSE), heights=c(1,12 ) )
par(mar=c(0,0,2,0))
# plot(c(0,1),c(0,1),xlab='',type='n', xaxt='n', yaxt='n',frame.plot=FALSE)
# plot.new()
#****************************************
#* maketiff
# if ( !is.null(mydf) & (1==2) )
# {
# filename <- paste0( mydf[2,1], mydf[1,1] ,'.tif')
# print(filename)
# filename <- gsub('%22', '', filename)
# if ( nrow(mydf) >0 )
# {
# filename <- paste0( mydf[2,1], mydf[1,1] ,'.png')
# print(filename)
# # tiff(file = filename, res=600, compression='lzw',height=8, width=8, units="in", bg = "transparent" )
# png(file = filename, width = 4000, height = 4000, res=600, bg = "transparent" )
# plotWordcloud(mydf[ , 1], mydf[,2],
# color=brewer.pal(8, "Dark2"))
# dev.off()
# }
# }
#end maketiff
mydf[ which( is.nan(mydf[,freq] ) ), freq] <- 1
options(warn=5)
curscale <- scale1
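  # Try progressively smaller maximum word sizes until wordcloud() succeeds: options(warn = 5)
  # promotes warnings (e.g. "word could not be fit on page") to errors, try() traps them, and
  # the scale is reduced by 0.5 before retrying. The first scale that draws cleanly is
  # returned so getcloud() can redraw the cloud in color at that size.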
for (i in 1:(2*scale1-1))
{
# print(curscale)
out <- try( ( wordcloud(mydf[, name], mydf[,freq], max.words = 200 ,
color=rep('white', 8), random.order=FALSE,
scale=c(curscale, .5) ) ) )
if( class(out) == "try-error" )
{
curscale <- curscale-.5
} else {
break()
}
}
options(warn=0)
return( c(curscale, .5) )
}
getcloud <- function(mydf, scale1=9, name=1, freq=2, title='Terms', scale=NULL) {
if ( is.null(scale) & nrow(mydf) > 1)
{
scale <- invisible(getcloud_try(mydf, scale1, name, freq, title))
}
par(mar=c(0,0,2,0))
mylabel <- paste('Word Cloud for', title)
mylabel <- title
#text(.5, .5, labels=mylabel, cex=2)
mydf[ which( is.nan(mydf[,freq] ) ), freq] <- 1
if( nrow(mydf) > 1 )
{
wordcloud(mydf[, name], mydf[,freq], max.words = 200 ,
color=brewer.pal(8, "Dark2"), random.order=FALSE,
scale=scale )
}
text(.5, 1, labels=mylabel, cex=1.5)
}
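# Illustrative usage (hypothetical counts):
#   terms <- data.frame(term = c("NAUSEA", "HEADACHE", "RASH"), count = c(120, 80, 35))
#   getcloud(terms, title = "Most frequent reactions")
# Column positions follow the defaults (name = 1, freq = 2), so any term/frequency data
# frame in that layout should work.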
gettables <- function( tmp )
{
mycols <- grep('_table', names(tmp), fixed=TRUE )
outdf <- tmp[[1]]
s <- ""
for (i in seq_along(mycols))
{
for (j in seq_along( tmp[[ mycols[i] ]] ) )
{
if ( tmp[[ mycols[i] ]][[j]]!='None' )
{
s <- paste(s, '<br>', tmp[[ mycols[i] ]][[j]] )
}
}
}
if (nchar(s)==0)
{
return('<i>No tables in this section</i>')
}
return( HTML(s) )
}
getwaittime <- function(session){
if(session$clientData$url_hostname == '10.12.207.87')
{
return( 0)
} else if(session$clientData$url_hostname == '127.0.0.1') {
return (0.0)
}
return(0.0)
}
fda_fetch_p <- function(session, myurl,
message = '',
wait=0,
reps=0,
flag=NULL)
{
prog <- ''
dispurl <- removekey(myurl)
starttime <- Sys.time()
mytitle <- paste('Fetching Data', message )
closeAlert(session, 'alert2')
createAlert(session, 'alert', 'fetchalert',title=mytitle, content = substr(dispurl, 37,100), dismiss = FALSE)
# if ( length(flag) < 1 )
# {
# flag <- paste( 'No Reports for selected term<br>' )
# }
iter <- 1
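  # Retry loop: openFDA occasionally returns transient failures or an empty result set, so the
  # fetch is attempted up to 10 times. A try() error raises an alert and the request is retried
  # after a short pause; an explicit error payload from the API stops retrying immediately; the
  # first response with results is returned. The retry counts are heuristics, not API limits.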
for (i in 1:10)
{
out <- try( fda_fetch( myurl ) )
if( class(out) == "try-error" )
{
closeAlert(session, 'erroralert')
err <- out
createAlert(session, 'alert2', 'erroralert', title=paste( '<font color="red">Error:', err,' </font>' ),
content= flag )
Sys.sleep(1)
} else if( length(out$results)<1 )
{
s <- GET(myurl)
if( !is.null( httr::content(s)$error) )
{
closeAlert(session, 'erroralert')
err <- ( httr::content(s) )
createAlert(session, 'alert2', 'erroralert', title=paste( '<font color="red">Error:', err$error$message,' </font>' ),
content= flag )
break()
}
else
{
iter <- iter + 1
print( paste('iteration', i, 'with empty df') )
if (iter > 25)
{
break()
}
}
# out<- data.frame(term='error', count=NA)
} else {
closeAlert(session, 'erroralert')
break
}
}
#Sys.sleep( max(0, .31 - as.double(Sys.time()-starttime) ) )
closeAlert(session, 'fetchalert')
return(out)
}
makecomb <- function(session, mydf1, mydf2, totals, type, sortvar='prr' ){
if ( !is.data.frame(mydf1) ) {
return(data.frame(Term=paste( 'No events for', type, getterm1( session ) ), Count=0 , Count=0,Count2=0, PRR='prr'))
}
comb <- merge(mydf1, mydf2[, c('term', 'count')], by.x='term', by.y='term')
if (type=='Drug')
{
#x is # reports for DE, y is # reports for Events
# num <- comb$count.x/totals$totaldrug
# denom <- (comb$count.y-comb$count.x)/(totals$total-totals$totaldrug)
# num2 <- comb$count.x/(totals$totaldrug-comb$count.x)
# denom2 <- comb$count.y/(totals$total-comb$count.y)#Total reports for drug j
n.j <- totals$totaldrug
#Total reports for DE combination
nij <- comb$count.x
num <- nij/n.j
#Total reports forevent i
ni. <- comb$count.y
n.. <- totals$total
denom <- ( ni.-nij )/( n.. - n.j )
} else {
#x is # reports for DE, y is # reports for Drug
# num <- comb$count.x/comb$count.y
# denom <- (totals$totaldrug-comb$count.x)/(totals$total-comb$count.y)
# num2 <- comb$count.x/(comb$count.y - comb$count.x)
# denom2 <-(totals$totaldrug)/(totals$total-totals$totaldrug)
n.j <- comb$count.y
#Total reports for DE combination
nij <- comb$count.x
num <- nij/n.j
ni. <- totals$totaldrug
n.. <- totals$total
denom <- ( ni.-nij )/( n.. - n.j )
}
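  # 2x2 contingency notation used below: nij = reports with the drug-event pair, n.j = reports
  # with the drug, ni. = reports with the event, n.. = all reports. The classical PRR is
  # (nij / n.j) / ((ni. - nij) / (n.. - n.j)), which matches num/denom above; prre(), prrd(),
  # ror() and LLR() are assumed to be defined in the shared utility scripts sourced by the
  # apps and to take the arguments (n.., ni., n.j, nij).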
# ror <- num2/denom2
# comb <- data.frame(comb, prr=num/denom, num, denom)
prr <- prre( n.., ni., n.j, nij )
rrr <- prrd( n.., ni., n.j, nij )
ror <- ror( n.., ni., n.j, nij )
llr <- LLR( n.., ni., n.j, nij )
comb <- data.frame(comb, prr=round( prr, digits = 2), ror=ror, nij, ni., n.j, n..)
comb <- comb[order(comb[, sortvar], decreasing = TRUE),]
row.names(comb)<- seq(1:nrow(comb))
return( list(comb=comb, ror=ror) )
}
getcpalinks <- function(column, names, values, mybaseurl, appendtext='')
{
cpa <- numcoltohyper( paste(column, 'CPA'), column, names, values, type='C',
mybaseurl, append = appendtext)
dynprr <- numcoltohyper( paste(column, 'PRR'), column, names, values, type='P',
mybaseurl, append = appendtext )
return( list( cpa = cpa, dynprr = dynprr ) )
}
getdynprr_as_links <- function(column, names, values, mybaseurl, appendtext='')
{
dynprr <- numcoltohyper( paste(column, 'PRR'), column, names, values, type='DPS',
mybaseurl, append = appendtext)
return( list( dynprr = dynprr ) )
}
#buildurl =======================================================
extractbaseurl <- function(myurl){
myurl <- gsub('//', '/', myurl, fixed=TRUE )
tmp <- strsplit(myurl,'?', fixed=TRUE)
tmp1 <- tmp[[1]][1]
tmp2 <- strsplit(tmp1,'/', fixed=TRUE)
tmp3 <- tmp2[[1]][1:(length(tmp2[[1]])-1)]
tmp4 <- paste0(tmp3, collapse='/' )
tmp4 <- paste0(tmp4, '/')
return(tmp4)
}
getemptyapplist <- function()
{
mynames <- c('DA', 'D', 'E', 'P', 'Z', 'R', 'L', 'LR', 'LRE', 'ENFD',
'AEDEV', 'ENFDEV', 'CLSDEV', '510', 'PMA', 'RLDEV', 'RCLDEV',
'ENFFD', 'DAS', 'EAS', 'DPS', 'LRDAS', 'LREAS' )
s <- vector(mode='character', length = length(mynames))
names(s) <- mynames
return(s)
}
makeapplinks <- function( cururl, append = '' )
{
labels <- getemptyapplist()
labels['DA'] <- '<h4>Drug Apps</h4><b>Dashboard-</b> Overview of reports for a drug'
labels['D'] <- '<b>PRR for a Drug-</b> Calculate Proportional Reporting Rates for Common Events for a drug'
labels['E'] <- '<b>PRR for an Event-</b> Calculate Proportional Reporting Rates for Common Drugs that have a specified event'
labels['P'] <- '<b>Dynamic PRR-</b> Calculate Proportional Reporting Rates for a drug-event pair over time'
labels['Z'] <- '<b>Change Point Analysis-</b> Change point analysis for a drug-event pair over time'
labels['R'] <- '<b>Adverse Event Browser-</b> View reports that meet search criteria'
labels['L'] <- '<b>Label Browser-</b> View labels that meet search criteria'
labels['LR'] <- '<b>Likelihood Ratio Test for Drug-</b> Calculate Likelihood Ratio Tests for Common Events for a drug'
labels['LRE'] <- '<b>Likelihood Ratio Test for Event-</b> Calculate Likelihood Ratio Tests for Common Drugs for an event'
labels['ENFD'] <- '<b>Drug Enforcement Report Browser-</b> View enforcement reports that meet search criteria'
labels['AEDEV'] <- '<hr><h4>Device Apps</h4><b>Adverse Event Browser-</b> View reports that meet search criteria'
  labels['ENFDEV'] <- '<b>Device Enforcement Report Browser-</b> View device enforcement reports that meet search criteria'
  labels['CLSDEV'] <- '<b>Device Classification-</b> View device classification records that meet search criteria'
labels['510'] <- '<b>510(k) Viewer-</b> View 510(k) Data'
labels['PMA'] <- '<b>PMA Viewer-</b> View PMA Data'
labels['RLDEV'] <- '<b>Registration and Listing Browser-</b> View Registration and Listing Data'
labels['RCLDEV'] <- '<b>Recall Browser-</b> View Recall Reports'
  labels['ENFFD'] <- '<hr><h4>Food Apps</h4><b>Food Enforcement Report Browser-</b> View reports that meet search criteria'
labels['DAS'] <- '<h4>Drug Apps Using Suspect Drugs Only</h4><b>PRR for a Drug-</b> Calculate Proportional Reporting Rates for Common Events for a drug'
labels['EAS'] <- '<b>PRR for an Event-</b> Calculate Proportional Reporting Rates for Common Drugs that have a specified event'
labels['DPS'] <- '<b>Dynamic PRR-</b> Calculate Proportional Reporting Rates for a drug-event pair over time'
labels['LRDAS'] <- '<b>Likelihood Ratio Test for Drug-</b> Calculate Likelihood Ratio Tests for Common Events for a drug'
labels['LREAS'] <- '<b>Likelihood Ratio Test for Event-</b> Calculate Likelihood Ratio Tests for Common Drugs for an event'
s <- names(labels)
#s <- getbaseurl(s)
out <- ''
if ( max(append) == '')
{
addquery <- FALSE
} else {
addquery <- TRUE
}
for (i in seq_along(s))
{
out <- paste0(out, coltohyper(append[i], s[i] , mybaseurl = cururl, display=labels[i], makequery=addquery ), '<br>' )
}
return( out )
}
getappname <- function(myurl){
tmp <- strsplit(myurl,'?', fixed=TRUE)
tmp1 <- tmp[[1]][1]
tmp2 <- strsplit(tmp1,'/', fixed=TRUE)
tmp3 <- tmp2[[1]][ length(tmp2[[1]]) ]
return(tmp3)
}
removekey <- function(url) {
mykey <- paste0('api_key=', getkey(),'&')
s <-gsub(mykey, '', url, fixed=TRUE)
return(s)
}
getkey <- function(){
if( exists('getmykey'))
{
return( getmykey() )
}
else
{
return( '' )
}
}
makelink <- function( s, s2 = NULL ) {
if (is.null(s2))
{
s2 <- removekey( s[[1]] )
}
s <- gsub('"', '%22', s, fixed=TRUE)
s <- gsub(' ', '%20', s, fixed=TRUE)
s2 <- gsub('"', '%22', s2, fixed=TRUE)
link <- paste0('<a href="', s, '" target="_blank">', s2, '</a>')
return (link)
}
getbaseurl <- function( which, mycururl=NULL, appname= 'cpa' ){
apps1 <-c('cpa', 'dynprr', 'RR_D', 'RR_E', 'reportview', 'dash', 'LRTest', '510kview')
if (is.null(mycururl))
{
out <- getcururl()
} else{
out <- mycururl
}
if (which=='D'){
s <- 'RR_D'
} else if (which=='DAS'){
s <- 'RR_D_Activesubstance'
} else if(which == 'E' ) {
s <- 'RR_E'
} else if(which == 'EAS' ) {
s <- 'RR_E_Activesubstance'
} else if(which == 'R' ){
s <- 'reportview'
} else if(which == 'P' ){
s <- 'dynprr'
} else if(which == 'DPS' ){
s <- 'dynprr_Activesubstance'
} else if(which == 'DA' ){
s <- 'dash'
} else if(which == 'L' ){
s <- 'labelview'
} else if(which == 'LR' ){
s <- 'LRTest'
} else if (which=='LRDAS'){
s <- 'LR_D_Activesubstancename'
} else if(which == 'LRE' ){
s <- 'LRTest_E'
} else if (which=='LREAS'){
s <- 'LR_E_Activesubstancename'
} else if(which == 'ENFD' ){
s <- 'drugenforceview'
} else if(which == 'AEDEV' ){
s <- 'devicereports'
} else if(which == 'ENFDEV' ){
s <- 'deviceenforceview'
} else if(which == 'CLSDEV' ){
s <- 'deviceclassview'
} else if(which == '510' ){
s <- '510kview'
} else if(which == 'PMA' ){
s <- 'PMAview'
} else if(which == 'RLDEV' ){
s <- 'devicereglist'
} else if(which == 'RCLDEV' ){
s <- 'devicerecallview'
} else if(which == 'ENFFD' ){
s <- 'foodrecallview'
} else {
s <- 'ChangePoint'
}
if ( !( appname %in% apps1) )
{
s <- paste0(s, 2)
}
out <-paste0( out, s, '/' )
return(out)
}
makeexturl <- function(base, names, values){
# values <- gsub('NA', "", values, fixed=TRUE)
values[ which(is.na(values))] <- ''
names2 <- URLencode(names, reserved = TRUE)
values2 <- URLencode(values, reserved = TRUE)
s1 <- paste0( '&', names2)
s2 <- paste0( '=', values2)
# print(s2)
allpairs2 <- paste0(s1, s2, collapse='')
allpairs2 <- sub('&', "?", allpairs2, fixed=TRUE)
allpairs2 <- paste0(base, allpairs2)
s1 <- paste0( '&', names)
s2 <- paste0( '=', values)
allpairs <- paste0(s1, s2, collapse='')
allpairs <- sub('&', "?", allpairs, fixed=TRUE)
allpairs <- paste0(base, allpairs)
return( allpairs )
}
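# Illustrative example (hypothetical app URL):
#   makeexturl("https://example.com/dynprr/", c("t1", "v1"), c("aspirin", "nausea"))
#   # -> "https://example.com/dynprr/?t1=aspirin&v1=nausea"
# NA values are blanked first; note that the returned string uses the raw values, while the
# URL-encoded variant (allpairs2) built above is not the one returned.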
makehyper <- function(base , names, values, display, makequery=TRUE){
if (makequery)
{
url <- makeexturl(base, names, values)
}
else {
url <- base
}
url <- gsub('"','%22', url, fixed=TRUE)
hyper <- paste0('<a href="', url, '" target="_blank">', display, '</a>')
return(hyper)
}
makemedlinelink <- function(s, s2) {
medstring <- paste0("http://www.merriam-webster.com/medlineplus/", s )
out <- makelink(medstring, s2)
return(out)
}
coltohyper <- function(col, which, mybaseurl=NULL, display=NULL, makequery=TRUE, append=''){
baseurl <- getbaseurl(which, mybaseurl)
name <- 't1'
if (is.null(display)){
display <- col
}
out <- vector(mode='character', length=length(col))
for (i in seq_along(col))
{
col[i] <- gsub('"', '', col[i])
out[i] <- makehyper(baseurl , name, paste0('' ,col[i] , append ), display[i], makequery=makequery)
}
return(out)
}
numcoltohyper <- function( dispcol, valcol, names, values, type='R', mybaseurl=NULL, addquotes=FALSE, append='' ){
baseurl <- getbaseurl( type, mybaseurl )
out <- vector(mode='character', length=length(dispcol))
# if (addquotes)
# {
# values <- gsub('"', '', values , fixed=TRUE)
# values <- paste0('%22' ,values , '%22' )
# }
for (i in seq_along(dispcol))
{
if (addquotes)
{
valcol[i] <- gsub('"', '', valcol[i] , fixed=TRUE)
valcol[i] <- paste0('%22' ,valcol[i] , '%22' )
}
curvals <- c( paste0('' ,values , '' ), paste0('' ,valcol[i] , '' ))
curvals <- gsub('%22%22', '', curvals , fixed=TRUE)
# print(paste('baseurl=',baseurl))
# print(paste('names=',names))
# print(paste('curvals=',curvals))
# print(paste('dispcol[i]=',dispcol[i]))
out[i] <- makehyper( baseurl , names, paste0('' ,curvals , append ), prettyNum(dispcol[i], big.mark=',') )
}
return(out)
}
buildURL <- function (v, t, count='' ,limit=NULL, skip=0, usekey=TRUE,
type='event', db= '/drug/', addplus=TRUE, whichkey=1){
# browser()
if(whichkey == 1)
{
mykey <- getkey()
} else {
mykey <- getkey()
}
baseurl <- paste0(db, type ,".json")
qq <- fda_query(baseurl)
# fda_api_key(mykey)
for (i in seq_along(v) )
if ( v[i]!='')
{
# print(t[i])
if (t[i]!=''){
if(addplus)
{
t[i] <- gsub( ' ', '+', t[i],fixed=TRUE)
} else {
t[i] <- gsub( ' ', '%20', t[i],fixed=TRUE)
}
t[i] <- gsub( '%22', '"', t[i],fixed=TRUE)
t[i] <- gsub( ',', '', t[i],fixed=TRUE)
t[i] <- gsub( '^', '', t[i],fixed=TRUE)
t[i] <- gsub( "\\", '%5C' , t[i], fixed=TRUE)
t[i] <- gsub( '""', '%22' , t[i], fixed=TRUE)
t[i] <- gsub( '""', '%22' , t[i], fixed=TRUE)
t[i] <- paste0('(',t[i] ,')')
qq <- qq %>%
fda_filter(v[i], paste0(t[i], collapse='+') )
}
}
if (count!=''){
qq <- qq %>%
fda_count(count)
}
if (!is.null(limit)){
qq$limit <- limit
}
# qq <- qq %>%
# fda_api_key(mykey)
myurl <- (fda_url(qq))
if(usekey == TRUE)
{
myurl <- gsub('?', paste0('?api_key=', mykey, '&'), myurl , fixed=TRUE)
}
#temp
#https://openfda-old.ngrok.io
# myurl <- gsub('api.fda.gov', 'openfda-old.ngrok.io', myurl , fixed=TRUE)
# myurl <- gsub('api.fda.gov', 'openfda.ngrok.io', myurl , fixed=TRUE)
# #temp
myurl <-paste0(myurl, '&skip=', skip)
if(!usekey) {
myurl <- removekey( myurl )
}
myurl <- gsub('Any Variable:', "", myurl, fixed=TRUE)
return( myurl )
}
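# Illustrative example (hypothetical query; exact encoding depends on the openfda package):
#   buildURL(v = c("patient.drug.medicinalproduct"), t = c("%22aspirin%22"),
#            count = "patient.reaction.reactionmeddrapt.exact",
#            limit = 100, db = "/drug/", type = "event")
# would return a URL roughly of the form
#   https://api.fda.gov/drug/event.json?api_key=...&search=patient.drug.medicinalproduct:("aspirin")&count=patient.reaction.reactionmeddrapt.exact&limit=100&skip=0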
quotestr <- function(s) {
return( paste0( '%22', s, '%22'))
}
getdaterangeFUN <- function( var, db, type){
v <- c( '_exists_')
t <- c( var )
myurl <- buildURL(v, t, count= var, db=db, type=type )
mydf <- fda_fetch( myurl)
meta <- mydf$meta
tmp <- mydf$result
# print( head(tmp))
myrange <- c( min(tmp$time, na.rm = TRUE), max(tmp$time, na.rm = TRUE))
return( myrange )
}
getdaterange <- reactive({
var <- 'receivedate'
db <- '/drug/'
type <- 'event'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangedeviceAE <- reactive({
var <- 'date_received'
db <- '/device/'
type <- 'event'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangelabel <- reactive({
var <- 'effective_time'
db <- '/drug/'
type <- 'label'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterange510K <- reactive({
var <- 'decision_date'
db <- '/device/'
  type <- '510k'   # openFDA endpoint names are lowercase ('510K' would miss /device/510k.json)
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangePMA <- reactive({
var <- 'decision_date'
db <- '/device/'
type <- 'pma'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangeenforce <- reactive({
var <- 'report_date'
db <- '/drug/'
type <- 'enforcement'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangedeviceclass <- reactive({
var <- 'report_date'
db <- '/device/'
type <- 'classification'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangeenforce_food <- reactive({
var <- 'report_date'
db <- '/food/'
type <- 'enforcement'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangeenforce_device <- reactive({
var <- 'report_date'
db <- '/device/'
type <- 'enforcement'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangerecall_device <- reactive({
var <- 'event_date_terminated'
db <- '/device/'
type <- 'recall'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
getdaterangereglist_device <- reactive({
var <- 'created_date'
db <- '/device/'
type <- 'registrationlisting'
myrange <- getdaterangeFUN(var, db, type)
return( c(myrange, var ) )
})
listtostring <- function(s, delim=';')
{
myevents <- gsub('\"', '', s, fixed=TRUE)
myevents <- gsub('c(', '', myevents, fixed=TRUE)
myevents <- gsub('list(', '', myevents, fixed=TRUE)
myevents <- gsub(')', '', myevents, fixed=TRUE)
myevents <- gsub(',', delim, myevents, fixed=TRUE)
return(myevents)
}
extractcols <- function( mydf, myvars )
{
myvars <- myvars[ myvars %in% names(mydf) ]
mydf <- mydf[, myvars]
types <- (sapply(mydf, class))
#list of strings, make one string
for (i in seq_along(types))
{
if(types[i] == 'list')
{
mydf[,i] <- listtostring( mydf[ , i ])
}
}
types <- (sapply(mydf, class))
# print(types)
typesval <- types[types!='data.frame' & types!='list']
mydf <- as.data.frame(mydf, stringsAsFactors = FALSE)
if(ncol(mydf)==1)
{
# print(mydf)
# browser()
names(mydf) <- myvars[1]
# print(mydf)
} else {
mydf <- mydf[ , names(typesval) ]
}
if ( is.data.frame( mydf ) ){
return( mydf )
} else {
return( data.frame(Note='No patient data'))
}
}
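# Quick toy illustration (made-up record, not real openFDA data): extractcols()
# keeps only the requested columns and collapses list-columns into ';'-separated
# strings via listtostring().
if (FALSE) {
  toy <- data.frame(safetyreportid = '123', stringsAsFactors = FALSE)
  toy$reactions <- list(c('NAUSEA', 'HEADACHE'))
  extractcols(toy, c('safetyreportid', 'reactions'))
  #   safetyreportid        reactions
  # 1            123 NAUSEA; HEADACHE
}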
getjson <- function(myurl)
{
tmp <- tempfile()
curl_download(myurl, tmp, quiet=TRUE)
s <- (scan(tmp, what='character', quote='', sep='?'))
unlink(tmp)
s <- paste0(s, collapse='<BR>')
return(s[1])
}
getopenfdamaxrecords <- function( maxlim=5000 )
{
maxlim <- min(5000, maxlim)
return(maxlim)
}
getvalfromlink <- function(instr)
{
s <- strsplit(instr, '=' )
s <- s[[1]][3]
s <- strsplit( s, '&' )
s <- s[[1]][1]
  s <- gsub('%20', ' ', s[1], fixed = TRUE)  # decode every encoded space, not just the first
return(s)
}
getvalvectfromlink <- function(instr)
{
s <- ( sapply(instr, FUN = getvalfromlink ))
names(s) <- NULL
return(s)
}
mytp <- function(x, y, w, refline=1,
mytitle="Text Plot for Terms. Draw a box around terms to see more details",
myylab='LLR') {
# browser()
mycex=1
if (length(w) > 0)
{
# w <- gsub(' ', '_', w, fixed = TRUE)
# xlim <- c( min(x, na.rm = TRUE), max(x, na.rm = TRUE))
# ylim <- c( min(y, na.rm = TRUE), max(y, na.rm = TRUE))
# lay <- wordlayout(x,y,w, xlim=xlim, ylim=ylim)
xlim <- c( min(x, na.rm = TRUE), max(x, na.rm = TRUE))
xlim[2] <- xlim[2] + 0.3*(xlim[2]-xlim[1] )
plot(x,y,type="p",
xlim = xlim,
ylim = c( 1.0, max(y, na.rm = TRUE) ),
log='y',
xlab= 'Number of Events',
ylab= myylab,
col='red',
main=mytitle,
cex=mycex)
text(x, y, w, pos=4, cex=mycex)
} else {
plot(1,1,type="n",
log='y',
xlab= 'Number of Events',
ylab= myylab,
col='red',
main='Please enter a term',
cex=mycex)
}
# text(x,y,w, pos=4, cex=.75)
grid()
abline(h=refline, col='red')
}
renderterm <- function( term, label, label2='' ){
if(term == '') {
term <- 'None'
}
if (label2 != '')
{
out <- paste( '<br><b>', label, '<i><font color="dodgerblue" size="4">', term, '</font></i><br>', label2,'</b><br>' )
} else {
out <- paste( '<br><b>', label, '<i><font color="dodgerblue" size="4" >', term, '</font></i></b><br><br>' )
}
return(out)
}
renderterm2 <- function( term, label, label2='' ){
if(term == '') {
term <- 'None'
}
if (label2 != '')
{
out <- paste( '<br><b>', label, '<i><font color="dodgerblue" size="4">', term, '</font></i><br>', label2,'</b>' )
} else {
out <- paste( '<br><b>', label, '<i><font color="dodgerblue" size="4" >', term, '</font></i></b>' )
}
return(out)
}
getterm1description <- function(exact, term)
{
s <- term
if ( exact!= 'exact' )
{
s <- gsub(' ', ' or ', term, fixed=TRUE)
s <- gsub('or AND or', ' AND ', s, fixed=TRUE)
s <- gsub('or OR or', ' OR ', s, fixed=TRUE)
} else {
s <- paste0( "'", term, "'")
}
return( s )
}
updateviewerinputs <- function(session)
{
updateTextInput(session, "v1", value=( session$input$v1_2 ) )
updateTextInput(session, "t1", value= ( session$input$t1_2 ) )
updateTextInput(session, "v2", value=( session$input$v2_2 ) )
updateTextInput(session, "t2", value= ( session$input$t2_2 ) )
updateTextInput(session, "v3", value=( session$input$v3_2 ) )
updateTextInput(session, "t3", value= ( session$input$t3_2 ) )
updateSliderInput( session, 'skip', value=1)
}
getallvars <- function( cols, mytype = 'text', section= c('all') )
{
# tables <- which( stri_sub(cols, -6, -1) == '_table' )
# text <- which( stri_sub(cols, -6, -1) != '_table' )
if (section[1] !='all')
{
text <- which( stri_sub(cols, 1, 2) %in% section )
cols <- cols[text]
}
cols <- substr(cols, 4, 100)
if (mytype == 'text')
{
return (cols)
} else {
return(paste0(cols, '_table'))
}
}
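# Sketch of the naming convention these helpers appear to assume (the column
# names below are hypothetical): each variable name carries a two-letter section
# prefix plus a separator, which getallvars() strips, and '_table' marks
# table-valued fields.
if (FALSE) {
  cols <- c('pa_patientsex', 'pa_patientonsetage', 'dr_medicinalproduct')
  getallvars(cols, mytype = 'text', section = c('pa'))
  # [1] "patientsex"      "patientonsetage"
  getallvars(cols, mytype = 'table')
  # [1] "patientsex_table"       "patientonsetage_table"  "medicinalproduct_table"
}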
gettablenames <- function(cols)
{
tables <- which( stri_sub(cols, -6, -1) == '_table' )
return(cols[tables])
}
gettextnames <- function(cols)
{
textvar <- which( stri_sub(cols, -6, -1) != '_table' )
return(cols[textvar])
}
getselectedcols <- function(tmp, type, sections)
{
realcols <- names(tmp)
knowncols <- getallvars( allvars(), type, section=sections)
mycols <- intersect (knowncols, realcols)
outdf <- extractdfcols(tmp, mycols)
return( outdf )
}
simpleCap <- function(x) {
s <- tolower(x)
s <- strsplit(s, " ")[[1]]
first <- paste(toupper(substring(s[1], 1, 1)), substring(s[1], 2),
sep = "", collapse = " ")
out <- paste( s[-1], sep = "", collapse = " ")
  out <- paste(first, out)
  return(out)
}
listtocvect <- function(s, delim=', ', trunc=25){
if (is.null( s) ) {
return('')
}
out <- paste0( s, sep='', collapse=delim)
out <- strtrim(out, trunc)
return(out)
}
listtodf <- function(lis, delim=', ', trunc=100){
if ( is.null(lis) )
{
return(NULL)
}
out <- data.frame(rownames=1:length(lis[[1]]), stringsAsFactors =FALSE )
for (i in seq_along(lis) )
{
if (is.list(lis[[i]]))
{
tmp <- sapply(lis[[i]], function(x) listtocvect(x, delim, trunc) )
out[[i]] <- tmp
} else {
out[[i]] <- ''
}
}
# print(lis[[i]])
out <- data.frame(out, stringsAsFactors =FALSE)
names(out) <- names(lis)
return(out)
}
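# Toy illustration (invented values): listtodf() turns a named list of
# list-columns, such as a parsed JSON field, into a data frame of
# comma-separated strings.
if (FALSE) {
  lis <- list(reactionmeddrapt = list(c('NAUSEA', 'RASH'), 'HEADACHE'))
  listtodf(lis)
  #   reactionmeddrapt
  # 1     NAUSEA, RASH
  # 2         HEADACHE
}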
# NOTE: listtostring() is defined twice in this file; this later definition is
# the one that wins when the file is sourced, so keep it consistent with the
# earlier one (including the 'list(' cleanup).
listtostring <- function(s, delim=';')
{
  myevents <- gsub('\"', '', s, fixed=TRUE)
  myevents <- gsub('c(', '', myevents, fixed=TRUE)
  myevents <- gsub('list(', '', myevents, fixed=TRUE)
  myevents <- gsub(')', '', myevents, fixed=TRUE)
  myevents <- gsub(',', delim, myevents, fixed=TRUE)
  return(myevents)
}
getdf <- function(mydf, name, message='Empty Table')
{
# print(name)
# print(head(mydf) )
err <- data.frame( Note=message, stringsAsFactors = FALSE )
if ( is.data.frame(mydf) ) {
if (name %in% names(mydf) ) {
tmp <- mydf[, name]
if ( is.data.frame(tmp) ) {
return(tmp)
} else if ( is.data.frame(tmp[[1]]) ) {
return(tmp[[1]])
} else {
return(err)
}
}
}
return( err )
}
extractdfcols <- function(tmp, mycols, numrows=1)
{
# browser()
numcols <- length(mycols)
# browser()
outdf <- data.frame( matrix( ncol=numcols, nrow=0 ), stringsAsFactors = FALSE )
mynames <- names(tmp)
for ( i in 1:numrows)
{
outdf <- rbind(outdf, as.character( rep('None', numcols) ) )
}
names(outdf) <- mycols
cols <- mynames %in% mycols
for (i in seq_along(cols))
{
if (cols[i])
{
if ( length(tmp[[i]]) > 0 )
{
outdf[ mynames[i] ] <- listtostring(tmp[[i]], ';')
} else {
outdf[ mynames[i] ] <- ''
}
}
}
if ( length(outdf)==0 )
{
outdf <- data.frame( Variable ='None')
}
return(outdf)
}
getvarprefixs <- function( cols=allvars() )
{
s <- substr( cols, 1, 2 )
return( unique(s))
}
getsimplecols <- function(mydf)
{
types <- (sapply(mydf, class))
typesval <- types[types!='data.frame' & types!='list']
mynames <- names(typesval)
mydf <- as.data.frame( mydf[ , mynames ] )
names(mydf) <- mynames
return (mydf)
}
buildtable <- function(flat, keyvals, keyname, myvars)
{
mynames <- c( keyname, getallvars( allvars(), mytype = 'text', section= myvars ) )
blank <- matrix(nrow=1, ncol=length(mynames))
blankdf <- as.data.frame( blank, stringsAsFactors=FALSE )
names(blankdf) <- mynames
tmp <- blankdf
# browser()
for (i in 1:nrow(flat) )
{
curid <- keyvals[i]
curdata <- flat[ i, ]
curdf <- data.frame( id=curid , curdata)
curnames <- names(curdf)
# browser()
newdf <- matrix(nrow=nrow(curdf), ncol=length(mynames))
newdf <- as.data.frame( newdf, stringsAsFactors=FALSE )
names(newdf) <- mynames
newdf[ keyname ] <- curdf['id' ]
for (j in 2:length(mynames) )
{
if ( mynames[j] %in% curnames )
{
newdf[ mynames[j] ] <- curdf[ mynames[j] ]
}
}
tmp <- rbind(tmp, newdf)
}
# browser()
tmp <- tmp[ which(!is.na( tmp[ keyname ] ) ), ]
return( tmp )
}
buildtablerow <- function(flat, keyval, keyname, myvars)
{
# browser()
mynames <- c( keyname, getallvars( allvars(), mytype = 'text', section= myvars ) )
blank <- matrix(nrow=1, ncol=length(mynames))
blankdf <- as.data.frame( blank, stringsAsFactors=FALSE )
names(blankdf) <- mynames
tmp <- blankdf
# browser()
for (i in 1:nrow(flat) )
{
curid <- keyval
curdata <- as.data.frame( flat[ i, ] )
curdf <- data.frame(curdata)
curnames <- names(curdf)
# browser()
newdf <- matrix(nrow=nrow(curdf), ncol=length(mynames))
newdf <- as.data.frame( newdf, stringsAsFactors=FALSE )
names(newdf) <- mynames
# newdf[ keyname ] <- curdf[keyname ]
for (j in 1:length(mynames) )
{
if ( mynames[j] %in% curnames )
{
newdf[ mynames[j] ] <- curdf[ mynames[j] ]
}
}
tmp <- rbind(tmp, newdf)
}
# browser()
tmp <- tmp[ which(!is.na( tmp[ keyname ] ) ), ]
return( tmp )
}
|
/sharedscripts/serverhelpers.R
|
no_license
|
jonathanglevine/openfdashinyapps
|
R
| false | false | 30,018 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotExRt.R
\name{PlotExRt}
\alias{PlotExRt}
\title{Plot raw data in images directory}
\usage{
PlotExRt(myData, fileName, outDir)
}
\description{
Plot raw data in images directory
}
|
/man/PlotExRt.Rd
|
no_license
|
comtook/afmFreeEnergy
|
R
| false | true | 259 |
rd
|
# This script implements code to measure the compactness of a district.
# In order to measure compactness we need a measure of the boundary length.
# Two functions will be implemented to test compactness - one that works with
# a grid and one that works with the graph created from a real shape file.
# These inputs can be swapped by setting the indicator `grid` below
# (0 = real-map/dummy shapefile adjacency matrix, 1 = square grid, 2 = hex grid).
library(igraph)
source("../network_functions.R")
############################ SETUP THE NETWORK ################################
# Wrap this up in a function as you use it at the start of every program.
# The input will be 0,1,2 for graph_type.
# select grid = 0,1,2 for real map, sq, hex respectively.
grid = 2
n = 4
Ndist = 4
Ncounty = 4
gplot = f.graph(n,grid,Ndist,Ncounty,"../")
nodes = vcount(gplot)
g = f.perimeter(gplot,nodes)
V(g)$district = c(1,1,2,2,1,1,2,2,3,3,4,4,3,3,4,4,0)
# Setup the plotting information
V(gplot)$size = 10
if (n > 20) {
V(gplot)$size = 1 # Reduce size of the nodes
V(gplot)$label = NA # Do not print labels
}
if (grid == 0) {
graph_attr(gplot,"layout") = layout_with_graphopt(gplot, charge=0.0001,
mass=30,
spring.length = 0,
spring.constant = 1)
# layout_with_fr is another option worth exploring
# layout does not always give nice results - might to to do several iterations
# to get a good one. That completely avoids overlaps.
} else {
graph_attr(gplot,"layout") = layout_on_grid(gplot, width = n, height = n,
dim = 2)
}
graph_attr(gplot,"margin") = rep(0.01,4)
par(mar=c(0.5,0.5,0.5,0.5)+0.1)
plot(gplot, vertex.color=get.vertex.attribute(g,"district"),
vertex.frame.color=get.vertex.attribute(g,"district"),
edge.color=get.edge.attribute(g,"p1"))
# The edgelist won't change so create it now
Elist = get.edgelist(g)
class(Elist) = "numeric"
E(g)$p1 = V(g)$district[Elist[,1]]
E(g)$p2 = V(g)$district[Elist[,2]]
# Make a note of which edges are perimeter edges
Eint = min(which(Elist[,2]==nodes+1))
# The graph has been updated with a note of vertices that form the boundary. To
# get district boundaries I want to make a note of all conflicting edges and
# all boundary edges.
# The below code returns the a vector of boundary lengths for each district
# Also need district areas
f.compact = function(G,district) {
Ndist = length(unique(district))-1
boundary = area = numeric(Ndist)
for (i in 1:Ndist) {
gsub = subgraph.edges(G,c(which(E(G)$p1==i),which(E(G)$p2==i)))
conflicts = E(gsub)[which(E(gsub)$p1 != E(gsub)$p2)]
boundary[i] = sum(E(gsub)$weight[conflicts])
area[i] = sum(V(G)$area[which(V(G)$district==i)])
}
Ji = sum(boundary^2/area)
Ji
}
Ji = f.compact(g,V(g)$district)
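# The score above is an isoperimetric-style penalty,
#   J = sum_i ( boundary_i^2 / area_i ),
# where boundary_i is the total weight of the conflicting edges leaving district
# i (including edges to the auxiliary perimeter vertex, which sits in district 0)
# and area_i is the summed area of its vertices. Compact, roundish districts give
# a small J; long, thin districts inflate it.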
########################## CODE FOR PERIMETER NODES ###########################
# The first step is to determine the perimeter of the full map. For each node
# need to determine if if is an external node. For a hex grid and real map use
# the girth of the subgraph created from its neighbours.
for (i in 1:length(V(g))) {
gneighbors = induced_subgraph(g,neighbors(g,i))
deg = degree(gneighbors)
V(g)$external[i] = ifelse(1 %in% deg | 0 %in% deg, 1, 0)
}
V(g)$external
vertex_attr_names(g)
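# Self-contained toy check of the heuristic above (not part of the original
# script): in a wheel graph the hub's neighbours form a cycle, so every degree in
# the induced subgraph is 2 and the hub is classed internal, while each rim
# vertex has neighbours of degree 1 and is classed external.
if (FALSE) {
  gw <- make_ring(6)
  gw <- add_vertices(gw, 1)
  gw <- add_edges(gw, c(rbind(7, 1:6)))   # vertex 7 becomes the hub
  ext <- sapply(seq_len(vcount(gw)), function(i) {
    deg <- degree(induced_subgraph(gw, neighbors(gw, i)))
    as.integer(1 %in% deg | 0 %in% deg)
  })
  ext
  # [1] 1 1 1 1 1 1 0
}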
# Before you add the edges you need to add vertex 0
g = g + vertex(nodes+1,population=0,votes_blue=0,
votes_red=0,district=0,size=0,external=0)
V(g)$name
V(g)$population
V(g)$size
# Note that V(g) is indexed from 1 and the new vertex has been added to the
# end of the list.
c=cbind(rep(nodes+1, sum(V(g)$external)),which(V(g)$external==1)); c
# How can I flatten this?
get.edgelist(g)
is.matrix(c)
c=t(c)
c=as.vector(c)
c
g = g + edges(c)
get.edgelist(g)
# The edgelist won't change so create it now
# graph_elist = get.edgelist(g)
# df_elist = E.data[,1:2]
# df_elist = df_elist[c("v2","v1")]
# df_elist
# df_elist = as.matrix(df_elist)
# is.matrix(graph_elist)
# class(graph_elist)="numeric"
# graph_elist == df_elist
########################### OLDER DEVELOPMENT CODE #############################
# You want to add information for all nodes that form the perimeter of the full
# map.
diameter(g)
radius(g)
eccentricity(g)
# Possible for a perimeter node and internal node to have the same eccentricity.
# Trying to use girth
girth(g, circle=T)
# could you make a subgraph of the neighbours and then check if that is connected?
vid = neighbors(g,7); vid
gcycle = induced_subgraph(g,vid)
is.connected(gcycle)
# This doesn't work for a grid, and it will not work for real map data either,
# as the neighbors of a real map graph are always connected. What you are really
# interested in is whether the neighbourhood is cyclical.
# But can I use girth for a real map graph? (THIS CODE ONLY WORKS WHEN GRID=0,2)
vid = neighbors(g,12); vid
gcycle = induced_subgraph(g,vid)
girth(gcycle)
radius(gcycle)
diameter(gcycle)
eccentricity(gcycle)
# This has girth equal to the number of neighbors.
vid = neighbors(g,1); vid
gcycle = induced_subgraph(g,vid)
girth(gcycle)
radius(gcycle)
diameter(gcycle)
eccentricity(gcycle)
# This has a girth of zero as there are no cyclical paths.
vid = neighbors(g,15); vid
gcycle = induced_subgraph(g,vid)
girth(gcycle)
radius(gcycle)
diameter(gcycle)
eccentricity(gcycle)
degree(gcycle)
# The girth is not equal to the number of neighbors
vid = neighbors(g,9); vid
gcycle = induced_subgraph(g,vid)
girth(gcycle)
eccentricity(gcycle)
radius(gcycle)
diameter(gcycle)
degree(gcycle)
vid = neighbors(g,23); vid
gcycle = induced_subgraph(g,vid)
girth(gcycle)
eccentricity(gcycle)
radius(gcycle)
diameter(gcycle)
degree(gcycle)
# Old code for how to get what you thought was the boundary of a district but
# you were wrong.
#for (i in 1:Ndist) {
# i = 2
# k = 3
# D = as.vector(V(g)[which(V(g)$district==i | V(g)$district==k)])
# gsub = induced_subgraph(g,D)
# for (j in 1:length(D)) {
# gneighbors = induced_subgraph(gsub,neighbors(gsub,j))
# V(gsub)$boundary[j] = ifelse(girth(gneighbors,circle=F)[[1]] == 0, 1, 0)
# }
#can you modify breadth first search?
# Choose a starting vertex uniformly from the district vertices
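# (NB: this block relies on `gsub` (the district sub-graph) and the `boundary`
#  flags from the commented-out loop above, so it will not run as the script
#  currently stands - it is kept as a development sketch.)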
v0 = sample(which(V(gsub)$boundary==1),1)
V(gsub)$name[v0]
# generate a vector of neighbors
v.neighbors = as.vector(neighbors(gsub,v0))
V(gsub)$name[v.neighbors]
# restrict neighbors to vertices on the boundary
# add these to a queue of vertices to search
queue = c(v0,v.neighbors[which(V(gsub)$boundary[v.neighbors]==1)])
V(gsub)$name[queue]
# add queued vertices to explored vector to avoid checking them twice
explored=queue
############################### UP TO HERE ##################################
# loop until all vertices in district are searched
while (length(queue) > 0) {
for (i in 1:length(queue)) {
# get neighbors
v.neighbors = as.vector(neighbors(G,queue[1]))
# restrict neighbors to those in district
q = v.neighbors[which(district[v.neighbors]==distID)]
# restrict neighbors to those not explored
q = setdiff(q,explored)
# update the queue
queue = c(queue[-1],q)
# update explored
explored=c(explored,q)
}
}
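# One way to package the sketch above as a function (illustrative only - it
# mirrors the loop logic rather than reproducing the original author's final
# code). It returns every vertex of district `distID` that is reachable from v0
# without leaving the district.
f.bfs.district <- function(G, district, distID, v0) {
  explored <- v0
  queue <- v0
  while (length(queue) > 0) {
    v.neighbors <- as.vector(neighbors(G, queue[1]))
    q <- v.neighbors[which(district[v.neighbors] == distID)]
    q <- setdiff(q, explored)
    queue <- c(queue[-1], q)
    explored <- c(explored, q)
  }
  explored
}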
|
/test_code/f_compact_dev.R
|
no_license
|
Ellfen/Salamander
|
R
| false | false | 7,314 |
r
|
# load data
source('Get_and_Clean_Data.R')
# open device
png(filename='plot1.png',width=480,height=480,units='px')
# plot data
hist(household.power.consumption$GlobalActivePower,main='Global Active Power',xlab='Global Active Power (kilowatts)',col='red')
# Turn off device
dev.off()
|
/plot1.R
|
no_license
|
Jimbojay/ExData_Plotting1
|
R
| false | false | 289 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_commas.R
\name{format_commas}
\alias{format_commas}
\title{format_commas}
\usage{
format_commas(numbers)
}
\arguments{
\item{numbers}{a vector of numbers}
}
\description{
format numbers as string with commas
}
\examples{
x <- c(100,1000,10000,10000)
format_commas(x)
}
\keyword{format}
|
/man/format_commas.Rd
|
no_license
|
elliotpalmer/epalmer
|
R
| false | true | 371 |
rd
|
library(dplyr)
library(leaflet)
leaflet() %>% addTiles() %>%
addMarkers( layerId = "1", lng = -118.456554, lat = 34.078039, label = "orange") %>%
addMarkers( layerId = "2", lng = -118.556554, lat = 34.078039, label = "red" ) %>%
addMarkers( layerId = "3", lng = -118.556554, lat = 34.178039, label = "blue") %>%
addMarkers( layerId = "4", lng = -118.456554, lat = 34.178039, label = "green") %>%
removeMarker( layerId = "2" )
|
/r/shiny/ex/study_leaflet/ex01/06.R
|
permissive
|
mertnuhoglu/study
|
R
| false | false | 436 |
r
|