| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0–6.46M | large_string, lengths 3–331 | large_string, 2 classes | large_string, lengths 5–125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4–6.46M | large_string, 75 classes | string, lengths 0–6.46M |
library(caret);
data03 <- read.csv("./results03", encoding="UTF-8", row.names = NULL, sep=",");
trainIndex <- createDataPartition(data03$X9.enojni.zarek, p=0.75, list=FALSE);
trainData <- data03[trainIndex,];
testData <- data03[-trainIndex,];
control <- trainControl(method = "boot", number = 30);
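# assess each candidate model on 30 bootstrap resamples of the training data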
temps <- c("Temp","Temp.1","Temp.2","Temp.3","Temp.4","Temp.5","Temp.6",
"Temp.7","Temp.8","Temp.9","Temp.10","Temp.11")
formula1 <- as.formula(paste(c("trainData$X180.spojna.linija ~", "Prirobn", temps), collapse = "+"))
modelWeld <- train( formula1 ,data = trainData, trControl = control, method = 'glm')
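# the first fit can return NA (aliased) coefficients; keep only the estimable terms and refit on those below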
coeffs <- modelWeld$finalModel$coefficients[!is.na(modelWeld$finalModel$coefficients)]
#coeffs1 <- coeffs[abs(coeffs) > 50]
coeffs2 <- names(coeffs);
coeffs2 <- coeffs2[coeffs2 != "(Intercept)"];
formula2 <- as.formula(paste(c("trainData$X180.spojna.linija ~", coeffs2),collapse = "+"))
modelWeld1 <- train(formula2, data=trainData, method ='glm', trControl = control)
pred1 <- predict(modelWeld1, testData);
#plot(1:length(pred), data$data.X428.crne.pike, col="blue");
#points(1:length(pred),pred, col="red");
plot(1:length(pred1),pred1, col="red",
ylim=c(min(pred1,testData$X180.spojna.linija),max(pred1,testData$X180.spojna.linija)));
lines(1:length(pred1),pred1, col="red")
points(1:length(pred1), testData$X180.spojna.linija, col="blue");
lines(1:length(pred1), testData$X180.spojna.linija, col="blue");
|
/R/model.R
|
no_license
|
lstopar/HellaParserModeler
|
R
| false | false | 1,451 |
r
|
# The primes 3, 7, 109, and 673, are quite remarkable.
# By taking any two primes and concatenating them in any order the result will always be prime.
# For example, taking 7 and 109, both 7109 and 1097 are prime.
# The sum of these four primes, 792, represents the lowest sum for a set of four primes with this property.
#
# Find the lowest sum for a set of five primes for which any two primes concatenate to produce another prime.
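# Approach: list the primes below 10,000, then depth-first search for a set of n of them,
# extending the current set only with a larger prime whose concatenation with every member,
# in both orders, is itself prime (AllPrimes checks this).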
PrimeSets <- function(n) {
primes <- PrimesTo(10000)
set <- c()
while (length(set)!=n) {
set <- c(primes[1])
primes <- primes[-1]
set <- MakeSet(n, set, primes)
}
sum(set)
}
MakeSet <- function(n, set, primes) {
print(set)
if (length(set)==n) {
return(set)
} else {
for (p in primes) {
if (p > tail(set, 1) && AllPrimes(set, p)) {
new.set <- MakeSet(n, c(set,p), primes)
if (length(new.set)>0) return(new.set)
}
}
return(c())
}
}
AllPrimes <- function(set, k) {
for (p in set) {
if (!IsPrime(as.numeric(paste0(p,k,collapse="")))) return(FALSE)
if (!IsPrime(as.numeric(paste0(k,p,collapse="")))) return(FALSE)
}
TRUE
}
PrimesTo <- function(n) {
which(sapply(2:n, IsPrime))+1
}
IsPrime <- function(n) {
if (n < 2) return(FALSE)
m <- 2
upper <- n
# trial division with a shrinking bound: after testing divisor m the bound drops to
# ceiling(n/m), so the loop stops once m passes roughly sqrt(n)
while (m < upper) {
if (n%%m==0) return(FALSE)
upper <- ceiling(n/m)
m <- m+1
}
TRUE
}
# test
#PrimeSets(4)==792
#AllPrimes(c(3, 7, 109),673)==TRUE
#AllPrimes(c(3, 7, 109),929)==FALSE
#a <- PrimeSets(5)
# Unit: seconds
# expr min lq mean median uq max neval
# PrimeSets(5) 232.7264 232.7264 232.7264 232.7264 232.7264 232.7264 1
|
/problem60.R
|
no_license
|
shannonrush/euler
|
R
| false | false | 1,749 |
r
|
library(dplyr)
require(ggplot2)
require(reshape)
library(gridExtra)
rm(list = ls())
cat("\014")
site_data = read.csv("../Data/CompleteSiteLevelVars.csv")
AllPlotsvars = read.csv("../Data/AllPlotsVarsRichness.csv")
W4 = AllPlotsvars%>%filter(ShortNVC == "W4")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W8 = AllPlotsvars%>%filter(ShortNVC == "W8")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W10 = AllPlotsvars%>%filter(ShortNVC == "W10")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W16 = AllPlotsvars%>%filter(ShortNVC == "W16")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W21 = AllPlotsvars%>%filter(ShortNVC == "W21")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
M15 = AllPlotsvars%>%filter(ShortNVC == "M15")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
M16 = AllPlotsvars%>%filter(ShortNVC == "M16")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
OV27 = AllPlotsvars%>%filter(ShortNVC == "OV27")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W6 = AllPlotsvars%>%filter(ShortNVC == "W6")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W12 = AllPlotsvars%>%filter(ShortNVC == "W12")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W13 = AllPlotsvars%>%filter(ShortNVC == "W13")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W14 = AllPlotsvars%>%filter(ShortNVC == "W14")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W15 = AllPlotsvars%>%filter(ShortNVC == "W15")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W16 = AllPlotsvars%>%filter(ShortNVC == "W16")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
MG7 = AllPlotsvars%>%filter(ShortNVC == "MG7A")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
df = as.data.frame(rbind(W4,W8,W10,W16,W21,M15,M16))
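# rescale SOM and richness to roughly 0-10 so they share one axis with pH (already on that scale) when melted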
df$SOM_div_10 = df$SOMYr2/10
df$pH_div_10 = df$pHYr2
df$PlotRichness = df$plot_richness/10
df_mod = df[-c(2,3,4)]
melted = melt(df_mod)
ggplot(data=melted, aes(y = value, x = ShortNVC,colour = variable))+
geom_boxplot(varwidth = FALSE, outlier.colour = NULL)+
scale_y_continuous(breaks = seq(0,10, by = 1))+
geom_vline(xintercept = c(1.5,2.5,3.5,4.5,5.5,6.5))
ggplot(AllPlotsvars, aes(x = SOMYr2, y = plot_richness))+geom_point()
ggplot(AllPlotsvars, aes(x=mean_dbh, y=plot_richness), varwidth = TRUE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(mean_dbh, 5)), na.rm = TRUE)
ggplot(AllPlotsvars, aes(x=LiveBasalAreaYr2, y=plot_richness), varwidth = TRUE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(LiveBasalAreaYr2, 1)), na.rm = TRUE)
ggplot(AllPlotsvars, aes(x=SOMYr2, y=plot_richness), varwidth = TRUE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 5)), na.rm = TRUE)
ggplot(AllPlotsvars, aes(x=pHYr2, y=plot_richness), varwidth = TRUE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(pHYr2, 1)), na.rm = TRUE)
data = AllPlotsvars%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
codefreq = as.data.frame(table(AllPlotsvars$ShortNVC))
codefreq = codefreq[order(codefreq$Freq),]
bigNVC = codefreq%>%filter(Freq>10)
bigNVC = bigNVC[-3,]
codes = bigNVC$ShortNVC
data$ShortNVC = as.character(data$ShortNVC)
bigNVC$ShortNVC = as.character(bigNVC$ShortNVC)
colnames(bigNVC) = c("ShortNVC","Freq")
data = data[order(data$ShortNVC),]
AllNVC = unique(data$ShortNVC)
databigNVC = data%>%filter(ShortNVC %in% codes)
databigNVC$SOMYr2 = databigNVC$SOMYr2/10
databigNVC$plot_richness = databigNVC$plot_richness/10
melted = melt(databigNVC)
ggplot(data=melted, aes(y = value, x = ShortNVC,colour = variable))+
geom_boxplot(varwidth = FALSE, outlier.colour = NULL, na.rm = TRUE)
gw4= ggplot(W4, aes(x=SOMYr2, y=plot_richness)) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), varwidth = FALSE, na.rm = TRUE)+
annotate("label", x = 80, y = 80, label = "W4")+
geom_point(alpha = 0.2, colour = "red")+
scale_y_continuous(limits = c(0,100) )
gw6 = ggplot(W6, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W6")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
gw8 = ggplot(W8, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W8")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
gw10 = ggplot(W10, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W10")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
w10lm = lm(plot_richness~SOMYr2, W10)
gw12 = ggplot(W12, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W12")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
gw13 = ggplot(W13, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W13")+
geom_point(alpha = 0.2, colour = "red")+
scale_y_continuous(limits = c(0,100) )
gw14 = ggplot(W14, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W14")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
gw15 = ggplot(W15, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W15")+
geom_point(alpha = 0.2, colour = "red")+
scale_y_continuous(limits = c(0,100) )
gw16 = ggplot(W16, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W16")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
gw21 = ggplot(W21, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W21")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
gov27 = ggplot(OV27, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "OV27")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
gmg7 = ggplot(MG7, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "MG7")+
geom_point(alpha = 0.2, colour = "red")+
geom_smooth(method = "lm")+
scale_y_continuous(limits = c(0,100) )
grid.arrange(gw6,gw8,gw10,gw16,gw21,gov27,ncol = 2)
w10all = AllPlotsvars%>%filter(ShortNVC == "W10")
W8all = AllPlotsvars%>%filter(ShortNVC == "W8")
w16all = AllPlotsvars%>%filter(ShortNVC=="W16")
groundcover = read.csv("../Data/GroundCover.csv")
w10high1= groundcover%>%filter(SITE==42)%>%filter(PLOT==16)
w10high2 = groundcover%>%filter(SITE==77)%>%filter(PLOT==13)
w10low1 = groundcover%>%filter(SITE==98)%>%filter(PLOT==5)
w10low2 = groundcover%>%filter(SITE==91)%>%filter(PLOT==12)
w8high1 = groundcover%>%filter(SITE==60)%>%filter(PLOT==4)
w8high2= groundcover%>%filter(SITE==55)%>%filter(PLOT==10)
w8low1= groundcover%>%filter(SITE==4)%>%filter(PLOT==8)
w8low2 =groundcover%>%filter(SITE==71)%>%filter(PLOT==9)
Ellenbergs = read.csv("../Data/Ellenbergs.csv")
colnames(Ellenbergs) = c("Amalgams","Taxon.name","L" , "F" , "R" , "N" , "S" )
Ellenbergs$Amalgams = gsub(" ", "", Ellenbergs$Amalgams, fixed = TRUE)
Ellenbergs$Amalgams = as.numeric(Ellenbergs$Amalgams)
vegcodes = read.csv("../Data/vegetation_codes.csv")
colnames(vegcodes) = c("Species","Amalgams")
w10lowveg1 = inner_join(vegcodes,w10low1)
w10lowveg1ellen = inner_join(w10lowveg1,Ellenbergs)
w10lowveg2 = inner_join(vegcodes,w10low2)
w10lowveg2ellen = inner_join(w10lowveg2,Ellenbergs)
w10highveg1 = inner_join(vegcodes,w10high1)
w10highveg1ellen = inner_join(w10highveg1,Ellenbergs)
w10highveg2 = inner_join(vegcodes,w10high2)
w10high2ellen = inner_join(w10highveg2,Ellenbergs)
diffW10 = setdiff(w10highveg1ellen,w10lowveg1ellen)
diffW10low = setdiff(w10lowveg1ellen$Amalgams,w10highveg1ellen$Amalgams)
w10lownothigh = vegcodes%>%filter(Amalgams %in% diffW10low)
diffW10high = setdiff(w10highveg1ellen$Amalgams,w10lowveg1ellen$Amalgams)
w10highnotlow = vegcodes%>%filter(Amalgams %in% diffW10high)
#################
w8lowveg1 = inner_join(vegcodes, w8low1)
w8lowveg1ellen = inner_join(w8lowveg1,Ellenbergs)
w8lowveg2 = inner_join(vegcodes,w8low2)
w8lowveg2ellen = inner_join(w8lowveg2, Ellenbergs)
w8highveg1 = inner_join(vegcodes, w8high1)
w8highveg1ellen = inner_join(w8highveg1, Ellenbergs)
w8highveg2 = inner_join(vegcodes, w8high2)
w8highveg2ellen = inner_join(w8highveg2, Ellenbergs)
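# get_ave_ellens: for one site/plot, join the ground-cover records to species codes and Ellenberg values,
# then return the mean Ellenberg N and R scores as c(N, R)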
get_ave_ellens = function(site,plot){
ellens = vector()
plot = groundcover%>%filter(SITE==site)%>%filter(PLOT==plot)
plotveg = inner_join(vegcodes,plot)
plotvegellen = inner_join(plotveg,Ellenbergs)
ave_N = mean(plotvegellen$N)
ave_R = mean(plotvegellen$R)
ellens[1] = ave_N
ellens[2] = ave_R
return(ellens)
}
w10low1 = get_ave_ellens(98,5)
w10low2 = get_ave_ellens(91,12)
w10high1 = get_ave_ellens(42,16)
w10high2 = get_ave_ellens(77,13)
w8low1 = get_ave_ellens(4,8)
w8low2 = get_ave_ellens(71,9)
w8high1 = get_ave_ellens(60,4)
w8high2 = get_ave_ellens(55,10)
|
/CMEEMainProject/Code/QuickSOMBoxplots.R
|
no_license
|
PetraGuy/CMEECourseWork
|
R
| false | false | 10,036 |
r
|
#' @details This function requires having a character vector with one or more valid Report Suites specified.
#'
#' @description Get marketing channel rules for the specified report suites.
#'
#' @title Get Marketing Channel Rules for a Report Suite(s)
#'
#' @param reportsuite.ids Report suite id (or list of report suite ids)
#'
#' @importFrom jsonlite toJSON unbox
#' @importFrom plyr rbind.fill
#'
#' @return Data frame
#'
#' @export
#'
#' @examples
#' \dontrun{
#' expire <- GetMarketingChannelRules("your_report_suite")
#'
#' expire2 <- GetMarketingChannelRules(report_suites$rsid)
#' }
GetMarketingChannelRules <- function(reportsuite.ids) {
request.body <- c()
request.body$rsid_list <- reportsuite.ids
#Hack in locale, every method calls ApiRequest so this hopefully works
#Set encoding to utf-8 as well; if someone wanted to do base64 they are out of luck
request.body$locale <- unbox(AdobeAnalytics$SC.Credentials$locale)
request.body$elementDataEncoding <- unbox("utf8")
response <- ApiRequest(body=toJSON(request.body),func.name="ReportSuite.GetMarketingChannelRules")
#Don't even know if this is possible, holdover from GetSegments code
if(length(response$marketing_channel_rules[[1]]) == 0) {
return(print("No Rules Defined For This Report Suite"))
}
##Parsing this is a mess
#Pull out first level
mkt_channel_rules <- response$marketing_channel_rules[[1]]
response$marketing_channel_rules <- NULL
#Group together first level
parsed <- cbind(response, mkt_channel_rules, row.names = NULL)
#Pull out second level
channel_value <- parsed$channel_value
parsed$channel_value <- NULL
#Group together second level
parsed <- cbind(parsed, channel_value, row.names = NULL)
#Pull out third level
rules <- parsed$rules
parsed$rules <- NULL
parsed$i <- row.names(parsed)
accumulator <- data.frame()
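#flatten each rule list into one row per rule_id, keeping the parent row index i so the
#rule-level rows can be merged back onto the channel-level rows below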
for(i in 1:length(rules)){
temp <- as.data.frame(rules[[i]]$rule_id)
names(temp) <- c("rule_id")
temp$i <- i
temp$hit_attribute_type <- rules[[i]]$hit_attribute$type
temp$hit_attribute_query_string_parameter <- rules[[i]]$hit_attribute$query_string_parameter
temp$operator <- rules[[i]]$operator
temp$matches <- paste(rules[[i]]$matches[[1]], collapse = ',')
accumulator <- rbind.fill(accumulator, temp, row.names = NULL)
rm(temp)
}
parsed <- merge(parsed, accumulator, by = "i")
parsed$i <- NULL
return(parsed)
}
|
/R/GetMarketingChannelRules.R
|
no_license
|
dieguico/RSiteCatalyst
|
R
| false | false | 2,431 |
r
|
#load required packages
packages <- c("downloader",
"data.table",
"dummies",
"caret",
"FNN",
"pROC",
"rpart",
"rpart.plot",
"Matrix",
"xgboost",
"reshape2",
"ggplot2")
new_packages <- packages[!(packages %in% installed.packages()[,"Package"])]
if(length(new_packages)) install.packages(new_packages)
library(downloader)
library(data.table)
library(dummies)
library(caret)
library(FNN)
library(pROC)
library(rpart)
library(rpart.plot)
library(Matrix)
library(xgboost)
library(reshape2)
library(ggplot2)
library(randomForest) #needed by the custom random forest model defined below
#set working directory
wd <- "/Users/user1/Documents" #specify directory from which to download and load the data
setwd(wd)
set.seed(32541)
#Download the loan.csv file out of the .zip archive found at the following path to access the "loan.csv" dataset:
#https://www.kaggle.com/wendykan/lending-club-loan-data/downloads/lending-club-loan-data.zip
loans <- fread("loan.csv")
#select the numeric and categorical variables to include in the machine learning models
loans1 <- loans[loan_status %in% c("Charged Off", "Fully Paid"), c("dti",
"annual_inc",
"delinq_2yrs",
"inq_last_6mths",
"open_acc",
"pub_rec",
"revol_bal",
"revol_util",
"total_acc",
"out_prncp",
"out_prncp_inv",
"total_pymnt",
"total_pymnt_inv",
"total_rec_prncp",
"total_rec_int",
"total_rec_late_fee",
"recoveries",
"collection_recovery_fee",
"last_pymnt_amnt",
"collections_12_mths_ex_med",
"loan_status",
"purpose",
"home_ownership",
"grade",
"emp_length",
"term",
"addr_state",
"verification_status",
"application_type")]
#one hot encode the categorical variables
loans2 <- data.table(dummy.data.frame(loans1,
names = c("purpose",
"home_ownership",
"grade",
"emp_length",
"term",
"addr_state",
"verification_status",
"application_type"),
sep = "_"))
#split the data into train and test sets
train_index <- createDataPartition(y = loans2$loan_status, p = 0.8, list = FALSE)
train.a <- loans2[train_index]
test.a <- loans2[!train_index]
#knnImputation of missing values
pre_obj <- preProcess(train.a[, -c("loan_status")],
method = "knnImpute")
system.time(
train_imp <- predict(pre_obj,
train.a[, -c("loan_status")])
)
system.time(
test_imp <- predict(pre_obj,
test.a[, -c("loan_status")])
)
train <- data.frame(train_imp)
test <- data.frame(test_imp)
#remove all zero-variance and near-zero-variance variables from the data
nzvar <- subset(nearZeroVar(train, saveMetrics = TRUE), nzv ==TRUE & percentUnique < .5)
`%ni%` <- Negate(`%in%`)
train <- train[, names(train) %ni% rownames(nzvar)]
test <- test[, names(test) %ni% rownames(nzvar)]
##################################################################################################
#K-Nearest Neighbors Model
##################################################################################################
#train knn model
k <- 9
system.time(
knn_model <- knn(train = train,
test = test,
cl = train.a$loan_status,
k = k)
)
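# note: FNN::knn() classifies the test rows at fit time, so knn_model already holds one
# predicted class label per row of the test set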
closest <- train[c(attr(knn_model, "nn.index")[1,]), ]
closest$type <- "train"
test1 <- test
test1$type <- "test"
knn_pred <- knn_model[1:nrow(test.a)] #predicted class labels for the test rows
knn_cm <- confusionMatrix(knn_pred, test.a$loan_status)
knn_sens <- sensitivity(knn_pred, as.factor(test.a$loan_status))
knn_spec <- specificity(knn_pred, as.factor(test.a$loan_status))
knn_labels <- test.a$loan_status
knn_predictions <- ifelse(knn_pred[1:nrow(test)] == "Charged Off", 1, 0)
knn_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
knn_roc <- roc(knn_labels, knn_predictions)
knn_auc <- auc(knn_roc)
##################################################################################################
#Logistic Regression Model using Principal Component Analysis
##################################################################################################
#transform predictor variables to principal components
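# by default preProcess(method = "pca") keeps enough components to explain 95% of the variance (thresh = 0.95)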
pre_pca <- preProcess(train, method = "pca")
train_pca <- predict(pre_pca, train)
test_pca <- predict(pre_pca, test)
train_pca2 <- cbind(train_pca, ifelse(train.a$loan_status == "Charged Off", 1, 0))
colnames(train_pca2) <- c(colnames(train_pca), "loan_status")
#train PCA logistic regression model
logit_model <- glm(formula = loan_status ~ ., family = binomial(link = "logit"), data = train_pca2)
logit_pred <- predict(logit_model, newdata = test_pca, type = "response") #apply trained PCA logistic regression model to test data
prediction_logit <- as.factor(ifelse(logit_pred >= .5, "Charged Off", "Fully Paid"))
logit_cm <- confusionMatrix(prediction_logit, test.a$loan_status)
logit_sens <- sensitivity(prediction_logit, as.factor(test.a$loan_status))
logit_spec <- specificity(prediction_logit, as.factor(test.a$loan_status))
logit_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
logit_predictions <- ifelse(prediction_logit == "Charged Off", 1, 0)
logit_roc <- roc(logit_labels, logit_predictions)
logit_auc <- auc(logit_roc)
##################################################################################################
#Decision Tree Model
##################################################################################################
train2 <- cbind(train, train.a$loan_status)
colnames(train2) <- c(colnames(train), "loan_status")
#train decision tree model
control_param <- rpart.control(cp = .005)
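# cp = .005: a split is kept only if it improves the relative model fit by at least 0.5%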
dtree_model <- rpart(loan_status ~ ., data = train2, control = control_param)
rpart.plot(dtree_model) #visualizes the decision tree
dtree_pred <- data.frame(predict(dtree_model, newdata = test)) #apply trained decision tree model to test data
dtree_pred$fin <- as.vector(pmax(dtree_pred[, 1], dtree_pred[ ,2]))
prediction.tree1 <- as.factor(ifelse(dtree_pred$fin == dtree_pred[, 1], "Charged Off", "Fully Paid"))
dtree_cm <- confusionMatrix(prediction.tree1, test.a$loan_status)
dtree_sens <- sensitivity(prediction.tree1, as.factor(test.a$loan_status))
dtree_spec <- specificity(prediction.tree1, as.factor(test.a$loan_status))
dtree_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
dtree_predictions <- ifelse(prediction.tree1 == "Charged Off", 1, 0)
dtree_roc <- roc(dtree_labels, dtree_predictions)
dtree_auc <- auc(dtree_roc)
##################################################################################################
#Random Forest with a custom model to test different combinations of mtry and ntree values
##################################################################################################
#create custom model for multiple mtry and ntree values
customRF <- list(type = "Classification", library = "randomForest", loop = NULL)
customRF$parameters <- data.frame(parameter = c("mtry", "ntree"), class = rep("numeric", 2), label = c("mtry", "ntree"))
customRF$grid <- function(x, y, len = NULL, search = "grid") {}
customRF$fit <- function(x, y, wts, param, lev, last, weights, classProbs, ...) {
randomForest(x, y, mtry = param$mtry, ntree=param$ntree, ...)
}
customRF$predict <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
predict(modelFit, newdata)
customRF$prob <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
predict(modelFit, newdata, type = "prob")
customRF$sort <- function(x) x[order(x[,1]),]
customRF$levels <- function(x) x$classes
#train the random forest model
fitControl <- trainControl(method="cv",
number=3,
allowParallel = TRUE)
tunegrid <- expand.grid(.mtry=c(9:18), .ntree=c(250, 325, 400))
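# 10 mtry values x 3 ntree values = 30 candidate models, each assessed with 3-fold CV and compared on Kappa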
system.time(
rf_model <- train(as.factor(loan_status) ~ .,
method = customRF,
data = train2,
trControl = fitControl,
tuneGrid = tunegrid,
metric = 'Kappa')
)
print(rf_model) #print the trained model summary
train_plot <- plot(rf_model) #plot the Kappa scores for the different ntree and mtry combinations
print(train_plot)
rf_pred <- predict(rf_model, test) #apply trained random forest model to test data
rf_cm <- confusionMatrix(rf_pred, test.a$loan_status)
rf_sens <- sensitivity(rf_pred, as.factor(test.a$loan_status))
rf_spec <- specificity(rf_pred, as.factor(test.a$loan_status))
rf_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
rf_predictions <- ifelse(rf_pred == "Charged Off", 1, 0)
rf_roc <- roc(rf_labels, rf_predictions)
rf_auc <- auc(rf_roc)
#plot the top 20 most importance variables in the RF model
var_imp_plot <- plot(varImp(rf_model, scale = FALSE), top = 20)
print(var_imp_plot)
##################################################################################################
#Gradient Boosting Model
##################################################################################################
#create sparse matrix for numeric predictors
#################################################
M.a <- sparse.model.matrix(~ dti +
annual_inc +
delinq_2yrs +
inq_last_6mths +
open_acc +
pub_rec +
revol_bal +
revol_util +
total_acc +
out_prncp +
out_prncp_inv +
total_pymnt +
total_pymnt_inv +
total_rec_prncp +
total_rec_int +
total_rec_late_fee +
recoveries +
collection_recovery_fee +
last_pymnt_amnt +
collections_12_mths_ex_med -1, data = rbind(train_imp, test_imp))
#create sparse matrix for categorical predictors
#################################################
cats <- loans[loan_status %in% c("Charged Off", "Fully Paid"), c("purpose",
"home_ownership",
"grade",
"emp_length",
"term",
"addr_state",
"verification_status",
"application_type")]
#reorder to stack train data on top of test data
cats <- rbind(cats[train_index],
cats[!train_index])
cats$account <- c(1:nrow(cats))
#identify unique categorical feature values for each account (record)
d1 <- cats[,list(account, purpose)]
d2 <- cats[,list(account, home_ownership)]
d3 <- cats[, list(account, grade)]
d4 <- cats[, list(account, emp_length)]
d5 <- cats[, list(account, term)]
d6 <- cats[, list(account, addr_state)]
d7 <- cats[, list(account, verification_status)]
d8 <- cats[, list(account, application_type)]
d1[ ,purpose:= paste0("purpose: ", purpose)]
d2[ ,home_ownership:= paste0("home_ownership: ", home_ownership)]
d3[ , grade:= paste0("grade: ", grade)]
d4[ , emp_length:= paste0("emp_length: ", emp_length)]
d5[ , term:= paste0("term: ", term)]
d6[ , addr_state:= paste0("addr_state: ", addr_state)]
d7[ , verification_status:= paste0("verification_status: ", verification_status)]
d8[ , application_type:= paste0("application_type: ", application_type)]
names(d1) <- names(d2) <- names(d3) <- names(d4) <- names(d5) <- names(d6) <- names(d7) <- names(d8) <- c("account","feature_name")
d <- rbind(d1, d2, d3, d4, d5, d6, d7, d8)
rm(d1, d2, d3, d4, d5, d6, d7, d8); gc()
d <- unique(d)
setkey(d, account)
#creates a list of unique accounts (records)
ii <- as.character(unique(d$account))
#creates a list of all unique feature_names
jj <- unique(d$feature_name)
#creates an integer vector the length of d that gives each account a unique identifier from 1 to the number of unique accounts
id_i <- match(d$account,ii)
#same thing for feature_name
id_j <- match(d$feature_name,jj)
id_ij <- cbind(id_i,id_j)
#creates a sparse matrix with feature_names as column names, accounts as row names, and every entry initialised to 0
M.b <- Matrix(0,nrow=length(ii),ncol=length(jj),
dimnames=list(ii,jj),sparse=T)
#for every account/feature_name pair indexed in id_ij, mark a 1 in the M.b matrix
M.b[id_ij] <- 1
rm(ii,jj,id_i,id_j,id_ij);gc()
#combine the numeric and categorical matrices
M <- cbind(M.a, M.b)
#create xgb matrices for the xgboost model
#################################################
train_data <- M[1:nrow(train.a), ]
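# hold out a random 15% of the training rows as a watch set for monitoring the boosting rounds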
trwr <- sample(1:nrow(train_data), round(.85*nrow(train_data), 0), replace = FALSE)
trw_data <- train_data[trwr, ]
tew_data <- train_data[-trwr, ]
test_data <- M[(nrow(train.a)+1):nrow(M), ]
trw_label <- ifelse(train.a[trwr, ]$loan_status == "Charged Off", 1, 0)
tew_label <- ifelse(train.a[-trwr, ]$loan_status == "Charged Off", 1, 0)
test_label <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
dtrain_tr <- xgb.DMatrix(data = trw_data, label = trw_label)
dtrain_te <- xgb.DMatrix(data = tew_data, label = tew_label)
dtest <- xgb.DMatrix(data = test_data, label = test_label)
#train xgboost tree model
##############################
watchlist <- list(train = dtrain_tr, test = dtrain_te)
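# the watchlist makes xgb.train report train and watch-set AUC after each of the 200 boosting rounds;
# eta = .1 shrinks each round's contribution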
bst <- xgb.train(data = dtrain_tr,
watchlist = watchlist,
eta = .1,
nround = 200,
objective = "binary:logistic",
eval_metric = "auc")
#perform prediction
gb_pred <- as.factor(ifelse(predict(bst, test_data) >= .5, "Charged Off", "Fully Paid")) #apply trained xgboost model to test data
gb_cm <- confusionMatrix(gb_pred, test.a$loan_status)
gb_sens <- sensitivity(gb_pred, as.factor(test.a$loan_status))
gb_spec <- specificity(gb_pred, as.factor(test.a$loan_status))
gb_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
gb_predictions <- ifelse(gb_pred == "Charged Off", 1, 0)
gb_roc <- roc(gb_labels, gb_predictions)
gb_auc <- auc(gb_roc)
##################################################################################################
#Compare the ability of the five different models to predict the outcomes of the test set
##################################################################################################
results <- data.frame(cbind(rbind(knn_auc,
logit_auc,
dtree_auc,
rf_auc,
gb_auc),
rbind(knn_sens,
logit_sens,
dtree_sens,
rf_sens,
gb_sens),
rbind(knn_spec,
logit_spec,
dtree_spec,
rf_spec,
gb_spec)))
rownames(results) <- c("K-Nearest Neighbors (k=9)",
"Logistic Regression w/ PCA",
"Decision Tree",
"Random Forest",
"Gradient Boosting")
colnames(results) <- c("AUC",
"Sensitivity",
"Specificity")
results <- results[order(-results$AUC), ]
results_viz <- melt(results)
results_viz$model <- rep(row.names(results), 3)
viz_comparison <- ggplot(results_viz, aes(x = model, y = value, fill = model)) +
geom_bar(stat = "identity") +
facet_grid(variable ~ .) +
coord_flip() +
scale_x_discrete(limits = rev(row.names(results))) +
theme(axis.title = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank()) +
labs(title = "Predictive Model Comparisons")
print(viz_comparison)
print(round(results, 3))
|
/lending_club_predictive_modeling.R
|
no_license
|
bshelton141/LACare-DataSciencePlatform-RFP-Demo2
|
R
| false | false | 18,563 |
r
|
#load required packages
packages <- c("downloader",
"data.table",
"dummies",
"caret",
"FNN",
"pROC",
"rpart",
"rpart.plot",
"Matrix",
"xgboost",
"reshape2",
"ggplot2")
new_packages <- packages[!(packages %in% installed.packages()[,"Package"])]
if(length(new_packages)) install.packages(new_packages)
library(downloader)
library(data.table)
library(dummies)
library(caret)
library(FNN)
library(pROC)
library(rpart)
library(rpart.plot)
library(Matrix)
library(xgboost)
library(reshape2)
library(ggplot2)
#set working directory
wd <- "/Users/user1/Documents" #specify directory from which to download and load the data
setwd(wd)
set.seed(32541)
#Download the loan.csv file out of the .zip archive found at the following path to access the "loan.csv" dataset:
#https://www.kaggle.com/wendykan/lending-club-loan-data/downloads/lending-club-loan-data.zip
loans <- fread("loan.csv")
#select the numeric and categorical variables to include in the machine learning models
loans1 <- loans[loan_status %in% c("Charged Off", "Fully Paid"), c("dti",
"annual_inc",
"delinq_2yrs",
"inq_last_6mths",
"open_acc",
"pub_rec",
"revol_bal",
"revol_util",
"total_acc",
"out_prncp",
"out_prncp_inv",
"total_pymnt",
"total_pymnt_inv",
"total_rec_prncp",
"total_rec_int",
"total_rec_late_fee",
"recoveries",
"collection_recovery_fee",
"last_pymnt_amnt",
"collections_12_mths_ex_med",
"loan_status",
"purpose",
"home_ownership",
"grade",
"emp_length",
"term",
"addr_state",
"verification_status",
"application_type")]
#one hot encode the categorical variables
loans2 <- data.table(dummy.data.frame(loans1,
names = c("purpose",
"home_ownership",
"grade",
"emp_length",
"term",
"addr_state",
"verification_status",
"application_type"),
sep = "_"))
#split the data into train and test sets
train_index <- createDataPartition(y = loans2$loan_status, p = 0.8, list = FALSE)
train.a <- loans2[train_index]
test.a <- loans2[!train_index]
#knnImputation of missing values
pre_obj <- preProcess(train.a[, -c("loan_status")],
method = "knnImpute")
system.time(
train_imp <- predict(pre_obj,
train.a[, -c("loan_status")])
)
system.time(
test_imp <- predict(pre_obj,
test.a[, -c("loan_status")])
)
train <- data.frame(train_imp)
test <- data.frame(test_imp)
#remove all zero-variance and near-zero-variane variables from data
nzvar <- subset(nearZeroVar(train, saveMetrics = TRUE), nzv ==TRUE & percentUnique < .5)
`%ni%` <- Negate(`%in%`)
train <- train[, names(train) %ni% rownames(nzvar)]
test <- test[, names(test) %ni% rownames(nzvar)]
##################################################################################################
#K-Nearest Neighbors Model
##################################################################################################
#train knn model
k <- 9
system.time(
knn_model <- knn(train = train,
test = test,
cl = train.a$loan_status,
k = k)
)
closest <- train[c(attr(knn_model, "nn.index")[1,]), ]
closest$type <- "train"
test1 <- test
test1$type <- "test"
knn_pred <- knn_model[1:nrow(test.a)] #apply trained knn model to test data
knn_cm <- confusionMatrix(knn_pred, test.a$loan_status)
knn_sens <- sensitivity(knn_pred, as.factor(test.a$loan_status))
knn_spec <- specificity(knn_pred, as.factor(test.a$loan_status))
knn_labels <- test.a$loan_status
knn_predictions <- ifelse(knn_pred[1:nrow(test)] == "Charged Off", 1, 0)
knn_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
knn_roc <- roc(knn_labels, knn_predictions)
knn_auc <- auc(knn_roc)
##################################################################################################
#Logistic Regression Model using Principle Component Analysis
##################################################################################################
#transform predictor variables to principle components
pre_pca <- preProcess(train, method = "pca")
train_pca <- predict(pre_pca, train)
test_pca <- predict(pre_pca, test)
train_pca2 <- cbind(train_pca, ifelse(train.a$loan_status == "Charged Off", 1, 0))
colnames(train_pca2) <- c(colnames(train_pca), "loan_status")
#train PCA logistic regression model
logit_model <- glm(formula = loan_status ~ ., family = binomial(link = "logit"), data = train_pca2)
logit_pred <- predict(logit_model, newdata = test_pca, type = "response") #apply trained PCA logistic regression model to test data
prediction_logit <- as.factor(ifelse(logit_pred >= .5, "Charged Off", "Fully Paid"))
logit_cm <- confusionMatrix(prediction_logit, test.a$loan_status)
logit_sens <- sensitivity(prediction_logit, as.factor(test.a$loan_status))
logit_spec <- specificity(prediction_logit, as.factor(test.a$loan_status))
logit_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
logit_predictions <- ifelse(prediction_logit == "Charged Off", 1, 0)
logit_roc <- roc(logit_labels, logit_predictions)
logit_auc <- auc(logit_roc)
##################################################################################################
#Decision Tree Model
##################################################################################################
train2 <- cbind(train, train.a$loan_status)
colnames(train2) <- c(colnames(train), "loan_status")
#train decision tree model
control_param <- rpart.control(cp = .005)
dtree_model <- rpart(loan_status ~ ., data = train2, control = control_param)
rpart.plot(dtree_model) #visualizes the decision tree
dtree_pred <- data.frame(predict(dtree_model, newdata = test)) #apply trained decision tree model to test data
dtree_pred$fin <- as.vector(pmax(dtree_pred[, 1], dtree_pred[ ,2]))
prediction.tree1 <- as.factor(ifelse(dtree_pred$fin == dtree_pred[, 1], "Charged Off", "Fully Paid"))
dtree_cm <- confusionMatrix(prediction.tree1, test.a$loan_status)
dtree_sens <- sensitivity(prediction.tree1, as.factor(test.a$loan_status))
dtree_spec <- specificity(prediction.tree1, as.factor(test.a$loan_status))
dtree_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
dtree_predictions <- ifelse(prediction.tree1 == "Charged Off", 1, 0)
dtree_roc <- roc(dtree_labels, dtree_predictions)
dtree_auc <- auc(dtree_roc)
##################################################################################################
#Random Forest with a custom model to test different combinations of mtry and ntree values
##################################################################################################
#create custom model for multiple mtry and ntree values
customRF <- list(type = "Classification", library = "randomForest", loop = NULL)
customRF$parameters <- data.frame(parameter = c("mtry", "ntree"), class = rep("numeric", 2), label = c("mtry", "ntree"))
customRF$grid <- function(x, y, len = NULL, search = "grid") {}
customRF$fit <- function(x, y, wts, param, lev, last, weights, classProbs, ...) {
randomForest(x, y, mtry = param$mtry, ntree=param$ntree, ...)
}
customRF$predict <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
predict(modelFit, newdata)
customRF$prob <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
predict(modelFit, newdata, type = "prob")
customRF$sort <- function(x) x[order(x[,1]),]
customRF$levels <- function(x) x$classes
#train the random forest model
fitControl <- trainControl(method="cv",
number=3,
allowParallel = TRUE)
tunegrid <- expand.grid(.mtry=c(9:18), .ntree=c(250, 325, 400))
system.time(
rf_model <- train(as.factor(loan_status) ~ .,
method = customRF,
data = train2,
trControl = fitControl,
tuneGrid = tunegrid,
metric = 'Kappa')
)
print(rf_model) #print the trained model summary
train_plot <- plot(rf_model) #plot the Kappa scores for the different ntree and mtry combinations
print(train_plot)
rf_pred <- predict(rf_model, test) #apply trained random forest model to test data
rf_cm <- confusionMatrix(rf_pred, test.a$loan_status)
rf_sens <- sensitivity(rf_pred, as.factor(test.a$loan_status))
rf_spec <- specificity(rf_pred, as.factor(test.a$loan_status))
rf_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
rf_predictions <- ifelse(rf_pred == "Charged Off", 1, 0)
rf_roc <- roc(rf_labels, rf_predictions)
rf_auc <- auc(rf_roc)
#plot the top 20 most importance variables in the RF model
var_imp_plot <- plot(varImp(rf_model, scale = FALSE), top = 20)
print(var_imp_plot)
##################################################################################################
#Gradient Boosting Model
##################################################################################################
#create sparse matrix for numeric predictors
#################################################
M.a <- sparse.model.matrix(~ dti +
annual_inc +
delinq_2yrs +
inq_last_6mths +
open_acc +
pub_rec +
revol_bal +
revol_util +
total_acc +
out_prncp +
out_prncp_inv +
total_pymnt +
total_pymnt_inv +
total_rec_prncp +
total_rec_int +
total_rec_late_fee +
recoveries +
collection_recovery_fee +
last_pymnt_amnt +
collections_12_mths_ex_med -1, data = rbind(train_imp, test_imp))
#create sparse matrix for categorical predictors
#################################################
cats <- loans[loan_status %in% c("Charged Off", "Fully Paid"), c("purpose",
"home_ownership",
"grade",
"emp_length",
"term",
"addr_state",
"verification_status",
"application_type")]
#reorder to stack train data on top of test data
cats <- rbind(cats[train_index],
cats[!train_index])
cats$account <- c(1:nrow(cats))
#identify unique categorical feature values for each account (record)
d1 <- cats[,list(account, purpose)]
d2 <- cats[,list(account, home_ownership)]
d3 <- cats[, list(account, grade)]
d4 <- cats[, list(account, emp_length)]
d5 <- cats[, list(account, term)]
d6 <- cats[, list(account, addr_state)]
d7 <- cats[, list(account, verification_status)]
d8 <- cats[, list(account, application_type)]
d1[ ,purpose:= paste0("purpose: ", purpose)]
d2[ ,home_ownership:= paste0("home_ownership: ", home_ownership)]
d3[ , grade:= paste0("grade: ", grade)]
d4[ , emp_length:= paste0("emp_length: ", emp_length)]
d5[ , term:= paste0("term: ", term)]
d6[ , addr_state:= paste0("addr_state: ", addr_state)]
d7[ , verification_status:= paste0("verification_status: ", verification_status)]
d8[ , application_type:= paste0("application_type: ", application_type)]
names(d1) <- names(d2) <- names(d3) <- names(d4) <- names(d5) <- names(d6) <- names(d7) <- names(d8) <- c("account","feature_name")
d <- rbind(d1, d2, d3, d4, d5, d6, d7, d8)
rm(d1, d2, d3, d4, d5, d6, d7, d8); gc()
d <- unique(d)
setkey(d, account)
#creates a list of unique accounts (records)
ii <- as.character(unique(d$account))
#creates a list of all unique feature_names
jj <- unique(d$feature_name)
#creates a list the length of dd that gives each account a unique identifier from 1: the number of unique accounts
id_i <- match(d$account,ii)
#same thing for feature_name
id_j <- match(d$feature_name,jj)
id_ij <- cbind(id_i,id_j)
#creates a matrix frame that has the feature_names as column names and accounts as row names, and every point is blank
M.b <- Matrix(0,nrow=length(ii),ncol=length(jj),
dimnames=list(ii,jj),sparse=T)
#if the account and feature_name are found together in the id_i data frame, then mark it as a 1 in the M.b matrix
M.b[id_ij] <- 1
rm(ii,jj,id_i,id_j,id_ij);gc()
#combine the numeric and categorical matrices
M <- cbind(M.a, M.b)
#create xgb matrices for the xgboost model
#################################################
train_data <- M[1:nrow(train.a), ]
trwr <- sample(1:nrow(train_data), round(.85*nrow(train_data), 0), replace = FALSE)
trw_data <- train_data[trwr, ]
tew_data <- train_data[-trwr, ]
test_data <- M[(nrow(train.a)+1):nrow(M), ]
trw_label <- ifelse(train.a[trwr, ]$loan_status == "Charged Off", 1, 0)
tew_label <- ifelse(train.a[-trwr, ]$loan_status == "Charged Off", 1, 0)
test_label <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
dtrain_tr <- xgb.DMatrix(data = trw_data, label = trw_label)
dtrain_te <- xgb.DMatrix(data = tew_data, label = tew_label)
dtest <- xgb.DMatrix(data = test_data, label = test_label)
#train xgboost tree model
##############################
watchlist <- list(train = dtrain_tr, test = dtrain_te)
bst <- xgb.train(data = dtrain_tr,
                 watchlist = watchlist,
                 eta = .1,
                 nrounds = 200,
                 objective = "binary:logistic",
                 eval_metric = "auc")
#perform prediction
gb_pred <- as.factor(ifelse(predict(bst, test_data) >= .5, "Charged Off", "Fully Paid")) #apply trained xgboost model to test data
gb_cm <- confusionMatrix(gb_pred, as.factor(test.a$loan_status))
gb_sens <- sensitivity(gb_pred, as.factor(test.a$loan_status))
gb_spec <- specificity(gb_pred, as.factor(test.a$loan_status))
gb_labels <- ifelse(test.a$loan_status == "Charged Off", 1, 0)
gb_predictions <- ifelse(gb_pred == "Charged Off", 1, 0)
gb_roc <- roc(gb_labels, gb_predictions)
gb_auc <- auc(gb_roc)
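# Hedged note (not part of the original script): gb_roc above is built from the
# thresholded 0/1 predictions; an ROC/AUC on the raw predicted probabilities is
# usually more informative because it does not depend on the 0.5 cutoff.
gb_prob <- predict(bst, test_data)
gb_roc_prob <- roc(gb_labels, gb_prob)
gb_auc_prob <- auc(gb_roc_prob)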
##################################################################################################
#Compare the ability of the five different models to predict the outcomes of the test set
##################################################################################################
results <- data.frame(cbind(rbind(knn_auc,
logit_auc,
dtree_auc,
rf_auc,
gb_auc),
rbind(knn_sens,
logit_sens,
dtree_sens,
rf_sens,
gb_sens),
rbind(knn_spec,
logit_spec,
dtree_spec,
rf_spec,
gb_spec)))
rownames(results) <- c("K-Nearest Neighbors (k=9)",
"Logistic Regression w/ PCA",
"Decision Tree",
"Random Forest",
"Gradient Boosting")
colnames(results) <- c("AUC",
"Sensitivity",
"Specificity")
results <- results[order(-results$AUC), ]
results_viz <- melt(results)
results_viz$model <- rep(row.names(results), 3)
viz_comparison <- ggplot(results_viz, aes(x = model, y = value, fill = model)) +
geom_bar(stat = "identity") +
facet_grid(variable ~ .) +
coord_flip() +
scale_x_discrete(limits = rev(row.names(results))) +
theme(axis.title = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank()) +
labs(title = "Predictive Model Comparisons")
print(viz_comparison)
print(round(results, 3))
|
library(diverse)
### Name: geese
### Title: Geese dataset
### Aliases: geese
### Keywords: dataset
### ** Examples
str(geese)
summary(geese)
geese[,"2000"]
geese["Mute Swan",]
|
/data/genthat_extracted_code/diverse/examples/geese.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 183 |
r
|
#packages used below (Seurat for the single-nucleus workflow, dplyr for %>%/group_by/top_n)
library(Seurat)
library(dplyr)
sol_soupx_muscle <- subset(sol_soupx, idents = c("Type I Myonuclei", "Type IIx Myonuclei", "Type IIa Myonuclei", "Myotendinous Junction", "Satellite Cells"))
fivemonth_soupx_muscle <- subset(fivemonth_soupx, idents = c("Neuromuscular Junction", "Type IIx Myonuclei", "Type IIx Myonuclei #2", "Type IIb Myonuclei #2", "Type IIb Myonuclei", "Myotendinous Junction", "Satellite Cells"))
fivemonth_subset_counts<- GetAssayData(object = fivemonth_soupx_muscle, slot = "counts")
soleus_subset_counts <- GetAssayData(object = sol_soupx_muscle, slot = "counts")
fivemonth_muscle_integration <- CreateSeuratObject(counts = fivemonth_subset_counts, project = "5 Month", min.cells = 3, min.features = 200)
fivemonth_muscle_integration <- subset(fivemonth_muscle_integration, subset = nFeature_RNA > 200 & nFeature_RNA < 3200)
fivemonth_muscle_integration$sol_muscle_integration <- "TA"
fivemonth_muscle_integration <- NormalizeData(fivemonth_muscle_integration, verbose = FALSE)
fivemonth_muscle_integration <- FindVariableFeatures(fivemonth_muscle_integration, selection.method = "vst", nfeatures = 2000)
sol_muscle_integration <- CreateSeuratObject(counts = soleus_subset_counts, project = "24 Month", min.cells = 3, min.features = 200)
sol_muscle_integration<- subset(sol_muscle_integration, subset = nFeature_RNA > 200 & nFeature_RNA < 3200)
sol_muscle_integration$sol_muscle_integration <- "Soleus"
sol_muscle_integration <- NormalizeData(sol_muscle_integration, verbose = FALSE)
sol_muscle_integration <- FindVariableFeatures(sol_muscle_integration, selection.method = "vst", nfeatures = 2000)
#Performing integration
aged.anchors.muscle <- FindIntegrationAnchors(object.list = list(fivemonth_muscle_integration, sol_muscle_integration), dims = 1:20)
sol_ta_muscle_integration <- IntegrateData(anchorset = aged.anchors.muscle, dims = 1:20)
#Cluster on the integrated assay, which should remove batch effects between the two datasets
DefaultAssay(sol_ta_muscle_integration) <- "integrated"
sol_ta_muscle_integration <- ScaleData(sol_ta_muscle_integration, verbose = FALSE)
sol_ta_muscle_integration <- RunPCA(sol_ta_muscle_integration, npcs = 30, verbose = FALSE)
sol_ta_muscle_integration <- RunUMAP(sol_ta_muscle_integration, reduction = "pca", dims = 1:30)
sol_ta_muscle_integration <- FindNeighbors(sol_ta_muscle_integration, reduction = "pca", dims = 1:30)
sol_ta_muscle_integration <- FindClusters(sol_ta_muscle_integration, resolution = 0.5)
#re-attach a previously saved UMAP reduction; assumes `sol_ta_reductions` already exists in the workspace
sol_ta_muscle_integration@reductions[["umap"]] <- sol_ta_reductions
DimPlot(sol_ta_muscle_integration, reduction = "umap", group.by = "sol_muscle_integration")
DimPlot(sol_ta_muscle_integration, reduction = "umap", label = TRUE)
DefaultAssay(sol_ta_muscle_integration) <- "RNA"
#Make Heatmap
sol_ta_muscle_integration <- subset(sol_ta_muscle_integration, idents = c("8", "9"), invert = TRUE)
sol_ta_muscle_integration <- RenameIdents(sol_ta_muscle_integration, "0" = "Type IIb Myonuclei", "1" = "Type IIx Myonuclei", "2" = "Type IIb Myonuclei", "3" = "Type I Myonuclei", "4" = "Musculotendinous Junction", "5" = "Type IIa Myonuclei", "6" = "Satellite Cells", "7" = "Neuromuscular Junction")
sol_ta_muscle_integration_markers <- FindAllMarkers(sol_ta_muscle_integration, only.pos = TRUE)
top5 <- sol_ta_muscle_integration_markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
sol_ta_muscle_integration <- ScaleData(sol_ta_muscle_integration)
DoHeatmap(sol_ta_muscle_integration, features = top5$gene) + NoLegend()
gene <- c("Myh4", "Mybpc2", "Actn3", "Sox6", "Pde4d", "Myh1", "Vegfa", "Sorbs1", "Actn2", "Tead1", "Myh7", "Tnnc1", "Tpm3", "Tnnt1", "Myl2", "Myh2", "Csrp3", "Ankrd2", "Fam129a", "Myoz2", "Lama2", "Col22a1", "Ankrd1", "Slc24a2", "Adamts20", "Pax7", "Chodl", "Notch3", "Fgfr4", "Vcam1", "Chrne", "Vav3", "Musk", "Ufsp1", "Colq")
|
/scripts/integrations/Soleus muscle integration.R
|
no_license
|
MillayLab/single-myonucleus
|
R
| false | false | 3,794 |
r
|
# Game Logs Hitting
# Game logs script
#devtools::install_github("BillPetti/baseballr",force = T)
#install.packages('webshot')
#Load packages
library(baseballr)
library(webshot)
webshot::install_phantomjs()
library(tidyverse)
library(dplyr)
library(knitr)
#install.packages('kableExtra')
library(kableExtra)
library(gt)
library(lubridate)
#install.packages("downloadthis")
library(downloadthis)
#install.packages("googlesheets4")
library('googlesheets4')
#Read in client list
ncaa_clients <- read_csv("Data/ClientsFinalx.csv")
#pitchers <- ncaa_clients %>% filter(position == "P" | position == "UT")
#write.csv(pitchers,"pitchers.csv")
#hitters <- ncaa_clients %>% filter(position != "P")
# load in function
Errors <- read_sheet("https://docs.google.com/spreadsheets/d/1xRLnN7LaGOrrnmmRmTifnY_EwOaeFZES6-9y9_NZT2U/edit#gid=962710186")
hitters <- read.csv("hitters.csv") %>% rename(`3B` = X3B,`2B` = X2B)
get_ncaa_game_logs_v2 <- function(player_id,
year = 2019,
type = "batting",
span = 'game') {
year_id <- subset(ncaa_season_id_lu, season == year, select = id)
batting_id <- subset(ncaa_season_id_lu, season == year, select = batting_id)
pitching_id <- subset(ncaa_season_id_lu, season == year, select = pitching_id)
if (type == "batting") {
batting_url <- paste0("https://stats.ncaa.org/player/index?id=", year_id,"&stats_player_seq=", player_id,"&year_stat_category_id=", batting_id)
batting_payload <- xml2::read_html(batting_url)
} else {
pitching_url <- paste0("https://stats.ncaa.org/player/index?id=", year_id,"&stats_player_seq=", player_id,"&year_stat_category_id=", pitching_id)
pitching_payload <- xml2::read_html(pitching_url)
}
if (span == 'game') {
if (type == "batting") {
payload_df <- batting_payload %>%
rvest::html_nodes("table") %>%
.[5] %>%
rvest::html_table(fill = TRUE) %>%
as.data.frame() %>%
.[,c(1:23)]
names(payload_df) <- payload_df[2,]
payload_df <- payload_df[-c(1:3),]
payload_df <- payload_df %>%
mutate_at(vars(G:RBI2out), extract_numeric)
if('OPP DP' %in% colnames(payload_df) == TRUE) {
payload_df <- payload_df %>%
dplyr::rename(DP = `OPP DP`)
}
cols_to_num <- c("G","R", "AB", "H", "2B", "3B", "TB", "HR", "RBI",
"BB", "HBP", "SF", "SH", "K", "DP", "CS", "Picked",
"SB", "IBB", "RBI2out")
payload_df <- payload_df %>%
dplyr::mutate_at(cols_to_num, as.numeric)
} else {
payload_df <- pitching_payload %>%
rvest::html_nodes("table") %>%
.[5] %>%
rvest::html_table(fill = TRUE) %>%
as.data.frame() %>%
.[,c(1:35)]
names(payload_df) <- payload_df[2,]
payload_df <- payload_df[-c(1:3),]
if('OPP DP' %in% colnames(payload_df) == TRUE) {
payload_df <- payload_df %>%
dplyr::rename(DP = `OPP DP`)
}
cols_to_num <- c("G", "App", "GS", "IP", "CG", "H", "R", "ER", "BB", "SO", "SHO", "BF", "P-OAB", "2B-A", "3B-A", "Bk", "HR-A", "WP", "HB", "IBB", "Inh Run", "Inh Run Score", "SHA", "SFA", "Pitches", "GO", "FO", "W", "L", "SV", "OrdAppeared", "KL")
payload_df <- payload_df %>%
dplyr::mutate_at(vars(-c("Date")),
list(~gsub("\\/", "", x = .))) %>%
dplyr::mutate_at(cols_to_num, as.numeric)
}
} else {
if(type == 'batting') {
payload_df <- batting_payload %>%
rvest::html_nodes('table') %>%
.[3] %>%
rvest::html_table(fill = T) %>%
as.data.frame() %>%
.[-1,]
names(payload_df) <- payload_df[1,]
payload_df <- payload_df[-1,]
if('OPP DP' %in% colnames(payload_df) == TRUE) {
payload_df <- payload_df %>%
dplyr::rename(DP = `OPP DP`)
}
payload_df <- payload_df %>%
dplyr::select(Year,Team,GP,G,BA,OBPct,SlgPct,R,AB,H,`2B`,`3B`,TB,HR,RBI,BB,HBP,SF,SH,K,DP,CS,Picked,SB,RBI2out)
payload_df <- payload_df %>%
dplyr::mutate(player_id = player_id) %>%
dplyr::select(Year, player_id, everything())
} else {
payload_df <- pitching_payload %>%
rvest::html_nodes('table') %>%
.[3] %>%
rvest::html_table(fill = T) %>%
as.data.frame() %>%
.[-1,]
names(payload_df) <- payload_df[1,]
payload_df <- payload_df[-1,]
payload_df <- payload_df %>%
dplyr::select(Year,Team,GP,G,App,GS,ERA,IP,CG,H,R,ER,BB,SO,SHO,BF,`P-OAB`,`2B-A`,`3B-A`,Bk,`HR-A`,WP,HB,IBB,`Inh Run`,`Inh Run Score`,SHA,SFA,Pitches,GO,FO,W,L,SV,KL)
payload_df <- payload_df %>%
dplyr::mutate(player_id = player_id) %>%
dplyr::select(Year, player_id, everything())
}
}
return(payload_df)
}
#Corey Collins 2021 2471655 Georgia
Collins_logs <- get_ncaa_game_logs_v2(player_id =2471655,year = 2021,span = "game",type = "batting") %>% mutate(player_id = 2471655,Team = "Georgia", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Collins_career <- get_ncaa_game_logs_v2(player_id =2471655,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Adrian Del Castillo 2021 2122652 Miami (FL)
Adrian_logs <- get_ncaa_game_logs_v2(player_id = 2122652 ,year = 2021,span = "game",type = "batting") %>% mutate(player_id = 2122652 ,Team = "Miami", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Adrian_career <- get_ncaa_game_logs_v2(player_id = 2122652,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Kyle Teel 2021 2486493 Virginia
Teel_logs <- get_ncaa_game_logs_v2(player_id =2486493,year = 2021,span = "game",type = "batting") %>% mutate(player_id = 2486493,Team = "Virginia", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Teel_career <- get_ncaa_game_logs_v2(player_id = 2486493,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
#Kris Armstrong 2122294 Florida
Armstrong_logs <-get_ncaa_game_logs_v2(player_id = 2122294,year = 2021,span = "game",type = "batting") %>% mutate(player_id = 2122294,Team = "Florida", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Armstrong_career <- get_ncaa_game_logs_v2(player_id =2122294,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Garrett Blaylock 1978818 Georgia
Blaylock_logs <- get_ncaa_game_logs_v2(player_id = 1978818,year = 2021,span = "game",type = "batting") %>% mutate(player_id = 1978818,Team = "Georgia", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Blaylock_career <- get_ncaa_game_logs_v2(player_id = 1978818,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
#Zack Gelof 2127049 Virginia
Gelof_logs <- get_ncaa_game_logs_v2(player_id = 2127049,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2127049,Team = "Virginia", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Gelof_career <- get_ncaa_game_logs_v2(player_id = 2127049,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
#Nick Goodwin 2471788 Fr Kansas St.
Goodwin_logs <- get_ncaa_game_logs_v2(player_id = 2471788,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2471788,Team = "Kansas St.", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Goodwin_career <- get_ncaa_game_logs_v2(player_id = 2471788,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Kalae Harrison 2480184 Texas A&M
Harrison_logs <-get_ncaa_game_logs_v2(player_id = 2480184,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2480184,Team = "Texas A&M.", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Harrison_career <- get_ncaa_game_logs_v2(player_id =2480184,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Jace Jung 2346820 Texas Tech
Jung_logs <- get_ncaa_game_logs_v2(player_id = 2346820,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2346820,Team = "Texas Tech", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Jung_career <- get_ncaa_game_logs_v2(player_id =2346820,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Josh Rivera 2021 2305256 Florida
Rivera_logs <-get_ncaa_game_logs_v2(player_id = 2305256,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2305256,Team = "Florida", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Rivera_career <- get_ncaa_game_logs_v2(player_id =2305256,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Alejandro Toral 2021 1982080 Miami (FL)
Toral_logs <-get_ncaa_game_logs_v2(player_id = 1982080,year = 2021,span = "game",type = "batting") %>% mutate(player_id =1982080,Team = "Miami (FL)", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Toral_career <- get_ncaa_game_logs_v2(player_id =1982080,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
#Trevor Werner 2309490 Texas A&M
Werner_logs <-get_ncaa_game_logs_v2(player_id = 2309490,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2309490,Team = "Texas A&M", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Werner_career <- get_ncaa_game_logs_v2(player_id =2309490,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Brock Jones 2311353 Stanford
Jones_logs <-get_ncaa_game_logs_v2(player_id = 2311353,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2311353,Team = "Stanford", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Jones_career <- get_ncaa_game_logs_v2(player_id =2311353,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
#Max Marusak 2133783 Texas Tech
Max_logs <-get_ncaa_game_logs_v2(player_id = 2133783,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2133783,Team = "Texas Tech", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Max_career <- get_ncaa_game_logs_v2(player_id =2133783,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Chris Newell 2306474 Virginia
Newell_logs <-get_ncaa_game_logs_v2(player_id = 2306474 ,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2306474 ,Team = "Virginia", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Newell_career <- get_ncaa_game_logs_v2(player_id = 2306474,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Carson Wells 2494065
#Southern California
Wells_logs <-get_ncaa_game_logs_v2(player_id = 2494065,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2494065,Team = "Southern California", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Wells_career <- get_ncaa_game_logs_v2(player_id = 2494065,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
#Gabriel Hughes 2312826 Gonzaga
#Hughes_logs <-get_ncaa_game_logs_v2(player_id = 2312826,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2312826,Team = "Gonzaga", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
#Hughes_career <- get_ncaa_game_logs_v2(player_id = 2312826,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Luca Tresh 2021 2140156 NC State
Tresh_logs <-get_ncaa_game_logs_v2(player_id = 2140156,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2140156,Team = "NC State", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Tresh_career <- get_ncaa_game_logs_v2(player_id = 2140156,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Sean McLain ASU OF 2022 2349359
SMcLain_logs <- get_ncaa_game_logs_v2(player_id = 2349359,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2349359,Team = "Arizona St.", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
SMcLain_career <- get_ncaa_game_logs_v2(player_id = 2349359,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
#write.csv(SMcLain_logs,"SMClain.csv")
#x <- read.csv("SMClain.csv")
# Peyton Graham Oklahoma 2330757
Graham_logs <- get_ncaa_game_logs_v2(player_id = 2330757,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2330757,Team = "Oklahoma ", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Graham_career <- get_ncaa_game_logs_v2(player_id = 2330757,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Jack Brannigan 2326787 Notre Dame
Brannigan_logs <- get_ncaa_game_logs_v2(player_id = 2326787,year = 2021,span = "game",type = "batting") %>% mutate(player_id = 2326787,Team = "Notre Dame", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Brannigan_career <- get_ncaa_game_logs_v2(player_id = 2326787,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
#2131028
MattM_logs <- get_ncaa_game_logs_v2(player_id = 2131028,year = 2021,span = "game",type = "batting") %>% mutate(player_id = 2131028,Team = "UCLA", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
MattM_careers <- get_ncaa_game_logs_v2(player_id = 2131028,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Dylan Crews
crews_logs <- get_ncaa_game_logs_v2(player_id = 2486588,year = 2021,span = "game",type = "batting") %>% mutate(player_id = 2486588,Team = "LSU", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
crews_career <- get_ncaa_game_logs_v2(player_id = 2486588,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Enrique Bradfield Jr. (Fr) 2485641 Vanderbilt
Enrique_logs <- get_ncaa_game_logs_v2(player_id = 2485641,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2485641,Team = "Vanderbilt", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Enrique_career <- get_ncaa_game_logs_v2(player_id = 2485641,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
# Carter Young (So) 2309744 Vanderbilt
Carter_logs <- get_ncaa_game_logs_v2(player_id = 2309744,year = 2021,span = "game",type = "batting") %>% mutate(player_id =2309744,Team = "Vanderbilt", OBPct = "",SlgPct = "",BA = "",GP = "",Year = "")
Carter_career <- get_ncaa_game_logs_v2(player_id = 2309744,year = 2021,span = "career",type = "batting") %>% mutate("Date" = "",Opponent = "Season YTD",IBB = "",Result = "") %>% filter(Year == "2020-21")
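# Hedged alternative (not part of the original script): the per-player blocks above all
# repeat the same pattern, so they could be generated from a small lookup table instead,
# e.g. with purrr (loaded via tidyverse). The ids/teams below are copied from the calls above.
roster <- tribble(
  ~pid,    ~team,
  2471655, "Georgia",
  2122652, "Miami",
  2486493, "Virginia"
)
all_game_logs <- purrr::pmap_dfr(roster, function(pid, team) {
  get_ncaa_game_logs_v2(player_id = pid, year = 2021, span = "game", type = "batting") %>%
    mutate(player_id = pid, Team = team,
           OBPct = "", SlgPct = "", BA = "", GP = "", Year = "")
})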
ytd_clogs <- rbind(Tresh_career,Wells_career,Newell_career,Max_career,Jones_career,Werner_career,Toral_career,Rivera_career,Jung_career,Harrison_career,Goodwin_career,Gelof_career,Blaylock_career,Armstrong_career,Teel_career,Adrian_career,Collins_career,SMcLain_career,Graham_career,Brannigan_career,MattM_careers,crews_career,Carter_career,Enrique_career)
#read.csv("day.csv")
ytd_clogs$SB <- sub("^$", 0, ytd_clogs$SB)
ytd_clogs$H <- sub("^$", 0, ytd_clogs$H)
ytd_clogs$R <- sub("^$", 0, ytd_clogs$R)
ytd_clogs$`2B` <- sub("^$", 0, ytd_clogs$`2B`)
ytd_clogs$`3B` <- sub("^$", 0, ytd_clogs$`3B`)
ytd_clogs$HR <- sub("^$", 0, ytd_clogs$HR)
ytd_clogs$RBI <- sub("^$", 0, ytd_clogs$RBI)
ytd_clogs$BB <- sub("^$", 0, ytd_clogs$BB)
ytd_clogs$K <- sub("^$", 0, ytd_clogs$K)
ytd_clogs$CS <- sub("^$", 0, ytd_clogs$CS)
ytd_clogs$G <- sub("^$", 0, ytd_clogs$G)
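# Hedged alternative (not part of the original script): the repeated sub() calls above can
# be collapsed into a single dplyr::across() step that swaps empty strings for 0.
ytd_clogs <- ytd_clogs %>%
  mutate(across(c(SB, H, R, `2B`, `3B`, HR, RBI, BB, K, CS, G),
                ~ sub("^$", 0, .x)))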
ytd_clogs <- ytd_clogs %>% mutate(OPS = as.numeric(OBPct) + as.numeric(SlgPct)) %>% inner_join(ncaa_clients)
ytd_clogs <- ytd_clogs%>% mutate(Player = paste(ytd_clogs$Name,ytd_clogs$Yr))
#### DAILY
#`x` comes from the commented-out read.csv("SMClain.csv") above; this line only applies when that file has been read back in
x <- x %>% rename(`2B` = X2B,`3B` = X3B) %>% select(-c(X))
hitters
gameday_logs <- rbind(Tresh_logs,Wells_logs,Newell_logs,Max_logs,Jones_logs,Werner_logs,Toral_logs,Rivera_logs,Jung_logs,Harrison_logs,Goodwin_logs,Gelof_logs,Blaylock_logs,Armstrong_logs,Teel_logs,Adrian_logs,Collins_logs,Brannigan_logs,SMcLain_logs,Graham_logs,MattM_logs,crews_logs,Enrique_logs,Carter_logs) %>% mutate(OPS = '') %>% inner_join(ncaa_clients)
game_day <- gameday_logs %>% mutate(Player = paste(gameday_logs$Name,gameday_logs$Yr))
game_day$Date <- lubridate::mdy(gameday_logs$Date)
hitters$Date <- lubridate::mdy(hitters$Date)
# Weekly
week_hitters <- rbind(game_day,hitters)
week_hitters2 <- week_hitters %>% filter(Date == '2020-02-20' | (Date >= '2021-4-12')) %>% arrange(LastName,Date) %>% filter(G == 1 | G == 'G')
week_hitters3 <- rbind(week_hitters2,ytd_clogs)
(hitters_day3 <- week_hitters %>% filter(Date == today()) %>% filter(target != "Target") %>% mutate(across(c("GP","AB","G"), ~ as.numeric(.x))) %>% replace(is.na(.), 0) %>% select(Name,Result,school,Opponent,Date,G))
##Daily
day_hitters <- week_hitters %>% filter(Date == '2020-02-20'| (Date == today() )) %>% arrange(LastName,Date) %>% filter(G == 1 | G == 'G')
# Filter out guys who dnp
day_hitters2 <- rbind(day_hitters,ytd_clogs)
day_hitters <- day_hitters2 %>% arrange(target,LastName)
#write_csv(DAY,"dayh.csv")
Day2 <- day_hitters2 %>% filter(LastName != "Rivera") %>% filter(LastName!= "Armstrong") %>% filter(LastName!= "Crews")
day_hitters2 <- Day2
DAY <- read_csv("day.csv")
#Change date ----- DAILY!
DAY <- day_hitters2 %>% mutate(Player = paste(day_hitters2$Name,day_hitters2$yr,"(",day_hitters2$school,day_hitters2$position,")")) %>% select (Player,Opponent,Result,AVG = BA,Team = school,Date,target,AB,R,H,`2B`,`3B`,HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,position,LastName,school,Name,yr,target) %>% arrange(target,LastName,Date,target) %>% filter(target != "Target") %>% gt(groupname_col = "Player") %>% tab_options(column_labels.background.color = "black") %>% cols_hide(columns = c("Date","LastName","target","Team")) %>% tab_options(table.width = "70%", row_group.background.color = "lightblue") %>% tab_style(style = list(cell_fill(color = "Black")),locations = cells_body(rows = Opponent == "Opponent")) %>% tab_style(style = list(cell_text(color = "white")),locations = cells_body(rows = Opponent == "Opponent")) %>% cols_align(align = "center") %>% tab_options(data_row.padding = px(1/2)) %>% tab_options(row_group.padding = px(12)) %>% tab_style(style = list(cell_text(color = "black")),locations = cells_body(rows = Opponent == "Season YTD")) %>% tab_style(style = list(cell_fill(color = "grey")),locations = cells_body(rows = Opponent == "Season YTD")) %>% cols_hide(columns = c("Name","yr","position","Player")) %>% fmt_missing(c("R","2B","3B","HR","RBI","BB","SO","SB","CS","H","AB"), rows = NULL, missing_text = 0) %>% tab_header(title = md("**Hitters**"),subtitle = md("*(4/18/2021)*")) %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dashed",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "75%",grand_summary_row.background.color = "purple",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold") %>% tab_options(row_group.font.weight = 'bolder',row_group.font.size = 28,row_group.border.bottom.style = "all",row_group.border.top.color = "white",row_group.border.top.width = 38) %>% fmt_missing(c("AVG","OBP","SLG","OPS"),missing_text = " ")
DAY %>%
gtsave(
"DaylogsH.pdf", expand = 10000,
)
### Advisees ## WEEKLY
GAME_LOGS <- week_hitters3 %>% mutate(Player = paste(week_hitters3$Name,week_hitters3$yr,"(",week_hitters3$school,week_hitters3$position,")")) %>% select (Player,Opponent,Result,AVG = BA,Team = school,Date,target,AB,R,H,`2B`,`3B`,HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,position,LastName,school,Name,yr,target) %>% arrange(target,LastName,Date,target) %>% filter(target != "Target") %>% gt(groupname_col = "Player") %>% tab_options(column_labels.background.color = "black") %>% cols_hide(columns = c("Date","LastName","target","Team")) %>% tab_options(table.width = "70%", row_group.background.color = "lightblue") %>% tab_style(style = list(cell_fill(color = "Black")),locations = cells_body(rows = Opponent == "Opponent")) %>% tab_style(style = list(cell_text(color = "white")),locations = cells_body(rows = Opponent == "Opponent")) %>% cols_align(align = "center") %>% tab_options(data_row.padding = px(1/2)) %>% tab_options(row_group.padding = px(12)) %>% tab_style(style = list(cell_text(color = "black")),locations = cells_body(rows = Opponent == "Season YTD")) %>% tab_style(style = list(cell_fill(color = "grey")),locations = cells_body(rows = Opponent == "Season YTD")) %>% cols_hide(columns = c("Name","yr","position","Player")) %>% fmt_missing(c("R","2B","3B","HR","RBI","BB","SO","SB","CS","H","AB"), rows = NULL, missing_text = 0) %>% tab_header(title = md("**Advisees**"),subtitle = md("*4/12/2021 -- 4/20/2021*")) %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dashed",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "75%",grand_summary_row.background.color = "purple",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold") %>% tab_options(row_group.font.weight = 'bolder',row_group.font.size = 28,row_group.border.bottom.style = "bottom",row_group.border.top.color = "white",row_group.border.top.width = 38)
GAME_LOGS %>%
gtsave(
"gamelogs.pdf", expand = 10000,
)
#### Targets ### Weekly ### Change Date
#the Targets table is built below and assigned to `l`
#preview the weekly rows without Werner (printed only, not reassigned)
week_hitters3 %>% filter(LastName != "Werner")
l <- week_hitters3 %>% mutate(Player = paste(week_hitters3$Name,week_hitters3$yr,"(",week_hitters3$school,week_hitters3$position,")")) %>% select (Player,Opponent,Result,AVG = BA,Team = school,Date,target,AB,R,H,`2B`,`3B`,HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,position,LastName,school,Name,yr,target) %>% arrange(yr,LastName,Date,target) %>% filter(target == "Target" & LastName != "Hughes") %>% gt(groupname_col = "Player") %>% tab_options(column_labels.background.color = "black") %>% cols_hide(columns = c("Date","LastName","target","Team")) %>% tab_options(table.width = "70%", row_group.background.color = "lightblue") %>% tab_style(style = list(cell_fill(color = "Black")),locations = cells_body(rows = Opponent == "Opponent")) %>% tab_style(style = list(cell_text(color = "white")),locations = cells_body(rows = Opponent == "Opponent")) %>% cols_align(align = "center") %>% tab_options(data_row.padding = px(1/2)) %>% tab_options(row_group.padding = px(12)) %>% tab_style(style = list(cell_text(color = "black")),locations = cells_body(rows = Opponent == "Season YTD")) %>% tab_style(style = list(cell_fill(color = "grey")),locations = cells_body(rows = Opponent == "Season YTD")) %>% cols_hide(columns = c("Name","yr","position","Player")) %>% fmt_missing(c("R","2B","3B","HR","RBI","BB","SO","SB","CS","H","AB"), rows = NULL, missing_text = 0) %>% tab_header(title = md("**Targets**"),subtitle = md("*4/12/2021 -- 4/20/2021*")) %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dashed",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "75%",grand_summary_row.background.color = "purple",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold") %>% tab_options(row_group.font.weight = 'bolder',row_group.font.size = 28,row_group.border.bottom.style = "bottom",row_group.border.top.color = "white",row_group.border.top.width = 38)
l %>%
gtsave(
"gamelogsTH.pdf", expand = 10000,
)
########### YTD
# YTD - game logs
ytd <- ytd_clogs %>% inner_join(ncaa_clients)
ytd <- ytd %>% mutate(across(c("GP":"SB"), ~ as.numeric(.x))) %>% inner_join(Errors)
YTD_adv <- ytd %>% filter(target != "Target") %>% mutate(OPS = OBPct + SlgPct) %>% arrange(target,LastName) %>% select(Name,Team,AVG = BA,AB,R,H,'2B','3B',HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,target,E = Errors) %>% gt(rowname_col = "Hitter",groupname_col = "target" )%>% tab_header(title = md("**Hitters**"),subtitle = "4/18/2021") %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dashed",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "75%",grand_summary_row.background.color = "black",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold", column_labels.vlines.style = "dashed") %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(columns = vars(AVG,AB,R,H,RBI,BB,SO,SB,CS,`2B`,`3B`,`HR`), missing_text = 0 ) %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(columns = vars(AVG,OBP,SLG), missing_text = "-" ) %>% cols_align(align = "center") %>% opt_table_outline("solid",color = "black") %>% opt_table_lines()
YTD_adv %>%
gtsave(
"ytdhitters.pdf", expand = 10000,
)
########################################################################################################################
YTD_tar <- ytd %>% filter(target == "Target") %>% mutate(OPS = OBPct + SlgPct) %>% arrange(target,LastName) %>% select(Name,Team,AVG = BA,AB,R,H,'2B','3B',HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,target) %>% gt(rowname_col = "Hitter",groupname_col = "target" )%>% tab_header(title = md("**Targets**"),subtitle = "4/04/2021") %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dotted",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "70%",grand_summary_row.background.color = "purple",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold") %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(columns = vars(AVG,AB,R,H,RBI,BB,SO,SB,CS,`2B`,`3B`,`HR`), missing_text = 0 ) %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(columns = vars(AVG,OBP,SLG), missing_text = "-" ) %>% opt_table_lines()
#save the Targets YTD table built above
YTD_tar %>%
gtsave(
"nightly.pdf", expand = 10000,
)
#####################
# Weekly game logs for hitters
#`hitting` is assembled further below (logs + ytd_clogs); this block assumes it already exists in the workspace
hitting <- hitting %>% mutate(across(c("GP":"SB"), ~ as.numeric(.x)))
Hitting_weekly <- hitting %>% mutate(OPS = OBPct + SlgPct ) %>% select(Opponent,Result,AVG = BA,Team = school,Date,target,AB,R,H,`2B`,`3B`,HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,position,LastName,school,Name,yr,target) %>% mutate(label = paste(hitting$Name,"(",hitting$school,hitting$position,paste0(yr,'`'),")")) %>% mutate() %>% arrange(target,yr,LastName)%>% gt(groupname_col = "label" ) %>% tab_header(title = md("**Hitters**"),subtitle = md("*BORAS CORP COLLEGE DRAFT ADVISEE & TARGET RESULTS – POS. PLAYERS (3/29 – 4/1) Sorted by Advisees/Targets, Draft Eligible Year, and then alphabetically - (Targets stats in red*)")) %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dashed",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "70%",grand_summary_row.background.color = "purple",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold") %>%fmt_missing(columns = vars(AVG,AB,R,H,RBI,BB,SO,SB,CS,`2B`,`3B`,`HR`), missing_text = 0) %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(
columns = vars(AVG,OBP,SLG,OPS),missing_text = "--" ) %>% cols_hide(columns = vars(LastName,Date,target,position,Name,yr,Team)) %>% tab_style(style = list(cell_fill(color = "lightblue")),locations = cells_body(rows = Opponent == "Season YTD")
) %>% tab_style(style = list(cell_text(color = "Red")),locations = cells_body(rows = target == "Target"))
# weekly hitters
Hitting_weekly %>%
gtsave(
"Hittingnightly.pdf"
)
#######
YTD_adv %>%
gtsave(
"YTDAdvHitters2.pdf",
)
#####
# filter ytd_clogs by date!!
gameday_logs$Date <- mdy(gameday_logs$Date)
logs <- gameday_logs %>% filter((Date >= "2021-04-03" & G == 1))
hitting <- rbind(logs,ytd_clogs)
######
night_logs <- rbind(Collins_logs, Gelof_logs) %>% mutate(OPS = " ")
career_logs <- rbind(Harrison_career, Goodwin_career, Gelof_career, Armstrong_career)
career_logs <- career_logs %>% mutate("OPS" = as.numeric(OBPct) + as.numeric(SlgPct))
night_logs <- night_logs %>% filter(Date == today()-2 | Date == "2001-01-01" )
fchris <- unique(rbind(night_logs, career_logs))
|
/Hitting/WeeklyHitting.R
|
no_license
|
sumairshah2/NCAA
|
R
| false | false | 30,901 |
r
|
GAME_LOGS %>%
gtsave(
"gamelogs.pdf", expand = 10000,
)
#### Targets ### Weekly ### Change Date
# The weekly targets table is assembled below as 'l'. Note that the Werner
# filter here was never assigned back to week_hitters3, so as written it had
# no effect; assign it if Werner should be excluded:
# week_hitters3 <- week_hitters3 %>% filter(LastName != "Werner")
l <- week_hitters3 %>% mutate(Player = paste(week_hitters3$Name,week_hitters3$yr,"(",week_hitters3$school,week_hitters3$position,")")) %>% select (Player,Opponent,Result,AVG = BA,Team = school,Date,target,AB,R,H,`2B`,`3B`,HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,position,LastName,school,Name,yr,target) %>% arrange(yr,LastName,Date,target) %>% filter(target == "Target" & LastName != "Hughes") %>% gt(groupname_col = "Player") %>% tab_options(column_labels.background.color = "black") %>% cols_hide(columns = c("Date","LastName","target","Team")) %>% tab_options(table.width = "70%", row_group.background.color = "lightblue") %>% tab_style(style = list(cell_fill(color = "Black")),locations = cells_body(rows = Opponent == "Opponent")) %>% tab_style(style = list(cell_text(color = "white")),locations = cells_body(rows = Opponent == "Opponent")) %>% cols_align(align = "center") %>% tab_options(data_row.padding = px(1/2)) %>% tab_options(row_group.padding = px(12)) %>% tab_style(style = list(cell_text(color = "black")),locations = cells_body(rows = Opponent == "Season YTD")) %>% tab_style(style = list(cell_fill(color = "grey")),locations = cells_body(rows = Opponent == "Season YTD")) %>% cols_hide(columns = c("Name","yr","position","Player")) %>% fmt_missing(c("R","2B","3B","HR","RBI","BB","SO","SB","CS","H","AB"), rows = NULL, missing_text = 0) %>% tab_header(title = md("**Targets**"),subtitle = md("*4/12/2021 -- 4/20/2021*")) %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dashed",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "75%",grand_summary_row.background.color = "purple",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold") %>% tab_options(row_group.font.weight = 'bolder',row_group.font.size = 28,row_group.border.bottom.style = "bottom",row_group.border.top.color = "white",row_group.border.top.width = 38)
l %>%
gtsave(
"gamelogsTH.pdf", expand = 10000,
)
########### YTD
# YTD - game logs
ytd <- ytd_clogs %>% inner_join(ncaa_clients)
ytd <- ytd %>% mutate(across(c("GP":"SB"), ~ as.numeric(.x))) %>% inner_join(Errors)
YTD_adv <- ytd %>% filter(target != "Target") %>% mutate(OPS = OBPct + SlgPct) %>% arrange(target,LastName) %>% select(Name,Team,AVG = BA,AB,R,H,'2B','3B',HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,target,E = Errors) %>% gt(rowname_col = "Hitter",groupname_col = "target" )%>% tab_header(title = md("**Hitters**"),subtitle = "4/18/2021") %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dashed",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "75%",grand_summary_row.background.color = "black",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold", column_labels.vlines.style = "dashed") %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(columns = vars(AVG,AB,R,H,RBI,BB,SO,SB,CS,`2B`,`3B`,`HR`), missing_text = 0 ) %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(columns = vars(AVG,OBP,SLG), missing_text = "-" ) %>% cols_align(align = "center") %>% opt_table_outline("solid",color = "black") %>% opt_table_lines()
YTD_adv %>%
gtsave(
"ytdhitters.pdf", expand = 10000,
)
########################################################################################################################
YTD_tar <- ytd %>% filter(target == "Target") %>% mutate(OPS = OBPct + SlgPct) %>% arrange(target,LastName) %>% select(Name,Team,AVG = BA,AB,R,H,'2B','3B',HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,target) %>% gt(rowname_col = "Hitter",groupname_col = "target" )%>% tab_header(title = md("**Targets**"),subtitle = "4/04/2021") %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dotted",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "70%",grand_summary_row.background.color = "purple",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold") %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(columns = vars(AVG,AB,R,H,RBI,BB,SO,SB,CS,`2B`,`3B`,`HR`), missing_text = 0 ) %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(columns = vars(AVG,OBP,SLG), missing_text = "-" ) %>% opt_table_lines()
YTD_tar %>%  # was 'ss', which is not defined in this section; YTD_tar is the targets table built just above
gtsave(
"nightly.pdf", expand = 10000,
)
#####################
# Weekly game logs for hitters
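# NOTE: 'hitting' is assembled further below (see the "filter ytd_clogs by
# date" block); run that block before this one.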
hitting <- hitting %>% mutate(across(c("GP":"SB"), ~ as.numeric(.x)))
Hitting_weekly <- hitting %>% mutate(OPS = OBPct + SlgPct ) %>% select(Opponent,Result,AVG = BA,Team = school,Date,target,AB,R,H,`2B`,`3B`,HR,RBI,BB,SO = K,OBP = OBPct,SLG = SlgPct,OPS,SB,CS,position,LastName,school,Name,yr,target) %>% mutate(label = paste(hitting$Name,"(",hitting$school,hitting$position,paste0(yr,'`'),")")) %>% mutate() %>% arrange(target,yr,LastName)%>% gt(groupname_col = "label" ) %>% tab_header(title = md("**Hitters**"),subtitle = md("*BORAS CORP COLLEGE DRAFT ADVISEE & TARGET RESULTS – POS. PLAYERS (3/29 – 4/1) Sorted by Advisees/Targets, Draft Eligible Year, and then alphabetically - (Targets stats in red*)")) %>% tab_options(heading.background.color = "#EFFBFC",stub.border.style = "dashed",stub.border.color = "#989898",stub.border.width = "1px",summary_row.border.color = "#989898",table.width = "70%",grand_summary_row.background.color = "purple",column_labels.background.color = "black",table.font.color = "black",row_group.border.bottom.color = "black",row_group.border.bottom.width = 2,row_group.padding = 10,row_group.background.color = "#EFFBFC",stub.font.weight = "bold") %>%fmt_missing(columns = vars(AVG,AB,R,H,RBI,BB,SO,SB,CS,`2B`,`3B`,`HR`), missing_text = 0) %>% fmt_number(columns = vars(AVG),decimals = 3) %>% fmt_missing(
columns = vars(AVG,OBP,SLG,OPS),missing_text = "--" ) %>% cols_hide(columns = vars(LastName,Date,target,position,Name,yr,Team)) %>% tab_style(style = list(cell_fill(color = "lightblue")),locations = cells_body(rows = Opponent == "Season YTD")
) %>% tab_style(style = list(cell_text(color = "Red")),locations = cells_body(rows = target == "Target"))
# weekly hitters
Hitting_weekly %>%  # was 'test', which is not defined in this section; Hitting_weekly is the weekly table built just above
gtsave(
"Hittingnightly.pdf"
)
#######
YTD_adv %>%
gtsave(
"YTDAdvHitters2.pdf",
)
#####
# filter ytd_clogs by date!!
gameday_logs$Date <- mdy(gameday_logs$Date)
logs <- gameday_logs %>% filter((Date >= "2021-04-03" & G == 1))
hitting <- rbind(logs,ytd_clogs)
######
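# NOTE: the *_logs_ objects below (e.g., Collins_logs_, Gelof_logs_) are
# assumed to be single-night pulls created elsewhere; they are not defined in
# this section.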
night_logs <- rbind(Collins_logs_,Gelof_logs_) %>% mutate(OPS = " ")
carrer_logs <- rbind(Harrison_career,Goodwin_career,Gelof_career,Armstrong_career)
carrer_logs <- carrer_logs %>% mutate("OPS" = as.numeric(OBPct) + as.numeric(SlgPct))
night_logs <- night_logs %>% filter(Date == today()-2 | Date == "2001-01-01" )
fchris <- unique(rbind(night_logs,carrer_logs))
|
#' A function to print a zoonWorkflow object
#'
#' The function returns a very simple output detailing the function call.
#'
#'@param x object of class zoonWorkflow
#'@param \dots currently ignored
#'
#'@name print.zoonWorkflow
#'@method print zoonWorkflow
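#'@examples
#' # Minimal illustrative object; real zoonWorkflow objects are produced by the
#' # package's workflow() function and carry many more elements.
#' wf <- structure(list(call = "workflow(...)"), class = "zoonWorkflow")
#' print(wf)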
#'@export
print.zoonWorkflow <- function(x, ...){
cat('zoonWorkflow Object\n===================\n\n')
cat('Call:', x$call, '\n')
}
|
/zoon/R/print.zoonWorkflow.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 418 |
r
|
#----------------------------------------------------------------------------------------#
# rSFSW2: FRAMEWORK FOR SOILWAT2 SIMULATIONS: CREATING SIMULATION RUNS, EXECUTING
# SIMULATIONS, AND AGGREGATING OUTPUTS
#
# See demo/SFSW2_project_code.R for details
#----------------------------------------------------------------------------------------#
##############################################################################
#----------------------- DESCRIPTION OF SIMULATION PROJECT ---------------------
# NOTE: The values cannot be changed once a rSFSW2 simulation project is set up. The
# values of settings (file demo/SFSW2_project_settings.R) may be changed from run to run.
#----- Metainformation about computing platform
opt_platform <- list(
host = c("local", "hpc")[1],
no_parallel = any(
identical(tolower(Sys.getenv("NOT_CRAN")), "false"),
identical(tolower(Sys.getenv("TRAVIS")), "true"),
identical(tolower(Sys.getenv("APPVEYOR")), "true"))
)
#------ Paths to simulation framework project folders
project_paths <- list(
dir_prj = dir_prj <- getwd(),
# Path to inputs
dir_in = dir_in <- file.path(dir_prj, "1_Data_SWInput"),
# Folder with default standalone SOILWAT2 input files
dir_in_sw = file.path(dir_in, "SoilWat2_defaults"),
# Folder with data input files
dir_in_dat = file.path(dir_in, "datafiles"),
# Folder with treatment input files according to treatment instructions
dir_in_treat = file.path(dir_in, "treatments"),
# Folder with GISSM regeneration parameters (will contain one file per species)
dir_in_gissm = file.path(dir_in, "regeneration"),
# Path to where large outputs are saved to disk
dir_big = dir_big <- dir_prj,
# Path to where rSOILWAT2 objects are saved to disk
# if saveRsoilwatInput and/or saveRsoilwatOutput
dir_out_sw = file.path(dir_big, "3_Runs"),
# Path to outputs produced by rSFSW2
dir_out = dir_out <- file.path(dir_big, "4_Data_SWOutputAggregated"),
# Path to where rSFSW2 will store temporary files
dir_out_temp = file.path(dir_out, "temp"),
# Path to various other output
dir_out_expDesign = file.path(dir_out, "Experimentals_Input_Data"),
dir_out_traces = file.path(dir_out, "Time_Traces"),
# Path from where external data are extraced
dir_external = dir_ex <- if (identical(opt_platform[["host"]], "local")) {
file.path("/Volumes", "BookDuo_12TB", "BigData", "GIS", "Data")
} else if (identical(opt_platform[["host"]], "hpc")) {
file.path("/home", "fas", "lauenroth", "ds2483", "project", "BigData", "GIS",
"Data")
},
# Path to historic weather and climate data including
# Livneh, Maurer, ClimateAtlas, and NCEPCFSR data
dir_ex_weather = file.path(dir_ex, "Weather_Past"),
# Path to future scenario data
dir_ex_fut = file.path(dir_ex, "Weather_Future"),
# Path to soil data
dir_ex_soil = file.path(dir_ex, "Soils"),
# Path to topographic data
dir_ex_dem = file.path(dir_ex, "Topography")
)
#------ Base names or full names of input files
fnames_in <- list(
fmaster = "SWRuns_InputMaster_Test_v11.csv",
fslayers = "SWRuns_InputData_SoilLayers_v9.csv",
ftreatDesign = "SWRuns_InputData_TreatmentDesign_v14.csv",
fexpDesign = "SWRuns_InputData_ExperimentalDesign_v06.csv",
fclimnorm = "SWRuns_InputData_cloud_v10.csv",
fvegetation = "SWRuns_InputData_prod_v11.csv",
fsite = "SWRuns_InputData_siteparam_v14.csv",
fsoils = "SWRuns_InputData_soils_v12.csv",
fweathersetup = "SWRuns_InputData_weathersetup_v10.csv",
fclimscen_delta = "SWRuns_InputData_ClimateScenarios_Change_v11.csv",
fclimscen_values = "SWRuns_InputData_ClimateScenarios_Values_v11.csv",
LookupClimatePPTScenarios = "climate.ppt.csv",
LookupClimateTempScenarios = "climate.temp.csv",
LookupShiftedPPTScenarios = "shifted.ppt.csv",
LookupEvapCoeffFromTable = "BareSoilEvaporationCoefficientsPerSoilLayer.csv",
LookupTranspCoeffFromTable = "TranspirationCoefficients_v2.csv",
LookupTranspRegionsFromTable = "TranspirationRegionsPerSoilLayer.csv",
LookupSnowDensityFromTable = "MeanMonthlySnowDensities_v2.csv",
LookupVegetationComposition = "VegetationComposition_MeanMonthly_v5.csv",
# Pre-processed input: storage file of input data for repeated access (faster) instead
# of re-reading from (slower) csv files if flag 'use_preprocin' is TRUE
fpreprocin = "SWRuns_InputAll_PreProcessed.rds",
# Database with daily weather data
fdbWeather = file.path(project_paths[["dir_in"]], "dbWeatherData_test.sqlite3"),
# Raster describing spatial interpretation of simulation experiment if scorp == "cell"
fsimraster = file.path(project_paths[["dir_in"]], "sim_raster.grd")
)
#------ Full names of output files
fnames_out <- list(
dbOutput = file.path(project_paths[["dir_out"]], "dbTables.sqlite3"),
dbOutput_current = file.path(project_paths[["dir_out"]], "dbTables_current.sqlite3"),
timerfile = file.path(project_paths[["dir_out"]], "Timing_Simulation.csv")
)
#------ Input data sources and options for data preparation
opt_input <- list(
prior_calculations = c(
"AddRequestedSoilLayers", 0,
"EstimateConstantSoilTemperatureAtUpperAndLowerBoundaryAsMeanAnnualAirTemperature", 1,
"EstimateInitialSoilTemperatureForEachSoilLayer", 1,
"CalculateBareSoilEvaporationCoefficientsFromSoilTexture", 1
),
# Interpolate and add soil layers if not available if 'AddRequestedSoilLayers'
requested_soil_layers = c(5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150),
# Request data from datasets ('external' to a rSFSW2-project)
req_data = c(
# Daily weather data for current conditions
# - Maurer et al. 2002: 1/8-degree res.; data expected at file.path(
# project_paths[["dir_ex_weather"]], "Maurer+_2002updated", "DAILY_FORCINGS")
"GriddedDailyWeatherFromMaurer2002_NorthAmerica", 0,
# - Thornton et al. 1997: 1-km res.; data expected at file.path(
# project_paths[["dir_ex_weather"]], "DayMet_NorthAmerica",
# "DownloadedSingleCells_FromDayMetv3_NorthAmerica")
"GriddedDailyWeatherFromDayMet_NorthAmerica", 0,
# - McKenney et al. 2011: 10-km res.; use with dbW; data expected at file.path(
# project_paths[["dir_ex_weather"]], "NRCan_10km_Canada", "DAILY_GRIDS")
"GriddedDailyWeatherFromNRCan_10km_Canada", 0,
# - Saha et al. 2010: 0.3125-deg res.; use with dbW; data expected at file.path(
# project_paths[["dir_ex_weather"]], "NCEPCFSR_Global", "CFSR_weather_prog08032012")
"GriddedDailyWeatherFromNCEPCFSR_Global", 0,
# - Livneh et al. 2013: 1/16 degree res.; data expected at file.path(
# project_paths[["dir_ex_weather"]], "Livneh_NA_2013", "MONTHLY_GRIDS")
"GriddedDailyWeatherFromLivneh2013_NorthAmerica", 0,
# Monthly PPT, Tmin, Tmax conditions: if using NEX or GDO-DCP-UC-LLNL,
# climate condition names must be of the form SCENARIO.GCM with SCENARIO being
# used for ensembles; if using climatewizard, climate condition names must be
# equal to what is in the respective directories
# - data expected at file.path(project_paths[["dir_ex_fut"]], "ClimateScenarios")
"ExtractClimateChangeScenarios", 1,
# Mean monthly wind, relative humidity, and 100% - sunshine
# - NCDC 2005: data expected at file.path(project_paths[["dir_ex_weather"]],
# "ClimateAtlasUS")
"ExtractSkyDataFromNOAAClimateAtlas_USA", 0,
# - Saha et al. 2010: project_paths[["dir_ex_weather"]], "NCEPCFSR_Global",
# "CFSR_weather_prog08032012")
"ExtractSkyDataFromNCEPCFSR_Global", 0,
# Topography
# - NED, National Elevation Dataset (ned.usgs.gov): 1-arcsec res; data expected
# at project_paths[["dir_ex_dem"]], "NED_USA", "NED_1arcsec")
"ExtractElevation_NED_USA", 0,
# - Harmonized World Soil Database: 30-arcsec res; data expected
# at project_paths[["dir_ex_dem"]], "HWSD")
"ExtractElevation_HWSD_Global", 0,
# Soil texture
# - Harmonized World Soil Database: 1-km re-gridded; data expected
# at project_paths[["dir_ex_soil"]], "CONUSSoil", "output", "albers")
"ExtractSoilDataFromCONUSSOILFromSTATSGO_USA", 0,
# - ISRIC-WISE 5-arcmin v1.2 (2012): 5-arcmin re-gridded; data expected
# at project_paths[["dir_ex_soil"]], "WISE", "wise5by5min_v1b", "Grid", "smw5by5min")
"ExtractSoilDataFromISRICWISEv12_Global", 0,
# - ISRIC-WISE 30-arcsec v1.0 (2016): 30-arcsec re-gridded; data expected
# at project_paths[["dir_ex_soil"]], "WISE", "WISE30sec_v1a")
"ExtractSoilDataFromISRICWISE30secV1a_Global", 0
),
# Approach to determine priorities of external data source extractions
# - If how_determine_sources == "order", then
# - Elevation: 'ExtractElevation_NED_USA' has priority over
# 'ExtractElevation_HWSD_Global' on a per site basis if both are requested and data
# is available for both
# - Soil texture: 'ExtractSoilDataFromCONUSSOILFromSTATSGO_USA' has first priority,
# then 'ExtractSoilDataFromISRICWISE30secV1a_Global' has second priority, and
# 'ExtractSoilDataFromISRICWISEv12_Global' has third priority on a per site basis
# if more than one are requested and data are available for multiple sources
# - Climate normals: 'ExtractSkyDataFromNOAAClimateAtlas_USA' has priority over
# 'ExtractSkyDataFromNCEPCFSR_Global' on a per site basis if both are requested and
# data is available for both
# - If how_determine_sources == "SWRunInformation", then use information in suitable
# columns of spreadsheet 'SWRunInformation' if available; if not available, then fall
# back to option 'order'
how_determine_sources = "SWRunInformation",
# If a run has multiple sources for daily weather, then take the one in the first
# position of 'dw_source_priority' if available, if not then second etc.
# Do not change/remove/add entries; only re-order to set different priorities
dw_source_priority = c("DayMet_NorthAmerica", "LookupWeatherFolder",
"Maurer2002_NorthAmerica", "Livneh2013_NorthAmerica", "NRCan_10km_Canada",
"NCEPCFSR_Global"),
# Creation of dbWeather
# Compression type of dbWeather; one value of eval(formals(memCompress)[[2]])
set_dbW_compresstype = "gzip"
)
#------ Options for simulation and meta-information of input data
opt_sim <- list(
# Set the random number generator for each task so that repeating runs with the same
# inputs results in the same outputs even under load-balanced parallel computations and
# under re-starts of partially finished runs
reproducible = TRUE,
global_seed = 1235L,
# Daily weather either from database 'dbWeather' or specified via 'WeatherFolder' in
# MasterInput.csv, treatmentDesign.csv, or experimentalDesign.csv
# Use daily weather from dbWeather for current condition
use_dbW_current = TRUE,
# Use daily weather from dbWeather for future scenario conditions
use_dbW_future = TRUE,
# Number of decimal places to which weather data is rounded
dbW_digits = 2,
# Identifying tag of folder names for site weather data if 'LookupWeatherFolder'
tag_WeatherFolder = "weath",
# Approach if there is no soil texture information for the deepest layer(s)
# - [TRUE] adjust soil depth
# - [FALSE] fill soil layer structure from shallower layer(s)
fix_depth_to_layers = FALSE,
# SOILWAT2 requires windspeed input data observed at a height of 2 m above ground
# - NCEP/CFSR data are at 10 m
windspeed_obs_height_m = 2,
# SOILWAT2 simulations are repeated with incrementally increased soil temperature
# profile layer width until a stable soil temperature solution is found or total
# failure is determined
increment_soiltemperature_deltaX_cm = 5,
# Maximal soil depth for which bare-soil evaporation coefficients are calculated
# if 'CalculateBareSoilEvaporationCoefficientsFromSoilTexture' is TRUE
depth_max_bs_evap_cm = 15,
# Shift monthly vegetation/production values in prod.in file by six months
# if TRUE and latitude < 0 (i.e., southern hemisphere)
adjust_veg_input_NS = TRUE,
# Potential natural vegetation based on climate data (Jose Paruelo et al. 1996, 1998)
# - default value: shrub_limit = 0.2 on page 1213 in Paruelo JM,
# Lauenroth WK (1996) Relative abundance of plant functional types in grasslands
# and shrublands of North America. Ecological Applications, 6, 1212-1224.
shrub_limit = 0.2,
# Growing season threshold
# - 10 C based on Trewartha's D temperateness definition:
# temperate climate := has >=4 & < 8 months with > 10C
# - 4 C based on standard input of mean monthly biomass values described in
# Bradford et al. 2014 Journal of Ecology
growseason_Tlimit_C = 4
)
#------ Output options
opt_out_fix <- list(
# Column numbers of master input file 'SWRunInformation', e.g, c(3, 7:9), or NULL:
# Selected columns will be part of 'header' table in dbOutput in addition to those of
# create_treatments, experimental_treatments, and climate scenario
Index_RunInformation = NULL,
# Text separator if 'makeInputForExperimentalDesign'
ExpInput_Seperator = "X!X",
# Current subset of dbOutput
# - Create from a subset of temporary text files (fast)
dbOutCurrent_from_tempTXT = FALSE,
# - Subset scenarios to climate.ambient (slow)
dbOutCurrent_from_dbOut = FALSE
)
#----- Spatial setup of simulations
# scorp := one of c("point", "cell"), whether to interpret the simulation locations
# provided in 'SWRunInformation' as point locations (1D-sites) or as means of 2D-cells
# If scorp == "cell" then provide either valid path to 'fsimraster' (takes precedence) or
# (grid resolution and grid crs)
# Currently, implemented for
# - actions[["map_inputs"]]
# - external extractions:
# - soils: "ExtractSoilDataFromISRICWISEv12_Global",
# "ExtractSoilDataFromISRICWISE30secV1a_Global",
# "ExtractSoilDataFromCONUSSOILFromSTATSGO_USA",
# - elevation: "ExtractElevation_NED_USA", "ExtractElevation_HWSD_Global",
# - climate normals: "ExtractSkyDataFromNOAAClimateAtlas_USA"
# NOTE: not implemented for 'ExtractSkyDataFromNCEPCFSR_Global'
in_space <- list(
scorp = scorp <- "point",
# Resolution of raster cells
sim_res = if (scorp == "cell") c(1e4, 1e4) else NA,
# Coordinate reference system (CRS)
sim_crs = if (scorp == "cell") {
"+init=epsg:5072" # NAD83(HARN) / Conus Albers
} else {
"+init=epsg:4326" # WGS84
}
)
#------ Time frames of simulation (may be modified by treatments)
sim_time <- list(
# current simulation years = simstartyr:endyr
# spinup_N = startyr - simstartyr
# years used for results = startyr:endyr
simstartyr = 1979,
startyr = startyr <- 1980,
endyr = endyr <- 2010,
#Future time period(s):
# Each list element of 'future_yrs' will be applied to every climate.conditions
# Each list element of 'future_yrs' is a vector with three elements
# c(delta, DSfut_startyr, DSfut_endyr)
# future simulation years = delta + simstartyr:endyr
# future simulation years downscaled based on
# - current conditions = DScur_startyr:DScur_endyr
# - future conditions = DSfut_startyr:DSfut_endyr
# NOTE: Multiple time periods doesn't work with external type 'ClimateWizardEnsembles'
DScur_startyr = startyr,
DScur_endyr = endyr,
future_yrs = list(
c(d <- 40, startyr + d, endyr + d),
c(d <- 90, startyr + d, endyr + d - 1) # most GCMs don't have data for 2100
)
)
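# Worked example (illustrative only, not evaluated by rSFSW2): with the values
# above, the first 'future_yrs' entry (delta = 40) gives future simulation
# years 2019:2050 (i.e., 40 + 1979:2010), downscaled from future conditions
# 2020-2050; the second entry (delta = 90) gives 2069:2100, downscaled from
# 2070-2099.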
#------ Requested climate conditions
req_scens <- list(
# Name of climatic conditions of the daily weather input when monthly climate
# perturbations are all off
ambient = "Current",
# Names of climate scenarios
# - If a simulation project does not include future climate conditions, then set
# models = NULL
# - If climate datafiles used, then in the order of data in the those datafiles
# - This is a list of all GCMs for CMIP5 provided by GDO-DCP-UC-LLNL: 37 RCP4.5, 35 RCP8.5
# Excluded: 'HadCM3' and 'MIROC4h' because data only available until 2035
models = c(
"RCP45.CanESM2", "RCP45.CESM1-CAM5", "RCP45.HadGEM2-CC",
"RCP85.CanESM2", "RCP85.CESM1-CAM5", "RCP85.HadGEM2-CC"),
sources = c(
# For each climate data set from which to extract, add an element like 'dataset1'
# Priority of extraction: dataset1, dataset2, ... if multiple sources provide data
# for a location
# Dataset = 'project_source' with
# - project = one string out of c("CMIP3", "CMIP5")
# - source = one string out of:
# - "ClimateWizardEnsembles_Global": mean monthly values at 50-km resolution for 2070-2099
# - "ClimateWizardEnsembles_USA": mean monthly change at 12-km resolution between 2070-2099 and 1971-2000
# - "BCSD_GDODCPUCLLNL_USA": monthly time series at 1/8-degree resolution
# - "BCSD_GDODCPUCLLNL_Global": monthly time series at 1/2-degree resolution
# - "BCSD_NEX_USA": monthly time series at 30-arcsec resolution; requires live internet access
# - "BCSD_SageSeer_USA": monthly time-series at 1-km resolution for the western US prepared by Katie Renwick
# - "ESGF_Global": monthly time-series at varying resolution
dataset1 = "CMIP5_BCSD_GDODCPUCLLNL_USA"
),
# Downscaling method (applied to each climate.conditions)
# Monthly scenario -> daily forcing variables
# One or multiple elements of
# - "raw"
# - "delta" (Hay et al. 2002)
# - "hybrid-delta" (Hamlet et al. 2010), "hybrid-delta-3mod"
# - "wgen-package" (Steinschneider & Brown 2013 WRR, doi:10.1002/wrcr.20528
method_DS = c("raw", "delta", "hybrid-delta-3mod"),
# Downscaling parameters
opt_DS = list(
daily_ppt_limit = 1.5,
monthly_limit = 1.5,
# Method to apply precipitation changes: either "detailed" or "simple"
ppt_type = "detailed",
# Method to fix spline predictions: one of "fail", "none" or "attempt";
# only used if extrapol_type is using splines
# - "fail": downscaling fails if spline extrapolations fall outside estimated
# monthly extremes
# - "none": no correction for extrapolated monthly extreme values, but this will
# likely fail during correction of extreme daily PPT events
# - "attempt": repeated attempts with jittering data to fit spline extrapolations
# within estimated monthly extreme values
fix_spline = "attempt",
# Method to extrapolate beyond observed data
# Options: one of "linear_Boe", "linear_Thermessl2012CC.QMv1b", "linear_none",
# "tricub_fmm", "tricub_monoH.FC", "tricub_natural", "normal_anomalies"
# - "linear": Gudmundsson et al. 2012: "If new model values (e.g. from climate
# projections) are larger than the training values used to estimate the empirical
# CDF, the correction found for the highest quantile of the training period is
# used (Boé et al., 2007; Themeßl et al., 2012)."
# - "tricub": I got really large output values, e.g., obs.hist = 54 cm,
# scen.fut = 64 cm, sbc.fut = 88 cm, hd.fut = 89 cm
# - "linear" (i.e., using Boe et al.'s correction) resulted for the same site to:
# obs.hist = 54 cm, scen.fut = 64 cm, sbc.fut = 75 cm, hd.fut = 75 cm
# - "normal", but no implemented in qmap: Tohver et al. 2014, Appendix A, p. 6:
# "... values that are outside the observed quantile map (e.g. in the early parts
# of the 20th century) are interpolated using standard anomalies (i.e. number of
# standard deviations from the mean) calculated for observed data and GCM data.
# Although this approach ostensibly assumes a normal distribution, it was found
# during testing to be much more stable than attempts to use more sophisticated
# approaches. In particular, the use of Extreme Value Type I or Generalized
# Extreme Value distributions for extending the tail of the probability
# distributions were both found to be highly unstable in practice and introduced
# unacceptable daily extremes in isolated grid cells. These errors occur because
# of irregularities in the shapes of the CDFs for observed and GCM data, which
# relates in part to the relatively small sample size used to construct the
# monthly CDFs (i.e. n = 30)."
extrapol_type = "linear_Thermessl2012CC.QMv1b",
# Test whether data distributions are within sigmaN * stats::sd of mean
sigmaN = 6,
# Additive instead of multiplicative adjustments for precipitation if precipitation
# is above or below 'PPTratioCutoff'; 3 was too small -> resulting in too many
# medium-sized ppt-events
PPTratioCutoff = 10
),
# Climate ensembles created across scenarios
# Ensemble families: NULL or from c("SRESA2", "SRESA1B", "SRESB1")
# This defines the groups for which ensembles of climate scenarios are calculated;
# corresponds to first part of scenario name
ensemble.families = NULL,
# If(!is.null(ensemble.families)) then this needs to have at least one value; this
# variable defines which ranked climate.conditions the ensembles are representing
# for each ensemble.families
ensemble.levels = c(2, 8, 15),
# If TRUE then for each ensemble.levels a file is saved with the scenario numbers
# corresponding to the ensemble.levels
save.scenario.ranks = TRUE
)
#------ Requested output
# Turn aggregation for variable groups on (1) or off (0), don't delete any names
req_out <- list(
# Overall aggregated output table
overall_out = c(
#---Aggregation: SOILWAT2 inputs
"input_SoilProfile", 1,
"input_FractionVegetationComposition", 1,
"input_VegetationBiomassMonthly", 1,
"input_VegetationPeak", 1,
"input_Phenology", 1,
"input_TranspirationCoeff", 1,
"input_ClimatePerturbations", 1,
#---Aggregation: Climate and weather
"yearlyTemp", 1,
"yearlyPPT", 1,
"dailySnowpack", 1,
"dailyFrostInSnowfreePeriod", 1,
"dailyHotDays", 1,
"dailyWarmDays", 1,
"dailyPrecipitationEventSizeDistribution", 1,
"yearlyPET", 1,
"monthlySeasonalityIndices", 1,
#---Aggregation: Climatic dryness
"yearlymonthlyTemperateDrylandIndices", 1,
"yearlyDryWetPeriods", 1,
"dailyWeatherGeneratorCharacteristics", 1,
"dailyPrecipitationFreeEventDistribution", 1,
"monthlySPEIEvents", 1,
#---Aggregation: Climatic control
"monthlyPlantGrowthControls", 1,
"dailyC4_TempVar", 1,
"dailyDegreeDays", 1,
#---Aggregation: Yearly water balance
"yearlyAET", 1,
"yearlyWaterBalanceFluxes", 1,
"dailySoilWaterPulseVsStorage", 1,
#---Aggregation: Daily extreme values
"dailyTranspirationExtremes", 1,
"dailyTotalEvaporationExtremes", 1,
"dailyDrainageExtremes", 1,
"dailyInfiltrationExtremes", 1,
"dailyAETExtremes", 1,
"dailySWPextremes", 1,
"dailyRechargeExtremes", 1,
#---Aggregation: Ecological dryness
# Note: 'dailyNRCS_SoilMoistureTemperatureRegimes*' require at least soil layers at
# 10, 20, 30, 50, 60, 90 cm
"dailyNRCS_SoilMoistureTemperatureRegimes_Intermediates", 0,
"dailyNRCS_SoilMoistureTemperatureRegimes", 0,
"dailyNRCS_Chambers2014_ResilienceResistance", 0,
"dailyNRCS_Maestas2016_ResilienceResistance", 0,
"dailyWetDegreeDays", 1,
"dailyThermalDrynessStartEnd", 1,
"dailyThermalSWPConditionCount", 1,
"monthlySWPdryness", 1,
"dailySWPdrynessANDwetness", 1,
"dailySuitablePeriodsDuration", 1,
"dailySuitablePeriodsAvailableWater", 1,
"dailySuitablePeriodsDrySpells", 1,
"dailySWPdrynessDurationDistribution", 1,
"dailySWPdrynessEventSizeDistribution", 1,
"dailySWPdrynessIntensity", 1,
"dailyThermalDrynessStress", 1,
#---Aggregation: Mean monthly values
"monthlyTemp", 1,
"monthlyPPT", 1,
"monthlySnowpack", 1,
"monthlySoilTemp", 1,
"monthlyRunoff", 1,
"monthlyHydraulicRedistribution", 1,
"monthlyInfiltration", 1,
"monthlyDeepDrainage", 1,
"monthlySWPmatric", 1,
"monthlyVWCbulk", 1,
"monthlyVWCmatric", 1,
"monthlySWCbulk", 1,
"monthlySWAbulk", 1,
"monthlyTranspiration", 1,
"monthlySoilEvaporation", 1,
"monthlyAET", 1,
"monthlyPET", 1,
"monthlyVPD", 1,
"monthlyAETratios", 1,
"monthlyPETratios", 1,
#---Aggregation: Potential regeneration
"dailyRegeneration_bySWPSnow", 0,
"dailyRegeneration_GISSM", 0
),
# Select variables to aggregate daily means and stats::sd (one per day of year, DOY)
# options: NULL or a selection of c("AET", "Transpiration", "EvaporationSoil",
# "EvaporationSurface", "EvaporationTotal", "VWCbulk", "VWCmatric", "SWCbulk",
# "SWPmatric", "Snowpack", "SWAbulk", "Rain", "Snowfall", "Snowmelt", "SnowLoss",
# "Runoff", "Infiltration", "DeepDrainage", "PET", "TotalPrecipitation",
# "TemperatureMin", "TemperatureMax", "SoilTemperature")
mean_daily = c("AET", "Transpiration", "EvaporationSoil", "EvaporationSurface",
"EvaporationTotal", "VWCbulk", "VWCmatric", "SWCbulk", "SWPmatric", "Snowpack",
"SWAbulk", "Rain", "Snowfall", "Snowmelt", "SnowLoss", "Runoff", "Infiltration",
"DeepDrainage", "PET", "TotalPrecipitation", "TemperatureMin", "TemperatureMax",
"SoilTemperature"),
# Select variables to output as aggregated yearly time series
# options: NULL or a selection of c("dailyRegeneration_GISSM")
traces = NULL
)
#------ Parameters for output aggregations
opt_agg <- list(
# Aggregate overall simulation output across soil layers with separate values for
# shallow/top (soil layers < aon_toplayer_cm) and deep/bottom soil layers
aon_toplayer_cm = 20,
# Aggregate mean daily simulation output across soil layers
doy_slyrs = list(
# Do [no] aggregate soil layers
# - TRUE, aggregate into 1-4 layers for mean/stats::sd
# - FALSE, output values for every simulated soil layer
do = TRUE,
# Depth of aggregated soil layers
# Options: depth in centimeters or
# - NULL is interpreted as deepest soil layer (not available for first)
# - NA indicates that no third/fourth aggregated layer is calculated
# (not available for first and second)
# Depth of first aggregated soil layer
first_cm = 10,
# Depth of second aggregated soil layer
second_cm = 20,
# Depth of third aggregated soil layer
third_cm = 60,
# Depth of fourth aggregated soil layer
fourth_cm = NULL
),
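# With the settings above, mean daily output is aggregated over 0-10, 10-20,
# and 20-60 cm, plus a fourth layer from 60 cm down to the deepest simulated
# soil layer (fourth_cm = NULL).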
# The counting of timing variables is shifted by 6 months (e.g., July becomes 1st
# month, etc.) if TRUE and latitude < 0 (i.e., southern hemisphere)
adjust_NorthSouth = TRUE,
# Critical soil water potential(s) [MPa] to calculate 'dry' and 'wet' soils
# (cf. wilting point) and available soil water
SWPcrit_MPa = c(-1.5, -3.0, -3.5, -3.9),
# Critical temperatures [Celsius degrees]
Tmin_crit_C = c(-15, -9, 0),
Tmax_crit_C = c(34, 40),
Tmean_crit_C = c(5, 15, 25, 35),
# Base temperature (degree C) above which degree-days are accumulated
Tbase_DD_C = 0,
# Daily weather frequency distributions
# Bins of x mm precipitation event sizes
bin_prcp_mm = 5,
# Bins of x consecutive days without precipitation
bin_prcpfree_days = 10,
# Parameters for 'dailyRegeneration_bySWPSnow'
dailyRegeneration_bySWPSnow = list(
season.start = "LastSnow", # either doy or "LastSnow"
season.end = "FirstSnow", # either doy or "FirstSnow"
germination.duration = 7, # in days
germination.swp.surface = -0.2, # in MPa, duration must have at least x MPa
establishment.duration = 14, # in days
establishment.swp.surface = -0.4, # in MPa, duration must have at least x MPa
establishment.delay = 1 # establishment starts latest x days after end of germination
),
# NRCS soil moisture regimes (SMR) and soil temperature regimes (STR) settings
NRCS_SMTRs = list(
# Approach for regime determination ('data' -> 'conditions' -> 'regime')
aggregate_at = "conditions",
# Aggregation agreement level (e.g., 0.5 = majority; 1 = all)
crit_agree_frac = 0.9,
# Restrict data to normal years (as defined by SSS 2014) if TRUE; if FALSE, all years
use_normal = TRUE,
SWP_dry = -1.5, #dry means SWP below -1.5 MPa (Soil Survey Staff 2014: p.29)
SWP_sat = -0.033, #saturated means SWP above -0.033 MPa
impermeability = 0.9 #impermeable layer
)
)
##############################################################################
|
/Test_projects/Test2_LookupWeatherFolders/SFSW2_project_descriptions.R
|
no_license
|
DrylandEcology/rSFSW2_tools
|
R
| false | false | 28,606 |
r
|
#----------------------------------------------------------------------------------------#
# rSFSW2: FRAMEWORK FOR SOILWAT2 SIMULATIONS: CREATING SIMULATION RUNS, EXECUTING
# SIMULATIONS, AND AGGREGATING OUTPUTS
#
# See demo/SFSW2_project_code.R for details
#----------------------------------------------------------------------------------------#
##############################################################################
#----------------------- DESCRIPTION OF SIMULATION PROJECT ---------------------
# NOTE: The values cannot be changed once a rSFSW2 simulation project is set up. The
# values of settings (file demo/SFSW2_project_settings.R) may be changed from run to run.
#----- Metainformation about computing platform
opt_platform <- list(
host = c("local", "hpc")[1],
no_parallel = any(
identical(tolower(Sys.getenv("NOT_CRAN")), "false"),
identical(tolower(Sys.getenv("TRAVIS")), "true"),
identical(tolower(Sys.getenv("APPVEYOR")), "true"))
)
#------ Paths to simulation framework project folders
project_paths <- list(
dir_prj = dir_prj <- getwd(),
# Path to inputs
dir_in = dir_in <- file.path(dir_prj, "1_Data_SWInput"),
# Folder with default standalone SOILWAT2 input files
dir_in_sw = file.path(dir_in, "SoilWat2_defaults"),
# Folder with data input files
dir_in_dat = file.path(dir_in, "datafiles"),
# Folder with treatment input files according to treatment instructions
dir_in_treat = file.path(dir_in, "treatments"),
# Folder with GISSM regeneration parameters (will contain one file per species)
dir_in_gissm = file.path(dir_in, "regeneration"),
# Path to where large outputs are saved to disk
dir_big = dir_big <- dir_prj,
# Path to where rSOILWAT2 objects are saved to disk
# if saveRsoilwatInput and/or saveRsoilwatOutput
dir_out_sw = file.path(dir_big, "3_Runs"),
# Path to outputs produced by rSFSW2
dir_out = dir_out <- file.path(dir_big, "4_Data_SWOutputAggregated"),
# Path to where rSFSW2 will store temporary files
dir_out_temp = file.path(dir_out, "temp"),
# Path to various other output
dir_out_expDesign = file.path(dir_out, "Experimentals_Input_Data"),
dir_out_traces = file.path(dir_out, "Time_Traces"),
# Path from where external data are extraced
dir_external = dir_ex <- if (identical(opt_platform[["host"]], "local")) {
file.path("/Volumes", "BookDuo_12TB", "BigData", "GIS", "Data")
} else if (identical(opt_platform[["host"]], "hpc")) {
file.path("/home", "fas", "lauenroth", "ds2483", "project", "BigData", "GIS",
"Data")
},
# Path to historic weather and climate data including
# Livneh, Maurer, ClimateAtlas, and NCEPCFSR data
dir_ex_weather = file.path(dir_ex, "Weather_Past"),
# Path to future scenario data
dir_ex_fut = file.path(dir_ex, "Weather_Future"),
# Path to soil data
dir_ex_soil = file.path(dir_ex, "Soils"),
# Path to topographic data
dir_ex_dem = file.path(dir_ex, "Topography")
)
#------ Base names or full names of input files
fnames_in <- list(
fmaster = "SWRuns_InputMaster_Test_v11.csv",
fslayers = "SWRuns_InputData_SoilLayers_v9.csv",
ftreatDesign = "SWRuns_InputData_TreatmentDesign_v14.csv",
fexpDesign = "SWRuns_InputData_ExperimentalDesign_v06.csv",
fclimnorm = "SWRuns_InputData_cloud_v10.csv",
fvegetation = "SWRuns_InputData_prod_v11.csv",
fsite = "SWRuns_InputData_siteparam_v14.csv",
fsoils = "SWRuns_InputData_soils_v12.csv",
fweathersetup = "SWRuns_InputData_weathersetup_v10.csv",
fclimscen_delta = "SWRuns_InputData_ClimateScenarios_Change_v11.csv",
fclimscen_values = "SWRuns_InputData_ClimateScenarios_Values_v11.csv",
LookupClimatePPTScenarios = "climate.ppt.csv",
LookupClimateTempScenarios = "climate.temp.csv",
LookupShiftedPPTScenarios = "shifted.ppt.csv",
LookupEvapCoeffFromTable = "BareSoilEvaporationCoefficientsPerSoilLayer.csv",
LookupTranspCoeffFromTable = "TranspirationCoefficients_v2.csv",
LookupTranspRegionsFromTable = "TranspirationRegionsPerSoilLayer.csv",
LookupSnowDensityFromTable = "MeanMonthlySnowDensities_v2.csv",
LookupVegetationComposition = "VegetationComposition_MeanMonthly_v5.csv",
# Pre-processed input: storage file of input data for repeated access (faster) instead
# of re-reading from (slower) csv files if flag 'use_preprocin' is TRUE
fpreprocin = "SWRuns_InputAll_PreProcessed.rds",
# Database with daily weather data
fdbWeather = file.path(project_paths[["dir_in"]], "dbWeatherData_test.sqlite3"),
# Raster describing spatial interpretation of simulation experiment if scorp == "cell"
fsimraster = file.path(project_paths[["dir_in"]], "sim_raster.grd")
)
#------ Full names of output files
fnames_out <- list(
dbOutput = file.path(project_paths[["dir_out"]], "dbTables.sqlite3"),
dbOutput_current = file.path(project_paths[["dir_out"]], "dbTables_current.sqlite3"),
timerfile = file.path(project_paths[["dir_out"]], "Timing_Simulation.csv")
)
#------ Input data sources and options for data preparation
opt_input <- list(
prior_calculations = c(
"AddRequestedSoilLayers", 0,
"EstimateConstantSoilTemperatureAtUpperAndLowerBoundaryAsMeanAnnualAirTemperature", 1,
"EstimateInitialSoilTemperatureForEachSoilLayer", 1,
"CalculateBareSoilEvaporationCoefficientsFromSoilTexture", 1
),
# Interpolate and add soil layers if not available if 'AddRequestedSoilLayers'
requested_soil_layers = c(5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150),
# Request data from datasets ('external' to a rSFSW2-project)
req_data = c(
# Daily weather data for current conditions
# - Maurer et al. 2002: 1/8-degree res.; data expected at file.path(
# project_paths[["dir_ex_weather"]], "Maurer+_2002updated", "DAILY_FORCINGS")
"GriddedDailyWeatherFromMaurer2002_NorthAmerica", 0,
# - Thornton et al. 1997: 1-km res.; data expected at file.path(
# project_paths[["dir_ex_weather"]], "DayMet_NorthAmerica",
# "DownloadedSingleCells_FromDayMetv3_NorthAmerica")
"GriddedDailyWeatherFromDayMet_NorthAmerica", 0,
# - McKenney et al. 2011: 10-km res.; use with dbW; data expected at file.path(
# project_paths[["dir_ex_weather"]], "NRCan_10km_Canada", "DAILY_GRIDS")
"GriddedDailyWeatherFromNRCan_10km_Canada", 0,
# - Saha et al. 2010: 0.3125-deg res.; use with dbW; data expected at file.path(
# project_paths[["dir_ex_weather"]], "NCEPCFSR_Global", "CFSR_weather_prog08032012")
"GriddedDailyWeatherFromNCEPCFSR_Global", 0,
# - Livneh et al. 2013: 1/16 degree res.; data expected at file.path(
# project_paths[["dir_ex_weather"]], "Livneh_NA_2013", "MONTHLY_GRIDS")
"GriddedDailyWeatherFromLivneh2013_NorthAmerica", 0,
# Monthly PPT, Tmin, Tmax conditions: if using NEX or GDO-DCP-UC-LLNL,
# climate condition names must be of the form SCENARIO.GCM with SCENARIO being
# used for ensembles; if using climatewizard, climate condition names must be
# equal to what is in the respective directories
# - data expected at file.path(project_paths[["dir_ex_fut"]], "ClimateScenarios")
"ExtractClimateChangeScenarios", 1,
# Mean monthly wind, relative humidity, and 100% - sunshine
# - NCDC 2005: data expected at file.path(project_paths[["dir_ex_weather"]],
# "ClimateAtlasUS")
"ExtractSkyDataFromNOAAClimateAtlas_USA", 0,
# - Saha et al. 2010: project_paths[["dir_ex_weather"]], "NCEPCFSR_Global",
# "CFSR_weather_prog08032012")
"ExtractSkyDataFromNCEPCFSR_Global", 0,
# Topography
# - NED, National Elevation Dataset (ned.usgs.gov): 1-arcsec res; data expected
# at project_paths[["dir_ex_dem"]], "NED_USA", "NED_1arcsec")
"ExtractElevation_NED_USA", 0,
# - Harmonized World Soil Database: 30-arcsec res; data expected
# at project_paths[["dir_ex_dem"]], "HWSD")
"ExtractElevation_HWSD_Global", 0,
# Soil texture
# - Harmonized World Soil Database: 1-km re-gridded; data expected
# at project_paths[["dir_ex_soil"]], "CONUSSoil", "output", "albers")
"ExtractSoilDataFromCONUSSOILFromSTATSGO_USA", 0,
# - ISRIC-WISE 5-arcmin v1.2 (2012): 5-arcmin re-gridded; data expected
# at project_paths[["dir_ex_soil"]], "WISE", "wise5by5min_v1b", "Grid", "smw5by5min")
"ExtractSoilDataFromISRICWISEv12_Global", 0,
# - ISRIC-WISE 30-arsec v1.0 (2016): 30-arcsec re-gridded; data expected
# at project_paths[["dir_ex_soil"]], "WISE", "WISE30sec_v1a")
"ExtractSoilDataFromISRICWISE30secV1a_Global", 0
),
# Approach to determine prioprities of external data source extractions
# - If how_determine_sources == "order", then
# - Elevation: 'ExtractElevation_NED_USA' has priority over
# 'ExtractElevation_HWSD_Global' on a per site basis if both are requested and data
# is available for both
# - Soil texture: 'ExtractSoilDataFromCONUSSOILFromSTATSGO_USA' has first priority,
# then 'ExtractSoilDataFromISRICWISE30secV1a_Global' has second priority, and
# 'ExtractSoilDataFromISRICWISEv12_Global' has third priority on a per site basis
# if more than one are requested and data are available for multiple sources
# - Climate normals: 'ExtractSkyDataFromNOAAClimateAtlas_USA' has priority over
# 'ExtractSkyDataFromNCEPCFSR_Global' on a per site basis if both are requested and
# data is available for both
# - If how_determine_sources == "SWRunInformation", then use information in suitable
# columns of spreadsheet 'SWRunInformation' if available; if not available, then fall
# back to option 'order'
how_determine_sources = "SWRunInformation",
# If a run has multiple sources for daily weather, then take the one in the first
# position of 'dw_source_priority' if available, if not then second etc.
# Do not change/remove/add entries; only re-order to set different priorities
dw_source_priority = c("DayMet_NorthAmerica", "LookupWeatherFolder",
"Maurer2002_NorthAmerica", "Livneh2013_NorthAmerica", "NRCan_10km_Canada",
"NCEPCFSR_Global"),
# Creation of dbWeather
# Compression type of dbWeather; one value of eval(formals(memCompress)[[2]])
set_dbW_compresstype = "gzip"
)
#------ Options for simulation and meta-information of input data
opt_sim <- list(
# Set the random number generator for each task so that repeating runs with the same
# inputs results in the same outputs even under load-balanced parallel computations and
# under re-starts of partially finished runs
reproducible = TRUE,
global_seed = 1235L,
# Daily weather either from database 'dbWeather' or specified via 'WeatherFolder' in
# MasterInput.csv, treatmentDesign.csv, or experimentalDesign.csv
# Use daily weather from dbWeather for current condition
use_dbW_current = TRUE,
# Use daily weather from dbWeather for future scenario conditions
use_dbW_future = TRUE,
# Number of decimal places to which weather data is rounded
dbW_digits = 2,
# Identifying tag of folder names for site weather data if 'LookupWeatherFolder'
tag_WeatherFolder = "weath",
# Approach if there is no soil texture information for the deepest layer(s)
# - [TRUE] adjust soil depth
# - [FALSE] fill soil layer structure from shallower layer(s)
fix_depth_to_layers = FALSE,
# SOILWAT2 requires windspeed input data observed at a height of 2 m above ground
# - NCEP/CRSF data are at 10 m
windspeed_obs_height_m = 2,
# SOILWAT2 simulations are repeated with incrementally increased soil temperature
# profile layer width until a stable soil temperature solution is found or total
# failure is determined
increment_soiltemperature_deltaX_cm = 5,
# Maximal soil depth for which bare-soil evaporation coefficients are calculated
# if 'CalculateBareSoilEvaporationCoefficientsFromSoilTexture' is TRUE
depth_max_bs_evap_cm = 15,
# Shift monthly vegetation/production values in prod.in file by six months
# if TRUE and latitude < 0 (i.e., southern hemisphere)
adjust_veg_input_NS = TRUE,
# Potential natural vegetation based on climate data (Jose Paruelo et al. 1996, 1998)
# - default value: shrub_limit = 0.2 on page 1213 in Paruelo JM,
# Lauenroth WK (1996) Relative abundance of plant functional types in grasslands
# and shrublands of North America. Ecological Applications, 6, 1212-1224.
shrub_limit = 0.2,
# Growing season threshold
# - 10 C based on Trewartha's D temperateness definition:
# temperate climate := has >=4 & < 8 months with > 10C
# - 4 C based standard input of mean monthly biomass values described in
# Bradford et al. 2014 Journal of Ecology
growseason_Tlimit_C = 4
)
#------ Output options
opt_out_fix <- list(
# Column numbers of master input file 'SWRunInformation', e.g, c(3, 7:9), or NULL:
# Selected columns will be part of 'header' table in dbOutput in addition to those of
# create_treatments, experimental_treatments, and climate scenario
Index_RunInformation = NULL,
# Text separator if 'makeInputForExperimentalDesign'
ExpInput_Seperator = "X!X",
# Current subset of dbOutput
# - Create from a subset of temporary text files (fast)
dbOutCurrent_from_tempTXT = FALSE,
# - Subset scenarios to climate.ambient (slow)
dbOutCurrent_from_dbOut = FALSE
)
#----- Spatial setup of simulations
# scorp := one of c("point", "cell"), whether to interpret the simulation locations
# provided in 'SWRunInformation' as point locations (1D-sites) or as means of 2D-cells
# If scorp == "cell" then provide either valid path to 'fsimraster' (takes precedence) or
# (grid resolution and grid crs)
# Currently, implemented for
# - actions[["map_inputs"]]
# - external extractions:
# - soils: "ExtractSoilDataFromISRICWISEv12_Global",
# "ExtractSoilDataFromISRICWISE30secV1a_Global",
# "ExtractSoilDataFromCONUSSOILFromSTATSGO_USA",
# - elevation: "ExtractElevation_NED_USA", "ExtractElevation_HWSD_Global",
# - climate normals: "ExtractSkyDataFromNOAAClimateAtlas_USA"
# NOTE: not implemented for 'ExtractSkyDataFromNCEPCFSR_Global'
in_space <- list(
scorp = scorp <- "point",
# Resolution of raster cells
sim_res = if (scorp == "cell") c(1e4, 1e4) else NA,
# Coordinate reference system (CRS)
sim_crs = if (scorp == "cell") {
"+init=epsg:5072" # NAD83(HARN) / Conus Albers
} else {
"+init=epsg:4326" # WGS84
}
)
#------ Time frames of simulation (may be modified by treatments)
sim_time <- list(
# current simulation years = simstartyr:endyr
# spinup_N = startyr - simstartyr
# years used for results = startyr:endyr
simstartyr = 1979,
startyr = startyr <- 1980,
endyr = endyr <- 2010,
#Future time period(s):
# Each list element of 'future_yrs' will be applied to every climate.conditions
# Each list element of 'future_yrs' is a vector with three elements
# c(delta, DSfut_startyr, DSfut_endyr)
# future simulation years = delta + simstartyr:endyr
# future simulation years downscaled based on
# - current conditions = DScur_startyr:DScur_endyr
# - future conditions = DSfut_startyr:DSfut_endyr
# NOTE: Multiple time periods doesn't work with external type 'ClimateWizardEnsembles'
DScur_startyr = startyr,
DScur_endyr = endyr,
future_yrs = list(
c(d <- 40, startyr + d, endyr + d),
c(d <- 90, startyr + d, endyr + d - 1) # most GCMs don't have data for 2100
)
)
#------ Requested climate conditions
req_scens <- list(
# Name of climatic conditions of the daily weather input when monthly climate
# perturbations are all off
ambient = "Current",
# Names of climate scenarios
# - If a simulation project does not include future climate conditions, then set
# models = NULL
# - If climate datafiles used, then in the order of data in the those datafiles
# - This is a list of all GCMs for CMIP5 provided by GDO-DCP-UC-LLNL: 37 RCP4.5, 35 RCP8.5
# Excluded: 'HadCM3' and 'MIROC4h' because data only available until 2035
models = c(
"RCP45.CanESM2", "RCP45.CESM1-CAM5", "RCP45.HadGEM2-CC",
"RCP85.CanESM2", "RCP85.CESM1-CAM5", "RCP85.HadGEM2-CC"),
sources = c(
# For each climate data set from which to extract, add an element like 'dataset1'
# Priority of extraction: dataset1, dataset2, ... if multiple sources provide data
# for a location
# Dataset = 'project_source' with
# - project = one string out of c("CMIP3", "CMIP5")
# - source = one string out of:
# - "ClimateWizardEnsembles_Global": mean monthly values at 50-km resolution for 2070-2099
# - "ClimateWizardEnsembles_USA": mean monthly change at 12-km resolution between 2070-2099 and 1971-2000
# - "BCSD_GDODCPUCLLNL_USA": monthly time series at 1/8-degree resolution
# - "BCSD_GDODCPUCLLNL_Global": monthly time series at 1/2-degree resolution
# - "BCSD_NEX_USA": monthly time series at 30-arcsec resolution; requires live internet access
# - "BCSD_SageSeer_USA": monthly time-series at 1-km resolution for the western US prepared by Katie Renwick
# - "ESGF_Global": monthly time-series at varying resolution
dataset1 = "CMIP5_BCSD_GDODCPUCLLNL_USA"
),
# Downscaling method (applied to each each climate.conditions)
# Monthly scenario -> daily forcing variables
# One or multiple elements of
# - "raw"
# - "delta" (Hay et al. 2002)
# - "hybrid-delta" (Hamlet et al. 2010), "hybrid-delta-3mod"
# - "wgen-package" (Steinschneider & Brown 2013 WRR, doi:10.1002/wrcr.20528
method_DS = c("raw", "delta", "hybrid-delta-3mod"),
# Downscaling parameters
opt_DS = list(
daily_ppt_limit = 1.5,
monthly_limit = 1.5,
# Method to apply precipitation changes: either "detailed" or "simple"
ppt_type = "detailed",
# Method to fix spline predictions: one of "fail", "none" or "attempt";
# only used if extrapol_type is using splines
# - "fail": downscaling fails if spline extrapolations fall outside estimated
# monthly extremes
# - "none": no correction for extrapolated monthly extreme values, but this will
# likely fail during correction of extreme daily PPT events
# - "attempt": repeated attempts with jittering data to fit spline extrapolations
# within estimated monthly extreme values
fix_spline = "attempt",
# Method to extrapolate beyond observed data
# Options: one of "linear_Boe", "linear_Thermessl2012CC.QMv1b", "linear_none",
# "tricub_fmm", "tricub_monoH.FC", "tricub_natural", "normal_anomalies"
# - "linear": Gudmundsson et al. 2012: "If new model values (e.g. from climate
# projections) are larger than the training values used to estimate the empirical
# CDF, the correction found for the highest quantile of the training period is
    #     used (Boé et al., 2007; Themeßl et al., 2012)."
# - "tricub": I got really large output values, e.g., obs.hist = 54 cm,
# scen.fut = 64 cm, sbc.fut = 88 cm, hd.fut = 89 cm
# - "linear" (i.e., using Boe et al.'s correction) resulted for the same site to:
# obs.hist = 54 cm, scen.fut = 64 cm, sbc.fut = 75 cm, hd.fut = 75 cm
# - "normal", but no implemented in qmap: Tohver et al. 2014, Appendix A, p. 6:
# "... values that are outside the observed quantile map (e.g. in the early parts
# of the 20th century) are interpolated using standard anomalies (i.e. number of
# standard deviations from the mean) calculated for observed data and GCM data.
# Although this approach ostensibly assumes a normal distribution, it was found
# during testing to be much more stable than attempts to use more sophisticated
# approaches. In particular, the use of Extreme Value Type I or Generalized
# Extreme Value distributions for extending the tail of the probability
# distributions were both found to be highly unstable in practice and introduced
# unacceptable daily extremes in isolated grid cells. These errors occur because
# of irregularities in the shapes of the CDFs for observed and GCM data, which
# relates in part to the relatively small sample size used to construct the
# monthly CDFs (i.e. n = 30)."
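    #     As a rough illustration (not code used here), the standard-anomaly idea is:
    #       z = (x_GCM - mean_GCM) / sd_GCM;  extrapolated value = mean_obs + z * sd_obs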
extrapol_type = "linear_Thermessl2012CC.QMv1b",
# Test whether data distributions are within sigmaN * stats::sd of mean
sigmaN = 6,
# Additive instead of multiplicative adjustments for precipitation if precipitation
# is above or below 'PPTratioCutoff'; 3 was too small -> resulting in too many
    # medium-sized ppt-events
PPTratioCutoff = 10
),
# Climate ensembles created across scenarios
# Ensemble families: NULL or from c("SRESA2", "SRESA1B", "SRESB1")
# This defines the groups for which ensembles of climate scenarios are calculated;
# corresponds to first part of scenario name
ensemble.families = NULL,
# If(!is.null(ensemble.families)) then this needs to have at least one value; this
# variable defines which ranked climate.conditions the ensembles are representing
# for each ensemble.families
ensemble.levels = c(2, 8, 15),
# If TRUE then for each ensemble.levels a file is saved with the scenario numbers
# corresponding to the ensemble.levels
save.scenario.ranks = TRUE
)
#------ Requested output
# Turn aggregation for variable groups on (1) or off (0), don't delete any names
req_out <- list(
# Overall aggregated output table
overall_out = c(
#---Aggregation: SOILWAT2 inputs
"input_SoilProfile", 1,
"input_FractionVegetationComposition", 1,
"input_VegetationBiomassMonthly", 1,
"input_VegetationPeak", 1,
"input_Phenology", 1,
"input_TranspirationCoeff", 1,
"input_ClimatePerturbations", 1,
#---Aggregation: Climate and weather
"yearlyTemp", 1,
"yearlyPPT", 1,
"dailySnowpack", 1,
"dailyFrostInSnowfreePeriod", 1,
"dailyHotDays", 1,
"dailyWarmDays", 1,
"dailyPrecipitationEventSizeDistribution", 1,
"yearlyPET", 1,
"monthlySeasonalityIndices", 1,
#---Aggregation: Climatic dryness
"yearlymonthlyTemperateDrylandIndices", 1,
"yearlyDryWetPeriods", 1,
"dailyWeatherGeneratorCharacteristics", 1,
"dailyPrecipitationFreeEventDistribution", 1,
"monthlySPEIEvents", 1,
#---Aggregation: Climatic control
"monthlyPlantGrowthControls", 1,
"dailyC4_TempVar", 1,
"dailyDegreeDays", 1,
#---Aggregation: Yearly water balance
"yearlyAET", 1,
"yearlyWaterBalanceFluxes", 1,
"dailySoilWaterPulseVsStorage", 1,
#---Aggregation: Daily extreme values
"dailyTranspirationExtremes", 1,
"dailyTotalEvaporationExtremes", 1,
"dailyDrainageExtremes", 1,
"dailyInfiltrationExtremes", 1,
"dailyAETExtremes", 1,
"dailySWPextremes", 1,
"dailyRechargeExtremes", 1,
#---Aggregation: Ecological dryness
# Note: 'dailyNRCS_SoilMoistureTemperatureRegimes*' require at least soil layers at
# 10, 20, 30, 50, 60, 90 cm
"dailyNRCS_SoilMoistureTemperatureRegimes_Intermediates", 0,
"dailyNRCS_SoilMoistureTemperatureRegimes", 0,
"dailyNRCS_Chambers2014_ResilienceResistance", 0,
"dailyNRCS_Maestas2016_ResilienceResistance", 0,
"dailyWetDegreeDays", 1,
"dailyThermalDrynessStartEnd", 1,
"dailyThermalSWPConditionCount", 1,
"monthlySWPdryness", 1,
"dailySWPdrynessANDwetness", 1,
"dailySuitablePeriodsDuration", 1,
"dailySuitablePeriodsAvailableWater", 1,
"dailySuitablePeriodsDrySpells", 1,
"dailySWPdrynessDurationDistribution", 1,
"dailySWPdrynessEventSizeDistribution", 1,
"dailySWPdrynessIntensity", 1,
"dailyThermalDrynessStress", 1,
#---Aggregation: Mean monthly values
"monthlyTemp", 1,
"monthlyPPT", 1,
"monthlySnowpack", 1,
"monthlySoilTemp", 1,
"monthlyRunoff", 1,
"monthlyHydraulicRedistribution", 1,
"monthlyInfiltration", 1,
"monthlyDeepDrainage", 1,
"monthlySWPmatric", 1,
"monthlyVWCbulk", 1,
"monthlyVWCmatric", 1,
"monthlySWCbulk", 1,
"monthlySWAbulk", 1,
"monthlyTranspiration", 1,
"monthlySoilEvaporation", 1,
"monthlyAET", 1,
"monthlyPET", 1,
"monthlyVPD", 1,
"monthlyAETratios", 1,
"monthlyPETratios", 1,
#---Aggregation: Potential regeneration
"dailyRegeneration_bySWPSnow", 0,
"dailyRegeneration_GISSM", 0
),
# Select variables to aggregate daily means and stats::sd (one per day of year, DOY)
# options: NULL or a selection of c("AET", "Transpiration", "EvaporationSoil",
# "EvaporationSurface", "EvaporationTotal", "VWCbulk", "VWCmatric", "SWCbulk",
# "SWPmatric", "Snowpack", "SWAbulk", "Rain", "Snowfall", "Snowmelt", "SnowLoss",
# "Runoff", "Infiltration", "DeepDrainage", "PET", "TotalPrecipitation",
# "TemperatureMin", "TemperatureMax", "SoilTemperature")
mean_daily = c("AET", "Transpiration", "EvaporationSoil", "EvaporationSurface",
"EvaporationTotal", "VWCbulk", "VWCmatric", "SWCbulk", "SWPmatric", "Snowpack",
"SWAbulk", "Rain", "Snowfall", "Snowmelt", "SnowLoss", "Runoff", "Infiltration",
"DeepDrainage", "PET", "TotalPrecipitation", "TemperatureMin", "TemperatureMax",
"SoilTemperature"),
# Select variables to output as aggregated yearly time series
# options: NULL or a selection of c("dailyRegeneration_GISSM")
traces = NULL
)
#------ Parameters for output aggregations
opt_agg <- list(
# Aggregate overall simulation output across soil layers with separate values for
# shallow/top (soil layers < aon_toplayer_cm) and deep/bottom soil layers
aon_toplayer_cm = 20,
# Aggregate mean daily simulation output across soil layers
doy_slyrs = list(
    # Do [or do not] aggregate soil layers
# - TRUE, aggregate into 1-4 layers for mean/stats::sd
# - FALSE, output values for every simulated soil layer
do = TRUE,
# Depth of aggregated soil layers
# Options: depth in centimeters or
# - NULL is interpreted as deepest soil layer (not available for first)
# - NA indicates that no third/fourth aggregated layer is calculated
# (not available for first and second)
# Depth of first aggregated soil layer
first_cm = 10,
# Depth of second aggregated soil layer
second_cm = 20,
# Depth of third aggregated soil layer
third_cm = 60,
# Depth of fourth aggregated soil layer
fourth_cm = NULL
),
  # The counting of timing variables is shifted by 6 months (e.g., July becomes 1st
# month, etc.) if TRUE and latitude < 0 (i.e., southern hemisphere)
adjust_NorthSouth = TRUE,
# Critical soil water potential(s) [MPa] to calculate 'dry' and 'wet' soils
# (cf. wilting point) and available soil water
SWPcrit_MPa = c(-1.5, -3.0, -3.5, -3.9),
# Critical temperatures [Celsius degrees]
Tmin_crit_C = c(-15, -9, 0),
Tmax_crit_C = c(34, 40),
Tmean_crit_C = c(5, 15, 25, 35),
# Base temperature (degree C) above which degree-days are accumulated
Tbase_DD_C = 0,
# Daily weather frequency distributions
# Bins of x mm precipitation event sizes
bin_prcp_mm = 5,
# Bins of x consecutive days without precipitation
bin_prcpfree_days = 10,
# Parameters for 'dailyRegeneration_bySWPSnow'
dailyRegeneration_bySWPSnow = list(
season.start = "LastSnow", # either doy or "LastSnow"
season.end = "FirstSnow", # either doy or "FirstSnow"
germination.duration = 7, # in days
germination.swp.surface = -0.2, # in MPa, duration must have at least x MPa
establishment.duration = 14, # in days
establishment.swp.surface = -0.4, # in MPa, duration must have at least x MPa
establishment.delay = 1 # establishment starts latest x days after end of germination
),
# NRCS soil moisture regimes (SMR) and soil temperature regimes (STR) settings
NRCS_SMTRs = list(
# Approach for regime determination ('data' -> 'conditions' -> 'regime')
aggregate_at = "conditions",
# Aggregation agreement level (e.g., 0.5 = majority; 1 = all)
crit_agree_frac = 0.9,
# Restrict data to normal years (as defined by SSS 2014) if TRUE; if FALSE, all years
use_normal = TRUE,
SWP_dry = -1.5, #dry means SWP below -1.5 MPa (Soil Survey Staff 2014: p.29)
SWP_sat = -0.033, #saturated means SWP above -0.033 MPa
impermeability = 0.9 #impermeable layer
)
)
##############################################################################
|
## Filename: ~/800_example_plots/06_endogeneous_y_het.R
## Description: Data Visualisation to show the effects of endogenous sampling
## Author: Anna-Carolina Haensch
## Maintainer: Anna-Carolina Haensch (anna-carolina.haensch@gesis.org)
## Creation: 2017-11-23
###
# ggplot2 is required for the plotting code below
library(ggplot2)
# Create data
kN <- 1000
x <- c(rnorm(n = kN/5, mean = -20, sd = 10),
rnorm(n = kN/5, mean = -10, sd = 10),
rnorm(n = kN/5, mean = 0, sd = 10),
rnorm(n = kN/5, mean = 10, sd = 10),
rnorm(n = kN/5, mean = 20, sd = 10))
gamma<-vector()
e<-c(rnorm(n = kN/5, mean = 0, sd = 10),
rnorm(n = kN/5, mean = 0, sd = 25),
rnorm(n = kN/5, mean = 0, sd = 100),
rnorm(n = kN/5, mean = 0, sd = 155),
rnorm(n = kN/5, mean = 0, sd = 200))
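# note: the error sd increases across the five blocks (10 up to 200), so the scatter
# around the fitted line widens with x (heteroskedastic errors)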
y <- 10*x + e
data.pop.viz<-data.frame(x=x,y=y, e=e)
mod.viz <- lm(data.pop.viz$y~data.pop.viz$x)
data.pop.predicted <- data.frame(y = predict(mod.viz, data.pop.viz),
x = data.pop.viz$x)
# Create plot
endy<-ggplot(data = data.pop.viz, aes(x = x, y = y)) +
geom_point(aes(color = (e-(min(e)))),show.legend = F) +
geom_line(color = "#D55E00",data = data.pop.predicted, aes(x = x, y = y))+
geom_abline(color = "blueviolet",intercept = 125,slope = 13,alpha = 0.7)+
scale_colour_gradient2(low="white", high="#0072B2")
#Save
pdf(file = "plots/endogenous_y_het_plot.pdf", width = 8, height = 5)
print(endy)
dev.off()
|
/800_example_plots/06_endogenous_y_het.R
|
no_license
|
CaroHaensch/IPD_MA_Survey_Data
|
R
| false | false | 1,398 |
r
|
## Filename: ~/800_example_plots/06_endogeneous_y_het.R
## Description: Data Visualisation to show the effects of endogenous sampling
## Author: Anna-Carolina Haensch
## Maintainer: Anna-Carolina Haensch (anna-carolina.haensch@gesis.org)
## Creation: 2017-11-23
###
# ggplot2 is required for the plotting code below
library(ggplot2)
# Create data
kN <- 1000
x <- c(rnorm(n = kN/5, mean = -20, sd = 10),
rnorm(n = kN/5, mean = -10, sd = 10),
rnorm(n = kN/5, mean = 0, sd = 10),
rnorm(n = kN/5, mean = 10, sd = 10),
rnorm(n = kN/5, mean = 20, sd = 10))
gamma<-vector()
e<-c(rnorm(n = kN/5, mean = 0, sd = 10),
rnorm(n = kN/5, mean = 0, sd = 25),
rnorm(n = kN/5, mean = 0, sd = 100),
rnorm(n = kN/5, mean = 0, sd = 155),
rnorm(n = kN/5, mean = 0, sd = 200))
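# note: the error sd increases across the five blocks (10 up to 200), so the scatter
# around the fitted line widens with x (heteroskedastic errors)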
y <- 10*x + e
data.pop.viz<-data.frame(x=x,y=y, e=e)
mod.viz <- lm(data.pop.viz$y~data.pop.viz$x)
data.pop.predicted <- data.frame(y = predict(mod.viz, data.pop.viz),
x = data.pop.viz$x)
# Create plot
endy<-ggplot(data = data.pop.viz, aes(x = x, y = y)) +
geom_point(aes(color = (e-(min(e)))),show.legend = F) +
geom_line(color = "#D55E00",data = data.pop.predicted, aes(x = x, y = y))+
geom_abline(color = "blueviolet",intercept = 125,slope = 13,alpha = 0.7)+
scale_colour_gradient2(low="white", high="#0072B2")
#Save
pdf(file = "plots/endogenous_y_het_plot.pdf", width = 8, height = 5)
print(endy)
dev.off()
|
# script to extract filename from full path
library(tidyverse)
name <- "files/0023/expt_A_trial_002.png"
filename <- str_split(name,"/",simplify=TRUE)[3] #get just the filename
trialname <- str_split(filename,"_",simplify=TRUE)[4] #get the trial name + extension
trialname <- parse_number(trialname) #remove the extension
print(paste0("this is trial ",trialname)) #output
|
/solutions_R/parsons1.R
|
no_license
|
tomstafford/parsonsproblems
|
R
| false | false | 375 |
r
|
# script to extract filename from full path
library(tidyverse)
name <- "files/0023/expt_A_trial_002.png"
filename <- str_split(name,"/",simplify=TRUE)[3] #get just the filename
trialname <- str_split(filename,"_",simplify=TRUE)[4] #get the trial name + extension
trialname <- parse_number(trialname) #remove the extension
print(paste0("this is trial ",trialname)) #output
|
library(dplyr)
library(XML)
library(lubridate)
library(tidyr)
library(leaflet)
load("stops.rda")
write_files_to <- "/home/ht/Desktop/"
time_stamp <- Sys.time()
locations <- xmlToDataFrame(xmlParse("http://bustracker.muni.org/InfoPoint/XML/vehiclelocation.xml")) %>%
filter(runid != "<NA>", latitude != "0.0")
stop_departures <- xmlToList(xmlParse("http://bustracker.muni.org/InfoPoint/XML/stopdepartures.xml"))
removenulls <- function(x) {ifelse(is.null(x), NA, x)}
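# each stop element of stopdepartures.xml: x[[1]] is the stop id and x[[3]] holds the
# departure record (estimated/scheduled times, deviation, route, direction) used below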
delays <- data.frame(
id = as.numeric(unlist(lapply(stop_departures[-1], function(x) x[[1]]))),
routeID = unlist(lapply(stop_departures[-1], function(x) x[[3]][[5]][[1]])),
direction = unlist(lapply(stop_departures[-1], function(x) x[[3]][[6]])),
dev = unlist(lapply(lapply(stop_departures[-1], function(x) x[[3]][[3]]), removenulls)),
edt = ymd_hms(paste0(format(Sys.time(), "%Y-%m-%d"), " ", unlist(lapply(stop_departures[-1], function(x) x[[3]][[1]])), ":00")),
sdt = unlist(lapply(stop_departures[-1], function(x) x[[3]][[2]]))
)
delays <- delays %>% filter(dev != 0)
delays$id <- as.numeric(as.character(delays$id))
delays <- delays %>%
group_by(routeID, direction) %>%
mutate(ord = order(edt - Sys.time())) %>%
filter(ord < 2)
delays <- inner_join(delays, stops, by = "id")
save(locations, delays, time_stamp, file = paste0(write_files_to, "people_mover_realtime.rda"))
#content <- paste(locations$routeid,locations$direction)
#leaflet() %>%
# setView(lng = -149.9, lat = 61.11, zoom = 11) %>%
# addTiles() %>% addCircles(data = delays, ~longitude, ~latitude) %>%
# addPopups(data = locations, ~longitude, ~latitude, popup = content)
#Layer 1 - Basemap
#Layer 2 - Route Trails
#Layer 3 - Bus Locations/heading/speed/id
#Layer 4 - Delays for stops
|
/anchoragebus/analysis.R
|
permissive
|
codeforanchorage/shiny-server
|
R
| false | false | 1,777 |
r
|
library(dplyr)
library(XML)
library(lubridate)
library(tidyr)
library(leaflet)
load("stops.rda")
write_files_to <- "/home/ht/Desktop/"
time_stamp <- Sys.time()
locations <- xmlToDataFrame(xmlParse("http://bustracker.muni.org/InfoPoint/XML/vehiclelocation.xml")) %>%
filter(runid != "<NA>", latitude != "0.0")
stop_departures <- xmlToList(xmlParse("http://bustracker.muni.org/InfoPoint/XML/stopdepartures.xml"))
removenulls <- function(x) {ifelse(is.null(x), NA, x)}
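# each stop element of stopdepartures.xml: x[[1]] is the stop id and x[[3]] holds the
# departure record (estimated/scheduled times, deviation, route, direction) used below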
delays <- data.frame(
id = as.numeric(unlist(lapply(stop_departures[-1], function(x) x[[1]]))),
routeID = unlist(lapply(stop_departures[-1], function(x) x[[3]][[5]][[1]])),
direction = unlist(lapply(stop_departures[-1], function(x) x[[3]][[6]])),
dev = unlist(lapply(lapply(stop_departures[-1], function(x) x[[3]][[3]]), removenulls)),
edt = ymd_hms(paste0(format(Sys.time(), "%Y-%m-%d"), " ", unlist(lapply(stop_departures[-1], function(x) x[[3]][[1]])), ":00")),
sdt = unlist(lapply(stop_departures[-1], function(x) x[[3]][[2]]))
)
delays <- delays %>% filter(dev != 0)
delays$id <- as.numeric(as.character(delays$id))
delays <- delays %>%
group_by(routeID, direction) %>%
mutate(ord = order(edt - Sys.time())) %>%
filter(ord < 2)
delays <- inner_join(delays, stops, by = "id")
save(locations, delays, time_stamp, file = paste0(write_files_to, "people_mover_realtime.rda"))
#content <- paste(locations$routeid,locations$direction)
#leaflet() %>%
# setView(lng = -149.9, lat = 61.11, zoom = 11) %>%
# addTiles() %>% addCircles(data = delays, ~longitude, ~latitude) %>%
# addPopups(data = locations, ~longitude, ~latitude, popup = content)
#Layer 1 - Basemap
#Layer 2 - Route Trails
#Layer 3 - Bus Locations/heading/speed/id
#Layer 4 - Delays for stops
|
\name{BICog}
\alias{BICog}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
BIC function
}
\description{
A function that gives the BIC value based on estimated results from \code{\link{Spgr}}. Cn = log(np+q).
}
\usage{
BICog(obj, y, z, x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obj}{output of Spgr}
\item{y}{ response vector with n observations .}
\item{z}{ explanatory variables matrix which have the same coefficients among individuals.}
\item{x}{ explanatory variables matrix with p columns which have individual coefficients.}
}
\details{
BIClog is a special case of BIClogr without repeated measures. Cn in BIC is log(np+q).
}
\value{
a numeric BIC value
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{concavefusion}}, \code{\link{BIClogr}}
}
\examples{
## see the example in concavefusion
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/BIClog.Rd
|
no_license
|
wangx23/Spgr
|
R
| false | false | 1,234 |
rd
|
\name{BICog}
\alias{BICog}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
BIC function
}
\description{
A function that gives the BIC value based on estimated results from \code{\link{Spgr}}. Cn = log(np+q).
}
\usage{
BICog(obj, y, z, x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obj}{output of Spgr}
\item{y}{ response vector with n observations .}
\item{z}{ explanatory variables matrix which have the same coefficients among individuals.}
\item{x}{ explanatory variables matrix with p columns which have individual coefficients.}
}
\details{
BIClog is a special case of BIClogr without repeated measures. Cn in BIC is log(np+q).
}
\value{
a numeric BIC value
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{concavefusion}}, \code{\link{BIClogr}}
}
\examples{
## see the example in concavefusion
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(shiny)
shinyUI(fluidPage(
titlePanel("Yahoo Finance Gold Futures"),
sidebarLayout(
sidebarPanel(
textInput("symb", "Symbol", "GOLD"),
dateRangeInput("dates",
"Date range",
start = "2017-01-27",
end = as.character(Sys.Date())),
checkboxInput("log", "Plot y axis on log scale",
value = FALSE),
checkboxInput("adjust",
"Adjust prices for inflation", value = FALSE)
),
mainPanel(plotOutput("plot"))
)
))
|
/ui.R
|
no_license
|
Shtogrin0/Lab6
|
R
| false | false | 490 |
r
|
library(shiny)
shinyUI(fluidPage(
titlePanel("Yahoo Finance Gold Futures"),
sidebarLayout(
sidebarPanel(
textInput("symb", "Symbol", "GOLD"),
dateRangeInput("dates",
"Date range",
start = "2017-01-27",
end = as.character(Sys.Date())),
checkboxInput("log", "Plot y axis on log scale",
value = FALSE),
checkboxInput("adjust",
"Adjust prices for inflation", value = FALSE)
),
mainPanel(plotOutput("plot"))
)
))
|
anyprimary1 <- function(myCounty = "CALIFORNIA", myMeasure = "Nother", mySex = "Total"){
countyOSHPD.t <- oshpd_PDD %>%
filter(type == "n_hosp") %>%
select(year, county, sex, ccsCode,Nprimary=measure)
primary_any <- left_join(oshpd_PDD_any.t,countyOSHPD.t,by=c("year","county","sex","ccsCode")) %>%
mutate(Nprimary = ifelse(is.na(Nprimary),0,Nprimary)) %>%
mutate(Nother = Nany - Nprimary,
percentPrimary = 100*Nprimary/Nany,
percentOther = 100*Nother/Nany)
ccsMap <- ccsLinker()
primary_any <- left_join(primary_any,ccsMap,by="ccsCode") %>%
filter(Nany > 50,
ccsCode != "oo259") ### TODO need to study this
primary_any_NOPREG <- primary_any %>% filter(!(birth))
plot_data.0 <- primary_any_NOPREG %>%
pivot_longer(cols=Nany:percentOther,names_to = "theMeasure") %>%
mutate(theMeasure = as.factor(theMeasure))
plot_data.1 <- plot_data.0 %>% filter(sex=="Total")
myN <- 10
if (1==2) {
myMeasure <- "Nother"
myMeasure <- "Nany"
myMeasure <- "Nprimary"
myMeasure <- "percentOther"
myCounty <- "CALIFORNIA"
}
#create a vector of CAUSE for top N
theseCodes <- plot_data.1 %>%
filter(county == myCounty) %>%
    group_by(theMeasure) %>% arrange(desc(value)) %>% dplyr::slice(1:myN) %>% #this selects the top N rows for each measure
filter(theMeasure == myMeasure) %>% ungroup() %>% pull(ccsCode)
#creates dataframe with data only for CAUSEs from theseCodes, i.e. the top N CAUSES for the specified theseCodes
plot_data.2 <- plot_data.1 %>%
filter(!is.na(ccsName), county == myCounty, ccsCode %in% theseCodes) %>%
#filter(sex == mySex, year == myYear) %>%
group_by(theMeasure) %>%
mutate(ccsName = forcats::fct_reorder(ccsName, filter(., theMeasure == myMeasure) %>%
pull(value)))
ggplot(plot_data.2, aes(x = ccsName, y = value)) +
coord_flip() + geom_bar(stat = "identity", fill = "blue") +
facet_grid(. ~ theMeasure, scales = "free_x") +
scale_y_continuous(labels = scales::comma) + #numbers shown with commas rather than scientific notation
scale_x_discrete(labels = scales::wrap_format(50)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
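# example call (assumes oshpd_PDD, oshpd_PDD_any.t, and ccsLinker() are available in the environment):
# anyprimary1(myCounty = "CALIFORNIA", myMeasure = "Nprimary")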
|
/myCBD/myFunctions/make_OSHPD_ANY_PRIMARY_chart.R
|
no_license
|
CDPHrusers/CACommunityBurden
|
R
| false | false | 2,375 |
r
|
anyprimary1 <- function(myCounty = "CALIFORNIA", myMeasure = "Nother", mySex = "Total"){
countyOSHPD.t <- oshpd_PDD %>%
filter(type == "n_hosp") %>%
select(year, county, sex, ccsCode,Nprimary=measure)
primary_any <- left_join(oshpd_PDD_any.t,countyOSHPD.t,by=c("year","county","sex","ccsCode")) %>%
mutate(Nprimary = ifelse(is.na(Nprimary),0,Nprimary)) %>%
mutate(Nother = Nany - Nprimary,
percentPrimary = 100*Nprimary/Nany,
percentOther = 100*Nother/Nany)
ccsMap <- ccsLinker()
primary_any <- left_join(primary_any,ccsMap,by="ccsCode") %>%
filter(Nany > 50,
ccsCode != "oo259") ### TODO need to study this
primary_any_NOPREG <- primary_any %>% filter(!(birth))
plot_data.0 <- primary_any_NOPREG %>%
pivot_longer(cols=Nany:percentOther,names_to = "theMeasure") %>%
mutate(theMeasure = as.factor(theMeasure))
plot_data.1 <- plot_data.0 %>% filter(sex=="Total")
myN <- 10
if (1==2) {
myMeasure <- "Nother"
myMeasure <- "Nany"
myMeasure <- "Nprimary"
myMeasure <- "percentOther"
myCounty <- "CALIFORNIA"
}
#create a vector of CAUSE for top N
theseCodes <- plot_data.1 %>%
filter(county == myCounty) %>%
    group_by(theMeasure) %>% arrange(desc(value)) %>% dplyr::slice(1:myN) %>% #this selects the top N rows for each measure
filter(theMeasure == myMeasure) %>% ungroup() %>% pull(ccsCode)
#creates dataframe with data only for CAUSEs from theseCodes, i.e. the top N CAUSES for the specified theseCodes
plot_data.2 <- plot_data.1 %>%
filter(!is.na(ccsName), county == myCounty, ccsCode %in% theseCodes) %>%
#filter(sex == mySex, year == myYear) %>%
group_by(theMeasure) %>%
mutate(ccsName = forcats::fct_reorder(ccsName, filter(., theMeasure == myMeasure) %>%
pull(value)))
ggplot(plot_data.2, aes(x = ccsName, y = value)) +
coord_flip() + geom_bar(stat = "identity", fill = "blue") +
facet_grid(. ~ theMeasure, scales = "free_x") +
scale_y_continuous(labels = scales::comma) + #numbers shown with commas rather than scientific notation
scale_x_discrete(labels = scales::wrap_format(50)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
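# example call (assumes oshpd_PDD, oshpd_PDD_any.t, and ccsLinker() are available in the environment):
# anyprimary1(myCounty = "CALIFORNIA", myMeasure = "Nprimary")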
|
print.WGCNANet <-
function (x, nlimit=20, ...)
{
cat("Object of class WGCNANet (package stringgaussnet)\n\n")
Nodes <- unique(c(x$Edges$node1,x$Edges$node2))
cat("Number of nodes:",length(Nodes),"\n")
Interactions <- unique(paste(x$Edges$node1,x$Edges$node2,sep="."))
cat("Number of interactions:",length(Interactions),"\n")
cat("Edges preview:\n")
print(head(x$Edges,nlimit))
cat("\nDEGenes preview:\n")
print(head(x$DEGenes,nlimit))
if (!is.null(x$Annotations))
{
cat("\nAnnotations preview:\n")
print(head(x$Annotations,nlimit))
}
}
|
/stringgaussnet/R/print.WGCNANet.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 552 |
r
|
print.WGCNANet <-
function (x, nlimit=20, ...)
{
cat("Object of class WGCNANet (package stringgaussnet)\n\n")
Nodes <- unique(c(x$Edges$node1,x$Edges$node2))
cat("Number of nodes:",length(Nodes),"\n")
Interactions <- unique(paste(x$Edges$node1,x$Edges$node2,sep="."))
cat("Number of interactions:",length(Interactions),"\n")
cat("Edges preview:\n")
print(head(x$Edges,nlimit))
cat("\nDEGenes preview:\n")
print(head(x$DEGenes,nlimit))
if (!is.null(x$Annotations))
{
cat("\nAnnotations preview:\n")
print(head(x$Annotations,nlimit))
}
}
|
test_that("tcmsg works as expected", {
expect_error( tcmsg({ NULL = 1 },'Cannot assign to NULL','variable') , 'Cannot assign to NULL variable' )
expect_warning( tcmsg({ as.numeric('abc') },'Issue in as.numeric()') , 'Issue in as.numeric()' )
})
|
/tests/testthat/test_tcmsg.R
|
no_license
|
cran/easyr
|
R
| false | false | 258 |
r
|
test_that("tcmsg works as expected", {
expect_error( tcmsg({ NULL = 1 },'Cannot assign to NULL','variable') , 'Cannot assign to NULL variable' )
expect_warning( tcmsg({ as.numeric('abc') },'Issue in as.numeric()') , 'Issue in as.numeric()' )
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{swap_mult}
\alias{swap_mult}
\title{Swap elements of multiple binary chains}
\usage{
swap_mult(bin_chains, m)
}
\arguments{
\item{bin_chains}{A two dimensional integer vector with binary values.}
\item{m}{A positive nonzero integer value for the number of swaps to attempt
on \code{bin_chains}.}
}
\description{
\code{swap_mult} is used to swap elements of multiple binary chains if doing
so maintains the same number of transitions between the two states of those
chains.
}
\details{
\code{swap_mult} works by taking a two dimensional integer vector
\code{bin_chains} and \code{m}, a number of times to attempt swaps. It
generates random integers which are valid indices of the two dimensional
vector \code{bin_chains} and tries to swap the elements of the vector at
the indices that it generates, only doing so if this preserves the total
number of transitions between states. After attempting \code{m} swaps,
\code{swap_mult} returns the new, freshly swapped two dimensional vector of
binary chains.
}
|
/man/swap_mult.Rd
|
no_license
|
cwcartmell/maRkov
|
R
| false | true | 1,115 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{swap_mult}
\alias{swap_mult}
\title{Swap elements of multiple binary chains}
\usage{
swap_mult(bin_chains, m)
}
\arguments{
\item{bin_chains}{A two dimensional integer vector with binary values.}
\item{m}{A positive nonzero integer value for the number of swaps to attempt
on \code{bin_chains}.}
}
\description{
\code{swap_mult} is used to swap elements of multiple binary chains if doing
so maintains the same number of transitions between the two states of those
chains.
}
\details{
\code{swap_mult} works by taking a two dimensional integer vector
\code{bin_chains} and \code{m}, a number of times to attempt swaps. It
generates random integers which are valid indices of the two dimensional
vector \code{bin_chains} and tries to swap the elements of the vector at
the indices that it generates, only doing so if this preserves the total
number of transitions between states. After attempting \code{m} swaps,
\code{swap_mult} returns the new, freshly swapped two dimensional vector of
binary chains.
}
|
library(tidyverse)
library(mosaic)
library(dplyr)
library(ggplot2)
library(foreach)
library(gridExtra)
library(grid)
library(stringr)
########################## 1. graph ###############################
DataFinalProject_Final <- read.csv('https://raw.githubusercontent.com/Eliza1494/Data-Mining/master/FinalProjectData/DataFinalProject_Final.csv')
car=DataFinalProject_Final
# size: long-box = long bed, short-box = short bed, SUV is Chevrolet Traverse - midsize
car$adsize <- ifelse(word(car$size, 1, -1) == "Long Box", "Long Bed",
ifelse(word(car$size, 1, -1) == "Short Box", "Short Bed",
ifelse(word(car$size, 1, -1) == "Sport Utility Vehicle (Suv)/Multi Purpose Vehicle", "Midsize",
ifelse(word(car$size, 1, -1) == "Car", word(car$category, 1),
as.character(car$size) ))))
# two sports car are smart for two that is classified as midsize in data
car$adsize <- ifelse(word(car$adsize, 1, -1) == "Sports", "Compact",
ifelse(word(car$adsize, 1, -1) == "Mid-Size", "Midsize",car$adsize))
# use U.S. Fuel Economy Guide
FEGcar=filter(car, !(adsize == 'Long Bed' | adsize == 'Short Bed'))
# the violin plot
size1=ggplot(FEGcar, aes(x=adsize, y=price))+
geom_violin(trim = FALSE) +
ylim(100, 55000)+
scale_x_discrete(limits=c("Compact", "Midsize", "Large"))+
geom_boxplot(width=0.1, aes(x=adsize, y=price, fill=adsize), show.legend = FALSE)+
labs(y="Price ($)", x="Size", title="Price of Cars by Size")+
theme(plot.title = element_text(hjust = 0.5),
axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold"))
Bedcar=filter(car, adsize == 'Long Bed' | adsize == 'Short Bed')
size2=ggplot(Bedcar, aes(x=adsize, y=price))+
geom_violin(trim = FALSE) +
ylim(100, 55000)+
scale_x_discrete(limits=c("Short Bed", "Long Bed"))+
geom_boxplot(width=0.1, aes(x=adsize, y=price, fill=adsize), show.legend = FALSE)+
labs(y="", x="Bed", title="Price of Trucks by Bed")+
theme(plot.title = element_text(hjust = 0.5),
axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold"))
grid.arrange(size1, size2, nrow=1, top = textGrob("Figure 1.7: Price Distributions by Size or Bed",
hjust = 0.5, gp=gpar(fontsize=17), vjust = 0.1))
#write.csv(car,'C:/Users/sxliz/Desktop/Courses/3-spring 2020/5-R-learning/final project/data/fina data/car5_3.csv')
########################### 2. lasso ################################
########################### 2.1 data preprocessing before lasso ################
#car$make_model= paste(car$make, car$model_, sep=" ")
car$MakeAndModel=as.factor(car$MakeAndModel)
car$type=as.character(car$type)
car$type=ifelse(word(car$type, 1, -1)=="Pickup Truck Pickup Truck Sport Utility Vehicle", "Pickup Truck",car$type)
#car$type=as.factor(car$type)
# put Van and minivan into van since we have used size to capture the info
car$type_adjust=ifelse(word(car$type,1,-1)=="Minivan", "Van", car$type)
car$type_adjust=ifelse(word(car$type_adjust,1,-1)=="Commercial Vehicle",
ifelse(word(car$category,-1)=="Van", "Van", car$type_adjust), car$type_adjust)
car$type_adjust=ifelse(word(car$type_adjust,1,-1)=="Truck",
ifelse(word(car$category,2)=="Pickup", "Pickup Truck",
ifelse(word(car$category,2)=="Van", "Van", "Sport Utility Vehicle")), car$type_adjust)
car$type_adjust=as.factor(car$type_adjust)
# create average mileage as the mean of city mileage and highway mileage
car=mutate(car, avg.mileage=(city_mileage+highway_mileage)/2)
# create variable for transmission speed
car=mutate(car, transmission.speed=as.character(car$transmission_speeds))
# there are 15 NAs in transmission speed: Ford C-Max Hybrid and Lexus RX400h, both with continuously variable transmissions
car$transmission.speed=ifelse(word(car$transmission.speed)=="", "Continuously Variable", car$transmission.speed)
# for "10-Speed" only the leading "1" would be extracted, so recode it to "8-Speed" to classify it as high speed
car$transmission.speed=ifelse(word(car$transmission.speed)=="10-Speed", "8-Speed", car$transmission.speed)
car$transmission.speed=ifelse(str_extract(car$transmission.speed, "[^-]+")=="Continuously Variable", "Continuously Variable",
ifelse(str_extract(car$transmission.speed, "[^-]+")<5, "Low Transmission Speed",
ifelse(str_extract(car$transmission.speed, "[^-]+")>7, "High Transmission Speed",
"Medium Transmission Speed")))
car$transmission.speed=as.factor(car$transmission.speed)
# put transmission type into automated manual, automatic, manual, CVT and directed drive
car$transmission.type=as.character(car$transmission_type)
car$transmission.type=ifelse(word(car$transmission.type)=="Automated", "Automated Manual",
ifelse(word(car$transmission.type)=="AUTOMATED_MANUAL", "Automated Manual",
ifelse(word(car$transmission.type)=="Automatic", "Automatic",
ifelse(word(car$transmission.type)=="CVT", "CVT",
ifelse(word(car$transmission.type)=="Direct", "Direct Drive",
ifelse(word(car$transmission.type)=="DIRECT_DRIVE", "Direct Drive",
ifelse(word(car$transmission.type)=="Manua", "Manual",
ifelse(word(car$transmission.type)=="Manual", "Manual", car$transmission.type))))))))
car$transmission.type=as.factor(car$transmission.type)
# now adjust drivetrain: first, classify into four-wheel, front-wheel, and rear-wheel drive, leaving the others as-is
# DWR and SWR are for trucks, LWB is a term unknown (for some specific cars), 4x2 is two-wheel drive but not clearly front or rear
car$drivetrain_ad=as.character(car$drivetrain)
car$drivetrain_ad=ifelse(word(car$drivetrain_ad)=="All-Wheel", "Four-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="Quattro", "Four-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="SH-AWD", "Four-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="rear-wheel", "Rear-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="Front-Wheel", "Front-Wheel Drive",car$drivetrain_ad)))))
# now classify into four and two wheels
car$drivetrain_ad2=ifelse(word(car$drivetrain_ad)=="Front-Wheel", "Two-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="Rear-Wheel", "Two-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="4x2", "Two-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="4X2", "Two-Wheel Drive",car$drivetrain_ad))))
car$drivetrain_ad=as.factor(car$drivetrain_ad)
car$drivetrain_ad2=as.factor(car$drivetrain_ad2)
# won't include steering type (most are Rack & Pinion) or anti_brake_system (most are 4-wheel ABS)
#Engine_Fuel Type
car$Fuel_Engine=as.character(car$fuel_type_)
car=car %>%
mutate(Fuel_Engine =gsub("(Premium Unleaded Required/E85)|(Premium Unleaded Recommended/E85)|(Unleaded/E85)|(Premium Unleaded/E85)|Unleaded/Natural Gasoline|(Recommended)|(Required)", "", Fuel_Engine))
car$Fuel_Engine=ifelse(word(car$TypeEngine2)=="Electric", 'Electric',
ifelse(word(car$TypeEngine2)=="Hybrid", 'Hybrid',
ifelse(word(car$Fuel_Engine)=="Natural", 'Gasoline',
ifelse(word(car$Fuel_Engine)=="Premium", 'Premium Unleaded Gas',
ifelse(word(car$Fuel_Engine)=="Regular", 'Regular Unleaded Gas',
ifelse(word(car$Fuel_Engine)=="Flex-Fuel", 'Flex-Fuel',
ifelse(word(car$Fuel_Engine)=="Diesel", 'Diesel',car$Fuel_Engine)))))))
car$TypeEngine2=as.factor(car$TypeEngine2)
car$Fuel_Engine=as.factor(car$Fuel_Engine)
car$adsize=as.factor(car$adsize)
write.csv(car,'C:/Users/sxliz/Desktop/Courses/3-spring 2020/5-R-learning/final project/data/fina data/car5_4.csv')
CarData1=car[,c(5,6,7,14,16,18,22,23,30,32,33,34,35,36,42,44,45,47,48,53,63,58,59,60,61,62)]
#summary(CarData1)
CarData1=na.omit(CarData1)
#summary(CarData1)
CarData1=filter(CarData1, !(drivetrain_ad2 == "DRW" |drivetrain_ad2 == "LWB"|drivetrain_ad2 == "SWB"))
# try basic OLS
lm_try=lm(price ~ . - (vin + invoice_price + TypeEngine2 + drivetrain_ad), data = CarData1)
summary(lm_try)
plot(car5_4$year, car5_4$price)
|
/FinalProject_Used_Car_Valuation/ClearData.R
|
no_license
|
Eliza1494/Data-Mining
|
R
| false | false | 8,988 |
r
|
library(tidyverse)
library(mosaic)
library(dplyr)
library(ggplot2)
library(foreach)
library(gridExtra)
library(grid)
library(stringr)
########################## 1. graph ###############################
DataFinalProject_Final <- read.csv('https://raw.githubusercontent.com/Eliza1494/Data-Mining/master/FinalProjectData/DataFinalProject_Final.csv')
car=DataFinalProject_Final
# size: long-box = long bed, short-box = short bed, SUV is Chevrolet Traverse - midsize
car$adsize <- ifelse(word(car$size, 1, -1) == "Long Box", "Long Bed",
ifelse(word(car$size, 1, -1) == "Short Box", "Short Bed",
ifelse(word(car$size, 1, -1) == "Sport Utility Vehicle (Suv)/Multi Purpose Vehicle", "Midsize",
ifelse(word(car$size, 1, -1) == "Car", word(car$category, 1),
as.character(car$size) ))))
# two sports car are smart for two that is classified as midsize in data
car$adsize <- ifelse(word(car$adsize, 1, -1) == "Sports", "Compact",
ifelse(word(car$adsize, 1, -1) == "Mid-Size", "Midsize",car$adsize))
# use U.S. Fuel Economy Guide
FEGcar=filter(car, !(adsize == 'Long Bed' | adsize == 'Short Bed'))
# the violin plot
size1=ggplot(FEGcar, aes(x=adsize, y=price))+
geom_violin(trim = FALSE) +
ylim(100, 55000)+
scale_x_discrete(limits=c("Compact", "Midsize", "Large"))+
geom_boxplot(width=0.1, aes(x=adsize, y=price, fill=adsize), show.legend = FALSE)+
labs(y="Price ($)", x="Size", title="Price of Cars by Size")+
theme(plot.title = element_text(hjust = 0.5),
axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold"))
Bedcar=filter(car, adsize == 'Long Bed' | adsize == 'Short Bed')
size2=ggplot(Bedcar, aes(x=adsize, y=price))+
geom_violin(trim = FALSE) +
ylim(100, 55000)+
scale_x_discrete(limits=c("Short Bed", "Long Bed"))+
geom_boxplot(width=0.1, aes(x=adsize, y=price, fill=adsize), show.legend = FALSE)+
labs(y="", x="Bed", title="Price of Trucks by Bed")+
theme(plot.title = element_text(hjust = 0.5),
axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold"))
grid.arrange(size1, size2, nrow=1, top = textGrob("Figure 1.7: Price Distributions by Size or Bed",
hjust = 0.5, gp=gpar(fontsize=17), vjust = 0.1))
#write.csv(car,'C:/Users/sxliz/Desktop/Courses/3-spring 2020/5-R-learning/final project/data/fina data/car5_3.csv')
########################### 2. lasso ################################
########################### 2.1 data preprocessing before lasso ################
#car$make_model= paste(car$make, car$model_, sep=" ")
car$MakeAndModel=as.factor(car$MakeAndModel)
car$type=as.character(car$type)
car$type=ifelse(word(car$type, 1, -1)=="Pickup Truck Pickup Truck Sport Utility Vehicle", "Pickup Truck",car$type)
#car$type=as.factor(car$type)
# put Van and minivan into van since we have used size to capture the info
car$type_adjust=ifelse(word(car$type,1,-1)=="Minivan", "Van", car$type)
car$type_adjust=ifelse(word(car$type_adjust,1,-1)=="Commercial Vehicle",
ifelse(word(car$category,-1)=="Van", "Van", car$type_adjust), car$type_adjust)
car$type_adjust=ifelse(word(car$type_adjust,1,-1)=="Truck",
ifelse(word(car$category,2)=="Pickup", "Pickup Truck",
ifelse(word(car$category,2)=="Van", "Van", "Sport Utility Vehicle")), car$type_adjust)
car$type_adjust=as.factor(car$type_adjust)
# create average mileage as the mean of city mileage and highway mileage
car=mutate(car, avg.mileage=(city_mileage+highway_mileage)/2)
# create variable for transmission speed
car=mutate(car, transmission.speed=as.character(car$transmission_speeds))
# there are 15 NAs in transmission speed: Ford C-Max Hybrid and Lexus RX400h, both with continuously variable transmissions
car$transmission.speed=ifelse(word(car$transmission.speed)=="", "Continuously Variable", car$transmission.speed)
# for "10-Speed" only the leading "1" would be extracted, so recode it to "8-Speed" to classify it as high speed
car$transmission.speed=ifelse(word(car$transmission.speed)=="10-Speed", "8-Speed", car$transmission.speed)
car$transmission.speed=ifelse(str_extract(car$transmission.speed, "[^-]+")=="Continuously Variable", "Continuously Variable",
ifelse(str_extract(car$transmission.speed, "[^-]+")<5, "Low Transmission Speed",
ifelse(str_extract(car$transmission.speed, "[^-]+")>7, "High Transmission Speed",
"Medium Transmission Speed")))
car$transmission.speed=as.factor(car$transmission.speed)
# put transmission type into automated manual, automatic, manual, CVT and directed drive
car$transmission.type=as.character(car$transmission_type)
car$transmission.type=ifelse(word(car$transmission.type)=="Automated", "Automated Manual",
ifelse(word(car$transmission.type)=="AUTOMATED_MANUAL", "Automated Manual",
ifelse(word(car$transmission.type)=="Automatic", "Automatic",
ifelse(word(car$transmission.type)=="CVT", "CVT",
ifelse(word(car$transmission.type)=="Direct", "Direct Drive",
ifelse(word(car$transmission.type)=="DIRECT_DRIVE", "Direct Drive",
ifelse(word(car$transmission.type)=="Manua", "Manual",
ifelse(word(car$transmission.type)=="Manual", "Manual", car$transmission.type))))))))
car$transmission.type=as.factor(car$transmission.type)
# now adjust drivetrain: first, classify into four-wheel, front-wheel, and rear-wheel drive, leaving the others as-is
# DWR and SWR are for trucks, LWB is a term unknown (for some specific cars), 4x2 is two-wheel drive but not clearly front or rear
car$drivetrain_ad=as.character(car$drivetrain)
car$drivetrain_ad=ifelse(word(car$drivetrain_ad)=="All-Wheel", "Four-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="Quattro", "Four-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="SH-AWD", "Four-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="rear-wheel", "Rear-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="Front-Wheel", "Front-Wheel Drive",car$drivetrain_ad)))))
# now classify into four and two wheels
car$drivetrain_ad2=ifelse(word(car$drivetrain_ad)=="Front-Wheel", "Two-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="Rear-Wheel", "Two-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="4x2", "Two-Wheel Drive",
ifelse(word(car$drivetrain_ad)=="4X2", "Two-Wheel Drive",car$drivetrain_ad))))
car$drivetrain_ad=as.factor(car$drivetrain_ad)
car$drivetrain_ad2=as.factor(car$drivetrain_ad2)
# won't include steering type (most are Rack & Pinion) or anti_brake_system (most are 4-wheel ABS)
#Engine_Fuel Type
car$Fuel_Engine=as.character(car$fuel_type_)
car=car %>%
mutate(Fuel_Engine =gsub("(Premium Unleaded Required/E85)|(Premium Unleaded Recommended/E85)|(Unleaded/E85)|(Premium Unleaded/E85)|Unleaded/Natural Gasoline|(Recommended)|(Required)", "", Fuel_Engine))
car$Fuel_Engine=ifelse(word(car$TypeEngine2)=="Electric", 'Electric',
ifelse(word(car$TypeEngine2)=="Hybrid", 'Hybrid',
ifelse(word(car$Fuel_Engine)=="Natural", 'Gasoline',
ifelse(word(car$Fuel_Engine)=="Premium", 'Premium Unleaded Gas',
ifelse(word(car$Fuel_Engine)=="Regular", 'Regular Unleaded Gas',
ifelse(word(car$Fuel_Engine)=="Flex-Fuel", 'Flex-Fuel',
ifelse(word(car$Fuel_Engine)=="Diesel", 'Diesel',car$Fuel_Engine)))))))
car$TypeEngine2=as.factor(car$TypeEngine2)
car$Fuel_Engine=as.factor(car$Fuel_Engine)
car$adsize=as.factor(car$adsize)
write.csv(car,'C:/Users/sxliz/Desktop/Courses/3-spring 2020/5-R-learning/final project/data/fina data/car5_4.csv')
CarData1=car[,c(5,6,7,14,16,18,22,23,30,32,33,34,35,36,42,44,45,47,48,53,63,58,59,60,61,62)]
#summary(CarData1)
CarData1=na.omit(CarData1)
#summary(CarData1)
CarData1=filter(CarData1, !(drivetrain_ad2 == "DRW" |drivetrain_ad2 == "LWB"|drivetrain_ad2 == "SWB"))
# try basic OLS
lm_try=lm(price ~ . - (vin + invoice_price + TypeEngine2 + drivetrain_ad), data = CarData1)
summary(lm_try)
plot(car5_4$year, car5_4$price)
|
#' Get top genes
#'
#' Return top genes in DESeq results
#'
#' @param res an annotated DESeq2 results file
#' @param top Number of top genes to display in matrix
#' @param alpha Adjusted p-value cutoff
#' @param basemean basemean cutoff
#' @param log2FC absolute value log2 fold change cutoff
#' @param sort_fc Sort by fold changes and get top n/2 up and down-regulated,
#' default is to sort by adjusted p-value
#'
#' @return A tibble of top genes
#'
#' @author Chris Stubben
#'
#' @examples
#' data(pasilla)
#' top_genes(pasilla$results)
#' @export
top_genes <- function(res, top=40, alpha = 0.05, basemean, log2FC, sort_fc=FALSE){
## TO DO code for DataFrame - fix for tibbles
x <- subset(res, padj <= alpha )
  if(!missing(basemean)) x <- subset(x, baseMean > basemean )
  if(!missing(log2FC)) x <- subset(x, abs(log2FoldChange) > log2FC )
if(nrow(x) == 0 ) stop("No rows matching cutoffs")
if(sort_fc){
## sort largest fold changes OR p-value ?
##x1 <- x[order(x$log2FoldChange, decreasing=TRUE), ]
##x2 <- x[order(x$log2FoldChange), ]
x1 <- subset(res, log2FoldChange < 0)
x1 <- x1[order(x1$padj), ]
x2 <- subset(res, log2FoldChange > 0)
x2 <- x2[order(x2$padj), ]
x <- rbind( utils::head(x1, top/2), utils::head(x2, top/2))
}else{
# sort by p-adjusted
x <- x[order(x$padj), ]
x <- utils::head(x, top)
}
x
}
|
/R/top_genes.R
|
no_license
|
topherconley/hciR
|
R
| false | false | 1,424 |
r
|
#' Get top genes
#'
#' Return top genes in DESeq results
#'
#' @param res an annotated DESeq2 results file
#' @param top Number of top genes to display in matrix
#' @param alpha Adjusted p-value cutoff
#' @param basemean basemean cutoff
#' @param log2FC absolute value log2 fold change cutoff
#' @param sort_fc Sort by fold changes and get top n/2 up and down-regulated,
#' default is to sort by adjusted p-value
#'
#' @return A tibble of top genes
#'
#' @author Chris Stubben
#'
#' @examples
#' data(pasilla)
#' top_genes(pasilla$results)
#' @export
top_genes <- function(res, top=40, alpha = 0.05, basemean, log2FC, sort_fc=FALSE){
## TO DO code for DataFrame - fix for tibbles
x <- subset(res, padj <= alpha )
  if(!missing(basemean)) x <- subset(x, baseMean > basemean )
  if(!missing(log2FC)) x <- subset(x, abs(log2FoldChange) > log2FC )
if(nrow(x) == 0 ) stop("No rows matching cutoffs")
if(sort_fc){
## sort largest fold changes OR p-value ?
##x1 <- x[order(x$log2FoldChange, decreasing=TRUE), ]
##x2 <- x[order(x$log2FoldChange), ]
x1 <- subset(res, log2FoldChange < 0)
x1 <- x1[order(x1$padj), ]
x2 <- subset(res, log2FoldChange > 0)
x2 <- x2[order(x2$padj), ]
x <- rbind( utils::head(x1, top/2), utils::head(x2, top/2))
}else{
# sort by p-adjusted
x <- x[order(x$padj), ]
x <- utils::head(x, top)
}
x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cluster_genes.R
\name{aggregate_gene_expression}
\alias{aggregate_gene_expression}
\title{Creates a matrix with aggregated expression values for arbitrary groups of
genes}
\usage{
aggregate_gene_expression(
cds,
gene_group_df = NULL,
cell_group_df = NULL,
norm_method = c("log", "binary", "size_only"),
pseudocount = 1,
scale_agg_values = TRUE,
max_agg_value = 3,
min_agg_value = -3,
exclude.na = TRUE
)
}
\arguments{
\item{cds}{The cell_data_set on which this function operates}
\item{gene_group_df}{A dataframe in which the first column contains gene ids
and the second contains groups. If NULL, genes are not grouped.}
\item{cell_group_df}{A dataframe in which the first column contains cell ids
and the second contains groups. If NULL, cells are not grouped.}
\item{norm_method}{How to transform gene expression values before
aggregating them. If "log", a pseudocount is added. If "size_only", values
are divided by cell size factors prior to aggregation.}
\item{pseudocount}{Value to add to expression prior to log transformation
and aggregation.}
\item{scale_agg_values}{Whether to center and scale aggregated groups of
genes.}
\item{max_agg_value}{If scale_agg_values is TRUE, the maximum value the
resulting Z scores can take. Higher values are capped at this threshold.}
\item{min_agg_value}{If scale_agg_values is TRUE, the minimum value the
resulting Z scores can take. Lower values are capped at this threshold.}
\item{exclude.na}{Logical indicating whether or not to exclude NA values
from the aggregated matrix.}
}
\value{
A matrix of dimension NxM, where N is the number of gene groups and
M is the number of cell groups.
}
\description{
Creates a matrix with aggregated expression values for arbitrary groups of
genes
}
|
/man/aggregate_gene_expression.Rd
|
permissive
|
utnesp/monocle3
|
R
| false | true | 1,840 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cluster_genes.R
\name{aggregate_gene_expression}
\alias{aggregate_gene_expression}
\title{Creates a matrix with aggregated expression values for arbitrary groups of
genes}
\usage{
aggregate_gene_expression(
cds,
gene_group_df = NULL,
cell_group_df = NULL,
norm_method = c("log", "binary", "size_only"),
pseudocount = 1,
scale_agg_values = TRUE,
max_agg_value = 3,
min_agg_value = -3,
exclude.na = TRUE
)
}
\arguments{
\item{cds}{The cell_data_set on which this function operates}
\item{gene_group_df}{A dataframe in which the first column contains gene ids
and the second contains groups. If NULL, genes are not grouped.}
\item{cell_group_df}{A dataframe in which the first column contains cell ids
and the second contains groups. If NULL, cells are not grouped.}
\item{norm_method}{How to transform gene expression values before
aggregating them. If "log", a pseudocount is added. If "size_only", values
are divided by cell size factors prior to aggregation.}
\item{pseudocount}{Value to add to expression prior to log transformation
and aggregation.}
\item{scale_agg_values}{Whether to center and scale aggregated groups of
genes.}
\item{max_agg_value}{If scale_agg_values is TRUE, the maximum value the
resulting Z scores can take. Higher values are capped at this threshold.}
\item{min_agg_value}{If scale_agg_values is TRUE, the minimum value the
resulting Z scores can take. Lower values are capped at this threshold.}
\item{exclude.na}{Logical indicating whether or not to exclude NA values
from the aggregated matrix.}
}
\value{
A matrix of dimension NxM, where N is the number of gene groups and
M is the number of cell groups.
}
\description{
Creates a matrix with aggregated expression values for arbitrary groups of
genes
}
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 2.33213848354091e+304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613119301-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 343 |
r
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 2.33213848354091e+304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
#[export]
sftest <- function (x, logged = FALSE) {
x <- Rfast::Sort(x)
n <- length(x)
y <- qnorm((1:n - 0.375)/(n + 0.25))
w <- cor(y, x)^2
ln <- log(n)
lln <- log(ln)
m <- -1.2725 + 1.0521 * (lln - ln)
s <- -0.26758 * (lln + 2/ln) + 1.038
stat <- (log(1 - w) - m)/s
pval <- pnorm(stat, lower.tail = FALSE, log.p = logged)
res <- c(w, stat, pval)
names(res) <- c("squared correlation", "statistic", "p-value")
res
}
#[export]
sftests <- function(x, logged = FALSE) {
x <- Rfast::colSort(x)
n <- dim(x)[1]
y <- qnorm( ( 1:n - 0.375 ) / (n + 0.25) )
w <- as.vector( cor(y, x)^2 )
ln <- log(n)
lln <- log(ln)
m <- - 1.2725 + 1.0521 * ( lln - ln )
s <- - 0.26758 * ( lln + 2/ln ) + 1.038
stat <- ( log(1 - w) - m ) / s
pval <- pnorm(stat, lower.tail = FALSE, log.p = logged)
res <- cbind(w, stat, pval)
colnames(res) <- c("squared correlation", "statistic", "p-value")
res
}
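# Note: sftest()/sftests() compute the Shapiro-Francia normality test statistic, the squared
# correlation between the sorted sample and normal quantiles; the m/s terms give the
# log-normal approximation of its null distribution used for the p-value.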
|
/R/sftest.R
|
no_license
|
cran/Rfast
|
R
| false | false | 989 |
r
|
#[export]
sftest <- function (x, logged = FALSE) {
x <- Rfast::Sort(x)
n <- length(x)
y <- qnorm((1:n - 0.375)/(n + 0.25))
w <- cor(y, x)^2
ln <- log(n)
lln <- log(ln)
m <- -1.2725 + 1.0521 * (lln - ln)
s <- -0.26758 * (lln + 2/ln) + 1.038
stat <- (log(1 - w) - m)/s
pval <- pnorm(stat, lower.tail = FALSE, log.p = logged)
res <- c(w, stat, pval)
names(res) <- c("squared correlation", "statistic", "p-value")
res
}
#[export]
sftests <- function(x, logged = FALSE) {
x <- Rfast::colSort(x)
n <- dim(x)[1]
y <- qnorm( ( 1:n - 0.375 ) / (n + 0.25) )
w <- as.vector( cor(y, x)^2 )
ln <- log(n)
lln <- log(ln)
m <- - 1.2725 + 1.0521 * ( lln - ln )
s <- - 0.26758 * ( lln + 2/ln ) + 1.038
stat <- ( log(1 - w) - m ) / s
pval <- pnorm(stat, lower.tail = FALSE, log.p = logged)
res <- cbind(w, stat, pval)
colnames(res) <- c("squared correlation", "statistic", "p-value")
res
}
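# Note: sftest()/sftests() compute the Shapiro-Francia normality test statistic, the squared
# correlation between the sorted sample and normal quantiles; the m/s terms give the
# log-normal approximation of its null distribution used for the p-value.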
|
setwd("/data/js95/ALSPAC/ARIES/F7/biopsy2019-rerun/")
## packages
## data
# load("ARIES_betas_noTwin_rfaminst_pcs_20181023.Rdata")
load("ARIES_BP_rerun_20200225.Rdata")
outcome.vec <- colnames(aries)[102:ncol(aries)]
## double check the variable types
# lapply(aries[,1:101], function(x){
# class(x)
# })
## load function for stage 1 LARS, complete data
source("LARS-noimpute-function-20200225.R")
adver <- "mompsy"
## a list for the recency vectors
recency.vec.list <- list(c(18, 30, 42, 57, 69, 81)/12,
c(8, 21, 33, 61, 85)/12,
c(8, 21, 33, 47, 85)/12,
c(18, 30, 42, 57, 69, 81)/12,
c(21, 33, 61, 85)/12,
c(8, 21, 33, 47, 61, 73)/12,
c(8, 21, 33, 61, 73)/12)
names(recency.vec.list) <- c("abuse", "Fscore", "oneadult", "r_faminst", "nbhqual","parc","mompsy")
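# each vector gives the time points (in years, i.e. assessment months / 12) at which that adversity was measured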
res <- select.LARS.complete(df = aries,
outcome.vec = outcome.vec,
adver = adver,
covars = c(## baseline characteristics,EXCLUDE SES
"SES_parent", "WHITE", "Female", "mom_birthage", "ppregnum", "birth.weight","sustained.smoke",
# cell counts
"Bcell", "CD4T", "CD8T", "Gran", "Mono", "NK"),
hypos <- c("accumulation", "recency"),
exposures <- "default",
recency.vec = recency.vec.list[adver][[1]],
inf.method = "covTest")
save(res, file = paste0("SLCMA-newDNAm-rerun-", adver, "_20200225.Rdata"))
|
/ALSPAC_data_differences/2. SLCMA scripts/02-newDNAm-origCovars-covTest/02-newDNAm-origCovars-covTest-mompsy-20200225.R
|
no_license
|
alussier17/alussier_scripts
|
R
| false | false | 1,679 |
r
|
setwd("/data/js95/ALSPAC/ARIES/F7/biopsy2019-rerun/")
## packages
## data
# load("ARIES_betas_noTwin_rfaminst_pcs_20181023.Rdata")
load("ARIES_BP_rerun_20200225.Rdata")
outcome.vec <- colnames(aries)[102:ncol(aries)]
## double check the variable types
# lapply(aries[,1:101], function(x){
# class(x)
# })
## load function for stage 1 LARS, complete data
source("LARS-noimpute-function-20200225.R")
adver <- "mompsy"
## a list for the recency vectors
recency.vec.list <- list(c(18, 30, 42, 57, 69, 81)/12,
c(8, 21, 33, 61, 85)/12,
c(8, 21, 33, 47, 85)/12,
c(18, 30, 42, 57, 69, 81)/12,
c(21, 33, 61, 85)/12,
c(8, 21, 33, 47, 61, 73)/12,
c(8, 21, 33, 61, 73)/12)
names(recency.vec.list) <- c("abuse", "Fscore", "oneadult", "r_faminst", "nbhqual","parc","mompsy")
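# each vector gives the time points (in years, i.e. assessment months / 12) at which that adversity was measured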
res <- select.LARS.complete(df = aries,
outcome.vec = outcome.vec,
adver = adver,
covars = c(## baseline characteristics,EXCLUDE SES
"SES_parent", "WHITE", "Female", "mom_birthage", "ppregnum", "birth.weight","sustained.smoke",
# cell counts
"Bcell", "CD4T", "CD8T", "Gran", "Mono", "NK"),
hypos <- c("accumulation", "recency"),
exposures <- "default",
recency.vec = recency.vec.list[adver][[1]],
inf.method = "covTest")
save(res, file = paste0("SLCMA-newDNAm-rerun-", adver, "_20200225.Rdata"))
|
library(circlize)
# https://jokergoo.github.io/circlize_book/book/legends.html
# BiocManager::install("ComplexHeatmap")
library(ComplexHeatmap)
library(grid)
library(soilDB)  # provides fetchOSD() used below
s <- c('zook', 'pierre', 'lucy', 'redding')
x <- fetchOSD(s, extended = TRUE)
xx <- x$climate.monthly
xx <- xx[grep('ppt', xx$climate_var), ]
u <- unique(xx$series)
n <- length(u)
s <- split(xx, xx$month)
ll <- levels(xx$month)
circos.clear()
circos.par('clock.wise' = TRUE, 'start.degree' = 90, 'gap.degree' = 6, 'points.overflow.warning' = FALSE)
circos.initialize(sectors = ll, xlim = c(0, n))
circos.track(ylim = c(0, 200), track.height = 0.3)
# circos.info(plot = TRUE)
for(i in ll) {
print(i)
circos.barplot(value = s[[i]]$q50, pos = 1:n - 0.5, col = 2:(n+1), sector.index = i)
circos.text(x = n / 2, y = 200 + convert_y(2, "mm"),
labels = m[as.integer(i)],
facing = "bending.inside", cex = 1, font = 2,
adj = c(0.5, 0), niceFacing = TRUE
)
}
circos.yaxis(side = 'left', labels.cex = 0.66, sector.index = '1')
# discrete
lgd_points <- Legend(at = 1:n, labels = u, type = "points", legend_gp = gpar(col = 2:(n+1)))
lgd_list_vertical <- packLegend(lgd_points)
draw(lgd_list_vertical)
#
# circos.clear()
# circos.par('clock.wise' = TRUE, 'start.degree' = 90)
#
# circos.initialize('A', xlim = c(0, 12))
#
# circos.track(ylim = c(-2, 75), panel.fun = function(x, y) {
#
# circos.boxplot(t(ET), pos = 1:12 - 0.5, col = 2)
# })
#
# circos.info(plot = TRUE)
#
#
#
# sectors = letters[1:8]
# circos.initialize(sectors, xlim = c(0, 1))
# for(i in 1:3) {
# circos.track(ylim = c(0, 1))
# }
# circos.info(plot = TRUE)
#
|
/misc/circlize-polar-WB-ideas.R
|
no_license
|
ncss-tech/sharpshootR
|
R
| false | false | 1,671 |
r
|
|
#' Get ERDDAP tabledap data.
#'
#' @export
#'
#' @param x Anything coercible to an object of class info. So the output of
#' a call to [info()], or a datasetid, which will internally be passed
#' through [info()]
#' @param ... Any number of key-value pairs in quotes as query constraints.
#' See Details & examples
#' @param fields Columns to return, as a character vector
#' @param distinct If `TRUE` ERDDAP will sort all of the rows in the results
#' table (starting with the first requested variable, then using the second
#' requested variable if the first variable has a tie, ...), then remove all
#' non-unique rows of data. In many situations, ERDDAP can return distinct
#' values quickly and efficiently. But in some cases, ERDDAP must look through
#' all rows of the source dataset.
#' @param orderby If used, ERDDAP will sort all of the rows in the results
#' table (starting with the first variable, then using the second variable
#' if the first variable has a tie, ...). Normally, the rows of data in the
#' response table are in the order they arrived from the data source. orderBy
#' allows you to request that the results table be sorted in a specific way.
#' For example, use `orderby=c("stationID,time")` to get the results
#' sorted by stationID, then time. The orderby variables MUST be included in
#' the list of requested variables in the fields parameter.
#' @param orderbymax Give a vector of one or more fields, that must be included
#' in the fields parameter as well. Gives back data given constraints. ERDDAP
#' will sort all of the rows in the results table (starting with the first
#' variable, then using the second variable if the first variable has a
#' tie, ...) and then just keeps the rows where the value of the last sort
#' variable is highest (for each combination of other values).
#' @param orderbymin Same as `orderbymax` parameter, except returns
#' minimum value.
#' @param orderbyminmax Same as `orderbymax` parameter, except returns
#' two rows for every combination of the n-1 variables: one row with the
#' minimum value, and one row with the maximum value.
#' @param units One of 'udunits' (units will be described via the UDUNITS
#' standard (e.g.,degrees_C)) or 'ucum' (units will be described via the
#' UCUM standard (e.g., Cel)).
#' @param url A URL for an ERDDAP server.
#' Default: https://upwell.pfeg.noaa.gov/erddap/ - See [eurl()] for
#' more information
#' @param store One of `disk` (default) or `memory`. You can pass
#' options to `disk`
#' @param callopts Curl options passed on to [crul::verb-GET] (must be
#' named parameters)
#'
#' @return An object of class \code{tabledap}. This class is a thin wrapper
#' around a data.frame, so the data you get back is a data.frame with metadata
#' attached as attributes (datasetid, path (path where the csv is stored on
#' your machine), url (url for the request))
#'
#' @details
#' For key-value pair query constraints, the valid operators are =,
#' != (not equals), =~ (a regular expression test), <, <=, >, and >= . For
#' regular expressions you need to add a regular expression. For others, nothing
#' more is needed. Construct the entry like \code{'time>=2001-07-07'} with the
#' parameter on the left, value on the right, and the operator in the middle,
#' all within a set of quotes. Since ERDDAP accepts values other than \code{=},
#' we can't simply do \code{time = '2001-07-07'} as we normally would.
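#'
#' For example (an illustrative call only, reusing a datasetid and dates that
#' already appear in the examples below):
#' \code{tabledap('erdCinpKfmT', 'time>=2007-09-19', 'time<=2007-09-21')}
#' passes two constraint strings, each quoted as a whole.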
#'
#' Server-side functionality: Some tasks are done server side. You don't have
#' to worry about what that means. They are provided via parameters in this
#' function. See \code{distinct}, \code{orderby}, \code{orderbymax},
#' \code{orderbymin}, \code{orderbyminmax}, and \code{units}.
#'
#' Data is cached based on all parameters you use to get a dataset, including
#' base url, query parameters. If you make the same exact call in the same or
#' a different R session, as long you don't clear the cache, the function only
#' reads data from disk, and does not have to request the data from the web
#' again.
#'
#' If you run into an error like "HTTP Status 500 - There was a (temporary?)
#' problem. Wait a minute, then try again.", it's likely you are hitting
#' up against a size limit, and you should reduce the amount of data you
#' are requesting either via space, time, or variables. Pass in
#' \code{config = verbose()} to the request, and paste the URL into your
#' browser to see if the output is garbled to examine if there's a problem
#' with servers or this package
#'
#' @references https://upwell.pfeg.noaa.gov/erddap/index.html
#' @examples \dontrun{
#' # Just passing the datasetid without fields gives all columns back
#' tabledap('erdCinpKfmBT')
#'
#' # Pass time constraints
#' tabledap('erdCinpKfmBT', 'time>=2006-08-24')
#'
#' # Pass in fields (i.e., columns to retrieve) & time constraints
#' tabledap('erdCinpKfmBT',
#' fields = c('longitude', 'latitude', 'Aplysia_californica_Mean_Density'),
#' 'time>=2006-08-24'
#' )
#'
#' # Get info on a datasetid, then get data given information learned
#' info('erdCalCOFIlrvsiz')$variables
#' tabledap('erdCalCOFIlrvsiz', fields=c('latitude','longitude','larvae_size',
#' 'itis_tsn'), 'time>=2011-10-25', 'time<=2011-10-31')
#'
#' # An example workflow
#' ## Search for data
#' (out <- ed_search(query='fish', which = 'table'))
#' ## Using a datasetid, search for information on a datasetid
#' id <- out$alldata[[1]]$dataset_id
#' vars <- info(id)$variables
#' ## Get data from the dataset
#' vars$variable_name[1:3]
#' tabledap(id, fields = vars$variable_name[1:3])
#'
#' # Time constraint
#' ## Limit by time with date only
#' (info <- info('erdCinpKfmBT'))
#' tabledap(info, fields = c(
#' 'latitude','longitude','Haliotis_fulgens_Mean_Density'),
#' 'time>=2001-07-14')
#'
#' # Use distinct parameter - compare to distinct = FALSE
#' tabledap('sg114_3',
#' fields=c('longitude','latitude','trajectory'),
#' 'time>=2008-12-05', distinct = TRUE)
#'
#' # Use units parameter
#' ## In this example, values are the same, but sometimes they can be different
#' ## given the units value passed
#' tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'),
#' 'time>=2007-09-19', 'time<=2007-09-21', units='udunits')
#' tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'),
#' 'time>=2007-09-19', 'time<=2007-09-21', units='ucum')
#'
#' # Use orderby parameter
#' tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'),
#' 'time>=2007-09-19', 'time<=2007-09-21', orderby='temperature')
#' # Use orderbymax parameter
#' tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'),
#' 'time>=2007-09-19', 'time<=2007-09-21', orderbymax='temperature')
#' # Use orderbymin parameter
#' tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'),
#' 'time>=2007-09-19', 'time<=2007-09-21', orderbymin='temperature')
#' # Use orderbyminmax parameter
#' tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'),
#' 'time>=2007-09-19', 'time<=2007-09-21', orderbyminmax='temperature')
#' # Use orderbymin parameter with multiple values
#' tabledap('erdCinpKfmT',
#' fields=c('longitude','latitude','time','depth','temperature'),
#' 'time>=2007-06-10', 'time<=2007-09-21',
#' orderbymax=c('depth','temperature')
#' )
#'
#' # Integrate with taxize
#' out <- tabledap('erdCalCOFIlrvcntHBtoHI',
#' fields = c('latitude','longitude','scientific_name','itis_tsn'),
#' 'time>=2007-06-10', 'time<=2007-09-21'
#' )
#' tsns <- unique(out$itis_tsn[1:100])
#' library("taxize")
#' classif <- classification(tsns, db = "itis")
#' head(rbind(classif)); tail(rbind(classif))
#'
#' # Write to memory (within R), or to disk
#' (out <- info('erdCinpKfmBT'))
#' ## disk, by default (to prevent bogging down system w/ large datasets)
#' ## the 2nd call is much faster as it's mostly just the time of reading
#' ## in the table from disk
#' system.time( tabledap('erdCinpKfmBT', store = disk()) )
#' system.time( tabledap('erdCinpKfmBT', store = disk()) )
#' ## memory
#' tabledap('erdCinpKfmBT', store = memory())
#'
#' # use a different ERDDAP server
#' ## NOAA IOOS NERACOOS
#' url <- "http://www.neracoos.org/erddap/"
#' tabledap("E01_optics_hist", url = url)
#' }
tabledap <- function(x, ..., fields=NULL, distinct=FALSE, orderby=NULL,
orderbymax=NULL, orderbymin=NULL, orderbyminmax=NULL, units=NULL,
url = eurl(), store = disk(), callopts=list()) {
if (inherits(x, "info")) {
url <- x$base_url
message("info() output passed to x; setting base url to: ", url)
}
x <- as.info(x, url)
fields <- paste(fields, collapse = ",")
lenURL <- nchar(url)
if (substr(url, lenURL, lenURL) != '/') {
url <- paste0(url, '/')
}
url <- sprintf(paste0(url, "tabledap/%s.csv?%s"), attr(x, "datasetid"),
fields)
args <- list(...)
distinct <- if (distinct) 'distinct()' else NULL
units <- if (!is.null(units)) {
makevar(toupper(units), 'units("%s")')
} else {
units
}
orderby <- makevar(orderby, 'orderBy("%s")')
orderbymax <- makevar(orderbymax, 'orderByMax("%s")')
orderbymin <- makevar(orderbymin, 'orderByMin("%s")')
orderbyminmax <- makevar(orderbyminmax, 'orderByMinMax("%s")')
moreargs <- rc(list(distinct, orderby, orderbymax, orderbymin,
orderbyminmax, units))
args <- c(args, moreargs)
args <- lapply(args, URLencode, reserved = TRUE)
args <- paste0(args, collapse = "&")
if (!nchar(args[[1]]) == 0) {
url <- paste0(url, '&', args)
}
resp <- erd_tab_GET(url, dset = attr(x, "datasetid"), store, callopts)
loc <- if (store$store == "disk") resp else "memory"
structure(
read_table(resp),
class = c("tabledap", "data.frame"),
datasetid = attr(x, "datasetid"),
path = loc,
url = url
)
}
#' @export
print.tabledap <- function(x, ...) {
finfo <- file_info(attr(x, "path"))
cat(sprintf("<ERDDAP tabledap> %s", attr(x, "datasetid")), sep = "\n")
path <- attr(x, "path")
path2 <- if (file.exists(path)) path else "<beware: file deleted>"
cat(sprintf(" Path: [%s]", path2), sep = "\n")
if (attr(x, "path") != "memory") {
cat(sprintf(" Last updated: [%s]", finfo$mtime), sep = "\n")
cat(sprintf(" File size: [%s mb]", finfo$size), sep = "\n")
}
print(tibble::as_tibble(x))
}
erd_tab_GET <- function(url, dset, store, callopts) {
cli <- crul::HttpClient$new(url = url, opts = callopts)
if (store$store == "disk") {
# store on disk
key <- gen_key(url, NULL, "csv")
if ( file.exists(file.path(store$path, key)) ) {
file.path(store$path, key)
} else {
dir.create(store$path, showWarnings = FALSE, recursive = TRUE)
if (!store$overwrite) {
stop('overwrite was `FALSE`, see ?disk')
}
res <- cli$get(disk = file.path(store$path, key))
err_handle(res, store, key)
res$content
}
} else {
res <- cli$get()
err_handle(res, store, key)
res
}
}
makevar <- function(x, y){
if (!is.null(x)) {
x <- paste0(x, collapse = ",")
sprintf(y, x)
} else {
NULL
}
}
|
/R/table.R
|
permissive
|
ropensci/rerddap
|
R
| false | false | 11,146 |
r
|
|
#===============#
#inventory table#
#===============#
# observe({
#
# hideTab(inputId="tabs", target="Sampling overview")
#
# })
data_list<-reactive({
#if (is.null(input$file)) return()
req(input$file)
load(input$file$datapath, envir = .GlobalEnv)
# modify the CS.Rdata
ca<-as.data.table(ca)
#ca<-fread(file)
ca$Region[ca$Region=="NA"|is.na(ca$Region)]<-'NATL'
cainventory<-ca[,.(NoMaturityStage=sum(!is.na(MaturityStage)),NoMaturityStageTrips=length(unique(Trip[!is.na(MaturityStage)])),NoAge=sum(!is.na(Age)),NoAgeTrips=length(unique(Trip[!is.na(Age)])),NoLength=sum(!is.na(LengthClass)),NoLengthTrips=length(unique(Trip[!is.na(LengthClass)])),NoWeight=sum(!is.na(Weight)),NoWeightTrips=length(unique(Trip[!is.na(Weight)]))),by=c("Year","Region","FlagCountry","LandingCountry","Stock","Species","SamplingType","Quarter","CatchCategory","Sex")]
# datatable wants factors for filter =
cainventory$FlagCountry<-as.factor(cainventory$FlagCountry)
cainventory$LandingCountry<-as.factor(cainventory$LandingCountry)
cainventory$Region<-as.factor(cainventory$Region)
cainventory$Stock<-as.factor(cainventory$Stock)
cainventory$Species<-as.factor(cainventory$Species)
cainventory$SamplingType<-as.factor(cainventory$SamplingType)
cainventory$Quarter<-as.factor(cainventory$Quarter)
cainventory$CatchCategory<-as.factor(cainventory$CatchCategory)
cainventory$Sex<-as.factor(cainventory$Sex)
hh$StartQuarter <- quarter(ymd(hh$StartDate))
#sl<-as.data.table(sl)
#tr<-as.data.table(tr)
#preparing master table
sl_master<-merge(sl, tr[,list(CS_TripId, VesselIdentifier, SamplingCountry, SamplingMethod, VesselLengthCategory)], by="CS_TripId", all.x=T)
sl_master <-
merge(sl_master,
hh[, list(
Region,
CS_TripId,
CS_StationId,
StartDate,
StartQuarter,
FishingTime,
PosStartLatDec,
PosStartLonDec,
PosStopLatDec,
PosStopLonDec,
Area,
FishingGround,
StatisticalRectangle,
FishingActivityCategoryEuropeanLvl5,
FishingActivityCategoryEuropeanLvl6,
Gear
)],
by = c("CS_TripId", "CS_StationId"),
all.x = T)
#class(sl_master)
slinventory<-sl_master[,.(NoLength=sum(NoInSubSample),NoLengthTrips=length(unique(Trip[NoInSubSample>0])),WeigthKg=sum(SubSampleWeight_kg)),by=c("Year","Region","FlagCountry","LandingCountry","Stock","Species","SamplingType","StartQuarter","FishingGround","Area" ,"FishingActivityCategoryEuropeanLvl6", "CatchCategory","VesselLengthCategory")][NoLength>0|NoLengthTrips>0,]
slinventory$Region[slinventory$Region=="NA"|is.na(slinventory$Region)]<-'NATL'
slinventory$FlagCountry<-as.factor(slinventory$FlagCountry)
slinventory$LandingCountry<-as.factor(slinventory$LandingCountry)
slinventory$Region<-as.factor(slinventory$Region)
slinventory$Stock<-as.factor(slinventory$Stock)
slinventory$Species<-as.factor(slinventory$Species)
slinventory$SamplingType<-as.factor(slinventory$SamplingType)
slinventory$StartQuarter<-as.factor(slinventory$StartQuarter)
slinventory$FishingGround<-as.factor(slinventory$FishingGround)
slinventory$Area<-as.factor(slinventory$Area)
slinventory$CatchCategory<-as.factor(slinventory$CatchCategory)
# ca_map<-ca
# add length
hl_master<-
merge(hl,
sl_master[, list(
Region,
CS_TripId,
CS_StationId,
CS_SpeciesListId,
StartDate,
StartQuarter,
FishingTime,
PosStartLatDec,
PosStartLonDec,
PosStopLatDec,
PosStopLonDec,
Area,
FishingGround,
StatisticalRectangle,
FishingActivityCategoryEuropeanLvl5,
FishingActivityCategoryEuropeanLvl6,
Gear
)],
by = c("CS_TripId", "CS_StationId", "CS_SpeciesListId"),
all.x = T)
cahl <- mutate(ca,
CS_LengthId = NA,
CS_SpeciesListId = NA,
CS_StatioId = NA,
IndividualSex = NA,
SubSamplingCategory = NA,
NoAtLengthInSample = NA,
NoAtLengthInCatch = NA,
NoAtLengthInSample_ThousandIndiv = NA,
NoAtLengthInSample_MillionIndiv = NA)
hl_master <- rename(hl_master, Stock = stock)
ca_map <- rbind(cahl, hl_master, fill = TRUE)
#ca_map<-as.data.table(ca_map)
ca_map<-ca_map[!(is.na(StatisticalRectangle)|StatisticalRectangle=='99u9'),]
ca_map$lat<- ices.rect(ca_map$StatisticalRectangle)$lat
ca_map$lon <- ices.rect(ca_map$StatisticalRectangle)$lon
#
# ca_map<-ca_map[,.(NoMaturityStage=sum(!is.na(MaturityStage)),NoMaturityStageTrips=length(unique(Trip[!is.na(MaturityStage)])),NoAge=sum(!is.na(Age)),NoAgeTrips=length(unique(Trip[!is.na(Age)])),NoLength=sum(!is.na(LengthClass)),NoLengthTrips=length(unique(Trip[!is.na(LengthClass)])),NoWeight=sum(!is.na(Weight)),NoWeightTrips=length(unique(Trip[!is.na(Weight)]))),by=c("Region","LandingCountry","Species","SamplingType","Quarter","CatchCategory","lat","lon")]
ca_map <- ca_map[,.(NoMaturityStage=sum(!is.na(MaturityStage)),NoMaturityStageTrips=length(unique(Trip[!is.na(MaturityStage)])),NoAge=sum(!is.na(Age)),NoAgeTrips=length(unique(Trip[!is.na(Age)])), NoWeight=sum(!is.na(Weight)),NoWeightTrips=length(unique(Trip[!is.na(Weight)])), NoLength = sum(!is.na(NoAtLengthInSample)), NoLengthTrips = length(unique(Trip[!is.na(NoAtLengthInSample)]))),by=c("Region","LandingCountry","Species","SamplingType","Quarter","CatchCategory","lat","lon")]
#
ca_map2<-ca
ca_map2<-ca_map2[!(is.na(StatisticalRectangle)|StatisticalRectangle=='99u9'),]
ca_map2$lat<- ices.rect(ca_map2$StatisticalRectangle)$lat
ca_map2$lon <- ices.rect(ca_map2$StatisticalRectangle)$lon
ca_map2<-ca_map2[,.(NoMaturityStage=sum(!is.na(MaturityStage)),NoMaturityStageTrips=length(unique(Trip[!is.na(MaturityStage)])),NoAge=sum(!is.na(Age)),NoAgeTrips=length(unique(Trip[!is.na(Age)])),NoLength=sum(!is.na(LengthClass)),NoLengthTrips=length(unique(Trip[!is.na(LengthClass)])),NoWeight=sum(!is.na(Weight)),NoWeightTrips=length(unique(Trip[!is.na(Weight)]))),by=c("Region","FlagCountry","Species","SamplingType","Quarter","CatchCategory","lat","lon")]
#
ca_map$SamplingType<-as.factor(ca_map$SamplingType)
ca_map$Quarter<-as.factor(ca_map$Quarter)
ca_map$LandingCountry<-as.factor(ca_map$LandingCountry)
ca_map$Region<-as.factor(ca_map$Region)
ca_map$Species<-as.factor(ca_map$Species)
ca_map2$SamplingType<-as.factor(ca_map2$SamplingType)
ca_map2$Quarter<-as.factor(ca_map2$Quarter)
ca_map2$FlagCountry<-as.factor(ca_map2$FlagCountry)
ca_map2$Region<-as.factor(ca_map2$Region)
ca_map2$Species<-as.factor(ca_map2$Species)
list1<-vector(mode = "list")
list1[[1]]<-cainventory
list1[[2]]<-slinventory
list1[[3]]<-ca_map
list1[[4]]<-ca_map2
list1
})
# ca for mapping
#do the master table
# output for CA inventory
output$inventorytable_CA <- DT::renderDT(DT::datatable({data_list()[[1]]
}
, options = list(
pageLength = 20,autoWidth=T,scrollX=TRUE
),filter = 'top'
))
# output for SL inventory
output$inventorytable_SL <- DT::renderDT(DT::datatable({data_list()[[2]]}
, options = list(
pageLength = 20,autoWidth=T,scrollX=TRUE
),filter = 'top'
))
#download widget
output$download_filtered_inventorytable_CA <-
downloadHandler(
filename = "ca_inventory_data.csv",
content = function(file){
      ## cainventory only exists inside the data_list() reactive, so pull it from there
      write.csv(data_list()[[1]][input[["inventorytable_CA_rows_all"]], ],
                        file)
}
)
#download widget
output$download_filtered_inventorytable_SL <-
downloadHandler(
filename = "sl_inventory_data.csv",
content = function(file){
      ## the row indices come from the SL inventory table, so write data_list()[[2]]
      ## (the original wrote the raw ca table, whose rows do not line up)
      write.csv(data_list()[[2]][input[["inventorytable_SL_rows_all"]], ],
                        file)
}
)
|
/RegionalOverviews/overviews_shiny/server/tab_inventory.R
|
no_license
|
Manonifremer/RCGs
|
R
| false | false | 8,267 |
r
|
|
## Read the data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Group data by year
data_by_year<-aggregate(Emissions~year,NEI,sum)
## Make the plot
plot(data_by_year$year,data_by_year$Emissions,xlab="Year",ylab="Total emissions (tons)",col='red',pch=16)
lines(data_by_year$year,data_by_year$Emissions,col='red')
## Save plot to png file
dev.copy(png,'plot1.png')
dev.off()
|
/Plot1.R
|
no_license
|
ocirep/Exdata2
|
R
| false | false | 419 |
r
|
|
library(glmnet)
mydata = read.table("./TrainingSet/RF/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.7,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/urinary_tract/urinary_tract_074.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Classifier/urinary_tract/urinary_tract_074.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 371 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmu.R
\name{xmu_extract_column}
\alias{xmu_extract_column}
\title{Get one or more columns from mxData or regular data.frame}
\usage{
xmu_extract_column(data, col, drop = FALSE)
}
\arguments{
\item{data}{mxData or data.frame}
\item{col}{the name(s) of the column(s) to extract}
\item{drop}{whether to drop the structure of the data.frame when extracting one column}
}
\value{
\itemize{
\item column of data
}
}
\description{
Same effect as \code{df[, col]}, but works for \code{\link[=mxData]{mxData()}} and checks that the names are present
}
\examples{
xmu_extract_column(mtcars, "wt")
xmu_extract_column(mxData(mtcars, type = "raw"), "wt")
xmu_extract_column(mxData(mtcars, type = "raw"), "wt", drop=TRUE)
xmu_extract_column(mxData(mtcars, type = "raw"), c("wt", "mpg"))
}
\seealso{
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx_string_to_algebra}()},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_Continuous}()},
\code{\link{xmuTwinSuper_NoBinary}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_bracket_address2rclabel}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_TwinSuperModel}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_print_algebras}()},
\code{\link{xmu_rclabel_2_bracket_address}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_summary_RAM_group_parameters}()},
\code{\link{xmu_twin_add_WeightMatrices}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_make_def_means_mats_and_alg}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
|
/man/xmu_extract_column.Rd
|
no_license
|
tbates/umx
|
R
| false | true | 3,655 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/counts.r
\name{counts}
\alias{counts}
\title{Correlation Counts chart.}
\usage{
counts(
data,
x,
y,
title = NULL,
subtitle = NULL,
xtitle = NULL,
ytitle = NULL,
caption = NULL
)
}
\arguments{
\item{data}{input data.frame}
\item{x}{x variable}
\item{y}{y variable}
\item{title}{main title}
\item{subtitle}{subtitle}
\item{xtitle}{x axis title}
\item{ytitle}{y axis title}
\item{caption}{caption}
}
\value{
An object of class \code{ggplot}
}
\description{
The counts function draws a counts chart for correlation analysis.
}
\examples{
plot<- counts(data=mpg, x="cty", y="hwy")
plot
}
|
/man/counts.Rd
|
permissive
|
HeeseokMoon/ggedachart
|
R
| false | true | 683 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataMax2.R
\docType{data}
\name{maxd2Cov}
\alias{maxd2Cov}
\title{Instance of covariance matrix for 50 sites
Site list produced by maxDistSites("01/01/07 00:00","12/31/14 00:00",50,230,"SO4",1)}
\format{An object of class \code{"cross"}; see \code{\link[qtl]{read.cross}}.}
\usage{
data("maxd2Cov")
}
\description{
Instance of covariance matrix for 50 sites
Site list produced by maxDistSites("01/01/07 00:00","12/31/14 00:00",50,230,"SO4",1)
}
\examples{
data("maxd2Cov")
cov2 <- maxd2Cov
cov2
}
\keyword{datasets}
|
/man/maxd2Cov.Rd
|
no_license
|
hessakh/MESgenCov
|
R
| false | true | 595 |
rd
|
|
library(strataG)
setwd("~/Dropbox/Crandall_tobo/Final_FASTA/")
summstats<-data.frame()
for(fastafile in list.files("~/Dropbox/Crandall_tobo/Final_FASTA/")){
print(fastafile)
f<-read.fasta(fastafile)
pops<-sub(pattern=".+population:([A-Za-z]+).+",replacement="\\1",x=labels(f))
g<-DNAbin2genind(f, pop=pops)
h<-genind2gtypes(g)
phist<-statPhist(h, nrep=1000)
phist_value<-phist$result[1]
phist_pvalue<-phist$result[2]
tajd<-tajimasD(f)
tajd_value<-tajd[1]
tajd_pvalue<-tajd[2]
k<-labelHaplotypes(f, prefix="Hap")
haps<-length(labels(k$hap.seqs))
hapdiv<-swfscMisc::diversity(k$haps)
summstats<-rbind(summstats,data.frame(fastafile, phist_value, phist_pvalue, tajd_value, tajd_pvalue,haps,hapdiv))
}
|
/summary_stats_from_fasta.R
|
permissive
|
ericcrandall/hawaii_migrate
|
R
| false | false | 739 |
r
|
|
## Catrina Nowakowski
## From July 2016 to November 2016
## UConn Civil and Environmental Engineering Department
## Currently updating this script from the summer.
## df_i is a very bad variable name
## Would be a good idea to add a third run through... later
source("Loading_Dataframe.R")
###############################################################################################################
## This script runs a stepwise variable selector to build a GLS regression model.
## It uses the p-values to select each new variable, and the AIC and BIC values are tracked and plotted at the end
# to see the progression of model fit quality. This can be used to determine the point to run the selector to.
## The code can be run starting at step one up to any step, e.g. variable_i to variable_k,
# or from variable_i to a second look at variable_k.
###############################################################################################################
## Function to run regression with anywhere from 1 to 5 variables
## The reason why I made an additional function to run the model is to simplify running models for different situations
# where I would use the AIC or BIC values to select the number of variables.
## It also makes it easier to alter and modify the code.
gls_different_num_var <- function(num_of_var, df){
  ## Checks how many variables are necessary and selects the appropriate gls() call to build the model
if(num_of_var == 1){
regress_model <- gls(Chlor~ Region_W +Region_C +Region_E + variable_i,
data = df_i, na.action = na.omit, control = list(singular.ok = TRUE),
corr = corExp(form =~ Lat + Long | BiWeek))
} else if(num_of_var == 2){
regress_model <- gls(Chlor~ Region_W +Region_C +Region_E + variable_i + variable_j ,
data = df_i, na.action = na.omit, control = list(singular.ok = TRUE),
corr = corExp(form =~ Lat + Long | BiWeek))
} else if(num_of_var == 3){
regress_model <- gls(Chlor~Region_W +Region_C +Region_E + variable_i + variable_j + variable_k,
data = df_i, na.action = na.omit, control = list(singular.ok = TRUE),
corr = corExp(form =~ Lat + Long | BiWeek))
} else if(num_of_var == 4){
regress_model <- gls(Chlor~ Region_W +Region_C +Region_E + variable_i + variable_j + variable_k + variable_l,
data = df_i, na.action = na.omit, control = list(singular.ok = TRUE),
corr = corExp(form =~ Lat + Long | BiWeek))
} else if(num_of_var == 5){
regress_model <- gls(Chlor~Region_W +Region_C +Region_E + variable_i + variable_j + variable_k + variable_l + variable_m,
data = df_i, na.action = na.omit, control = list(singular.ok = TRUE),
corr = corExp(form =~ Lat + Long | BiWeek))
}
return(regress_model)
## END FUNCTION
}
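## Example of the intended use of the helper above (a sketch, not part of the
## original workflow): once the first two selected predictors have been copied
## into df_i as variable_i and variable_j, refit the 2-variable model and record
## its AIC/BIC so the fit-quality progression can be plotted later. The tracker
## vectors here (aic_tracker, bic_tracker) are hypothetical names, assumed to be
## pre-allocated.
# model_2var <- gls_different_num_var(2, df_i)
# aic_tracker[2] <- AIC(model_2var)
# bic_tracker[2] <- BIC(model_2var)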
###############################################################################################################
## Function to produce sample output
## This function removes the potential error of me messing up when I change how I run the regression model
produce_sum_output <- function(regress_model, iteration, iterator){
  ## Produces a summary of the output and separates out the p-value
if(iteration == 1){
Summary_Output <- summary(regress_model)
p_val <- as.data.frame(Summary_Output$tTable)
i = iterator
p_val_tracker_i[i] <- p_val$`p-value`[4]
return(p_val_tracker_i)
} else if(iteration == 2){
Summary_Output <- summary(regress_model)
p_val <- as.data.frame(Summary_Output$tTable)
j = iterator
p_val_tracker_j[j] <- p_val$`p-value`[5]
return(p_val_tracker_j)
} else if(iteration == 3){
Summary_Output <- summary(regress_model)
p_val <- as.data.frame(Summary_Output$tTable)
k = iterator
p_val_tracker_k[k] <- p_val$`p-value`[6]
return(p_val_tracker_k)
} else if(iteration == 4){
Summary_Output <- summary(regress_model)
p_val <- as.data.frame(Summary_Output$tTable)
l = iterator
p_val_tracker_l[l] <- p_val$`p-value`[7]
return(p_val_tracker_l)
} else if(iteration == 5){
Summary_Output <- summary(regress_model)
p_val <- as.data.frame(Summary_Output$tTable)
m = iterator
p_val_tracker_m[m] <- p_val$`p-value`[8]
return(p_val_tracker_m)
}
## END FUNCTION
}
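## Sketch of one selection step using the two helpers above (illustrative only;
## it assumes df_i, df, var_names and the global tracker vector p_val_tracker_i
## already exist, following the conventions used in this script): each candidate
## column is copied into df_i as variable_i, a 1-variable GLS is fit, and its
## p-value is stored so the best candidate can be picked afterwards.
# for(i in seq_along(var_names)){
#   df_i$variable_i <- df[[var_names[i]]]
#   regress_model <- gls_different_num_var(1, df_i)
#   p_val_tracker_i <- produce_sum_output(regress_model, 1, i)
# }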
###############################################################################################################
## Function to find the best variable
## This function is a follow up for processing after the loop that is used to apply the previous functions
best_var <- function(p_Val_tracker, var_names, df_i, df, iteration){
  ## Identifies the best variable, removes it from the list, and adds it to df_i
if(iteration == 1){
## Picks the p-value closest to zero and finds its location in the tracker vector
ii <- which.min(p_val_tracker_i)
    ## Stores the value
    ii_Val <- p_val_tracker_i[ii]
    ## Stores the name
    ii_name <- var_names[ii]
    ## Adds the best variable that was found above to the data frame to be used in the rest of the iterations
    df_i$variable_i <- df[[var_names[ii]]]
    ## Removes the variable name that was selected from the list so it will not be pulled twice
var_names <- var_names[!var_names == ii_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("ii_name", ii_name, pos = 1)
} else if(iteration == 2){
## Picks the p-value closest to zero and finds its location in the tracker vector
jj <- which.min(abs(p_val_tracker_j - 0))
    ## Stores the value
    jj_Val <- p_val_tracker_j[jj]
    ## Stores the name
    jj_name <- var_names[jj]
    ## Adds the best variable that was found above to the data frame to be used in the rest of the iterations
    df_i$variable_j <- df[[var_names[jj]]]
    ## Removes the variable name that was selected from the list so it will not be pulled twice
var_names <- var_names[!var_names == jj_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("jj_name", jj_name, pos = 1)
} else if(iteration == 3){
## Picks the p-value closest to zero and finds its location in the tracker vector
kk <- which.min(abs(p_val_tracker_k - 0))
    ## Stores the value
    kk_Val <- p_val_tracker_k[kk]
    ## Stores the name
    kk_name <- var_names[kk]
    ## Adds the best variable that was found above to the data frame to be used in the rest of the iterations
    df_i$variable_k <- df[[var_names[kk]]]
    ## Removes the variable name that was selected from the list so it will not be pulled twice
var_names <- var_names[!var_names == kk_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("kk_name", kk_name, pos = 1)
} else if(iteration == 4){
## Picks the p-value closest to zero and finds its location in the tracker vector
ll <- which.min(abs(p_val_tracker_l - 0))
    ## Stores the value
    ll_Val <- p_val_tracker_l[ll]
    ## Stores the name
    ll_name <- var_names[ll]
    ## Adds the best variable found above to the data frame used in the rest of the iterations
    df_i$variable_l <- df[[var_names[ll]]]
    ## Removes the selected variable name from the list so it will not be pulled twice
var_names <- var_names[!var_names == ll_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("ll_name", ll_name, pos = 1)
} else if(iteration == 5){
## Picks the p-value closest to zero and finds its location in the tracker vector
mm <- which.min(abs(p_val_tracker_m - 0))
    ## Stores the value
    mm_Val <- p_val_tracker_m[mm]
    ## Stores the name
    mm_name <- var_names[mm]
    ## Adds the best variable found above to the data frame used in the rest of the iterations
    df_i$variable_m <- df[[var_names[mm]]]
    ## Removes the selected variable name from the list so it will not be pulled twice
var_names <- var_names[!var_names == mm_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("mm_name", mm_name, pos = 1)
} else if(iteration == 11){
## Picks the p-value closest to zero and finds its location in the tracker vector
ii <- which.min(p_val_tracker_i)
    ## Stores the value
    ii_Val <- p_val_tracker_i[ii]
    ## Stores the name
    ii_name <- var_names[ii]
    ## Adds the best variable found above to the data frame used in the rest of the iterations
    df_i$variable_i <- df[[var_names[ii]]]
    ## Removes the selected variable name from the list so it will not be pulled twice
var_names <- var_names[!var_names == ii_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("ii_2_name", ii_name, pos = 1)
} else if(iteration == 22){
## Picks the p-value closest to zero and finds its location in the tracker vector
jj <- which.min(abs(p_val_tracker_j - 0))
    ## Stores the value
    jj_Val <- p_val_tracker_j[jj]
    ## Stores the name
    jj_name <- var_names[jj]
    ## Adds the best variable found above to the data frame used in the rest of the iterations
    df_i$variable_j <- df[[var_names[jj]]]
    ## Removes the selected variable name from the list so it will not be pulled twice
var_names <- var_names[!var_names == jj_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("jj_2_name", jj_name, pos = 1)
} else if(iteration == 33){
## Picks the p-value closest to zero and finds its location in the tracker vector
kk <- which.min(abs(p_val_tracker_k - 0))
    ## Stores the value
    kk_Val <- p_val_tracker_k[kk]
    ## Stores the name
    kk_name <- var_names[kk]
    ## Adds the best variable found above to the data frame used in the rest of the iterations
    df_i$variable_k <- df[[var_names[kk]]]
    ## Removes the selected variable name from the list so it will not be pulled twice
var_names <- var_names[!var_names == kk_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("kk_2_name", kk_name, pos = 1)
} else if(iteration == 44){
## Picks the p-value closest to zero and finds its location in the tracker vector
ll <- which.min(abs(p_val_tracker_l - 0))
    ## Stores the value
    ll_Val <- p_val_tracker_l[ll]
    ## Stores the name
    ll_name <- var_names[ll]
    ## Adds the best variable found above to the data frame used in the rest of the iterations
    df_i$variable_l <- df[[var_names[ll]]]
    ## Removes the selected variable name from the list so it will not be pulled twice
var_names <- var_names[!var_names == ll_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("ll_2_name", ll_name, pos = 1)
} else if(iteration == 55){
## Picks the p-value closest to zero and finds its location in the tracker vector
mm <- which.min(abs(p_val_tracker_m - 0))
    ## Stores the value
    mm_Val <- p_val_tracker_m[mm]
    ## Stores the name
    mm_name <- var_names[mm]
    ## Adds the best variable found above to the data frame used in the rest of the iterations
    df_i$variable_m <- df[[var_names[mm]]]
    ## Removes the selected variable name from the list so it will not be pulled twice
var_names <- var_names[!var_names == mm_name]
assign("var_names", var_names, pos = 1)
assign("df_i", df_i, pos = 1)
assign("mm_2_name", mm_name, pos = 1)
}
## END FUNCTION
}
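## Sketch of a single-branch alternative to the function above (not used below; shown only to make the
## selection logic easier to follow). The ten branches differ only in which tracker vector and which
## variable_* slot they touch, so the choice itself can be expressed once; the caller would then handle
## the bookkeeping that assign() does above. Assumes p-values are non-negative, so which.min() gives the
## value closest to zero.
pick_best_var <- function(p_val_tracker, var_names, df, df_i, slot_name){
  best      <- which.min(p_val_tracker)
  best_name <- var_names[best]
  df_i[[slot_name]] <- df[[best_name]]          # e.g. slot_name = "variable_i"
  list(df_i      = df_i,
       var_names = var_names[var_names != best_name],
       best_name = best_name,
       best_p    = p_val_tracker[best])
}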
## Save for second section
var_names_2 <- var_names
## Iterator for AIC and BIC
AIC_BIC_It <- 1
## Initiates the trackers for the AIC and BIC values
AIC_Track <- matrix(NA, 10)
BIC_Track <- matrix(NA, 10)
###############################################################################################################
## For Variable i
## Initiates the tracker for the next variable Loop
p_val_tracker_i <- matrix(NA, length(var_names))
## Loops through each variable being analyzed for the regression
for(i in 1:length(var_names)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_i <- df[[var_names[i]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(1, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_i <- produce_sum_output(regress_model, 1, i)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_i, var_names, df_i, df, 1)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(1, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## For Variable j
## Initiates the tracker for the next variable Loop
p_val_tracker_j <- matrix(NA, length(var_names))
## Loops through each variable being analyzed for the regression
for(j in 1:length(var_names)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_j <- df[[var_names[j]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(2, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_j <- produce_sum_output(regress_model, 2, j)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_j, var_names, df_i, df, 2)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(2, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## For Variable k
## Initiates the tracker for the next variable Loop
p_val_tracker_k <- matrix(NA, length(var_names))
## Loops through each variable being analyzed for the regression
for(k in 1:length(var_names)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_k <- df[[var_names[k]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(3, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_k <- produce_sum_output(regress_model, 3, k)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_k, var_names, df_i, df, 3)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(3, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## For Variable l
## Initiates the tracker for the next variable Loop
p_val_tracker_l <- matrix(NA, length(var_names))
## Loops through each variable being analyzed for the regression
for(l in 1:length(var_names)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_l <- df[[var_names[l]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(4, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_l <- produce_sum_output(regress_model, 4, l)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_l, var_names, df_i, df, 4)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(4, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## For Variable m
## Initiates the tracker for the next variable Loop
p_val_tracker_m <- matrix(NA, length(var_names))
## Loops through each variable being analyzed for the regression
for(m in 1:length(var_names)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_m <- df[[var_names[m]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(5, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_m <- produce_sum_output(regress_model, 5, m)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_m, var_names, df_i, df, 5)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(5, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## SECOND TIME THROUGH THE VARIABLES: this section runs through each variable in each position one more time to check that it is still
# the best one to use
#################################################################
## Remove Current variable names in use from the list
# var_names_2 <- var_names_2[!var_names_2 == jj_name]
# var_names_2 <- var_names_2[!var_names_2 == kk_name]
# var_names_2 <- var_names_2[!var_names_2 == ll_name]
# var_names_2 <- var_names_2[!var_names_2 == mm_name]
#################################################################
## Remove Current variable names in use from the list
var_names_2 <- var_names_2[!var_names_2 == jj_2_name]
var_names_2 <- var_names_2[!var_names_2 == kk_2_name]
var_names_2 <- var_names_2[!var_names_2 == ll_2_name]
var_names_2 <- var_names_2[!var_names_2 == mm_2_name]
###############################################################################################################
## For Variable i
## Initiates the tracker for the next variable Loop
p_val_tracker_i <- matrix(NA, length(var_names_2))
## Loops through each variable being analyzed for the regression
for(i in 1:length(var_names_2)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_i <- df[[var_names_2[i]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(1, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_i <- produce_sum_output(regress_model, 1, i)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_i, var_names_2, df_i, df, 11)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(5, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## For Variable j
## Initiates the tracker for the next variable Loop
p_val_tracker_j <- matrix(NA, length(var_names_2))
## Loops through each variable being analyzed for the regression
for(j in 1:length(var_names_2)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_j <- df[[var_names_2[j]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(2, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_j <- produce_sum_output(regress_model, 2, j)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_j, var_names_2, df_i, df, 22)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(5, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## For Variable k
## Initiates the tracker for the next variable Loop
p_val_tracker_k <- matrix(NA, length(var_names_2))
## Loops through each variable being analyzed for the regression
for(k in 1:length(var_names_2)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_k <- df[[var_names_2[k]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(3, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_k <- produce_sum_output(regress_model, 3, k)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_k, var_names_2, df_i, df, 33)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(5, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## For Variable l
## Initiates the tracker for the next variable Loop
p_val_tracker_l <- matrix(NA, length(var_names_2))
## Loops through each variable being analyzed for the regression
for(l in 1:length(var_names_2)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_l <- df[[var_names_2[l]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(4, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_l <- produce_sum_output(regress_model, 4, l)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_l, var_names_2, df_i, df, 44)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(5, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
###############################################################################################################
## For Variable m
## Initiates the tracker for the next variable Loop
p_val_tracker_m <- matrix(NA, length(var_names_2))
## Loops through each variable being analyzed for the regression
for(m in 1:length(var_names_2)){
## Adds the variable to the regression data frame, each iteration this is updated
df_i$variable_m <- df[[var_names_2[m]]]
## Runs model using a function I made to use proper number of variables
regress_model <- gls_different_num_var(5, df_i)
  ## Produces a summary of the output and separates out the p-value
p_val_tracker_m <- produce_sum_output(regress_model, 5, m)
}
## Picks the p-value closest to zero and finds its location in the tracker vector
## Stores the value
## Stores the name
## Adds the best variable found above to the data frame used in the rest of the iterations
## Removes the selected variable name from the list so it will not be pulled twice
best_var(p_val_tracker_m, var_names_2, df_i, df, 55)
####################
## Check AIC and BIC
regress_model <- gls_different_num_var(5, df_i)
quick_sum <- summary(regress_model)
AIC_Track[AIC_BIC_It] <- quick_sum$AIC
BIC_Track[AIC_BIC_It] <- quick_sum$BIC
AIC_BIC_It <- 1 + AIC_BIC_It
#################################################################
## Run the Final Regression Model
regress_model_final <- gls_different_num_var(5, df_i)
## Testing autocorrelation structures; the one with the best AIC value (corExp) is used above.
# corAR1 659.7716 265.0276
# corExp 640.406 230.1096
# corGaus 641.7344 229.6906
# corSpher 643.8627 232.0682
### correlation = corSymm(form = ~1|Subject)
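## Sketch of how the comparison above could be re-run (wrapped in if (FALSE) so it is not executed when
## the script is sourced; the exact values will depend on the data). The spatial structures use the
## coordinates; the corAR1 form shown here is an assumption, since the call used for that row is not shown.
if (FALSE) {
  cor_structs <- list(AR1   = corAR1(form =~ 1 | BiWeek),
                      Exp   = corExp(form =~ Lat + Long | BiWeek),
                      Gaus  = corGaus(form =~ Lat + Long | BiWeek),
                      Spher = corSpher(form =~ Lat + Long | BiWeek))
  sapply(cor_structs, function(cs){
    fit <- gls(Chlor ~ Region_W + Region_C + Region_E + variable_i + variable_j +
                 variable_k + variable_l + variable_m,
               data = df_i, na.action = na.omit, control = list(singular.ok = TRUE),
               corr = cs)
    c(AIC = AIC(fit), BIC = BIC(fit))
  })
}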
###################################################################################################
## Generate Plots
all_var <- df_i
drops = c("Region_W", "Region_C", "Region_E", "Lat", "Long", "BiWeek")
all_var <- all_var[ , !(names(all_var) %in% drops)]
plot(all_var)
###################################################################################################
## VIF
library(car)
print("VIF: ")
Vif_Numbers <- vif(gls(Chlor~ variable_i + variable_j + variable_k + variable_l + variable_m,
data = df_i, na.action="na.exclude") )
print(Vif_Numbers)
###################################################################################################
## Checking the Regression
library(cvTools)
Chlor = df_i$Chlor
The_Cross_Val <- cvFit(regress_model_final, data = df_i, y = Chlor, K =round((length(df_i$Region_W)*.1), 0) )
print(The_Cross_Val)
The_Cross_Val <- cvFit(regress_model_final, data = df_i, y = Chlor, K =length(df_i$Region_W) )
print(The_Cross_Val)
## cvFit is a function that takes a tenth of my data and sets it aside while it runs my model,
## calculates the residuals for that 10%, then randomly selects another 10%
## and repeats the cross validation
## outputs mean bias; lower is better
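## Manual version of the same idea (sketch only, wrapped in if (FALSE) so it does not run when the script
## is sourced; the fold count and seed are assumptions). Each fold is held out in turn, the five-variable
## model is refit on the remaining rows, and the held-out RMSE is computed.
if (FALSE) {
  set.seed(42)
  K <- 10
  folds <- sample(rep(1:K, length.out = nrow(df_i)))
  cv_rmse <- sapply(1:K, function(f){
    train <- df_i[folds != f, ]
    test  <- df_i[folds == f, ]
    fit <- gls(Chlor ~ Region_W + Region_C + Region_E + variable_i + variable_j +
                 variable_k + variable_l + variable_m,
               data = train, na.action = na.omit, control = list(singular.ok = TRUE),
               corr = corExp(form =~ Lat + Long | BiWeek))
    sqrt(mean((test$Chlor - predict(fit, newdata = test))^2, na.rm = TRUE))
  })
  mean(cv_rmse)
}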
###################################################################################################
Summary_Output <- summary(regress_model_final)
print(Summary_Output)
###################################################################################################
corelation_data_frame <- data.frame(variable_i = df_i$variable_i, variable_j = df_i$variable_j,
variable_k = df_i$variable_k, variable_l = df_i$variable_l,
variable_m = df_i$variable_m)
names(corelation_data_frame)<- c(ii_2_name, jj_2_name, kk_2_name, ll_2_name, mm_2_name)
corelation <- cor(corelation_data_frame)
print(corelation)
####################
## Check AIC and BIC
ACI_BIC <- cbind(AIC_Track, BIC_Track)
ACI_BIC <- as.data.frame(ACI_BIC)
colnames(ACI_BIC) <- c("ACI", "BIC")
ACI_BIC$x <- 1:length(ACI_BIC$ACI)
## This plot is to take a look at each model run from every step above. All of the AIC and BIC values
# were saved and this plot shows them in order to track the progression of the models created; negative
# slopes indicate that the next model run was better and positive slopes indicate that the next model
# run is not as good as the previous one
AIC_BIC_Plot <- ggplot(ACI_BIC, aes(x)) +
geom_line(aes(y = ACI), color = "red") +
geom_point(aes(y = ACI), color = "red") +
geom_line(aes(y = BIC), color = "blue") +
geom_point(aes(y = BIC), color = "blue")
show(AIC_BIC_Plot)
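## Numeric companion to the plot above (note the ACI column holds the AIC values despite its name):
## reports the model-building step with the lowest value of each criterion.
print(paste0("Lowest AIC at step ", which.min(ACI_BIC$ACI),
             "; lowest BIC at step ", which.min(ACI_BIC$BIC)))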
###################################################################################################
## Residuals
regress_model_resid <- residuals(regress_model_final)
plot(regress_model_resid, main = "Regression model Residuals", ylab = "Residuals"); abline( 0,0, col = "red")
###################################################################################################
## QQ Plots
qqnorm(regress_model_resid, main = "Regression Residuals QQ plot"); qqline(regress_model_resid, col = "red")
###################################################################################################
## Grab vs the predicted
x <- as.data.frame(df_i$Chlor)
names(x) <- c("Samples")
y <- as.data.frame(predict(regress_model_final))
names(y) <- c("Predicted")
y_no_na <- y[!is.na(y)]
RMSE <- round(sqrt( sum( (y$Predicted - x$Samples)^2 , na.rm = TRUE ) / length(y_no_na)), 3)
Correla <- round(cor(x$Samples, y$Predicted, use="complete"), 3)
one_to_one <- qplot(x$Samples, y$Predicted) +
  geom_abline(intercept = 0, colour = "red", size = 1) +
  geom_smooth(method = "lm", se = FALSE) +
  annotate("text", x = 2.5, y = 0, size = 5, label = paste0("Pearson r: ", Correla )) +
annotate("text", x = 2.5, y = .5, size = 5,label =paste0("RMSE: ", RMSE )) +
xlab("Sample Chlorophyll") + ylab("Predicted Chlorophyll")
show(one_to_one)
###################################################################################################
## Check for spatial autocorrelation in the residuals using variograms:
dat <- data.frame(Long_x = df$Long, Lat_y = df$Lat, resids = regress_model_resid )
dat <- dat[!is.na(dat$resids),]
coordinates(dat)<-c('Long_x','Lat_y')
# a <- bubble(dat,zcol='resids')
# plot(a)
var.mod<-variogram(resids~1,data=dat, cloud = TRUE) #, alpha=c(0,45,90,135))
plot(var.mod)
var.mod<-variogram(resids~1,data=dat) #, alpha=c(0,45,90,135))
plot(var.mod)
|
/Regression_Model/GLS_With_Functions.R
|
no_license
|
gudaleon/Lake_Erie_Research
|
R
| false | false | 32,216 |
r
|
|
#' -----------------------------------------------------------------------------
#' Project: ECHO Aim 1 Land Use Regression
#'
#' Task: Create a grid for the study domain and summarize land use and traffic
#' variables for each grid cell
#'
#' Author: Sheena Martenies
#' Date Created: May 1, 2018
#' Contact: sheena.martenies@colostate.edu
#' -----------------------------------------------------------------------------
#' Load required libraries
library(sf)
library(sp)
library(rgdal)
library(raster)
library(ggplot2)
library(ggmap)
library(tidyverse)
ll_wgs84 <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
albers <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
utm_13 <- "+init=epsg:26913"
#' read in grid data
grid_250 <- st_read(here::here("Data", "Grid_250_m_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers)
#' 1 km buffer around the grid
grid_bound <- st_buffer(st_union(grid_250), dist = 1000) %>%
st_transform(ll_wgs84)
#' -----------------------------------------------------------------------------
#' 1) Summarize land characteristics and traffic exposures for the 250 m grid
#'
#' 1A) Extract raster values using sp polygons
#' -----------------------------------------------------------------------------
grid_spdf <- as(grid_250, "Spatial")
#' Average % tree cover and % impervious surface
tree_cover <- raster(here::here("Data", "Tree_Cover_AEA.tif"))
tree_cover_sp <- raster::extract(tree_cover, grid_spdf, fun=mean, sp=T)
colnames(tree_cover_sp@data)[ncol(tree_cover_sp@data)] <- "tree_cover"
grid_spdf <- merge(grid_spdf, tree_cover_sp@data[, c("grid_id", "tree_cover")], by="grid_id")
impervious <- raster(here::here("Data", "Impervious_AEA.tif"))
impervious_sp <- raster::extract(impervious, grid_spdf, fun=mean, sp=T)
colnames(impervious_sp@data)[ncol(impervious_sp@data)] <- "impervious"
grid_spdf <- merge(grid_spdf, impervious_sp@data[, c("grid_id", "impervious")], by="grid_id")
#' clean up environment
rm(tree_cover, tree_cover_sp, impervious, impervious_sp)
#' Most frequent land use category
land_use <- raster(here::here("Data", "Land_Use_AEA.tif"))
grid_spdf$land_use <- as.vector(raster::extract(land_use, grid_spdf, fun=modal, sp=F))
rm(land_use)
#' -----------------------------------------------------------------------------
#' 1) Summarize land characteristics and traffic exposures for the 250 m grid
#'
#' 1B) Road segment lengths using sf
#' -----------------------------------------------------------------------------
grid_sf <- st_as_sf(grid_spdf)
#' Highway lengths
highways <- st_read(here::here("Data", "Highways_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers)
plot(st_geometry(highways))
#' sf and dplyr
highways_df <- st_intersection(grid_sf, highways) %>%
group_by(grid_id) %>%
summarise %>%
mutate(highway_m = unclass(st_length(.))) %>%
st_set_geometry(NULL)
grid_sf <- left_join(grid_sf, highways_df, by="grid_id")
grid_sf$highway_m <- ifelse(is.na(grid_sf$highway_m), 0, grid_sf$highway_m)
#' Check
sum(unclass(st_length(highways)))
sum(grid_sf$highway_m)
rm(highways, highways_df)
#' Major road lengths
major <- st_read(here::here("Data", "Major_Roads_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers)
plot(st_geometry(major))
#' sf and dplyr
major_df <- st_intersection(grid_sf, major) %>%
group_by(grid_id) %>%
summarise %>%
mutate(major_m = unclass(st_length(.))) %>%
st_set_geometry(NULL)
grid_sf <- left_join(grid_sf, major_df, by="grid_id")
grid_sf$major_m <- ifelse(is.na(grid_sf$major_m), 0, grid_sf$major_m)
#' Check
sum(unclass(st_length(major)))
sum(grid_sf$major_m)
rm(major, major_df)
#' traffic density variable: weighted sum of road lengths (values are in metres despite the _km_ name)
#' Current weights are 1 and 0.5 for highway and major roads; local roads (weight 0.25) are not included in this version
grid_sf$road_km_wt <- (grid_sf$highway_m * 1) + (grid_sf$major_m * 0.5)
#' Sum of AADT across road links within the grid
nhpms_aadt <- st_read(here::here("Data", "NHPMS_AADT_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers)
plot(st_geometry(nhpms_aadt))
#' sf and dplyr
aadt_df <- st_intersection(grid_sf, nhpms_aadt) %>%
group_by(grid_id) %>%
summarise(aadt = sum(aadt)) %>%
st_set_geometry(NULL)
grid_sf <- left_join(grid_sf, aadt_df, by="grid_id")
grid_sf$aadt <- ifelse(is.na(grid_sf$aadt), 0, grid_sf$aadt)
rm(nhpms_aadt, aadt_df)
#' -----------------------------------------------------------------------------
#' 4) Summarize census data using grid cell centroids
#' -----------------------------------------------------------------------------
#' Get block groups in study area
#' Population and housing data:
pop_housing <- st_read(here::here("Data", "Population_and_Housing_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers) %>%
select(GEOID, pop_density, housing_density)
#' Join based on grid centroids
grid_cent <- st_centroid(grid_250) %>%
select("grid_id")
grid_cent <- st_join(grid_cent, pop_housing) %>%
st_set_geometry(NULL)
grid_sf <- left_join(grid_sf, grid_cent, by="grid_id")
st_write(grid_sf, here::here("Data", "Grid_250_m_Attributes_AEA.csv"),
layer_options = "GEOMETRY=AS_WKT", delete_dsn = T)
|
/Code/Old_Code/LUR Grid Attributes.R
|
no_license
|
smartenies/ECHO_Aim1_BC_ST_Model
|
R
| false | false | 5,362 |
r
|
#' -----------------------------------------------------------------------------
#' Project: ECHO Aim 1 Land Use Regression
#'
#' Task: Create a grid for the study domain and summarize land use and traffic
#' variables for each grid cell
#'
#' Author: Sheena Martenies
#' Date Created: May 1, 2018
#' Contact: sheena.martenies@colostate.edu
#' -----------------------------------------------------------------------------
#' Load required libraries
library(sf)
library(sp)
library(rgdal)
library(raster)
library(ggplot2)
library(ggmap)
library(tidyverse)
ll_wgs84 <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
albers <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
utm_13 <- "+init=epsg:26913"
#' read in grid data
grid_250 <- st_read(here::here("Data", "Grid_250_m_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers)
#' 1 km buffer around the grid
grid_bound <- st_buffer(st_union(grid_250), dist = 1000) %>%
st_transform(ll_wgs84)
#' -----------------------------------------------------------------------------
#' 1) Summarize land characteristics and traffic exposures for the 250 m grid
#'
#' 1A) Extract raster values using sp polygons
#' -----------------------------------------------------------------------------
grid_spdf <- as(grid_250, "Spatial")
#' Average % tree cover and % impervious surface
tree_cover <- raster(here::here("Data", "Tree_Cover_AEA.tif"))
tree_cover_sp <- raster::extract(tree_cover, grid_spdf, fun=mean, sp=T)
colnames(tree_cover_sp@data)[ncol(tree_cover_sp@data)] <- "tree_cover"
grid_spdf <- merge(grid_spdf, tree_cover_sp, by="grid_id")
impervious <- raster(here::here("Data", "Impervious_AEA.tif"))
impervious_sp <- raster::extract(impervious, grid_spdf, fun=mean, sp=T)
colnames(impervious_sp@data)[ncol(impervious_sp@data)] <- "impervious"
grid_spdf <- merge(grid_spdf, impervious_sp, by="grid_id")
#' clean up environment
rm(tree_cover, tree_cover_sp, impervious, impervious_sp)
#' Most frequent land use category
land_use <- raster(here::here("Data", "Land_Use_AEA.tif"))
grid_spdf$land_use <- as.vector(unlist(raster::extract(land_use, grid_spdf, fun=modal, sp=F)))
rm(land_use)
#' -----------------------------------------------------------------------------
#' 1) Summarize land characteristics and traffic exposures for the 250 m grid
#'
#' 1B) Road segment lengths using sf
#' -----------------------------------------------------------------------------
grid_sf <- st_as_sf(grid_spdf)
#' Highway lengths
highways <- st_read(here::here("Data", "Highways_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers)
plot(st_geometry(highways))
#' sf and dplyr
highways_df <- st_intersection(grid_sf, highways) %>%
group_by(grid_id) %>%
summarise %>%
mutate(highway_m = unclass(st_length(.))) %>%
st_set_geometry(NULL)
grid_sf <- left_join(grid_sf, highways_df, by="grid_id")
grid_sf$highway_m <- ifelse(is.na(grid_sf$highway_m), 0, grid_sf$highway_m)
#' Check
sum(unclass(st_length(highways)))
sum(grid_sf$highway_m)
rm(highways, highways_df)
#' Major road lengths
major <- st_read(here::here("Data", "Major_Roads_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers)
plot(st_geometry(major))
#' sf and dplyr
major_df <- st_intersection(grid_sf, major) %>%
group_by(grid_id) %>%
summarise %>%
mutate(major_m = unclass(st_length(.))) %>%
st_set_geometry(NULL)
grid_sf <- left_join(grid_sf, major_df, by="grid_id")
grid_sf$major_m <- ifelse(is.na(grid_sf$major_m), 0, grid_sf$major_m)
#' Check
sum(unclass(st_length(major)))
sum(grid_sf$major_m)
rm(major, major_df)
#' traffic density variable: weighted sum of road lengths
#' Current weights are 1 for highways and 0.5 for major roads;
#' local roads (weight 0.25) are not summarized in this script
grid_sf$road_km_wt <- (grid_sf$highway_m * 1) + (grid_sf$major_m * 0.5)
#' Sum of AADT across road links within the grid
nhpms_aadt <- st_read(here::here("Data", "NHPMS_AADT_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers)
plot(st_geometry(nhpms_aadt))
#' sf and dplyr
aadt_df <- st_intersection(grid_sf, nhpms_aadt) %>%
group_by(grid_id) %>%
summarise(aadt = sum(aadt)) %>%
st_set_geometry(NULL)
grid_sf <- left_join(grid_sf, aadt_df, by="grid_id")
grid_sf$aadt <- ifelse(is.na(grid_sf$aadt), 0, grid_sf$aadt)
rm(nhpms_aadt, aadt_df)
#' -----------------------------------------------------------------------------
#' 4) Summarize census data using grid cell centroids
#' -----------------------------------------------------------------------------
#' Get block groups in study area
#' Population and housing data:
pop_housing <- st_read(here::here("Data", "Population_and_Housing_AEA.csv"),
stringsAsFactors = F, wkt = "WKT", crs = albers) %>%
select(GEOID, pop_density, housing_density)
#' Join based on grid centroids
grid_cent <- st_centroid(grid_250) %>%
select("grid_id")
grid_cent <- st_join(grid_cent, pop_housing) %>%
st_set_geometry(NULL)
grid_sf <- left_join(grid_sf, grid_cent, by="grid_id")
st_write(grid_sf, here::here("Data", "Grid_250_m_Attributes_AEA.csv"),
layer_options = "GEOMETRY=AS_WKT", delete_dsn = T)
|
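The weighted road-density step in the script above is a plain linear combination of segment lengths; a small sketch (toy values, not from the original data) makes the weighting explicit:
# Hypothetical check of the highway/major weighting used above
toy <- data.frame(grid_id   = 1:3,
                  highway_m = c(0, 250, 1000),
                  major_m   = c(500, 0, 400))
toy$road_km_wt <- toy$highway_m * 1 + toy$major_m * 0.5
toy
#   grid_id highway_m major_m road_km_wt
# 1       1         0     500        250
# 2       2       250       0        250
# 3       3      1000     400       1200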
testlist <- list(hi = 2.72430321516225e-260, lo = -4.14646094246397e+71, mu = 1.3961247739653e-308, sig = 1.62599011100666e-260)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610044518-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 190 |
r
|
testlist <- list(hi = 2.72430321516225e-260, lo = -4.14646094246397e+71, mu = 1.3961247739653e-308, sig = 1.62599011100666e-260)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
|
eac1a4504542f33574dd3c5658e7dfa4 query06_query24_1344.qdimacs 125 238
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query06_query24_1344/query06_query24_1344.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 69 |
r
|
eac1a4504542f33574dd3c5658e7dfa4 query06_query24_1344.qdimacs 125 238
|
library(data.table)
library(caTools)
library(car)
library(dplyr)
library(performanceEstimation) # for SMOTE
library(randomForest)
library(ggplot2)
library(caret)
# setwd("C:/Users/mhenn/Documents/Programming/Academic/BC2407 Medilytics")
setwd("C:/Users/jimmy/NTU/BC2407/Project")
set.seed(2407)
source("functions.R") # Load in the functions
### Define function to run a logreg model based on a chosen disease ###
# chosen_disease is the selected predictor variable
runRFModel <- function(chosen_disease) {
# restore original data to initial, unaltered state
data <- readData("FinalCleanedData.csv", chosen_disease)
# create train and test sets with equal proportions of 1's and 0's using
train_test_split <- sample.split(data$DISEASE, SplitRatio = 0.7)
trainset.ori <- subset(data, train_test_split == T)
testset.ori <- subset(data, train_test_split == F)
# SMOTE ### - highest accuracy rate.
trainset <- smote(DISEASE ~ ., data = trainset.ori,
perc.over = 1,k = sqrt(nrow(trainset.ori)), perc.under = 2)
# write.csv(trainset, paste("SmotedData/", chosen_disease, "_trainset.csv",sep = ""), row.names = FALSE)
# trainset <- readDataOnly(paste("SmotedData/", chosen_disease, "_trainset.csv",sep = ""))
# testset <- readDataOnly(paste("SmotedData/", chosen_disease, "_testset_unseen.csv",sep = ""))
testSplitRatio <- ((3/7)*nrow(trainset))/nrow(testset.ori)
# print(testSplitRatio)
testset_split <- sample.split(testset.ori$DISEASE, SplitRatio = testSplitRatio)
testset <- subset(testset.ori, testset_split == T)
# write.csv(testset, paste("SmotedData/", chosen_disease, "_testset_unseen.csv",sep = ""), row.names = FALSE)
# To check the class distribution of disease in trainset and testset
print(table(trainset$DISEASE))
print(prop.table(table(trainset$DISEASE)))
print(table(testset$DISEASE))
print(prop.table(table(testset$DISEASE)))
set.seed(2407)
# Tune the Random Forest Model - Use TuneRF to obtain the optimum RF model
mtry <- tuneRF(x = trainset[,c(2:36)],
y = trainset$DISEASE,
ntreeTry = 1000,
mtryStart = (ncol(trainset) - 1) / 3,
stepFactor = 1.5,
improve = 1e-05
)
op.mtry <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
op.rf <-randomForest(DISEASE ~ SEXVAR + GENHLTH + PHYS14D + MENT14D + POORHLTH +
HLTHPLN1 + PERSDOC2 + MEDCOST + CHECKUP1 + MARITAL + EDUCA +
RENTHOM1 + VETERAN3 + EMPLOY1 + CHLDCNT + INCOME2 + WTKG3 +
HTM4 + DEAF + BLIND + RFSMOK3 + RFDRHV7 +
TOTINDA + STRFREQ + FRUTDA2 + FTJUDA2 + GRENDA1 + FRNCHDA +
POTADA1 + VEGEDA2 + HIVRISK5, data = trainset,
mtry=op.mtry, importance=T,ntree=500)
# saveRDS(op.rf, paste("Models500/", chosen_disease, "_RF.rds",sep = ""))
op.rf <- readRDS(paste("Models/", chosen_disease, "_RF.rds",sep = ""))
# Let us look at the variable importance of each of the disease
var_imp_df <- importance(op.rf) %>%
data.frame() %>%
mutate(feature = row.names(.))
# Viewing the variable importance in a plot - Accuracy
varImptPlot <- ggplot(var_imp_df, aes(x = reorder(feature, MeanDecreaseAccuracy ),
y = MeanDecreaseAccuracy )) +
geom_bar(stat='identity') +
coord_flip() +
theme_classic() +
labs(
x = chosen_disease,
y = "MeanDecreaseAccuracy",
title = paste("Variable Importance for ", chosen_disease, sep = "")
)
print(varImptPlot)
# ggsave(paste("RFPlots/", chosen_disease, "_VarImpPlot_Accuracy.png",sep = ""))
# Predicting on train set
predTrain <- predict(op.rf, trainset)
# Checking classification accuracy
train_cf <- confusionMatrix(predTrain, trainset$DISEASE, positive = "1")
train_accuracy <- train_cf$overall[1]
print(train_accuracy)
recall_train <- train_cf$byClass["Recall"]
# Predicting on test set - scaled to keep the 3:7 ratio with the trainset
predTest <- predict(op.rf, testset)
# Checking classification accuracy
test_cf <- confusionMatrix(predTest, testset$DISEASE, positive = "1")
test_accuracy <- test_cf$overall[1]
print(test_accuracy)
recall_test <- test_cf$byClass["Recall"]
dor_test <- dor(test_cf)
# Predicting on entire dataset
predOverall <- predict(op.rf, data)
# Checking classification accuracy
overall_cf <- confusionMatrix(predOverall, data$DISEASE, positive = "1")
overall_accuracy <- overall_cf$overall[1]
print(overall_accuracy)
recall_overall <- overall_cf$byClass["Recall"]
cat(" Disease being analyzed is:", chosen_disease
,'\n',"Accuracy on Trainset:", train_accuracy
,'\n',"Recall (Train):", recall_train
,'\n',"Accuracy on Testset:", test_accuracy
,'\n',"Recall (Test):", recall_test
,'\n',"Accuracy on entire dataset:", overall_accuracy
,'\n',"Recall (Overall):", recall_overall)
new_row <- data.frame(chosen_disease,
train_accuracy,
recall_train,
test_accuracy,
recall_test,
overall_accuracy,
recall_overall)
return(new_row)
}
# Create the empty table to hold all the data
RandForestResults <- data.table('Disease Name' = character(),
'Train Accuracy' = numeric(),
'Recall (Train)' = numeric(),
'Test Accuracy' = numeric(),
'Recall (Test)' = numeric(),
'Overall Accuracy' = numeric(),
'Recall (Overall)' = numeric())
# list of diseases to parse through the model
disease_list = c("MICHD", "CHCCOPD2", "CHCKDNY2", "CVDSTRK3", "DIABETE4")
for (disease in disease_list) {
new_row <- runRFModel(disease)
RandForestResults <- rbindlist(list(RandForestResults, new_row), use.names = FALSE)
}
|
/RandomForest_AllDiseases.R
|
no_license
|
averliz/Medilytics
|
R
| false | false | 5,980 |
r
|
library(data.table)
library(caTools)
library(car)
library(dplyr)
library(performanceEstimation) # for SMOTE
library(randomForest)
library(ggplot2)
library(caret)
# setwd("C:/Users/mhenn/Documents/Programming/Academic/BC2407 Medilytics")
setwd("C:/Users/jimmy/NTU/BC2407/Project")
set.seed(2407)
source("functions.R") # Load in the functions
### Define function to run a logreg model based on a chosen disease ###
# chosen_disease is the selected predictor variable
runRFModel <- function(chosen_disease) {
# restore original data to initial, unaltered state
data <- readData("FinalCleanedData.csv", chosen_disease)
# create train and test sets with equal proportions of 1's and 0's using
train_test_split <- sample.split(data$DISEASE, SplitRatio = 0.7)
trainset.ori <- subset(data, train_test_split == T)
testset.ori <- subset(data, train_test_split == F)
# SMOTE ### - highest accuracy rate.
trainset <- smote(DISEASE ~ ., data = trainset.ori,
perc.over = 1,k = sqrt(nrow(trainset.ori)), perc.under = 2)
# write.csv(trainset, paste("SmotedData/", chosen_disease, "_trainset.csv",sep = ""), row.names = FALSE)
# trainset <- readDataOnly(paste("SmotedData/", chosen_disease, "_trainset.csv",sep = ""))
# testset <- readDataOnly(paste("SmotedData/", chosen_disease, "_testset_unseen.csv",sep = ""))
testSplitRatio <- ((3/7)*nrow(trainset))/nrow(testset.ori)
# print(testSplitRatio)
testset_split <- sample.split(testset.ori$DISEASE, SplitRatio = testSplitRatio)
testset <- subset(testset.ori, testset_split == T)
# write.csv(testset, paste("SmotedData/", chosen_disease, "_testset_unseen.csv",sep = ""), row.names = FALSE)
# To check the class distribution of disease in trainset and testset
print(table(trainset$DISEASE))
print(prop.table(table(trainset$DISEASE)))
print(table(testset$DISEASE))
print(prop.table(table(testset$DISEASE)))
set.seed(2407)
# Tune the Random Forest Model - Use TuneRF to obtain the optimum RF model
mtry <- tuneRF(x = trainset[,c(2:36)],
y = trainset$DISEASE,
ntreeTry = 1000,
mtryStart = (ncol(trainset) - 1) / 3,
stepFactor = 1.5,
improve = 1e-05
)
op.mtry <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
op.rf <-randomForest(DISEASE ~ SEXVAR + GENHLTH + PHYS14D + MENT14D + POORHLTH +
HLTHPLN1 + PERSDOC2 + MEDCOST + CHECKUP1 + MARITAL + EDUCA +
RENTHOM1 + VETERAN3 + EMPLOY1 + CHLDCNT + INCOME2 + WTKG3 +
HTM4 + DEAF + BLIND + RFSMOK3 + RFDRHV7 +
TOTINDA + STRFREQ + FRUTDA2 + FTJUDA2 + GRENDA1 + FRNCHDA +
POTADA1 + VEGEDA2 + HIVRISK5, data = trainset,
mtry=op.mtry, importance=T,ntree=500)
# saveRDS(op.rf, paste("Models500/", chosen_disease, "_RF.rds",sep = ""))
op.rf <- readRDS(paste("Models/", chosen_disease, "_RF.rds",sep = ""))
# Let us look at the variable importance of each of the disease
var_imp_df <- importance(op.rf) %>%
data.frame() %>%
mutate(feature = row.names(.))
# Viewing the variable importance in a plot - Accuracy
varImptPlot <- ggplot(var_imp_df, aes(x = reorder(feature, MeanDecreaseAccuracy ),
y = MeanDecreaseAccuracy )) +
geom_bar(stat='identity') +
coord_flip() +
theme_classic() +
labs(
x = chosen_disease,
y = "MeanDecreaseAccuracy",
title = paste("Variable Importance for ", chosen_disease, sep = "")
)
print(varImptPlot)
# ggsave(paste("RFPlots/", chosen_disease, "_VarImpPlot_Accuracy.png",sep = ""))
# Predicting on train set
predTrain <- predict(op.rf, trainset)
# Checking classification accuracy
train_cf <- confusionMatrix(predTrain, trainset$DISEASE, positive = "1")
train_accuracy <- train_cf$overall[1]
print(train_accuracy)
recall_train <- train_cf$byClass["Recall"]
# Predicting on test set - scaled to keep the 3:7 ratio with the trainset
predTest <- predict(op.rf, testset)
# Checking classification accuracy
test_cf <- confusionMatrix(predTest, testset$DISEASE, positive = "1")
test_accuracy <- test_cf$overall[1]
print(test_accuracy)
recall_test <- test_cf$byClass["Recall"]
dor_test <- dor(test_cf)
# Predicting on entire dataset
predOverall <- predict(op.rf, data)
# Checking classification accuracy
overall_cf <- confusionMatrix(predOverall, data$DISEASE, positive = "1")
overall_accuracy <- overall_cf$overall[1]
print(overall_accuracy)
recall_overall <- overall_cf$byClass["Recall"]
cat(" Disease being analyzed is:", chosen_disease
,'\n',"Accuracy on Trainset:", train_accuracy
,'\n',"Recall (Train):", recall_train
,'\n',"Accuracy on Testset:", test_accuracy
,'\n',"Recall (Test):", recall_test
,'\n',"Accuracy on entire dataset:", overall_accuracy
,'\n',"Recall (Overall):", recall_overall)
new_row <- data.frame(chosen_disease,
train_accuracy,
recall_train,
test_accuracy,
recall_test,
overall_accuracy,
recall_overall)
return(new_row)
}
# Create the empty table to hold all the data
RandForestResults <- data.table('Disease Name' = character(),
'Train Accuracy' = numeric(),
'Recall (Train)' = numeric(),
'Test Accuracy' = numeric(),
'Recall (Test)' = numeric(),
'Overall Accuracy' = numeric(),
'Recall (Overall)' = numeric())
# list of diseases to parse through the model
disease_list = c("MICHD", "CHCCOPD2", "CHCKDNY2", "CVDSTRK3", "DIABETE4")
for (disease in disease_list) {
new_row <- runRFModel(disease)
RandForestResults <- rbindlist(list(RandForestResults, new_row), use.names = FALSE)
}
|
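The script above calls dor() from the sourced functions.R, which is not included here. A minimal sketch of what such a diagnostic odds ratio helper could look like for a caret::confusionMatrix object (this is an assumption about the missing helper; factor levels "0"/"1" are assumed):
# Hypothetical dor(): diagnostic odds ratio = (TP * TN) / (FP * FN)
# A 0.5 continuity correction is added to guard against zero cells.
dor <- function(cf) {
  tbl <- cf$table + 0.5          # rows = prediction, cols = reference
  tp <- tbl["1", "1"]; tn <- tbl["0", "0"]
  fp <- tbl["1", "0"]; fn <- tbl["0", "1"]
  (tp * tn) / (fp * fn)
}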
# Script to compile and prepare Corkscrew larch and pine tree ring data
# Load library
library(tidyverse)
library(dplR)
# Set working directory to get files
setwd("C:/Users/hgriesba/Sync/Mackenzie_corkscrew")
coreComp<-function(folderInd=1) {
# get folder structure
runFolders<-c("Run1","Run2","Run3","Run4")
# Change folder to different run folders
setwd(paste("C:/Users/hgriesba/Sync/Mackenzie_corkscrew",runFolders[folderInd],"/",sep="/"))
# List files in the folder that match pattern
coreList<-
list.files(pattern=fixed("CS-*"))
# Now, for each core in coreList:
for (k in 1:length(coreList)) {
coreID=coreList[k] # for each core in coreList, extract core
# First extract site, plot species and core information
site=str_split_fixed(coreID,pattern="-",n=4)[1]
plot=str_split_fixed(coreID,pattern="-",n=4)[2]
species=str_split_fixed(coreID,pattern="-",n=4)[3]
tree=str_split_fixed(coreID,pattern="-",n=4)[4]
# Read in different variables and add rownames as column
rw=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/",coreID,".TXT",sep=""))
ew=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/EARLYWID/",coreID,".TXT",sep=""))
lw=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/LATEWIDT/",coreID,".TXT",sep=""))
lwd=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/LATEDENS/",coreID,".TXT",sep=""))
ewd=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/EARLYDEN/",coreID,".TXT",sep=""))
rwd=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/RINGDENS/",coreID,".TXT",sep=""))
maxd=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/MAXDENSI/",coreID,".TXT",sep=""))
# Create function to create dataframe for each variable prior to rbind
varPrep<-function(varData="rw") {
get(varData) %>% # get varData from string passed to function
rownames_to_column("year") %>% # create year column
setNames(c("year","value")) %>% # rename value column
      mutate(site=site,plot=plot,species=species,tree=tree,coreID=coreID) %>% # create columns, incl. coreID so the select below finds it
mutate(type=varData) %>% # create type
dplyr::select(site,plot,species,tree,type,year,value,coreID) %>% # reorder
return() # return
}
# Create dataframe of all variables for core
coreData<-
rbind(varPrep("rw"),varPrep("ew"),varPrep("lw"),
varPrep("rwd"),varPrep("ewd"),varPrep("lwd"),
varPrep("maxd"))
if (k==1) {
coreData.Comp=coreData
} else {
coreData.Comp=rbind(coreData,coreData.Comp)
}
} # close loop
return(coreData.Comp)
} # close function
x<-rbind(
coreComp(1),
coreComp(2),
coreComp(3),
coreComp(4)
)
ckscrewData<-x
setwd("~/Documents/Git/larch-dendro-2020")
save(ckscrewData,file="./data/corkscrewData.RData")
|
/data-raw/compileCorkscrew_RWData.R
|
no_license
|
hgriesbauer/larch-dendro-2020
|
R
| false | false | 2,646 |
r
|
# Script to compile and prepare Corkscrew larch and pine tree ring data
# Load library
library(tidyverse)
library(dplR)
# Set working directory to get files
setwd("C:/Users/hgriesba/Sync/Mackenzie_corkscrew")
coreComp<-function(folderInd=1) {
# get folder structure
runFolders<-c("Run1","Run2","Run3","Run4")
# Change folder to different run folders
setwd(paste("C:/Users/hgriesba/Sync/Mackenzie_corkscrew",runFolders[folderInd],"/",sep="/"))
# List files in the folder that match pattern
coreList<-
list.files(pattern=fixed("CS-*"))
# Now, for each core in coreList:
for (k in 1:length(coreList)) {
coreID=coreList[k] # for each core in coreList, extract core
# First extract site, plot species and core information
site=str_split_fixed(coreID,pattern="-",n=4)[1]
plot=str_split_fixed(coreID,pattern="-",n=4)[2]
species=str_split_fixed(coreID,pattern="-",n=4)[3]
tree=str_split_fixed(coreID,pattern="-",n=4)[4]
# Read in different variables and add rownames as column
rw=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/",coreID,".TXT",sep=""))
ew=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/EARLYWID/",coreID,".TXT",sep=""))
lw=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/LATEWIDT/",coreID,".TXT",sep=""))
lwd=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/LATEDENS/",coreID,".TXT",sep=""))
ewd=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/EARLYDEN/",coreID,".TXT",sep=""))
rwd=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/RINGDENS/",coreID,".TXT",sep=""))
maxd=read.rwl(fname=paste(getwd(),"/",coreID,"/TUCSON/MAXDENSI/",coreID,".TXT",sep=""))
# Create function to create dataframe for each variable prior to rbind
varPrep<-function(varData="rw") {
get(varData) %>% # get varData from string passed to function
rownames_to_column("year") %>% # create year column
setNames(c("year","value")) %>% # rename value column
      mutate(site=site,plot=plot,species=species,tree=tree,coreID=coreID) %>% # create columns, incl. coreID so the select below finds it
mutate(type=varData) %>% # create type
dplyr::select(site,plot,species,tree,type,year,value,coreID) %>% # reorder
return() # return
}
# Create dataframe of all variables for core
coreData<-
rbind(varPrep("rw"),varPrep("ew"),varPrep("lw"),
varPrep("rwd"),varPrep("ewd"),varPrep("lwd"),
varPrep("maxd"))
if (k==1) {
coreData.Comp=coreData
} else {
coreData.Comp=rbind(coreData,coreData.Comp)
}
} # close loop
return(coreData.Comp)
} # close function
x<-rbind(
coreComp(1),
coreComp(2),
coreComp(3),
coreComp(4)
)
ckscrewData<-x
setwd("~/Documents/Git/larch-dendro-2020")
save(ckscrewData,file="./data/corkscrewData.RData")
|
library(heatmaply)
library(htmlwidgets)
library(RColorBrewer)
cpmCounts <- read.table("results/salmon-marchRun.normalized_cpm.lowExpFiltered.txt")
# Shorten contig name for easy display
rownames(cpmCounts) <-
gsub("allCormLibraries_Trinity_21Dec_TRINITY_", "", rownames(cpmCounts))
# Manual curated genes list
acts <- scan("data/putative_ACT.contigId.txt", what = "string")
# Read 61 items
acts <- gsub("allCormLibraries_Trinity_21Dec_TRINITY_", "", acts)
acts <- subset(cpmCounts, rownames(cpmCounts) %in% acts)
str(acts)
# 'data.frame': 51 obs. of 4 variables:
# Create content for mouse-over bubble
cellnote <- matrix(data = rep(rownames(acts), 4),
nrow = length(rownames(acts)),
ncol = length(colnames(acts)))
row.names(cellnote) <- rownames(acts)
colnames(cellnote) <- colnames(acts)
# Calculate distance matrix
rDist <- as.dist(1 - cor(t(acts), method = "pearson"))
rHclust <- hclust(rDist, "complete")
rDendrogram <- as.dendrogram(rHclust)
heatmaply(
main = "Acyltransferase Heatmap",
file = "results/act_heatmap.14Dec.html",
as.matrix(acts[, c(1:4)]),
colors = rev(colorRampPalette(brewer.pal(10, "RdBu"))(256)),
col_text_angle = 90,
draw_cellnote = FALSE,
Rowv = rDendrogram,
Colv = NULL,
scale = "row",
label_names = c("contig", "library", "CPM"),
srtCol = 0,
custom_hovertext = cellnote
)
|
/analysis/ACT_heatmap.R
|
no_license
|
myuen/Crocosmia
|
R
| false | false | 1,375 |
r
|
library(heatmaply)
library(htmlwidgets)
library(RColorBrewer)
cpmCounts <- read.table("results/salmon-marchRun.normalized_cpm.lowExpFiltered.txt")
# Shorten contig name for easy display
rownames(cpmCounts) <-
gsub("allCormLibraries_Trinity_21Dec_TRINITY_", "", rownames(cpmCounts))
# Manual curated genes list
acts <- scan("data/putative_ACT.contigId.txt", what = "string")
# Read 61 items
acts <- gsub("allCormLibraries_Trinity_21Dec_TRINITY_", "", acts)
acts <- subset(cpmCounts, rownames(cpmCounts) %in% acts)
str(acts)
# 'data.frame': 51 obs. of 4 variables:
# Create content for mouse-over bubble
cellnote <- matrix(data = rep(rownames(acts), 4),
nrow = length(rownames(acts)),
ncol = length(colnames(acts)))
row.names(cellnote) <- rownames(acts)
colnames(cellnote) <- colnames(acts)
# Calculate distance matrix
rDist <- as.dist(1 - cor(t(acts), method = "pearson"))
rHclust <- hclust(rDist, "complete")
rDendrogram <- as.dendrogram(rHclust)
heatmaply(
main = "Acyltransferase Heatmap",
file = "results/act_heatmap.14Dec.html",
as.matrix(acts[, c(1:4)]),
colors = rev(colorRampPalette(brewer.pal(10, "RdBu"))(256)),
col_text_angle = 90,
draw_cellnote = FALSE,
Rowv = rDendrogram,
Colv = NULL,
scale = "row",
label_names = c("contig", "library", "CPM"),
srtCol = 0,
custom_hovertext = cellnote
)
|
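The row dendrogram above is built from a correlation-based distance (1 - Pearson) rather than Euclidean distance; a toy illustration (invented matrix, not the real CPM data) of why this groups co-varying contigs:
# 1 - Pearson distance: rows with the same expression shape get distance 0,
# anti-correlated rows get distance 2
m <- rbind(a = c(1, 2, 3, 4),
           b = c(2, 4, 6, 8),
           c = c(4, 3, 2, 1))
round(as.matrix(as.dist(1 - cor(t(m), method = "pearson"))), 2)
#   a b c
# a 0 0 2
# b 0 0 2
# c 2 2 0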
#
# Invoke this using: Rscript filename.R
#
source("readdata.R")
source("plot1.R")
source("plot2.R")
source("plot3.R")
source("plot4.R")
|
/plotall.R
|
no_license
|
aelhaddi/Exdata
|
R
| false | false | 138 |
r
|
#
# Invoke this using: Rscript filename.R
#
source("readdata.R")
source("plot1.R")
source("plot2.R")
source("plot3.R")
source("plot4.R")
|
utility_log <- log
utility_crra <- function(theta) {
function(c) (c ^ (1- theta) - 1) / (1 - theta)
}
|
/R/func-utility.R
|
no_license
|
Karagul/macromodels
|
R
| false | false | 106 |
r
|
utility_log <- log
utility_crra <- function(theta) {
function(c) (c ^ (1- theta) - 1) / (1 - theta)
}
|
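utility_crra() above is undefined at theta = 1, where CRRA utility converges to log utility; a hedged variant (not part of the original file) that handles that case:
# Sketch: CRRA utility that falls back to log() in the theta == 1 limit
utility_crra_safe <- function(theta) {
  if (isTRUE(all.equal(theta, 1))) return(log)
  function(c) (c ^ (1 - theta) - 1) / (1 - theta)
}
utility_crra_safe(2)(2)   # (2^-1 - 1) / (1 - 2) = 0.5
utility_crra_safe(1)(2)   # log(2) ~ 0.693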
#' Compare AMA tables by strain only
#'
#' Compares all concentrations to other strains at each time point
#' @description Takes in the output of AMA table then compares different strains at each timepoint to the same concentration using a T-Test.
#' @param mydata The output of amaTable. Headers are required for each well, separated by an underscore and in this format: Concentration_BacterialStrain
#' @param outGroup A regex expression that matches the non-wildtype strain. Default is PS392.
#' @param alpha The p-value below which results are no longer considered significant.
#' @param pval If TRUE, raw p-values are returned instead of the logical comparison against alpha. Default is FALSE.
#' @return A matrix of each concentration and each p-value.
#' @export
compare2strain = function(mydata,outGroup ="^PS392.*", alpha =0.05,pval = F){
colum = colnames(mydata)
colum = strsplit(colum,split = "_")
df =data.frame()
for (i in 1:length(mydata)){
df =rbind(df, c(colum[[i]][1],colum[[i]][2]), stringsAsFactors =F)
#help = rbind(help,df,stringsAsFactors =F)
}
df[,1] = factor(df[,1])
rm(colum)
mx = matrix(nrow = nrow(mydata), ncol = nlevels(df[,1]))
colnames(mx)= levels(df[,1])
rownames(mx) = rownames(mydata)
for (i in 1:nlevels(df[,1])){
wt = grep(pattern = levels(df[,1])[i],df[,1])
mut = wt[grep(pattern = outGroup,df[wt,2])]
    wt = wt[!(wt %in% mut)]
for (j in 1:nrow(mydata)){
mx[j,i] = t.test(mydata[j,wt],mydata[j,mut],alternative = "t")$p.value
}
}
if (pval==F){
mx = mx<alpha
}
mx
}
|
/GARP/R/compare2strain.r
|
no_license
|
nunngm/StatAnalysis
|
R
| false | false | 1,511 |
r
|
#' Compare AMA tables by strain only
#'
#' Compares all concentrations to other strains at each time point
#' @description Takes in the output of AMA table then compares different strains at each timepoint to the same concentration using a T-Test.
#' @param mydata The output of amaTable. Headers are required for each well, separated by an underscore and in this format: Concentration_BacterialStrain
#' @param outGroup A regex expression that matches the non-wildtype strain. Default is PS392.
#' @param alpha The p-value below which results are no longer considered significant.
#' @param pval If TRUE, raw p-values are returned instead of the logical comparison against alpha. Default is FALSE.
#' @return A matrix of each concentration and each p-value.
#' @export
compare2strain = function(mydata,outGroup ="^PS392.*", alpha =0.05,pval = F){
colum = colnames(mydata)
colum = strsplit(colum,split = "_")
df =data.frame()
for (i in 1:length(mydata)){
df =rbind(df, c(colum[[i]][1],colum[[i]][2]), stringsAsFactors =F)
#help = rbind(help,df,stringsAsFactors =F)
}
df[,1] = factor(df[,1])
rm(colum)
mx = matrix(nrow = nrow(mydata), ncol = nlevels(df[,1]))
colnames(mx)= levels(df[,1])
rownames(mx) = rownames(mydata)
for (i in 1:nlevels(df[,1])){
wt = grep(pattern = levels(df[,1])[i],df[,1])
mut = wt[grep(pattern = outGroup,df[wt,2])]
    wt = wt[!(wt %in% mut)]
for (j in 1:nrow(mydata)){
mx[j,i] = t.test(mydata[j,wt],mydata[j,mut],alternative = "t")$p.value
}
}
if (pval==F){
mx = mx<alpha
}
mx
}
|
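A minimal usage sketch for compare2strain() (well names and values are invented, following the Concentration_BacterialStrain header convention described in the roxygen block):
# Hypothetical AMA-style table: three replicate wells per Concentration_Strain condition
set.seed(1)
ama <- data.frame(matrix(NA_real_, nrow = 6, ncol = 12))
colnames(ama) <- rep(c("10_WT", "10_PS392", "20_WT", "20_PS392"), each = 3)
ama[, grepl("_WT$", colnames(ama))]    <- matrix(rnorm(36, mean = 50, sd = 5), nrow = 6)
ama[, grepl("_PS392$", colnames(ama))] <- matrix(rnorm(36, mean = 30, sd = 5), nrow = 6)
rownames(ama) <- paste0("t", 1:6)
compare2strain(ama)               # TRUE where WT and PS392 differ at alpha = 0.05
compare2strain(ama, pval = TRUE)  # raw p-values instead of the logical comparison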
#' Process Image
#'
#' This function processes an image
#' @param file_path path to the document
#' @param language optional, default: English
#' @param profile optional, default: documentConversion
#' @param textType optional, default: normal
#' @param imageSource optional, default: auto
#' @param correctOrientation optional, default: true
#' @param correctSkew optional, default: true
#' @param readBarcodes optional, default: false
#' @param exportFormat optional, default: txt;
#' options: txt, txtUnstructured, rtf, docx, xlsx, pptx, pdfSearchable, pdfTextAndImages, pdfa, xml, xmlForCorrectedImage, alto
#' @param pdfPassword optional, default: "" (empty string, i.e. no password)
#' @param description optional, default: ""
#' @return Data frame with details of the task associated with the submitted Image
#' @export
#' @references \url{http://ocrsdk.com/documentation/specifications/image-formats/}
#' @references \url{http://ocrsdk.com/documentation/apireference/processImage/}
#' @examples \dontrun{
#' processImage(file_path="file_path", language="English", exportFormat="txtUnstructured")
#' }
processImage <- function(file_path="", language="English", profile="documentConversion",textType="normal", imageSource="auto", correctOrientation="true",
correctSkew="true", readBarcodes="false", exportFormat="txt", description="", pdfPassword="")
{
if (!file.exists(file_path)) stop("File Doesn't Exist. Please check the path.")
querylist <- list(language=language, profile=profile,textType=textType, imageSource=imageSource, correctOrientation=correctOrientation,
correctSkew=correctSkew,readBarcodes=readBarcodes,exportFormat=exportFormat, description=description, pdfPassword=pdfPassword)
body <- upload_file(file_path)
process_details <- abbyy_POST("processImage", query=querylist, body=body)
resdf <- as.data.frame(do.call(rbind, process_details))
# Print some important things
cat("Status of the task: ", resdf$status, "\n")
cat("Task ID: ", resdf$id, "\n")
return(invisible(resdf))
}
|
/R/processImage.R
|
permissive
|
wosiu/abbyyR
|
R
| false | false | 2,019 |
r
|
#' Process Image
#'
#' This function processes an image
#' @param file_path path to the document
#' @param language optional, default: English
#' @param profile optional, default: documentConversion
#' @param textType optional, default: normal
#' @param imageSource optional, default: auto
#' @param correctOrientation optional, default: true
#' @param correctSkew optional, default: true
#' @param readBarcodes optional, default: false
#' @param exportFormat optional, default: txt;
#' options: txt, txtUnstructured, rtf, docx, xlsx, pptx, pdfSearchable, pdfTextAndImages, pdfa, xml, xmlForCorrectedImage, alto
#' @param pdfPassword optional, default: "" (empty string, i.e. no password)
#' @param description optional, default: ""
#' @return Data frame with details of the task associated with the submitted Image
#' @export
#' @references \url{http://ocrsdk.com/documentation/specifications/image-formats/}
#' @references \url{http://ocrsdk.com/documentation/apireference/processImage/}
#' @examples \dontrun{
#' processImage(file_path="file_path", language="English", exportFormat="txtUnstructured")
#' }
processImage <- function(file_path="", language="English", profile="documentConversion",textType="normal", imageSource="auto", correctOrientation="true",
correctSkew="true", readBarcodes="false", exportFormat="txt", description="", pdfPassword="")
{
if (!file.exists(file_path)) stop("File Doesn't Exist. Please check the path.")
querylist <- list(language=language, profile=profile,textType=textType, imageSource=imageSource, correctOrientation=correctOrientation,
correctSkew=correctSkew,readBarcodes=readBarcodes,exportFormat=exportFormat, description=description, pdfPassword=pdfPassword)
body <- upload_file(file_path)
process_details <- abbyy_POST("processImage", query=querylist, body=body)
resdf <- as.data.frame(do.call(rbind, process_details))
# Print some important things
cat("Status of the task: ", resdf$status, "\n")
cat("Task ID: ", resdf$id, "\n")
return(invisible(resdf))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arm-level-changes.R
\name{arm_level_changes}
\alias{arm_level_changes}
\title{Arm-level changes}
\source{
\url{https://www.ncbi.nlm.nih.gov/pubmed/29622463}
}
\usage{
arm_level_changes(
segs,
ploidy,
genome = c("hg19", "hg18", "hg38"),
algorithm = c("em", "cncf")
)
}
\arguments{
\item{segs}{FACETS segmentation output.}
\item{ploidy}{Sample ploidy.}
\item{genome}{Genome build.}
\item{algorithm}{Choice between FACETS \code{em} and \code{cncf} algorithm.}
}
\value{
List of items, containing:
\code{data.frame} for all genes mapping onto a segment in the output segmentation, with the columns:
\itemize{
\item{\code{genome_doubled}:} {Boolean indicating whether sample genome is doubled.}
\item{\code{fraction_cna}:} {Fraction of genome altered.}
\item{\code{weighted_fraction_cna}:} {A weighted version of \code{fraction_cna} where only altered chromosomes are counted and weighted according to their length relative to total genome.}
\item{\code{aneuploidy_scores}:} {Count of the number of altered arms, see source URL.}
\item{\code{full_output}:} {Full per-arm copy-number status.}
}
}
\description{
Get the altered chromosome arms in a sample. Does not include the acrocentric p arms of chromosomes 13, 14, 15, 21, and 22.
}
|
/man/arm_level_changes.Rd
|
permissive
|
rptashkin/facets-suite
|
R
| false | true | 1,339 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arm-level-changes.R
\name{arm_level_changes}
\alias{arm_level_changes}
\title{Arm-level changes}
\source{
\url{https://www.ncbi.nlm.nih.gov/pubmed/29622463}
}
\usage{
arm_level_changes(
segs,
ploidy,
genome = c("hg19", "hg18", "hg38"),
algorithm = c("em", "cncf")
)
}
\arguments{
\item{segs}{FACETS segmentation output.}
\item{ploidy}{Sample ploidy.}
\item{genome}{Genome build.}
\item{algorithm}{Choice between FACETS \code{em} and \code{cncf} algorithm.}
}
\value{
List of items, containing:
\code{data.frame} for all genes mapping onto a segment in the output segmentation, with the columns:
\itemize{
\item{\code{genome_doubled}:} {Boolean indicating whether sample genome is doubled.}
\item{\code{fraction_cna}:} {Fraction of genome altered.}
\item{\code{weighted_fraction_cna}:} {A weighted version of \code{fraction_cna} where only altered chromosomes are counted and weighted according to their length relative to total genome.}
\item{\code{aneuploidy_scores}:} {Count of the number of altered arms, see source URL.}
\item{\code{full_output}:} {Full per-arm copy-number status.}
}
}
\description{
Get the altered chromosome arms in a sample. Does not include the acrocentric p arms of chromosomes 13, 14, 15, 21, and 22.
}
|
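A hedged call sketch based only on the \usage signature and \value fields documented above (facets_out is a hypothetical object holding the output of a FACETS run):
# Hypothetical usage of arm_level_changes()
arm_changes <- arm_level_changes(segs      = facets_out$segs,
                                 ploidy    = facets_out$ploidy,
                                 genome    = "hg19",
                                 algorithm = "em")
arm_changes$aneuploidy_scores   # count of altered arms
arm_changes$fraction_cna        # fraction of genome altered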
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/individuals2strata.R
\name{individuals2strata}
\alias{individuals2strata}
\title{Create a strata file from a list of individuals}
\usage{
individuals2strata(data, strata.start, strata.end, filename = NULL)
}
\arguments{
\item{data}{A file or data frame object with individuals in a column. The
column name is \code{INDIVIDUALS}.}
\item{strata.start}{(integer) The start of your strata id. See details for more info.}
\item{strata.end}{(integer) The end of your strata id. See details for more info.}
\item{filename}{(optional) The file name for the strata object if you
want to save it in the working directory.
Default: \code{filename = NULL}, the strata object is in the global
environment only (i.e. not written in the working directory).}
}
\value{
a strata object and file, if requested. The file is tab delimited
with 2 columns named:
\code{INDIVIDUALS} and \code{STRATA}.
The \code{STRATA} column can be any hierarchical grouping.
}
\description{
If your individuals have a consistent naming scheme
(e.g. SPECIES-POPULATION-MATURITY-YEAR-ID = CHI-QUE-ADU-2014-020),
use this function to rapidly create a strata file.
Several functions in \pkg{radiator} and \pkg{assigner} require
a \code{strata} argument, i.e. a data frame with the individuals and
associated groupings. If you have already run
\href{http://catchenlab.life.illinois.edu/stacks/}{stacks} on your data,
the strata file is similar to a stacks `population map file`, make sure you
have the required column names (\code{INDIVIDUALS} and \code{STRATA}).
}
\details{
\code{strata.start} and \code{strata.end}
The info must be found within the name of your individual sample. If not,
you'll have to create a strata file by hand, the old-fashioned way.
e.g. if your individuals are identified
in this form : SPECIES-POPULATION-MATURITY-YEAR-ID = CHI-QUE-ADU-2014-020,
then, to have the population id in the \code{STRATA} column,
\code{strata.start = 5} and \code{strata.end = 7}.
The \code{STRATA} column can be any hierarchical grouping.
}
\examples{
\dontrun{
strata.abalone <- individuals2strata(
data = "individuals.abalone.tsv",
strata.start = 5,
strata.end = 7,
filename = "strata.abalone.tsv"
)
}
}
\author{
Thierry Gosselin \email{thierrygosselin@icloud.com}
}
|
/man/individuals2strata.Rd
|
no_license
|
Maschette/radiator
|
R
| false | true | 2,315 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/individuals2strata.R
\name{individuals2strata}
\alias{individuals2strata}
\title{Create a strata file from a list of individuals}
\usage{
individuals2strata(data, strata.start, strata.end, filename = NULL)
}
\arguments{
\item{data}{A file or data frame object with individuals in a column. The
column name is \code{INDIVIDUALS}.}
\item{strata.start}{(integer) The start of your strata id. See details for more info.}
\item{strata.end}{(integer) The end of your strata id. See details for more info.}
\item{filename}{(optional) The file name for the strata object if you
want to save it in the working directory.
Default: \code{filename = NULL}, the strata object is in the global
environment only (i.e. not written in the working directory).}
}
\value{
a strata object and file, if requested. The file is tab delimited
with 2 columns named:
\code{INDIVIDUALS} and \code{STRATA}.
The \code{STRATA} column can be any hierarchical grouping.
}
\description{
If your individuals have a consistent naming scheme
(e.g. SPECIES-POPULATION-MATURITY-YEAR-ID = CHI-QUE-ADU-2014-020),
use this function to rapidly create a strata file.
Several functions in \pkg{radiator} and \pkg{assigner} require
a \code{strata} argument, i.e. a data frame with the individuals and
associated groupings. If you have already run
\href{http://catchenlab.life.illinois.edu/stacks/}{stacks} on your data,
the strata file is similar to a stacks `population map file`, make sure you
have the required column names (\code{INDIVIDUALS} and \code{STRATA}).
}
\details{
\code{strata.start} and \code{strata.end}
The info must be found within the name of your individual sample. If not,
you'll have to create a strata file by hand, the old-fashioned way.
e.g. if your individuals are identified
in this form : SPECIES-POPULATION-MATURITY-YEAR-ID = CHI-QUE-ADU-2014-020,
then, to have the population id in the \code{STRATA} column,
\code{strata.start = 5} and \code{strata.end = 7}.
The \code{STRATA} column can be any hierarchical grouping.
}
\examples{
\dontrun{
strata.abalone <- individuals2strata(
data = "individuals.abalone.tsv",
strata.start = 5,
strata.end = 7,
filename = "strata.abalone.tsv"
)
}
}
\author{
Thierry Gosselin \email{thierrygosselin@icloud.com}
}
|
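The strata.start/strata.end arguments are simple substring positions in the sample ID; using the example ID from the \details section above:
# Characters 5 through 7 of CHI-QUE-ADU-2014-020 give the population code
substr("CHI-QUE-ADU-2014-020", start = 5, stop = 7)
# [1] "QUE"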
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_exposure.R
\name{interp_track}
\alias{interp_track}
\title{Interpolate a storm track}
\usage{
interp_track(track, tint = 0.25)
}
\arguments{
\item{track}{A dataframe with hurricane track data for a single storm}
\item{tint}{A numeric value giving the time interval to interpolate to, in units
of hours (e.g., 0.25, the default, interpolates to 15-minute intervals).}
}
\value{
A dataframe with hurricane track data for a single storm,
interpolated to the interval specified by \code{tint}.
}
\description{
This function takes a wider-spaced storm track (e.g., every 6 hours) and
interpolates to a finer interval (e.g., every 15 minutes). To do this, it
fits GLMs of latitude and longitude regressed on natural cubic splines of
date-time, and then predicts these splines to new intervals. These
splines use degrees of freedom equal to the number of original observations
divided by two.
}
|
/man/interp_track.Rd
|
no_license
|
13671359855/hurricaneexposure
|
R
| false | true | 971 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_exposure.R
\name{interp_track}
\alias{interp_track}
\title{Interpolate a storm track}
\usage{
interp_track(track, tint = 0.25)
}
\arguments{
\item{track}{A dataframe with hurricane track data for a single storm}
\item{tint}{A numeric value giving the time interval to interpolate to, in units
of hours (e.g., 0.25, the default, interpolates to 15-minute intervals).}
}
\value{
A dataframe with hurricane track data for a single storm,
interpolated to the interval specified by \code{tint}.
}
\description{
This function takes a wider-spaced storm track (e.g., every 6 hours) and
interpolates to a finer interval (e.g., every 15 minutes). To do this, it
fits GLMs of latitude and longitude regressed on natural cubic splines of
date-time, and then predicts these splines to new intervals. These
splines use degrees of freedom equal to the number of original observations
divided by two.
}
|
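The description above (natural cubic splines of date-time with degrees of freedom equal to half the number of observations, predicted on a finer time grid) can be sketched as follows; this is an illustration of the stated approach on toy data with hypothetical column names, not the package's actual implementation:
# Sketch of the described spline interpolation on an invented 6-hourly track
library(splines)
track <- data.frame(
  date_time = as.POSIXct("2015-08-01 00:00", tz = "UTC") + seq(0, 48, by = 6) * 3600,
  latitude  = c(25.0, 25.4, 25.9, 26.5, 27.2, 28.0, 28.9, 29.9, 31.0),
  longitude = c(-80.0, -80.6, -81.1, -81.5, -81.8, -82.0, -82.1, -82.1, -82.0))
track$t_num <- as.numeric(track$date_time)
df_spl <- max(1, floor(nrow(track) / 2))                              # df = n observations / 2
new_t  <- seq(min(track$t_num), max(track$t_num), by = 0.25 * 3600)   # tint = 0.25 h
lat_fit <- glm(latitude  ~ ns(t_num, df = df_spl), data = track)
lon_fit <- glm(longitude ~ ns(t_num, df = df_spl), data = track)
interp <- data.frame(
  date_time = as.POSIXct(new_t, origin = "1970-01-01", tz = "UTC"),
  latitude  = predict(lat_fit, newdata = data.frame(t_num = new_t)),
  longitude = predict(lon_fit, newdata = data.frame(t_num = new_t)))
head(interp)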
setwd("~/Stapleton_Lab/Projects/Spark/")
if (nchar(Sys.getenv("SPARK_HOME")) < 1) {
Sys.setenv(SPARK_HOME = "/usr/local/Cellar/apache-spark/2.2.1/libexec")
}
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
sparkR.session(master = "local[*]", sparkConfig = list(spark.driver.memory = "2g"))
df <- read.csv("~/Stapleton_Lab/Downloads/Carolyn_Lawrence_Dill_G2F_Mar_2017/c._2015_weather_data/g2f_2015_weather_clean.csv")
df2 <- createDataFrame(df)
|
/Spark/learn_g2f.R
|
no_license
|
dhbrand/Projects
|
R
| false | false | 478 |
r
|
setwd("~/Stapleton_Lab/Projects/Spark/")
if (nchar(Sys.getenv("SPARK_HOME")) < 1) {
Sys.setenv(SPARK_HOME = "/usr/local/Cellar/apache-spark/2.2.1/libexec")
}
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
sparkR.session(master = "local[*]", sparkConfig = list(spark.driver.memory = "2g"))
df <- read.csv("~/Stapleton_Lab/Downloads/Carolyn_Lawrence_Dill_G2F_Mar_2017/c._2015_weather_data/g2f_2015_weather_clean.csv")
df2 <- createDataFrame(df)
|
library(devFunc)
### Name: checkIntVec
### Title: Checking if all elements of a list are all integer vectors
### Aliases: checkIntVec
### ** Examples
arg1 <- 1L
checkIntVec(list(arg1))
## No test:
checkIntVec(list(1L, TRUE, 2L))
arg2 <- 'R'
checkIntVec(list(arg2))
checkIntVec(list(arg2, TRUE, 2))
## End(No test)
|
/data/genthat_extracted_code/devFunc/examples/checkIntVec.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 324 |
r
|
library(devFunc)
### Name: checkIntVec
### Title: Checking if all elements of a list are all integer vectors
### Aliases: checkIntVec
### ** Examples
arg1 <- 1L
checkIntVec(list(arg1))
## No test:
checkIntVec(list(1L, TRUE, 2L))
arg2 <- 'R'
checkIntVec(list(arg2))
checkIntVec(list(arg2, TRUE, 2))
## End(No test)
|
library(bartMachine)
library(MASS)
library(missForest)    # used below for missForest() imputation
library(randomForest)  # used below for randomForest()
set_bart_machine_num_cores(1)
init_java_for_bart_machine_with_mem_in_mb(3000)
###constants for simulation
Nsim = 500
pct_test_data = 0.2
#get the Boston housing data
data(Boston)
X = Boston
#X = cbind(X, rnorm(nrow(X)))
y = X$medv
X$medv = NULL
#unitize the covs
X = data.frame(apply(X, 2, function(xj){(xj - min(xj)) / (max(xj) - min(xj))}))
n_test = round(pct_test_data * nrow(X))
BUMP = (max(y) - min(y)) * .25
############## MAR
create_mar_with_bumpup_model_of_bhd = function(X, y, beta_0, beta){
for (i in 1 : nrow(X)){
prob_M_rm = beta_0 + beta * X$indus[i] + beta * X$lstat[i] + beta * X$age[i]
if (runif(1) < prob_M_rm){
X$rm[i] = NA
}
prob_M_crim = beta_0 + beta * X$nox[i] + beta * X$rad[i] + beta * X$tax[i]
if (runif(1) < prob_M_crim){
X$crim[i] = NA
}
#bump up
if (is.na(X$rm[i])){
y[i] = y[i] + rnorm(1, BUMP, BUMP / 4)
}
#bump down
if (is.na(X$crim[i])){
y[i] = y[i] - rnorm(1, BUMP, BUMP / 4)
}
}
	list(X = X, y = y)
}
beta_0 = -3
betas = c(0, 1.33, 1.47, 1.67, 2.1, 2.6, 3.1, 3.8)
approx_prop_missing = seq(from = 0, to = 0.7, by = 0.1)
##test to see if gammas are appropriate
#for (i in 1 : length(approx_prop_missing)){
# Xmar_with_bumpup = create_mar_with_bumpup_model_of_bhd(X, y, gammas[i])
# actual_prop_missing = 1 - nrow(na.omit(Xmar_with_bumpup)) / nrow(Xmar_with_bumpup)
# cat("purported prop missing:", approx_prop_missing[i], "actual prop missing", actual_prop_missing, "\n")
#}
results_bart_mar_with_bumpup = matrix(NA, nrow = length(approx_prop_missing), ncol = Nsim)
results_bart_w_rfi_and_mf_mar_with_bumpup = matrix(NA, nrow = length(approx_prop_missing), ncol = Nsim)
results_rf_mar_with_bumpup = matrix(NA, nrow = length(approx_prop_missing), ncol = Nsim)
rownames(results_bart_mar_with_bumpup) = approx_prop_missing
rownames(results_bart_w_rfi_and_mf_mar_with_bumpup) = approx_prop_missing
rownames(results_rf_mar_with_bumpup) = approx_prop_missing
for (nsim in 1 : Nsim){
cat("nsim = ", nsim, "\n")
for (g in 1 : length(betas)){
test_indices = sample(1 : nrow(X), n_test)
#create the missing matrix and subset the training and test
pattern_mixture_mar_mod = create_mar_with_bumpup_model_of_bhd(X, y, beta_0, betas[g])
Xm = pattern_mixture_mar_mod$X
y_with_bump_up = pattern_mixture_mar_mod$y
Xtrain = Xm[-test_indices, ]
ytrain = y_with_bump_up[-test_indices]
Xtest = Xm[test_indices, ]
ytest = y_with_bump_up[test_indices]
#now start training models and predicting on them
#impute both training and test data with MissForest
Xtrain_MF_imputed = missForest(cbind(ytrain, Xtrain))$ximp[, -1]
bart_mod = build_bart_machine(Xtrain, ytrain, run_in_sample = FALSE, use_missing_data = TRUE, use_missing_data_dummies_as_covars = TRUE, verbose = FALSE)
bart_mod_rf_imp = build_bart_machine(Xtrain_MF_imputed, ytrain, run_in_sample = FALSE, verbose = FALSE)
rf_mod = randomForest(x = Xtrain_MF_imputed, y = ytrain)
#impute to create an Xtest without missingness for rf
Xtest_miss_rf = missForest(rbind(Xtest, Xtrain), verbose = FALSE)$ximp[1 : n_test, ]
results_bart_mar_with_bumpup[g, nsim] = bart_predict_for_test_data(bart_mod, Xtest, ytest)$rmse
results_bart_w_rfi_and_mf_mar_with_bumpup[g, nsim] = bart_predict_for_test_data(bart_mod_rf_imp, Xtest_miss_rf, ytest)$rmse
y_hat_rf = predict(rf_mod, Xtest_miss_rf)
results_rf_mar_with_bumpup[g, nsim] = sqrt(sum((ytest - y_hat_rf)^2) / n_test)
destroy_bart_machine(bart_mod)
destroy_bart_machine(bart_mod_rf_imp)
cat("bart oosrmse:", results_bart_mar_with_bumpup[g, nsim], "rf oosrmse:", results_rf_mar_with_bumpup[g, nsim], "bart_with_rf_imp oosrmse:", results_bart_w_rfi_and_mf_mar_with_bumpup[g, nsim], "\n")
#rolling updates!!
avgs_mar_with_bumpup_bart = apply(results_bart_mar_with_bumpup, 1, mean, na.rm = TRUE)
rel_mar_with_bumpup_avgs_bart = avgs_mar_with_bumpup_bart / avgs_mar_with_bumpup_bart[1]
sd_mar_with_bumpup_bart = apply(results_bart_mar_with_bumpup / avgs_mar_with_bumpup_bart[1], 1, sd, na.rm = TRUE)
avgs_mar_with_bumpup_bart_w_rfi_and_mf = apply(results_bart_w_rfi_and_mf_mar_with_bumpup, 1, mean, na.rm = TRUE)
rel_mar_with_bumpup_avgs_bart_w_rfi_and_mf = avgs_mar_with_bumpup_bart_w_rfi_and_mf / avgs_mar_with_bumpup_bart[1]
sd_mar_with_bumpup_bart_w_rfi_and_mf = apply(results_bart_w_rfi_and_mf_mar_with_bumpup / avgs_mar_with_bumpup_bart[1], 1, sd, na.rm = TRUE)
avgs_mar_with_bumpup_rf = apply(results_rf_mar_with_bumpup, 1, mean, na.rm = TRUE)
rel_mar_with_bumpup_avgs_rf = avgs_mar_with_bumpup_rf / avgs_mar_with_bumpup_bart[1]
sd_mar_with_bumpup_rf = apply(results_rf_mar_with_bumpup / avgs_mar_with_bumpup_bart[1], 1, sd, na.rm = TRUE)
par(mar = c(4.2,4,0.2,0.2))
plot(approx_prop_missing,
rel_mar_with_bumpup_avgs_bart,
col = "green",
type = "o",
xlab = "Proportion Missing",
ylab = "Multiple of Baseline Error",
ylim = c(1, 1.75))
for (i in 1 : length(approx_prop_missing)){
x = approx_prop_missing[i]
y = rel_mar_with_bumpup_avgs_bart[i]
moe = 1.96 * sd_mar_with_bumpup_bart[i] / sqrt(nsim)
segments(x, y - moe, x, y + moe, col = "green")
}
points(approx_prop_missing, rel_mar_with_bumpup_avgs_bart_w_rfi_and_mf, col = "blue", type = "o")
for (i in 1 : length(approx_prop_missing)){
x = approx_prop_missing[i]
y = rel_mar_with_bumpup_avgs_bart_w_rfi_and_mf[i]
moe = 1.96 * sd_mar_with_bumpup_bart_w_rfi_and_mf[i] / sqrt(nsim)
segments(x, y - moe, x, y + moe, col = "blue")
}
points(approx_prop_missing, rel_mar_with_bumpup_avgs_rf, col = "red", type = "o")
for (i in 1 : length(approx_prop_missing)){
x = approx_prop_missing[i]
y = rel_mar_with_bumpup_avgs_rf[i]
moe = 1.96 * sd_mar_with_bumpup_rf[i] / sqrt(nsim)
segments(x, y - moe, x, y + moe, col = "red")
}
}
save.image("sec_5_mar_with_big_bumpup_MF_only.RData")
}
|
/missing_data_paper/sec_5_mar_with_bumpup.R
|
permissive
|
mdagost/bartMachine
|
R
| false | false | 5,938 |
r
|
library(bartMachine)
library(MASS)
library(missForest)    # used below for missForest() imputation
library(randomForest)  # used below for randomForest()
set_bart_machine_num_cores(1)
init_java_for_bart_machine_with_mem_in_mb(3000)
###constants for simulation
Nsim = 500
pct_test_data = 0.2
#get the Boston housing data
data(Boston)
X = Boston
#X = cbind(X, rnorm(nrow(X)))
y = X$medv
X$medv = NULL
#unitize the covs
X = data.frame(apply(X, 2, function(xj){(xj - min(xj)) / (max(xj) - min(xj))}))
n_test = round(pct_test_data * nrow(X))
BUMP = (max(y) - min(y)) * .25
############## MAR
create_mar_with_bumpup_model_of_bhd = function(X, y, beta_0, beta){
for (i in 1 : nrow(X)){
prob_M_rm = beta_0 + beta * X$indus[i] + beta * X$lstat[i] + beta * X$age[i]
if (runif(1) < prob_M_rm){
X$rm[i] = NA
}
prob_M_crim = beta_0 + beta * X$nox[i] + beta * X$rad[i] + beta * X$tax[i]
if (runif(1) < prob_M_crim){
X$crim[i] = NA
}
#bump up
if (is.na(X$rm[i])){
y[i] = y[i] + rnorm(1, BUMP, BUMP / 4)
}
#bump down
if (is.na(X$crim[i])){
y[i] = y[i] - rnorm(1, BUMP, BUMP / 4)
}
}
	list(X = X, y = y)
}
beta_0 = -3
betas = c(0, 1.33, 1.47, 1.67, 2.1, 2.6, 3.1, 3.8)
approx_prop_missing = seq(from = 0, to = 0.7, by = 0.1)
##test to see if gammas are appropriate
#for (i in 1 : length(approx_prop_missing)){
# Xmar_with_bumpup = create_mar_with_bumpup_model_of_bhd(X, y, gammas[i])
# actual_prop_missing = 1 - nrow(na.omit(Xmar_with_bumpup)) / nrow(Xmar_with_bumpup)
# cat("purported prop missing:", approx_prop_missing[i], "actual prop missing", actual_prop_missing, "\n")
#}
results_bart_mar_with_bumpup = matrix(NA, nrow = length(approx_prop_missing), ncol = Nsim)
results_bart_w_rfi_and_mf_mar_with_bumpup = matrix(NA, nrow = length(approx_prop_missing), ncol = Nsim)
results_rf_mar_with_bumpup = matrix(NA, nrow = length(approx_prop_missing), ncol = Nsim)
rownames(results_bart_mar_with_bumpup) = approx_prop_missing
rownames(results_bart_w_rfi_and_mf_mar_with_bumpup) = approx_prop_missing
rownames(results_rf_mar_with_bumpup) = approx_prop_missing
for (nsim in 1 : Nsim){
cat("nsim = ", nsim, "\n")
for (g in 1 : length(betas)){
test_indices = sample(1 : nrow(X), n_test)
#create the missing matrix and subset the training and test
pattern_mixture_mar_mod = create_mar_with_bumpup_model_of_bhd(X, y, beta_0, betas[g])
Xm = pattern_mixture_mar_mod$X
y_with_bump_up = pattern_mixture_mar_mod$y
Xtrain = Xm[-test_indices, ]
ytrain = y_with_bump_up[-test_indices]
Xtest = Xm[test_indices, ]
ytest = y_with_bump_up[test_indices]
#now start training models and predicting on them
#impute both training and test data with MissForest
Xtrain_MF_imputed = missForest(cbind(ytrain, Xtrain))$ximp[, -1]
bart_mod = build_bart_machine(Xtrain, ytrain, run_in_sample = FALSE, use_missing_data = TRUE, use_missing_data_dummies_as_covars = TRUE, verbose = FALSE)
bart_mod_rf_imp = build_bart_machine(Xtrain_MF_imputed, ytrain, run_in_sample = FALSE, verbose = FALSE)
rf_mod = randomForest(x = Xtrain_MF_imputed, y = ytrain)
#impute to create an Xtest without missingness for rf
Xtest_miss_rf = missForest(rbind(Xtest, Xtrain), verbose = FALSE)$ximp[1 : n_test, ]
results_bart_mar_with_bumpup[g, nsim] = bart_predict_for_test_data(bart_mod, Xtest, ytest)$rmse
results_bart_w_rfi_and_mf_mar_with_bumpup[g, nsim] = bart_predict_for_test_data(bart_mod_rf_imp, Xtest_miss_rf, ytest)$rmse
y_hat_rf = predict(rf_mod, Xtest_miss_rf)
results_rf_mar_with_bumpup[g, nsim] = sqrt(sum((ytest - y_hat_rf)^2) / n_test)
destroy_bart_machine(bart_mod)
destroy_bart_machine(bart_mod_rf_imp)
cat("bart oosrmse:", results_bart_mar_with_bumpup[g, nsim], "rf oosrmse:", results_rf_mar_with_bumpup[g, nsim], "bart_with_rf_imp oosrmse:", results_bart_w_rfi_and_mf_mar_with_bumpup[g, nsim], "\n")
#rolling updates!!
avgs_mar_with_bumpup_bart = apply(results_bart_mar_with_bumpup, 1, mean, na.rm = TRUE)
rel_mar_with_bumpup_avgs_bart = avgs_mar_with_bumpup_bart / avgs_mar_with_bumpup_bart[1]
sd_mar_with_bumpup_bart = apply(results_bart_mar_with_bumpup / avgs_mar_with_bumpup_bart[1], 1, sd, na.rm = TRUE)
avgs_mar_with_bumpup_bart_w_rfi_and_mf = apply(results_bart_w_rfi_and_mf_mar_with_bumpup, 1, mean, na.rm = TRUE)
rel_mar_with_bumpup_avgs_bart_w_rfi_and_mf = avgs_mar_with_bumpup_bart_w_rfi_and_mf / avgs_mar_with_bumpup_bart[1]
sd_mar_with_bumpup_bart_w_rfi_and_mf = apply(results_bart_w_rfi_and_mf_mar_with_bumpup / avgs_mar_with_bumpup_bart[1], 1, sd, na.rm = TRUE)
avgs_mar_with_bumpup_rf = apply(results_rf_mar_with_bumpup, 1, mean, na.rm = TRUE)
rel_mar_with_bumpup_avgs_rf = avgs_mar_with_bumpup_rf / avgs_mar_with_bumpup_bart[1]
sd_mar_with_bumpup_rf = apply(results_rf_mar_with_bumpup / avgs_mar_with_bumpup_bart[1], 1, sd, na.rm = TRUE)
par(mar = c(4.2,4,0.2,0.2))
plot(approx_prop_missing,
rel_mar_with_bumpup_avgs_bart,
col = "green",
type = "o",
xlab = "Proportion Missing",
ylab = "Multiple of Baseline Error",
ylim = c(1, 1.75))
for (i in 1 : length(approx_prop_missing)){
x = approx_prop_missing[i]
y = rel_mar_with_bumpup_avgs_bart[i]
moe = 1.96 * sd_mar_with_bumpup_bart[i] / sqrt(nsim)
segments(x, y - moe, x, y + moe, col = "green")
}
points(approx_prop_missing, rel_mar_with_bumpup_avgs_bart_w_rfi_and_mf, col = "blue", type = "o")
for (i in 1 : length(approx_prop_missing)){
x = approx_prop_missing[i]
y = rel_mar_with_bumpup_avgs_bart_w_rfi_and_mf[i]
moe = 1.96 * sd_mar_with_bumpup_bart_w_rfi_and_mf[i] / sqrt(nsim)
segments(x, y - moe, x, y + moe, col = "blue")
}
points(approx_prop_missing, rel_mar_with_bumpup_avgs_rf, col = "red", type = "o")
for (i in 1 : length(approx_prop_missing)){
x = approx_prop_missing[i]
y = rel_mar_with_bumpup_avgs_rf[i]
moe = 1.96 * sd_mar_with_bumpup_rf[i] / sqrt(nsim)
segments(x, y - moe, x, y + moe, col = "red")
}
}
save.image("sec_5_mar_with_big_bumpup_MF_only.RData")
}
|
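A worked check of the missingness model in the script above (covariate values invented; the covariates are unitized to [0, 1] earlier in the script):
# With beta_0 = -3, beta = 2.1 and indus = lstat = age = 0.6,
# prob_M_rm = -3 + 2.1 * (0.6 + 0.6 + 0.6) = 0.78,
# so runif(1) < prob_M_rm sets rm to NA roughly 78% of the time.
-3 + 2.1 * (0.6 + 0.6 + 0.6)
# [1] 0.78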
stacked.barplot <- function(dm=NULL, minlen=1, maxlen=37, start=1, end=1e+07, internal.margins=c(0,0,0,1), skip.x=2, bicol=NULL, col.fun=rainbow, axis.col="black", main.col="black", main.adj=1, samey=FALSE, ...) {
# filter input columns
mr <- as.integer(rownames(dm))
mc <- as.integer(colnames(dm))
dm <- dm[,mc>=minlen&mc<=maxlen]
dm <- dm[mr>=start&mr<=end,]
# reset minlen and maxlen in case
# they were inside the range anyway
minlen <- min(colnames(dm))
	maxlen <- max(colnames(dm))
# figure out how many graphs we have
ngraph <- ncol(dm)
# warn if it is lots!
if (ngraph>7) {
print("WARNING: you have chosen to plot a lot of graphs to the current device")
print("WARNING: this may be too many for the device to cope with")
print("WARNING: if you get an error message, then consider using a bigger device")
print("WARNING: with bigger dimensions: see ?png ?jpeg ?pdf")
}
# create ngraph rows in the device
split.screen(c(ngraph,1))
# set the colours
if (is.null(bicol)) {
cols <- col.fun(ncol(dm))
} else {
cols <- rep(bicol, ncol(dm))
}
# iterate over the number of graphs
for (i in 1:ngraph) {
maxy <- max(dm[,i])
miny <- min(dm[,i])
if (samey==TRUE) {
maxy <- max(dm)
miny <- min(dm)
}
# select the relevant screen
screen(i)
# set the margins inside each graph
par(mar=internal.margins)
# plot the graph
plot(rownames(dm), dm[,i], col=cols[i], type="l", cex.axis=0.7, bty="n", xaxt="n", ylim=c(miny, maxy), ...)
# only plot x-axis every skip.x graph
if (i%%skip.x==0) {
axis(side=1, cex.axis=0.7, col=axis.col, col.axis=axis.col)
}
# plot a title for each graph
title(colnames(dm)[i], line=-1, cex.main=0.7, col.main=main.col, adj=main.adj)
}
# close the screens
close.screen(all=TRUE)
}
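# Minimal usage sketch (illustrative only; toy data, assumed input layout):
# stacked.barplot() expects a numeric matrix whose rownames are positions and
# whose colnames are read lengths, which is what the filtering code above assumes.
toy <- matrix(rpois(300, 5), nrow = 100, ncol = 3,
              dimnames = list(1:100, c("21", "22", "23")))
stacked.barplot(toy, minlen = 21, maxlen = 23, skip.x = 1, bicol = c("red", "blue"))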
|
/R/stacked.barplot.R
|
permissive
|
mw55309/viRome_legacy
|
R
| false | false | 1,855 |
r
|
library(rAmCharts)
### Name: amPlot
### Title: Plot serial data
### Aliases: amPlot amPlot.default amPlot.numeric amPlot.character
### amPlot.factor amPlot.data.frame amPlot.formula
### ** Examples
## Not run:
##D # 'numeric':
##D amPlot(x = rnorm(100))
##D
##D # add line
##D amPlot(x = rnorm(100), type = 'sl', legend = T) %>>%
##D amLines(x = rnorm(100), type = "p")
##D
##D # 'character':
##D start <- as.POSIXct('01-01-2015', format = '%d-%m-%Y')
##D end <- as.POSIXct('31-12-2015', format = '%d-%m-%Y')
##D date <- seq.POSIXt(from = start, to = end, by = 'day')
##D date <- format(date, '%m-%d-%Y')
##D
##D y <- rnorm(length(date))
##D amPlot(x = date, y = y, type = 'l', parseDates = TRUE, dataDateFormat = "MM-DD-YYYY")
##D # notice that by default 'parseDates = FALSE'
##D
##D # 'data.frame'
##D amPlot(iris, col = colnames(iris)[1:2], type = c("l", "st"), zoom = TRUE, legend = TRUE)
##D
##D # 'formula':
##D amPlot(Petal.Length + Sepal.Length ~ Sepal.Width, data = iris, legend = TRUE, zoom = TRUE)
## End(Not run)
## No test:
# Other examples available which can be time consuming depending on your configuration.
library(data.table)
iris <- as.data.table(get("iris", "package:datasets"))
x <- rnorm(100)
# Simple scatter plot with title and color
# Also change type (set to "p" by default); available: "l", "sl", "st", "p", "b"
amPlot(x = x, main = "Title", col = "lightblue", type = "b")
x <- sort(rnorm(100))
y <- runif(100)
weights <- runif(100, 0, 15)
amPlot(x = x, y = y, weights = weights)
## End(No test)
|
/data/genthat_extracted_code/rAmCharts/examples/amPlot.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,554 |
r
|
library(reticulate)
use_virtualenv("r-tensorflow")
library(keras)
mnist<-dataset_mnist()
x_train<-mnist$train$x
y_train<-mnist$train$y
x_test<-mnist$test$x
y_test<-mnist$test$y
# reshaping: the data come as a 3-d array, so convert them into vectors for training
# each 28*28 image becomes a 784-length vector
x_train<-array_reshape(x_train,c(nrow(x_train), 784))
x_test<-array_reshape(x_test,c(nrow(x_test), 784))
#rescale data
x_train<-x_train /255
x_test<-x_test/255
#using keras's to_categorical() function
y_train<- to_categorical(y_train,10)
y_test<- to_categorical(y_test,10)
#start by creating sequential model then add layers using pipe operator %>%
model<-keras_model_sequential()
model %>%
layer_dense(units = 256, activation = 'relu', input_shape = c(784)) %>%
layer_dropout(rate = 0.4) %>%
layer_dense(units = 128,activation = 'relu') %>%
layer_dropout(rate =0.3) %>%
layer_dense(units = 10,activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_rmsprop(),
metrics= c('accuracy')
)
history<- model %>% fit(
x_train,y_train,
epochs = 30, batch_size = 128,
validation_split = 0.2
)
plot(history)
model %>% evaluate(x_test,y_test)
model %>% predict_classes(x_test)
|
/MNIST_recognizer.r
|
no_license
|
saurabhya/MNIST_Keras
|
R
| false | false | 1,343 |
r
|
#' GGMprojpred
#' @description Estimate Gaussian graphical models with projection predictive selection
#' @param X n by p data matrix
#' @param n_cl number of clusters for parallel
#' @param type regularized (using the horseshoe prior distribution) or non-regularized (Bayesian bootstrap with least squares)
#' @param iter number of saved posterior samples
#'
#' @return pcor_mat estimated partial correlation matrix
#' @return inv_cov estimated inverse covariance matrix
#' @return adj_mat adjacency matrix
#'
#' @examples
#' library(BDgraph)
#'
#' # generate AR-2 graph
#' main <- bdgraph.sim(p = 100, n = 50, graph = "AR2")
#' X <- main$data
#'
#' fit <- GGMprojpred(X, n_cl = detectCores() - 1, type = "hs", iter = 1000)
#' plot_graph(fit$pcor_mat, layout=layout_in_circle, vertex.color = "white")
#' compare(fit$adj_mat, main$G)
#' @export
#' @references
#' Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711-735.
#' \href{https://link.springer.com/article/10.1007/s11222-016-9649-y}{https://link.springer.com/article/10.1007/s11222-016-9649-y}
#'
#' Rubin, D. B. (1981). The bayesian bootstrap. The annals of statistics, 130-134. \href{https://www.jstor.org/stable/2240875}{https://www.jstor.org/stable/2240875}
#'
#' Williams, D. R., Piironen, J., Vehtari, A., & Rast, P. (2018). Bayesian Estimation of Gaussian Graphical Models with Projection Predictive Selection. arXiv preprint arXiv:1801.05725.
#' \href{https://arxiv.org/abs/1801.05725}{https://arxiv.org/abs/1801.05725}
GGMprojpred <- function(X, n_cl, type, iter){
  # input check
if(type == "bb" && ncol(X) >= nrow(X)){
stop("The Bayesian bootstrap, at this time, can only be used when the p < n.")
}
  # initial fitting
if(type == "hs"){
fit <- hs_proj(X, n_cl, iter)
}
if(type == "bb"){
fit <- bb_proj(X, n_cl, iter)
}
# reproject
mats <- re_project(fit$beta_mat, fit$fit_cv)
# selected variables using the "or-rule"
or_select <- ifelse(mats$pcor_or == 0, 0, 1)
  # achieve symmetry
beta_mat <- beta_symmetric(fit$beta_mat, mats$mat_temp) * or_select
# compute the inverse covariance matrix
inv_cov <- beta_to_inv(beta_mat = beta_mat, or_select, X)
list(pcor_mat = mats$pcor_or, inv_cov = inv_cov$mat_inv, adj_mat = or_select)
}
|
/R/GGMprojpred.R
|
no_license
|
dachylong/GGMprojpred
|
R
| false | false | 2,337 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1842
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1842
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query55_query45_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 823
c no.of clauses 1842
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1842
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query55_query45_1344.qdimacs 823 1842 E1 [] 0 16 807 1842 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query55_query45_1344/query55_query45_1344.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 708 |
r
|
calculate_x <- function(i, ra, dt, x){# calculate abscissa of nnd nodes
#####################################################################
# i: ordinal number of nodes #
# ra: range of ith node #
# dt: array of left child #
# x: array of abscissa of nnd nodes(have already initialized to 0) #
#####################################################################
# if leaf
if(dt[i]==0) return(x)
else {
# range ratio of left child of ith node
ratio <- (children(dt[i]+1, dt)+1)/(children(dt[i]+1, dt)+children(dt[i]+2, dt)+2)
x[dt[i]+1] <- ra[1]+diff(ra)*ratio/2# abscissa of left child of ith node
x[dt[i]+2] <- ra[2]-diff(ra)*(1-ratio)/2# abscissa of right child of ith node
# recursive
x <- calculate_x(dt[i]+1, c(ra[1], ra[1]+diff(ra)*ratio), dt, x)
x <- calculate_x(dt[i]+2, c(ra[1]+diff(ra)*ratio, ra[2]), dt, x)
return(x)
}
}
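# Illustrative sketch: calculate_x() relies on a children() helper defined
# elsewhere in this package (assumed to return the number of descendants of
# node i); the recursive stand-in below is an assumption for demonstration only.
children <- function(i, dt) {
  if (dt[i] == 0) return(0)
  2 + children(dt[i] + 1, dt) + children(dt[i] + 2, dt)
}
dt_example <- c(1, 0, 0)                        # root node 1 with two leaf children (nodes 2 and 3)
calculate_x(1, c(0, 1), dt_example, numeric(3)) # with this stand-in: c(0, 0.25, 0.75)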
|
/R/calculate_x.R
|
no_license
|
cran/macs
|
R
| false | false | 1,034 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appregistry_operations.R
\name{appregistry_get_attribute_group}
\alias{appregistry_get_attribute_group}
\title{Retrieves an attribute group, either by its name or its ID}
\usage{
appregistry_get_attribute_group(attributeGroup)
}
\arguments{
\item{attributeGroup}{[required] The name or ID of the attribute group that holds the attributes to
describe the application.}
}
\description{
Retrieves an attribute group, either by its name or its ID. The
attribute group can be specified either by its unique ID or by its name.
}
\section{Request syntax}{
\preformatted{svc$get_attribute_group(
attributeGroup = "string"
)
}
}
\keyword{internal}
|
/paws/man/appregistry_get_attribute_group.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false | true | 720 |
rd
|
library(tsrexplorer)
library(GenomicFeatures)
library(Gviz)
library(viridis)
library(scales)
# This script imports signal tracks and displays them at the specified regions of the yeast
# genome, listed at the end of the script.
gviz_dir <- file.path("yeast_work", "Gviz")
bedgraph_dir <- file.path("yeast_work", "bedgraphs")
rnaseq_dir <- file.path("yeast_data", "RNA_seq")
riboseq_dir <- file.path("yeast_data", "Ribo_seq")
if (!dir.exists(gviz_dir)) {
message("Creating directory 'yeast_work/Gviz'...")
dir.create(gviz_dir)
} else {
message("Directory 'yeast_work/Gviz' already exists.")
}
# Create genomic axis track
axis.track <- GenomeAxisTrack(col = "black", scale = 0.1, col.range = "black")
options(ucscChromosomeNames = FALSE)
# Create gene annotation track
txdb <- makeTxDbFromGFF(annotation) # 'annotation' (path to a GFF/GTF annotation file) is assumed to be defined earlier in the workflow
genome.track <- GeneRegionTrack(txdb, genome = "sacCer3", shape = "arrow", names = "Genes", col = "black",
	showId = TRUE, fill = "black", transcriptAnnotation = "gene_symbol", collapseTranscripts = "meta")
# Create data tracks
# Get colors from the viridis palette for the number of tracks to be plotted
show_col(viridis_pal()(20))
# Set y-axis limits
stripe_pos_lim <- c(0,250)
stripe_neg_lim <- c(0,150)
slic_pos_lim <- c(0,300)
slic_neg_lim <- c(0,300)
nano500_pos_lim <- c(0,3000)
nano500_neg_lim <- c(0,3000)
nano25_pos_lim <- c(0,3500)
nano25_neg_lim <- c(0,3500)
rnaseq_pos_lim <- c(0,150)
rnaseq_neg_lim <- c(0,250)
riboseq_pos_lim <- c(0,6)
riboseq_neg_lim <- c(0,5)
# STRIPE-seq 50 ng
S288C_50ng_1_pos <- DataTrack(range = file.path(bedgraph_dir, "S288C_50ng_1_plus.bedgraph"), genome = "sacCer3",
name = "50 ng 1 plus", col.histogram = "#440154FF", fill.histogram = "#440154FF", ylim = stripe_pos_lim)
S288C_50ng_1_neg <- DataTrack(range = file.path(bedgraph_dir, "S288C_50ng_1_minus.bedgraph"), genome = "sacCer3",
name = "50 ng 1 minus", col.histogram = "#440154FF", fill.histogram = "#440154FF", ylim = stripe_neg_lim)
# STRIPE-seq 100 ng
S288C_100ng_1_pos <- DataTrack(range = file.path(bedgraph_dir, "S288C_100ng_1_plus.bedgraph"), genome = "sacCer3",
name = "100 ng 1 plus", col.histogram = "#31688EFF", fill.histogram = "#31688EFF", ylim = stripe_pos_lim)
S288C_100ng_1_neg <- DataTrack(range = file.path(bedgraph_dir, "S288C_100ng_1_minus.bedgraph"), genome = "sacCer3",
name = "100 ng 1 minus", col.histogram = "#31688EFF", fill.histogram = "#31688EFF", ylim = stripe_neg_lim)
# STRIPE-seq 250 ng
S288C_250ng_1_pos <- DataTrack(range = file.path(bedgraph_dir, "S288C_250ng_1_plus.bedgraph"), genome = "sacCer3",
name = "250 ng 1 plus", col.histogram = "#35B779FF", fill.histogram = "#35B779FF", ylim = stripe_pos_lim)
S288C_250ng_1_neg <- DataTrack(range = file.path(bedgraph_dir, "S288C_250ng_1_minus.bedgraph"), genome = "sacCer3",
name = "250 ng 1 minus", col.histogram = "#35B779FF", fill.histogram = "#35B779FF", ylim = stripe_neg_lim)
# STRIPE-seq diamide
S288C_diamide_100ng_1_pos <- DataTrack(range = file.path(bedgraph_dir, "S288C_diamide_100ng_1_plus.bedgraph"), genome = "sacCer3",
name = "Diamide 1 plus", col.histogram = "#414487FF", fill.histogram = "#414487FF", ylim = stripe_pos_lim)
S288C_diamide_100ng_1_neg <- DataTrack(range = file.path(bedgraph_dir, "S288C_diamide_100ng_1_minus.bedgraph"), genome = "sacCer3",
name = "Diamide 1 minus", col.histogram = "#414487FF", fill.histogram = "#414487FF", ylim = stripe_neg_lim)
# SLIC-CAGE
SLIC_CAGE_100ng_1_pos <- DataTrack(range = file.path(bedgraph_dir, "SLIC_CAGE_100ng_1_plus.bedgraph"), genome = "sacCer3",
name = "SLIC-CAGE 100 ng 1 plus", col.histogram = "#56C667FF", fill.histogram = "#56C667FF", ylim = slic_pos_lim)
SLIC_CAGE_100ng_1_neg <- DataTrack(range = file.path(bedgraph_dir, "SLIC_CAGE_100ng_1_minus.bedgraph"), genome="sacCer3",
name = "SLIC-CAGE 100 ng 1 minus", col.histogram = "#56C667FF", fill.histogram = "#56C667FF", ylim = slic_neg_lim)
# 500 ng nanoCAGE
nanoCAGE_500ng_1_pos <- DataTrack(range = file.path(bedgraph_dir, "nanoCAGE_500ng_1_plus.bedgraph"), genome = "sacCer3",
name = "nanoCAGE 500 ng 1 plus", col.histogram = "#94D840FF", fill.histogram = "#94D840FF", ylim = nano500_pos_lim)
nanoCAGE_500ng_1_neg <- DataTrack(range = file.path(bedgraph_dir, "nanoCAGE_500ng_1_minus.bedgraph"), genome = "sacCer3",
name = "nanoCAGE 500 ng 1 minus", col.histogram = "#94D840FF", fill.histogram = "#94D840FF", ylim = nano500_neg_lim)
# 25 ng nanoCAGE
nanoCAGE_25ng_1_pos <- DataTrack(range = file.path(bedgraph_dir, "nanoCAGE_25ng_1_plus.bedgraph"), genome = "sacCer3",
name = "nanoCAGE 25 ng 1 plus", col.histogram = "#DCE318FF", fill.histogram = "#DCE318FF", ylim = nano25_pos_lim)
nanoCAGE_25ng_1_neg <- DataTrack(range = file.path(bedgraph_dir, "nanoCAGE_25ng_1_minus.bedgraph"), genome = "sacCer3",
name = "nanoCAGE 25 ng 1 minus", col.histogram = "#DCE318FF", fill.histogram = "#DCE318FF", ylim = nano25_neg_lim)
# YPD RNA-seq
ypd_rnaseq_1_pos <- DataTrack(range = file.path(rnaseq_dir, "RNASEQ001_S288C_untreated_r1.CPM.bs1.smooth25.plus.bw"), genome = "sacCer3",
name = "YPD RNA-seq 1 plus", col.histogram = "#FDE725FF", fill.histogram = "#FDE725FF", ylim = rnaseq_pos_lim)
ypd_rnaseq_1_neg <- DataTrack(range = file.path(rnaseq_dir, "RNASEQ001_S288C_untreated_r1.CPM.bs1.smooth25.minus.bw"), genome = "sacCer3",
name = "YPD RNA-seq 1 minus", col.histogram = "#FDE725FF", fill.histogram = "#FDE725FF", ylim = rnaseq_neg_lim)
# Diamide RNA-seq
diamide_rnaseq_1_pos <- DataTrack(range = file.path(rnaseq_dir, "RNASEQ007_S288C_diamide_r2.CPM.bs1.smooth25.plus.bw"), genome = "sacCer3",
name = "Diamide RNA-seq 1 plus", col.histogram = "#22A884FF", fill.histogram = "#22A884FF", ylim = rnaseq_pos_lim)
diamide_rnaseq_1_neg <- DataTrack(range = file.path(rnaseq_dir, "RNASEQ007_S288C_diamide_r2.CPM.bs1.smooth25.minus.bw"), genome = "sacCer3",
name = "Diamide RNA-seq 1 minus", col.histogram = "#22A884FF", fill.histogram = "#22A884FF", ylim = rnaseq_neg_lim)
# Ribosome profiling
S288C_riboseq_1_pos <- DataTrack(range = file.path(riboseq_dir, "GSM1949550_rep1_positive.wig"), genome = "sacCer3",
name = "S288C Ribo-seq 1 plus", col.histogram = "#3CBC75FF", fill.histogram = "#3CBC75FF",
ylim = riboseq_pos_lim)
S288C_riboseq_1_neg <- DataTrack(range = file.path(riboseq_dir, "GSM1949550_rep1_negative.wig"), genome = "sacCer3",
name = "S288C Ribo-seq 1 minus", col.histogram = "#3CBC75FF", fill.histogram = "#3CBC75FF",
ylim = riboseq_neg_lim)
S288C_riboseq_2_pos <- DataTrack(range = file.path(riboseq_dir, "GSM1949551_rep2_positive.wig"), genome = "sacCer3",
name = "S288C Ribo-seq 2 plus", col.histogram = "#3CBC75FF", fill.histogram = "#3CBC75FF",
ylim = riboseq_pos_lim)
S288C_riboseq_2_neg <- DataTrack(range = file.path(riboseq_dir, "GSM1949551_rep2_negative.wig"), genome = "sacCer3",
name = "S288C Ribo-seq 2 minus", col.histogram = "#3CBC75FF", fill.histogram = "#3CBC75FF",
ylim = riboseq_neg_lim)
# STRIPE-seq - GCN4
cairo_pdf(file = file.path(gviz_dir, "STRIPE_GCN4.pdf"), width = 12, height = 10)
plotTracks(list(axis.track,
S288C_50ng_1_pos,
S288C_50ng_1_neg,
S288C_100ng_1_pos,
S288C_100ng_1_neg,
S288C_250ng_1_pos,
S288C_250ng_1_neg,
ypd_rnaseq_1_pos,
ypd_rnaseq_1_neg,
genome.track),
chromosome = "V",from = 138000, to = 141000,
background.title = "white",
col.title = "black",
col.axis= "black",
type="histogram",
baseline = 0,
col.baseline = "black"
)
dev.off()
# STRIPE-seq - AIM39
cairo_pdf(file = file.path(gviz_dir, "STRIPE_AIM39.pdf"), width = 12, height = 15)
plotTracks(list(axis.track,
S288C_50ng_1_pos,
S288C_50ng_1_neg,
S288C_100ng_1_pos,
S288C_100ng_1_neg,
S288C_250ng_1_pos,
S288C_250ng_1_neg,
ypd_rnaseq_1_pos,
ypd_rnaseq_1_neg,
genome.track),
chromosome = "XV", from = 230000, to = 232000,
background.title = "white",
col.title = "black",
col.axis = "black",
type = "histogram",
baseline = 0,
col.baseline = "black"
)
dev.off()
# STRIPE-seq + Ribo-seq - AIM39
highlight.track <- HighlightTrack(trackList = list(S288C_100ng_1_pos,
S288C_100ng_1_neg,
ypd_rnaseq_1_pos,
ypd_rnaseq_1_neg,
S288C_riboseq_1_pos,
S288C_riboseq_1_neg,
S288C_riboseq_2_pos,
S288C_riboseq_2_neg),
chromosome = "XV", start = 230100, end = 230200,
inBackground = FALSE,
col = "black", fill = "lightblue", alpha = 0.5)
cairo_pdf(file = file.path(gviz_dir, "STRIPE_AIM39_riboseq.pdf"), width = 12, height = 12)
plotTracks(list(axis.track,
highlight.track,
genome.track),
chromosome = "XV",from = 230000, to = 232000,
background.title = "white",
col.title = "black",
col.axis = "black",
type ="histogram",
baseline = 0,
col.baseline = "black"
)
dev.off()
# STRIPE-seq + CAGE - YGR250C
cairo_pdf(file = file.path(gviz_dir, "STRIPE_CAGE_RIE1.pdf"), width = 12, height = 16)
plotTracks(list(axis.track,
S288C_50ng_1_pos,
S288C_50ng_1_neg,
S288C_100ng_1_pos,
S288C_100ng_1_neg,
S288C_250ng_1_pos,
S288C_250ng_1_neg,
SLIC_CAGE_100ng_1_pos,
SLIC_CAGE_100ng_1_neg,
nanoCAGE_500ng_1_pos,
nanoCAGE_500ng_1_neg,
nanoCAGE_25ng_1_pos,
nanoCAGE_25ng_1_neg,
ypd_rnaseq_1_pos,
ypd_rnaseq_1_neg,
genome.track),
chromosome = "VII",from = 993400, to = 994000,
background.title = "white",
col.title = "black",
col.axis= "black",
type="histogram",
baseline = 0,
col.baseline = "black"
)
dev.off()
# STRIPE-seq + CAGE - RPL17B
cairo_pdf(file = file.path(gviz_dir, "STRIPE_CAGE_RPL17B.pdf"), width = 12, height = 16)
plotTracks(list(axis.track,
S288C_50ng_1_pos,
S288C_50ng_1_neg,
S288C_100ng_1_pos,
S288C_100ng_1_neg,
S288C_250ng_1_pos,
S288C_250ng_1_neg,
SLIC_CAGE_100ng_1_pos,
SLIC_CAGE_100ng_1_neg,
nanoCAGE_500ng_1_pos,
nanoCAGE_500ng_1_neg,
nanoCAGE_25ng_1_pos,
nanoCAGE_25ng_1_neg,
ypd_rnaseq_1_pos,
ypd_rnaseq_1_neg,
genome.track),
chromosome = "X",from = 90500, to = 91000,
background.title = "white",
col.title = "black",
col.axis= "black",
type="histogram",
baseline = 0,
col.baseline = "black"
)
dev.off()
# YPD and diamide STRIPE-seq (HSP150/CIS3 region)
cairo_pdf(file = file.path(gviz_dir, "diamide_HSP150_CIS3.pdf"), width = 12, height = 12)
plotTracks(list(axis.track,
S288C_100ng_1_pos,
S288C_100ng_1_neg,
S288C_diamide_100ng_1_pos,
S288C_diamide_100ng_1_neg,
ypd_rnaseq_1_pos,
ypd_rnaseq_1_neg,
diamide_rnaseq_1_pos,
diamide_rnaseq_1_neg,
genome.track),
chromosome = "X", from = 120000, to = 123500,
background.title = "white",
col.title = "black",
col.axis = "black",
type = "histogram",
baseline = 0,
col.baseline = "black")
dev.off()
# YPD and diamide STRIPE-seq (RPS16B/RPL13A region)
cairo_pdf(file = file.path(gviz_dir, "diamide_RPS16B_RPL13A.pdf"), width = 12, height = 12)
plotTracks(list(axis.track,
S288C_100ng_1_pos,
S288C_100ng_1_neg,
S288C_diamide_100ng_1_pos,
S288C_diamide_100ng_1_neg,
ypd_rnaseq_1_pos,
ypd_rnaseq_1_neg,
diamide_rnaseq_1_pos,
diamide_rnaseq_1_neg,
genome.track),
chromosome = "IV",from = 306600, to = 309750,
background.title = "white",
col.title = "black",
col.axis= "black",
type="histogram",
baseline = 0,
col.baseline = "black")
dev.off()
|
/Policastro_etal_2020/yeast_Gviz.R
|
no_license
|
zentnerlab/STRIPE-seq
|
R
| false | false | 13,878 |
r
|
#ToDo
#rewrite in Rmd -- to better display notes and findings
#Check the segmentation
#re-run python scrtip, (it runs over all anon image files)
#is it worth croping /cleaning up these images?
#load main MLH1 files to merge MLH1 cell data with the sc data
setwd("~./")
load(file="C:/Users/alpeterson7/Documents/MLH1repo/data/MLH1/MLH1_data_setup_1.5.20.RData")#batch18
#load(file="C:/Users/alpeterson7/Documents/MLH1repo/data/MLH1/MLH1_data_setup_12.20.19.RData")#most recent
#these data were generated by running the Python script; it should be re-run when new cells are added to the anon.csv file
SC.skel = read.csv("~./SC_skeletonize/data/SCskel_output_nov19.csv", header = TRUE, strip.white = TRUE)
#format all factors to numbers
SC.skel$bin_size <- as.character(SC.skel$bin_size )
SC.skel$skel_size <- as.character(SC.skel$skel_size)
SC.skel$bin_size <- as.numeric(SC.skel$bin_size )
SC.skel$skel_size <- as.numeric(SC.skel$skel_size)
SC.skel$rand_name <- as.character(SC.skel$rand_name)
#check for duplications of the random name
original_DF$Random.Name <- as.character(original_DF$Random.Name)
duplies <- original_DF[duplicated(original_DF$Random.Name),]
#3 duplicate rows in original DF -- because Random name was blank -- how will these be matched to total.SC?
#MERGING MLH1 with SKEL
MLH1.merge.Skel <- merge(original_DF, SC.skel, by.x = "Random.Name", by.y = "rand_name", all= F)
OG.MLH1.merge.Skel <- MLH1.merge.Skel #raw version of DF
#clean up this DataFrame REMOVE 'X'
MLH1.merge.Skel <- MLH1.merge.Skel[ !grepl("X", MLH1.merge.Skel$Batch) , ]
MLH1.merge.Skel <- MLH1.merge.Skel[ !grepl("x", MLH1.merge.Skel$X) , ]
MLH1.merge.Skel <- MLH1.merge.Skel[!(is.na(MLH1.merge.Skel$quality) | MLH1.merge.Skel$quality==""), ]
MLH1.merge.Skel <- add_mouse(MLH1.merge.Skel)
MLH1.merge.Skel <- add_strain(MLH1.merge.Skel)
MLH1.merge.Skel <- add_sex(MLH1.merge.Skel)
MLH1.merge.Skel <- add_category(MLH1.merge.Skel)
MLH1.merge.Skel <- add_subsp(MLH1.merge.Skel)
#where does this DF come from?
#mlh1.skel.og <- add_euth_date(mlh1.skel.og)
#mlh1.skel.og <- add_age(mlh1.skel.og)
#str(mlh1.skel.og)
MLH1.merge.Skel$nMLH1.foci <- as.character(MLH1.merge.Skel$nMLH1.foci)
MLH1.merge.Skel$nMLH1.foci <- as.numeric(MLH1.merge.Skel$nMLH1.foci)
#it seems like this has duplicates
MLH1.merge.Skel.DUPs <- MLH1.merge.Skel[duplicated(MLH1.merge.Skel$Random.Name),]
#with new dataframe, there are 0 duplicates..
#make sure to separate mice based on adult and juvenile
#remove for now
MLH1.merge.Skel <- MLH1.merge.Skel %>% distinct()
#keep list of outliers (need to adjust threshold with latest batch)
Total.SC.above.threshold <- MLH1.merge.Skel[MLH1.merge.Skel$skel_size > 25000,]
#write out these cell's info so I can double check them.
#write.table(Total.SC.above.threshold, "~./MLH1repo/data/BivData/total_SC_above_threshold.csv", sep=",", row.names = FALSE)
#
# I noticed that these cells have the same bin sizes and skel sizes
#this is written within RW's code
#now there are quite a few cells above the threshold
#table(Total.SC.above.threshold$category)
#more female cells than male
#remove outliers
MLH1.merge.Skel <- MLH1.merge.Skel[MLH1.merge.Skel$skel_size < 25000,]
#try with subset
#write.table(MLH1.merge.Skel, "~./MLH1repo/data/BivData/total_SC_below_threshold.csv", sep=",", row.names = FALSE)
#to clean up the total SC data -- use the images with 'no' in REDO.crop (400 vs 2500)
#could also compare to quality =1 cells
#plotting
Total.SC.box <- ggplot(data = MLH1.merge.Skel, aes(y=skel_size, x=strain, color=sex) ) + geom_boxplot()+ggtitle("Total SC Measures")
#should confirm all of the female images-- since they are more likely to have broken SC
#ToDo re-run the pipeline, to get measues for SKIVE and MOLF females
#molf has almost eq SC -- but this is just 1 female
Total.SC.points <- ggplot(data = MLH1.merge.Skel, aes(y=skel_size, x=strain, color=sex) ) +
geom_jitter()+ggtitle("Total SC Measures")
# test if my score of crop, causes more noise / is a cleaner cell segment
#for internal checking, not need to keep
clean.crop <- MLH1.merge.Skel[MLH1.merge.Skel$REDO.crop == 'no',]
# make tables of the means ect to measure how much change there is between these subseted data sets
table(clean.crop$category)
table(MLH1.merge.Skel$category)
#table of clean crop stat
#table of total (minus clean crop, stat)
#% difference in means is the amount of error
#WTF are these?
clean.crop.table <- ddply(.data=clean.crop,
.(category),
summarize,
#mean skel, mean binary, var for the two
n.obs = length(unique(Original.Name)),
mean.skel = mean(skel_size),
mean.bin = mean(bin_size),
var.skel = var(skel_size),
var.bin = var(bin_size)
#ncells = length(unique(fileName)),
#nbivs = length(unique(Obj.ID))
)
not.clean.crop.table <- ddply(.data=MLH1.merge.Skel[!MLH1.merge.Skel$REDO.crop == 'no',],
.(category),
summarize,
#mean skel, mean binary, var for the two
n.obs = length(unique(Original.Name)),
mean.skel = mean(skel_size),
mean.bin = mean(bin_size),
var.skel = var(skel_size),
var.bin = var(bin_size)
#ncells = length(unique(fileName)),
#nbivs = length(unique(Obj.ID))
)
weee <- merge(clean.crop.table, not.clean.crop.table, by.x = 'category', by.y = 'category', all = FALSE)
weee$dif.skel.mean <- ( (weee$mean.skel.x - weee$mean.skel.y) / weee$mean.skel.x ) *100
#for some categories the difference is ~20% between the means
#usually the difference means the cropped version has less SC compared to the other
#THIS SHOULDN'T BE MERGED AGAIN!
#MLH1.merge.Skel <- merge(original_DF, SC.skel, by.x = "Random.Name", by.y = "rand_name", all= F)
Total.SC.points2 <- ggplot(data = clean.crop, aes(y=skel_size, x=strain, color=sex) ) +
geom_jitter()+ggtitle("Total SC Measures, clean crops")
#str(MLH1.merge.Skel)
#this table was already cleaned
#MLH1.merge.Skel <- MLH1.merge.Skel[ !grepl("X", MLH1.merge.Skel$X) , ]
#MLH1.merge.Skel <- MLH1.merge.Skel[!(is.na(MLH1.merge.Skel$nMLH1.foci) | MLH1.merge.Skel$nMLH1.foci==""), ]
#this should also have the empty column for removing X
#MLH1.merge.Skel <- MLH1.merge.Skel[MLH1.merge.Skel$nMLH1.foci != "X",]
#MLH1.merge.Skel <- MLH1.merge.Skel[MLH1.merge.Skel$nMLH1.foci != "x",]
#mouse level
mouse.level.skel <- ddply(.data=MLH1.merge.Skel,
.(mouse),
summarize,
ncells = length(unique(Original.Name)),
nmice = length(unique(mouse)),
mean.MLH1 = mean(nMLH1.foci, na.rm = TRUE),
mean.skel = mean(skel_size),
mean.bin = mean(bin_size)
)
mouse.level.skel <- add_strain(mouse.level.skel)
mouse.level.skel <- add_sex(mouse.level.skel)
mouse.level.skel <- add_category(mouse.level.skel)
mouse.level.skel <- add_subsp(mouse.level.skel)
looky.mouse <- ggplot(data = mouse.level.skel[mouse.level.skel$subsp == "Musc",], aes(y=mean.skel, x=mean.MLH1, color=strain) ) + geom_jitter() +facet_wrap(subsp~sex)
looky.mouseDom <- ggplot(data = mouse.level.skel[mouse.level.skel$subsp == "Dom",], aes(y=mean.skel, x=mean.MLH1, color=strain) ) + geom_jitter() +ylim(c(150,1200))+facet_wrap(subsp~sex)
#theme(legend.position="none")
#double check a way to visually inspect the python skel output
#
#repeat random numbers?
#some of Richard's analyses show a negative relationship between SC length and MLH1 (his QTL word doc). Could this be a sign
# of PWD-like regulation?
#remove MLH1 tables since they overwrite other ones
#remove plots, tables for internal checking
#what to keep: SC.skel, OG.MLH1.merge.Skel, MLH1.merge.Skel, MLH1.merge.Skel.DUPs, mouse.level.skel
rm(list=setdiff(ls(), c("SC.skel", "OG.MLH1.merge.Skel", "MLH1.merge.Skel", "MLH1.merge.Skel.DUPs","mouse.level.skel")))
#output: matched random names with real names.
#save RData file
save.image("~./MLH1repo/data/total_SC_1.6.20.RData")
|
/doc/TotalSC.R
|
no_license
|
petersoapes/MLH1repo
|
R
| false | false | 8,432 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aspm.r
\name{plotASPM}
\alias{plotASPM}
\title{plotASPM plots catch, CPUE, Spawning Biomass and Harvest Rate}
\usage{
plotASPM(infish, CI = NA, defineplot = TRUE, target = 0.48, usef = 7, png = "")
}
\arguments{
\item{infish}{an object generated by the dynamics function}
\item{CI}{defaults to NA, if confidence intervals around the cpue have been
obtained using getLNCI, then the resulting matrix will generate 95pc CIs}
\item{defineplot}{define the plot size and character outside the plot or
automatically inside. Defaults to TRUE}
\item{target}{target depletion level. Defaults to 0.48}
\item{usef}{defines the font to use, usef(ont); default = 7, bold Times}
\item{png}{save a png file with the name in 'png', default = "", which
means no file produced}
}
\value{
Nothing, but it does plot six graphs in a single plot.
}
\description{
plotASPM: after running fitASPM, the optimum parameters can be
put through the dynamics function to generate a dataframe containing
the optimum dynamics. These can be plotted using plotASPM, which plots
out the catches, the Spawning Biomass, the relative CPUE and its fit to
the observed CPUE, and the harvest rate. This routine is still under
development to include more options.
}
\examples{
\dontrun{
data(dataspm)
fish <- dataspm$fish
glb <- dataspm$glb
props <- dataspm$props
pars <- c(14,0.3)
aspmLL(pars,fish,glb,props) # should be -2.277029
bestspm <- fitASPM(pars,infish=fish,inglb=glb,inprops=props)
fishery <- dynamics(bestspm$par,fish,glb,props)
plotASPM(fishery,defineplot=TRUE)
ceCI <- getLNCI(fishery[,"PredCE"],bestspm$par[2])
plotASPM(fishery,CI=ceCI)
} # infish=fishery; CI=NA; defineplot=TRUE; target=0.48; usef=7;png="test.png"
}
|
/man/plotASPM.Rd
|
no_license
|
haddonm/datalowSA
|
R
| false | true | 1,846 |
rd
|
# Test Case
# Originating from CTSM
# Linear Time Invariant - Heat Case
rm(list=ls())
PC <- TRUE
if(PC){
if( is.loaded('PSM')) detach(package:PSM)
library(PSM)
} else {
detach(package:PSM)
library(PSM,lib.loc="~/PSM/Rpackages/gridterm")
source("../R/PSM.estimate.R",echo=F)
source("../R/invlogit.R",echo=F)
source("../R/LinKalmanFilter.R",echo=F)
}
# Load the Data and Variables
tmpData <- read.table("Heat_Data.csv",sep=";", col.names=c("TIME","Te","Ti","Q"))
Time=tmpData$TIME
Y=t(matrix(tmpData[,c("Q")]))
U=t(as.matrix(tmpData[,c("Te","Ti")]))
Pop.Data <- list( list(Time=tmpData$TIME,
Y=t(matrix(tmpData[,c("Q")])),
U=t(as.matrix(tmpData[,c("Te","Ti")]))) )
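# HeatModel below defines the PSM model object: Matrices() builds the linear
# state-space system (A, B, C, D) from the thermal parameters, X0() gives the
# initial state, SIG() the system (diffusion) noise, S() the observation noise,
# h() maps population to individual parameters, and ModelPar() unpacks THETA.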
HeatModel <- list(
Matrices = function(phi=NA) {
G1 <- phi[["G1"]] ; G2 <- phi[["G2"]]
H1 <- phi[["H1"]] ; H2 <- phi[["H2"]] ; H3 <- phi[["H3"]]
tmp <- list(
matA = matrix( c(-1*(1/H1+1/H2)/G1,1/(G1*H2),1/(G2*H2) , -1*(1/H2+1/H3)/G2 ) , ncol=2, byrow=T),
matB = diag( c(1/(G1*H1) , 1/(G2*H3) ) ),
matC = matrix( c(0,-1/H3) ,nrow=1),
matD = matrix( c(0,1/H3) ,nrow=1))
return(tmp)
},
X0 = function(Time=NA,phi=NA,U=NA) {
tmp <- phi[["X01"]]
tmp[2] <- phi[["X02"]]
return(matrix(tmp,ncol=1) )} ,
SIG = function(phi=NA) {
return( diag( c(phi[["SIG11"]],phi[["SIG22"]])))} ,
S = function(phi=NA) {
return( matrix(phi[["S"]])) } ,
h = function(eta,theta,covar=NULL) {
phi <- theta
return(phi) } ,
ModelPar = function(THETA){
return(list(theta=list( G1=THETA[1],G2=THETA[2],
H1=THETA[3],H2=THETA[4],H3=THETA[5],
SIG11=THETA[6], SIG22=THETA[7], S=THETA[8],
X01=THETA[9], X02=THETA[10]),
OMEGA=NULL))},
Dose=NULL
)
names(HeatModel)
# -------------------------------------------------------------
# Test of Fortran Code
# -------------------------------------------------------------
Testphi <- c( 13 ,25 , 100 , 1 , 2 , 49 , .5 , .2 , .2 , 0.01)
names(Testphi) <- c("X01","X02","G1","H1","H2","G2","H3","SIG11","SIG22","S")
Ob1 <- LinKalmanFilter( phi=Testphi , Model=HeatModel , Data=Pop.Data[[1]] , echo=FALSE, outputInternals=TRUE,fast=TRUE)
Ob2 <- LinKalmanFilter( phi=Testphi , Model=HeatModel , Data=Pop.Data[[1]] , echo=FALSE, outputInternals=TRUE,fast=FALSE)
names(Ob1)
Ob1$negLogLike
Ob2$negLogLike
IDX = 1:7
Ob1$Yp[,1]
Ob2$Yp[,1]
Ob1$Pp[,,1]
Ob2$Pp[,,1]
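# Manual check of the innovation covariance: C %*% Pp %*% t(C) + S
# should reproduce the R[,,1] matrix returned by the Kalman filter.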
(TestmatC <- matrix( c(0,-2) ,nrow=1))
(TestS <- Testphi["S"])
TestmatC %*% Ob1$Pp[,,1] %*% t(TestmatC) + TestS
Ob1$R[,,1]
Ob2$R[,,1]
# -------------------------------------------------------------
# Parameter estimation
# -------------------------------------------------------------
# Initial guess from CTSM
# THETA OBJ G1, G2, H1, H2, H3, SIG11,SIG22, S, X01, X02
# CTSM starting guess fails in this implementation
par1 <- list(LB = c( 10, 10,1e-1,1e-1,1e-2, 1e-8, 1e-8, 1e-4, 10, 20),
Init = c( 100, 50, 1, 2, .5, .01, .01, .01, 15, 25),
UB = c( 200, 100, 2, 5, 1, 1, 1, 1, 20, 30)
)
APL.KF(par1$Init,HeatModel,Pop.Data)
#par1$Init <- c( 100 , 50, 1, 2, .5, .001, .001, .001, 13, 25)
#par1$UB <- par1$LB <- NULL
# Check the Model
ModelCheck( Model=HeatModel , Data=Pop.Data[[1]], Par=par1)
# -------------------------------------------------------------
# Test Linear Kalman Filter with CTSM estimated parameters
# -------------------------------------------------------------
# CTSM returns -LL= -623
CTSMphi <- c( 1.3134E+01,2.5330E+01,1.0394E+02,9.6509E-01,2.0215E+00,4.9320E+01,5.0929E-01,7.3779E-08,2.6951E-09,1.0330E-02)
names(CTSMphi) <- c("X01","X02","G1","H1","H2","G2","H3","SIG11","SIG22","S")
CTSMTHETA=c(CTSMphi[["G1"]], CTSMphi[["G2"]], CTSMphi[["H1"]], CTSMphi[["H2"]], CTSMphi[["H3"]], CTSMphi[["SIG11"]],CTSMphi[["SIG22"]], CTSMphi[["S"]], CTSMphi[["X01"]], CTSMphi[["X02"]])
Ob1 <- LinKalmanFilter( phi=CTSMphi , Model=HeatModel , Data=Pop.Data[[1]] , echo=FALSE, outputInternals=TRUE,fast=FALSE)
Ob1$negLogL #[1,] -623.3564 in R
APL.KF(CTSMTHETA,HeatModel,Pop.Data)
# Validation plot versus Data
D <- Pop.Data[[1]]
plot(D$Time , D$Y,col="red")
points( D$Time , Ob1$Yp, pch="+",col="blue")
# -------------------------------------------------------------
# Minimizers
# -------------------------------------------------------------
# Test Run of initial parameters
# Perform minimization with 2 different optimizers
#Rprof()
Min1 <- PSM.estimate(Model=HeatModel,Data=Pop.Data,Par=par1,CI=TRUE,trace=2,optimizer="nlm")
#Rprof(NULL)
#summaryRprof()
Min2 <- PSM.estimate(Model=HeatModel,Data=Pop.Data,Par=par1,CI=TRUE,trace=2,optimizer="optim")
cat( "nlm: " , Min1$sec, "\t ", Min1$opt$minimum, "\n")
cat( "optim: " , Min2$sec, "\t ", Min2$opt$value, "\n")
# -------------------------------------------------------------
# Smoother
# -------------------------------------------------------------
SmoothObj <- PSM.smooth(Model=HeatModel, Data=Pop.Data, THETA=CTSMTHETA, subsample=0,trace=1)
D <- SmoothObj[[1]]
names(D)
D$negLogL
Idx <- 200:500
plot( D$Time[Idx], D$Xs[1,Idx] , type="n" )
for(i in 1:2) polygon( c(D$Time[Idx],rev(D$Time[Idx])) , c(D$Xs[i,Idx],rev(D$Xs[i,Idx]))+sqrt( abs(c(D$Ps[i,i,Idx], - rev(D$Ps[i,i,Idx])))),col=4)
for(i in 1:2) lines( D$Time[Idx], D$Xs[i,Idx], type="l",lwd=2)
# Load the predicted Data from CTSM
CTSM.Val.Data <- read.table("CTSM_Heat_Xp.csv",sep=";", col.names=c("Time","Xp1","Xp2","SDX1","SDX2","Y1","SDY1"))
names(D)
# Measurements Validation
plot(Pop.Data[[1]]$Time, Pop.Data[[1]]$Y )
lines( CTSM.Val.Data$Time, CTSM.Val.Data$Y1, col="red")
Idx <- !is.na(D$Yp)
lines( D$Time[Idx] , D$Yp[Idx] , col="blue")
plot(CTSM.Val.Data$Y1[Idx]/D$Yp[Idx])
# State validation
plot(D$Time[-(1:10)], D$Xp[1,-(1:10)],type="l",col="red")
lines(D$Time[-(1:10)], D$Xp[2,-(1:10)],type="l",col="red")
plot(CTSM.Val.Data$Time , CTSM.Val.Data$Xp2,type="l",col="blue",ylim=range(CTSM.Val.Data[,c("Xp1","Xp2")]))
lines(CTSM.Val.Data$Time , CTSM.Val.Data$Xp1,type="l",col="blue")
# -------------------------------------------------------------
# Include NAs in data
# -------------------------------------------------------------
Pop.DataNA <- Pop.Data
Pop.DataNA[[1]]$Y <- matrix(NA,ncol=length(Pop.DataNA[[1]]$Time)*2,nrow=2)
Pop.DataNA[[1]]$U <- matrix(NA,ncol=length(Pop.DataNA[[1]]$Time)*2,nrow=2)
for(i in 1:length(Pop.DataNA[[1]]$Time)) {
Pop.DataNA[[1]]$Y[(i%%2)+1,i*2-1] <- Pop.Data[[1]]$Y[1,i]
Pop.DataNA[[1]]$U[,i*2-1] <- Pop.DataNA[[1]]$U[,i*2] <- Pop.Data[[1]]$U[,i]
}
Pop.DataNA[[1]]$Time <- seq(0,718.5,.5)
# Show new Y, U and Time
rbind(Pop.DataNA[[1]]$Y[,1:8],
Pop.DataNA[[1]]$U[,1:8],
Pop.DataNA[[1]]$Time[1:8])
# Update model to a 2-dim Y
HeatModelNA <- HeatModel
HeatModelNA$Matrices <- function(phi=NA) {
G1 <- phi[["G1"]] ; G2 <- phi[["G2"]]
H1 <- phi[["H1"]] ; H2 <- phi[["H2"]] ; H3 <- phi[["H3"]]
list(
matA = matrix( c(-1*(1/H1+1/H2)/G1,1/(G1*H2),1/(G2*H2) , -1*(1/H2+1/H3)/G2 ) , ncol=2, byrow=T),
matB = diag( c(1/(G1*H1) , 1/(G2*H3) ) ),
matC = matrix( rep(c(0,-1/H3),each=2) ,nrow=2),
matD = matrix( rep(c(0,1/H3),each=2) ,nrow=2))
}
HeatModelNA$S <- function(phi=NA) {
diag(rep(phi[["S"]],2)) }
ModelCheck( Model=HeatModelNA , Data=Pop.DataNA[[1]], Par=par1)
APL.KF(CTSMTHETA,HeatModelNA,Pop.DataNA) #[1] -623.3564
|
/examples/Case_Heat.R
|
no_license
|
waidschrat/PSM
|
R
| false | false | 8,037 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clusterGenomics.R
\name{get.threshold}
\alias{get.threshold}
\title{get.threshold}
\usage{
get.threshold(X, q, ...)
}
\arguments{
\item{X}{value matrix}
\item{q}{q}
\item{...}{Extra parameters to be given to the function}
}
\value{
threshold
}
\description{
Helper function used only by \code{part}:
finds a stopping threshold given the percentage of heights to be used in the dendrogram.
}
\examples{
example_dta<-create_example_data_for_R()
X=as.matrix(example_dta$counts)
#Default ... values:
default.par <- list(q=0.25,Kmax.rec=5,B=100,ref.gen="PC",dist.method="euclidean",cl.method="hclust",linkage="average",cor.method="pearson",nstart=10)
#Check for user modifications:
fixed.par <- c(minDist=NULL,minSize=2,modifyList(default.par,list(cor.method='pearson',linkage='average')))
#Find stopping threshold if minDist is NULL
minDist <- get.threshold(X,q=fixed.par$q,fixed.par)
}
|
/man/get.threshold.Rd
|
permissive
|
Ylefol/TimeSeriesAnalysis
|
R
| false | true | 954 |
rd
|
library(RMySQL)
library(googleVis)
mydb=dbConnect(MySQL(),user='root',password='root123',dbname='analytics',host='localhost')
myArgs <- commandArgs(trailingOnly = TRUE) # positional arguments passed to the script
courseName <- as.character(myArgs[1])
date=as.character(myArgs[2])
print(courseName)
htmlFileName <- as.character(myArgs[3])
tableFileName <- as.character(myArgs[4])
print(date)
print(htmlFileName)
qry="select count,state from course_enrollment_location_current_state where course_id='$courseName' and date='$date'";
rs = dbSendQuery(mydb,qry)
data = fetch(rs, n=-1)
data$Percentage=data$count/ave(data$count, data$state, FUN=sum)*100
G <- gvisGeoChart(data,"state","count","Percentage",options=list(region="IN", displayMode="region",colorAxis="{colors:['purple', 'red', 'orange','green']}",resolution="provinces",width=600, height=400))
T <- gvisTable(data, options=list(width=250, height=400))
cat(G$html$chart, file=htmlFileName)
cat(T$html$chart, file=tableFileName)
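# Example invocation (hypothetical argument values -- the script expects a course
# id, a date, and two output file paths, in that order, via commandArgs):
# Rscript map.R "course-v1:IITBombayX+CS101+2016" "2016-01-31" map.html table.html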
|
/dataAnalysis/core/R_repo/map.R
|
no_license
|
qjyzwlz/IITBOMBAYX-DATA-ANALYTICS
|
R
| false | false | 903 |
r
|
#' Select scorecard data year.
#'
#' This function is used to select the year of the data.
#'
#' @param sccall Current list of parameters carried forward from prior
#' functions in the chain (ignore)
#' @param year Four-digit year (required; the function stops if missing)
#'
#' @section Important notes:
#' \enumerate{
#' \item Not all variables have a year option.
#' \item At this time, only one year at a time is allowed.
#' \item The year selected is not necessarily the year the data were produced.
#' It may be the year the data were collected. For data collected over split
#' years (fall to spring), it is likely the year represents the fall data (\emph{e.g.,}
#' 2011 for 2011/2012 data).
#' }
#'
#' Be sure to check with the College Scorecard
#' \href{https://collegescorecard.ed.gov/assets/FullDataDocumentation.pdf}{data
#' documentation report} when choosing the year.
#'
#' @examples
#' \dontrun{
#' sc_year()
#' sc_year(2012)
#' }
#' @export
sc_year <- function(sccall, year) {
## check first argument
if (identical(class(try(sccall, silent = TRUE)), 'try-error')
|| !is.list(sccall)) {
stop('Chain not properly initialized. Be sure to start with sc_init().',
call. = FALSE)
}
## check second argument
if (missing(year) || !is.numeric(year) || year < 1900 || year > 2099) {
stop('Must provide a 4-digit year in 1900s or 2000s.', call. = FALSE)
}
## get vars
sccall[['year']] <- year
sccall
}
|
/R/select_year.R
|
no_license
|
Deerluluolivia/rscorecard
|
R
| false | false | 1,468 |
r
|
#' Distribution of emissions by streets
#'
#' @description Create a distribution from an sp spatial lines data frame or spatial lines object.
#'
#' Use "wrfinput" for a gridInfo from an output of real.exe and "geo" for an output of geog.exe
#'
#' The "sf" (and "sp") types use a grid in SpatialPolygons format instead of creating one from a model input.
#'
#' @param s SpatialLinesDataFrame or SpatialLines object
#' @param grid grid object with the grid information
#' @param as_raster output format, TRUE for raster, FALSE for matrix
#' @param verbose display additional information
#' @param type "wrfinput", "geo", "sp" or "sf" for grid type
#' @param gcol number of grid columns for a "sp" or "sf" type grid
#' @param grow number of grid rows for a "sp" or "sf" type grid
#' @export
#'
#' @importFrom methods as
#' @importFrom data.table data.table
#' @importFrom data.table .SD
#' @import sf
#' @import raster
#' @import ncdf4
#' @import lwgeom
#'
#' @seealso \code{\link{gridInfo}} and \code{\link{rasterSource}}
#'
#'
#' @examples \donttest{
#' roads <- osmar::get_osm(osmar::complete_file(),
#' source = osmar::osmsource_file(paste(system.file("extdata",
#' package="EmissV"),"/streets.osm.xz",sep="")))
#' road_lines <- osmar::as_sp(roads,what = "lines")
#' roads <- sf::st_as_sf(road_lines)
#'
#' d3 <- gridInfo(paste0(system.file("extdata", package = "EmissV"),"/wrfinput_d03"))
#'
#' roadLength <- lineSource(roads,d3,as_raster=TRUE)
#' sp::spplot(roadLength, scales = list(draw=TRUE), ylab="Lat", xlab="Lon",main="Length of roads",
#' sp.layout=list("sp.lines", road_lines))
#' }
#'
#'
#'@source OpenStreetMap data available \url{https://www.openstreetmap.org/} and \url{https://download.geofabrik.de/}
#'
lineSource <- function(s, grid, as_raster = F,verbose = T, type = "wrfinput",
gcol = 100, grow = 100){
wrf_grid <- function(filewrf, type = "wrfinput", epsg = 4326){
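    ## Builds an sf polygon grid from the WRF file coordinates: a rectangular
    ## cell is drawn around each XLAT/XLONG centre point and the cells are
    ## returned as an sf object with an id column.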
cat(paste("using grid info from:", filewrf, "\n"))
wrf <- ncdf4::nc_open(filewrf)
if(type == "wrfinput"){
lat <- ncdf4::ncvar_get(wrf, varid = "XLAT")
lon <- ncdf4::ncvar_get(wrf, varid = "XLONG")
} else if(type == "geo"){ # nocov
lat <- ncdf4::ncvar_get(wrf, varid = "XLAT_M") # nocov
lon <- ncdf4::ncvar_get(wrf, varid = "XLONG_M") # nocov
}
time <- ncdf4::ncvar_get(wrf, varid = "Times")
dx <- ncdf4::ncatt_get(wrf, varid = 0,
attname = "DX")$value
n.lat <- ncdf4::ncatt_get(wrf, varid = 0,
attname = "SOUTH-NORTH_PATCH_END_UNSTAG")$value
n.lon <- ncdf4::ncatt_get(wrf, varid = 0,
attname = "WEST-EAST_PATCH_END_UNSTAG")$value
cat(paste0("Number of lat points ", n.lat, "\n"))
cat(paste0("Number of lon points ", n.lon, "\n"))
ncdf4::nc_close(wrf)
r.lat <- range(lon)
r.lon <- range(lat)
points <- data.frame(lat = c(lat),
long = c(lon))
points$lat <- as.numeric(points$lat)
points$long <- as.numeric(points$long)
dx <- 1.0 * (r.lat[1] - r.lat[2]) / (n.lat+1)
dy <- 1.0 * (r.lon[2] - r.lon[1]) / (n.lon+1)
alpha = 0 * (pi / 180)
dxl <- cos(alpha) * dx - sin(alpha) * dy
dyl <- sin(alpha) * dx + cos(alpha) * dy
grid = list()
for(i in 1:nrow(points)){
# for(i in 1:2){
p1_lat = points$lat[i] - dx/2
p1_lon = points$long[i] + dy/2
p2_lat = points$lat[i] + dx/2
p2_lon = points$long[i] + dy/2
p3_lat = points$lat[i] + dx/2
p3_lon = points$long[i] - dy/2
p4_lat = points$lat[i] - dx/2
p4_lon = points$long[i] - dy/2
mat <- matrix(c(p1_lon,p1_lat,
p2_lon,p2_lat,
p3_lon,p3_lat,
p4_lon,p4_lat,
p1_lon,p1_lat),
ncol=2, byrow=TRUE)
cell <- sf::st_polygon(list(mat))
grid[[i]] = cell
}
geometry <- sf::st_sfc(sf::st_multipolygon(grid))
grid <- sf::st_cast(x = st_sf(geometry = geometry, crs = epsg),
to = "POLYGON")
grid$id <- 1:nrow(grid)
return(grid)
}
emis_grid <- function (spobj, g, sr, type = "lines")
{
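    ## Allocates the attribute values of 'spobj' onto the grid 'g': line
    ## features are intersected with the grid cells and each numeric column
    ## is split in proportion to the line length falling inside each cell.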
net <- sf::st_as_sf(spobj)
net$id <- NULL
g <- sf::st_as_sf(g)
if (!missing(sr)) {
message("Transforming spatial objects to 'sr' ")
net <- sf::st_transform(net, sr)
g <- sf::st_transform(g, sr)
}
if (type == "lines") {
netdf <- sf::st_set_geometry(net, NULL)
snetdf <- sum(netdf, na.rm = TRUE)
ncolnet <- ncol(sf::st_set_geometry(net, NULL))
net <- net[, grep(pattern = TRUE, x = sapply(net, is.numeric))]
namesnet <- names(sf::st_set_geometry(net, NULL))
net$LKM <- sf::st_length(sf::st_cast(net[sf::st_dimension(net) ==
1, ]))
netg <- suppressWarnings(st_intersection(net, g))
netg$LKM2 <- sf::st_length(netg)
xgg <- data.table::data.table(netg)
xgg[, 1:ncolnet] <- xgg[, 1:ncolnet] * as.numeric(xgg$LKM2/xgg$LKM)
xgg[is.na(xgg)] <- 0
dfm <- xgg[, lapply(.SD, sum, na.rm = TRUE), by = "id",
.SDcols = namesnet]
id <- dfm$id
dfm <- dfm * snetdf/sum(dfm, na.rm = TRUE)
dfm$id <- id
names(dfm) <- c("id", namesnet)
gx <- data.frame(id = g$id)
gx <- merge(gx, dfm, by = "id", all.x = TRUE)
gx[is.na(gx)] <- 0
gx <- sf::st_sf(gx, geometry = g$geometry)
return(gx)
}
else if (type == "points") { # nocov start
xgg <- data.table::data.table(sf::st_set_geometry(sf::st_intersection(net,
g), NULL))
xgg[is.na(xgg)] <- 0
dfm <- xgg[, lapply(.SD, sum, na.rm = TRUE), by = "id",
.SDcols = namesnet]
names(dfm) <- c("id", namesnet)
gx <- data.frame(id = g$id)
gx <- merge(gx, dfm, by = "id", all.x = TRUE)
gx[is.na(gx)] <- 0
gx <- sf::st_sf(gx, geometry = g$geometry)
return(gx) # nocov end
}
}
if(type %in% c("wrfinput","geo")){
g <- wrf_grid(filewrf = grid$File,
type = type,
epsg = 4326)
roads2 <- s
# Calculate the length
roads2$length <- sf::st_length(sf::st_as_sf(roads2))
# just length
roads3 <- roads2[, "length"]
# calculate the length of streets in each cell
roads4 <- emis_grid(spobj = roads3, g = g, sr = 4326, type = "lines")
    # normalise
roads4$length <- roads4$length / sum(roads4$length)
# converts to Spatial
roads4sp <- as(roads4, "Spatial")
# make a raster
r <- raster::raster(ncol = grid$Horizontal[1], nrow = grid$Horizontal[2])
raster::extent(r) <- raster::extent(roads4sp)
r <- raster::rasterize(roads4sp, r, field = roads4sp$length,
update = TRUE, updateValue = "NA")
r[is.na(r[])] <- 0
if(as_raster){
return(r)
} else {
roadLength <- raster::flip(r,2)
roadLength <- raster::t(roadLength)
roadLength <- raster::as.matrix(roadLength)
return(raster::as.matrix(roadLength))
}
}
if(type %in% c("sp","sf")){ # nocov start
if("sp" %in% class(grid[1])){
roads2 <- sf::st_as_sf(s)
}else{
roads2 <- s
}
# Calculate the length
roads2$length <- sf::st_length(sf::st_as_sf(roads2))
# just length
roads3 <- roads2[, "length"]
# calculate the length of streets in each cell
ras_Id <- cbind(id = 1:nrow(grid),grid)
roads4 <- emis_grid(spobj = roads3, g = ras_Id, type = "lines")
    # normalise
roads4$length <- roads4$length / sum(roads4$length)
if(as_raster){
# converts to Spatial
roads4sp <- as(roads4, "Spatial")
# make a raster
r <- raster::raster(ncol = gcol, nrow = grow)
raster::extent(r) <- raster::extent(grid)
r <- raster::rasterize(roads4sp, r, field = roads4sp$length,
update = TRUE, updateValue = "NA")
r[is.na(r[])] <- 0
return(r)
}else{
return(roads4)
}
} # nocov end
# OLD algorithm source:
# https://gis.stackexchange.com/questions/119993/convert-line-shapefile-to-raster-value-total-length-of-lines-within-cell
# print("take a coffee, this function may take a few minutes ...")
# if(verbose) print("cropping data for domain (1 of 4) ...")
# x <- grid$Box$x
# y <- grid$Box$y
# box <- sp::bbox(sp::SpatialPoints(cbind(x,y)))
  # s <- raster::crop(s,box) # removes streets outside the domain
#
# if(verbose) print("converting to a line segment pattern object (2 of 4) ...")
# roadsPSP <- spatstat::as.psp(as(s, 'SpatialLines'))
#
# if(verbose) print("Calculating lengths per cell (3 of 4) ...")
# n.lat <- grid$Horizontal[2]
# n.lon <- grid$Horizontal[1]
# limites <- spatstat::owin(xrange=c(grid$xlim[1],grid$xlim[2]), yrange=c(grid$Ylim[1],grid$Ylim[2]))
# roadLengthIM <- spatstat::pixellate.psp(roadsPSP, W=limites,dimyx=c(n.lat,n.lon))
#
# if(verbose) print("Converting pixel image to raster in meters (4 of 4) ...")
# roadLength <- raster::raster(roadLengthIM, crs=sp::proj4string(s))
#
# if(as_raster) return(roadLength)
#
# if(verbose) print("Converting raster to matrix ...")
# roadLength <- raster::flip(roadLength,2)
# roadLength <- raster::t(roadLength)
# roadLength <- raster::as.matrix(roadLength)
# if(verbose)
# print(paste("Grid output:",n.lon,"columns",n.lat,"rows"))
# return(roadLength/sum(roadLength))
# }
}
|
/R/lineSource.R
|
permissive
|
alvv1986/EmissV
|
R
| false | false | 9,639 |
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("K nearest neighbors example"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("k",
"Number of neighbors:",
min = 1,
max = 50,
value = 30)
),
# Show a plot of the generated distribution
mainPanel(
tabsetPanel(
tabPanel(
"Result",
plotOutput("neighbors_plot"),
textOutput("accuracy_text")
),
tabPanel(
"Help",
h2("Using the k nearest neighbors example"),
p("Drag the slide bar to test different value of k for the nearest neighbors model"),
p("Example adapted from the book Elements of Statistical Learning:"),
a("http://statweb.stanford.edu/~tibs/ElemStatLearn/")
)
)
)
)
))
|
/Data Products/Iris_Shiny_App/Iris_Shiny_App/ui.R
|
no_license
|
fredcaram/JohnHopkinsDataScienceAssignments
|
R
| false | false | 1,208 |
r
|
\name{findMRCA}
\alias{findMRCA}
\title{Get the MRCA of a set of taxa}
\usage{
findMRCA(tree, tips=NULL, type=c("node","height"))
}
\arguments{
\item{tree}{a phylogenetic tree as an object of class \code{"phylo"}.}
\item{tips}{a vector containing a set of tip labels.}
\item{type}{either \code{"node"} to return the node of the MRCA; or \code{"height"} to return the height above the root of the MRCA of \code{tips}.}
}
\description{
This function returns the node number of the most recent common ancestor of a set of taxa. If \code{tips=NULL} the function is redundant with \code{\link{mrca}} (for \code{type="node"}) or \code{\link{vcv.phylo}}, but much slower (for \code{type="height"}).
}
\details{
If \code{tips==NULL} and \code{type="node"} (the default) it will return the result of a normal function call to \code{\link{mrca}}.
If \code{tips=NULL} and \code{type="height"} it will return a matrix equal to that produced by \code{\link{vcv.phylo}}.
From \code{phytools 0.5-66} forward \code{findMRCA} uses \code{\link{getMRCA}} in the \emph{ape} package internally, which results in a big speed-up. Even though the two functions are thus totally redundant I have left \code{findMRCA} in the package to ensure backward compatibility.
}
\value{
The node number of the MRCA, or a matrix of node numbers (if \code{tips==NULL}) - for \code{type="node"}; or the height of the MRCA, or a matrix of heights (if \code{tips==NULL}) - for \code{type="height"}.
}
\references{
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{findMRCA}}, \code{\link{mrca}}
}
\examples{
data(anoletree)
anc<-findMRCA(anoletree,c("cristatellus","cooki",
"gundlachi"))
plotTree(anoletree,type="fan",fsize=0.7,lwd=1)
nodelabels(node=anc,frame="circle",pch=21,cex=1.5,
bg="blue")
legend("topleft","common ancestor of\nPuerto Rican TG anoles",
pch=21,pt.cex=1.5,pt.bg="blue",cex=0.7,bty="n")
par(mar=c(5.1,4.1,4.1,2.1)) ## reset margin to default
}
\keyword{phylogenetics}
\keyword{utilities}
|
/man/findMRCA.Rd
|
no_license
|
phamasaur/phytools
|
R
| false | false | 2,160 |
rd
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(caret)
library(randomForest)
lca <- read.csv("data/LCA_data.csv")
load("data/ranForestModel.RData")
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
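  # Split LCA filings by wage-rate unit; hourly rates are annualized later (x 2087 h/yr).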
yearly<-lca[lca["LCA_CASE_WAGE_RATE_UNIT"]=="Year",]
hourly<-lca[lca["LCA_CASE_WAGE_RATE_UNIT"]=="Hour",]
##################################
##### Salary range predictor #####
##################################
output$predictedSalary <- renderText({
if (input$pCompany == "Select One"
| input$pCity == "Select One"
| input$pTitle == "Select One") {
paste("Please select company, city & job title above too see predicted range.")
} else {
LCA_CASE_EMPLOYER_NAME <- input$pCompany
LCA_CASE_EMPLOYER_CITY <- input$pCity
LCA_CASE_JOB_TITLE <- input$pTitle
pred <- predict(ranForestModel, data.frame(
LCA_CASE_EMPLOYER_NAME,
LCA_CASE_EMPLOYER_CITY,
LCA_CASE_JOB_TITLE))
paste(pred)
}
})
###############################
##### Company & Job title #####
###############################
output$jobtitle <- renderUI({
# Filter data set by company
if (input$company == "All") {
# Compute job titles across all companies
filtered_lca<-lca
} else {
# Filter job titles choice based on company choice.
filtered_lca <- lca[lca["LCA_CASE_EMPLOYER_NAME"]==input$company,]
}
jobtitles<-filtered_lca[,"LCA_CASE_JOB_TITLE"]
jobtitlenames<-unique(jobtitles)
jobtitlenames<-as.character(jobtitlenames)
jobtitlenames<-append(jobtitlenames, "All", after=0)
selectInput("jobtitle", "Job Title:", choices=jobtitlenames)
})
output$distPlot <- renderPlot({
# Filter by selected company
if (input$company == "All") {
filtered_yearly <- yearly
filtered_hourly <- hourly
} else {
filtered_yearly <- yearly[yearly["LCA_CASE_EMPLOYER_NAME"]==input$company,]
filtered_hourly <- hourly[hourly["LCA_CASE_EMPLOYER_NAME"]==input$company,]
}
# Filter by selected job title
if (!is.null(input$jobtitle)) {
if (input$jobtitle == "All") {
# Do nothing
filtered_yearly <- filtered_yearly
filtered_hourly <- filtered_hourly
} else {
if (length(rownames(filtered_yearly["LCA_CASE_JOB_TITLE"])) > 0) {
filtered_yearly <-
filtered_yearly[filtered_yearly["LCA_CASE_JOB_TITLE"]==input$jobtitle,]
}
if (length(rownames(filtered_hourly["LCA_CASE_JOB_TITLE"])) > 0) {
filtered_hourly <-
filtered_hourly[filtered_hourly["LCA_CASE_JOB_TITLE"]==input$jobtitle,]
}
}
}
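    # Hourly wages are annualized using 2087 hours, the standard full-time
    # work-year used in US federal pay computations.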
salaries <- data.matrix(filtered_yearly["LCA_CASE_WAGE_RATE_FROM"])
salaries <- append(salaries,
data.matrix(filtered_hourly["LCA_CASE_WAGE_RATE_FROM"])*2087)
if(input$removeoutliers) {
salaries <- subset(salaries,
!(salaries > quantile(salaries, probs=c(.005, .995))[2]
| salaries < quantile(salaries, probs=c(.005, .995))[1]))
}
# Render the plot
if (length(salaries) == 0) {
output$plotText <- renderText({"No data to plot! Maybe try including outliers."})
} else {
output$plotText <- renderText({""})
hist(salaries,
breaks = 40,
ylab="# of people hired",
xlab="Annual salary")
}
})
###############################
###### City & Job title #######
###############################
output$jobtitle2 <- renderUI({
# Filter data set by city
if (input$city == "All") {
# Compute job titles across all cities
filtered_lca<-lca
} else {
# Filter job titles choice based on city choice.
filtered_lca <- lca[lca["LCA_CASE_WORKLOC1_CITY"]==input$city,]
}
jobtitles<-filtered_lca[,"LCA_CASE_JOB_TITLE"]
jobtitlenames<-unique(jobtitles)
jobtitlenames<-as.character(jobtitlenames)
jobtitlenames<-append(jobtitlenames, "All", after=0)
selectInput("jobtitle2", "Job Title:", choices=jobtitlenames)
})
output$distPlot2 <- renderPlot({
# Filter by selected city
if (input$city == "All") {
filtered_yearly <- yearly
filtered_hourly <- hourly
} else {
filtered_yearly <- yearly[yearly["LCA_CASE_WORKLOC1_CITY"]==input$city,]
filtered_hourly <- hourly[hourly["LCA_CASE_WORKLOC1_CITY"]==input$city,]
}
# Filter by selected job title
if (!is.null(input$jobtitle2)) {
if (input$jobtitle2 == "All") {
# Do nothing
filtered_yearly <- filtered_yearly
filtered_hourly <- filtered_hourly
} else {
if (length(rownames(filtered_yearly["LCA_CASE_JOB_TITLE"])) > 0) {
filtered_yearly <-
filtered_yearly[filtered_yearly["LCA_CASE_JOB_TITLE"]==input$jobtitle2,]
}
if (length(rownames(filtered_hourly["LCA_CASE_JOB_TITLE"])) > 0) {
filtered_hourly <-
filtered_hourly[filtered_hourly["LCA_CASE_JOB_TITLE"]==input$jobtitle2,]
}
}
}
salaries <- data.matrix(filtered_yearly["LCA_CASE_WAGE_RATE_FROM"])
salaries <- append(salaries,
data.matrix(filtered_hourly["LCA_CASE_WAGE_RATE_FROM"])*2087)
if(input$removeoutliers2) {
salaries <- subset(salaries,
!(salaries > quantile(salaries, probs=c(.005, .995))[2]
| salaries < quantile(salaries, probs=c(.005, .995))[1]))
}
# Render the plot
if (length(salaries) == 0) {
output$plotText2 <- renderText({"No data to plot! Maybe try including outliers."})
} else {
output$plotText2 <- renderText({""})
hist(salaries,
breaks = 40,
ylab="# of people hired",
xlab="Annual salary")
}
})
})
|
/Shiny/App-1/server.R
|
no_license
|
valleygirl/Salary-Trends
|
R
| false | false | 6,091 |
r
|
#' Hierarchical matching, separately at each hierarchical level
#'
#' @description
#' Implements hierarchical matching, separately at each hierarchical level
#' within the data. For a given level, the raw data that is matched includes
#' every unique combination of values at and below the level of interest. E.g.
#'
#' Level 1: \cr
#' `| Canada |` \cr
#' `| United States |` \cr
#'
#' Level 2: \cr
#' `| Canada | Ontario |` \cr
#' `| United States | New York |` \cr
#' `| United States | Pennsylvania |` \cr
#'
#' Level 3: \cr
#' `| Canada | Ontario | Ottawa |` \cr
#' `| Canada | Ontario | Toronto |` \cr
#' `| United States | New York | Bronx |` \cr
#' `| United States | New York | New York |` \cr
#' `| United States | Pennsylvania | Philadelphia |`
#'
#' @inheritParams hmatch_tokens
#' @inheritParams hmatch_composite
#'
#' @param fn which function to use for matching. Current options are
#' \code{\link{hmatch}}, \code{\link{hmatch_permute}},
#' \code{\link{hmatch_tokens}}, \code{\link{hmatch_settle}}, or
#' \code{\link{hmatch_composite}}. Defaults to "hmatch".
#'
#' Note that some subsequent arguments are only relevant for specific
#' functions (e.g. the `exclude_` arguments are only relevant if `fn =
#' "hmatch_tokens"`).
#' @param type type of join ("left", "inner", "anti", "resolve_left",
#' "resolve_inner", or "resolve_anti"). Defaults to "left". See
#' \link{join_types}.
#'
#' Note that the details of resolve joins vary somewhat among hmatch functions
#' (see documentation for the relevant function), and that function
#' \code{\link{hmatch_composite}} only allows resolve joins.
#' @param levels a vector of names or integer indices corresponding to one or
#' more of the hierarchical columns in `raw` to match at. Defaults to `NULL`
#' in which case matches are made at each hierarchical level.
#' @param always_list logical indicating whether to always return a list, even
#' when argument `levels` specifies a single match level. Defaults to `FALSE`.
#'
#' @return
#' A list of data frames, each returned by a call to `fn` on the unique
#' combination of hierarchical values at the given hierarchical level. The
#' number of elements in the list corresponds to the number of hierarchical
#' columns in `raw`, or, if specified, the number of elements in argument
#' `levels`.
#'
#' However, if `always_list = FALSE` and `length(levels) == 1`, a single data
#' frame is returned (i.e. not wrapped in a list).
#'
#' @examples
#' data(ne_raw)
#' data(ne_ref)
#'
#' # by default calls fn `hmatch` separately for each hierarchical level
#' hmatch_split(ne_raw, ne_ref)
#'
#' # can also specify other hmatch functions, and subsets of hierarchical levels
#' hmatch_split(ne_raw, ne_ref, fn = "hmatch_tokens", levels = 2:3)
#'
#' @export hmatch_split
hmatch_split <- function(raw,
ref,
pattern,
pattern_ref = pattern,
by,
by_ref = by,
fn = "hmatch",
type = "left",
allow_gaps = TRUE,
fuzzy = FALSE,
fuzzy_method = "osa",
fuzzy_dist = 1L,
dict = NULL,
ref_prefix = "ref_",
std_fn = string_std,
...,
levels = NULL,
always_list = FALSE,
man,
code_col,
always_tokenize = FALSE,
token_split = "_",
exclude_freq = 3,
exclude_nchar = 3,
exclude_values = NULL) {
## validate arg fn
fn_name <- as.character(substitute(fn))
fn <- match.fun(fn)
## identify hierarchical columns and split raw and ref by hierarchical level
prep <- hmatch_split_prep(
raw = raw,
ref = ref,
pattern = pattern,
pattern_ref = pattern_ref,
by = by,
by_ref = by_ref,
ref_prefix = ref_prefix,
levels = levels,
lower_levels = FALSE
)
## implement matching routines, split by hierarchical level
if (fn_name %in% c("hmatch", "hmatch_settle", "hmatch_permute")) {
out <- mapply(
fn,
raw = prep$raw_split,
ref = prep$ref_split,
MoreArgs = list(
by = prep$by_raw_split,
by_ref = prep$by_ref_split,
type = type,
allow_gaps = allow_gaps,
fuzzy = fuzzy,
fuzzy_method = fuzzy_method,
fuzzy_dist = fuzzy_dist,
dict = dict,
ref_prefix = ref_prefix,
std_fn = std_fn,
...
),
SIMPLIFY = FALSE
)
} else if (fn_name == "hmatch_composite") {
if (missing(man)) man <- NULL
if (missing(code_col)) code_col <- NULL
if (type %in% c("left", "inner", "anti")) {
type <- paste0("resolve_", type)
warning(
"`hmatch_composite` only implements resolve joins. Updating argument ",
"`type` to ", vec_paste_c(type), call. = FALSE
)
}
out <- mapply(
fn,
raw = prep$raw_split,
ref = prep$ref_split,
MoreArgs = list(
by = prep$by_raw_split,
by_ref = prep$by_ref_split,
type = type,
allow_gaps = allow_gaps,
fuzzy = fuzzy,
fuzzy_method = fuzzy_method,
fuzzy_dist = fuzzy_dist,
dict = dict,
ref_prefix = ref_prefix,
std_fn = std_fn,
...,
man = man, # TODO: probably need to split by level
code_col = code_col
),
SIMPLIFY = FALSE
)
} else if (fn_name == "hmatch_tokens") {
out <- mapply(
fn,
raw = prep$raw_split,
ref = prep$ref_split,
MoreArgs = list(
by = prep$by_raw_split,
by_ref = prep$by_ref_split,
type = type,
allow_gaps = allow_gaps,
fuzzy = fuzzy,
fuzzy_method = fuzzy_method,
fuzzy_dist = fuzzy_dist,
dict = dict,
ref_prefix = ref_prefix,
std_fn = std_fn,
...,
always_tokenize = always_tokenize,
token_split = token_split,
exclude_freq = exclude_freq,
exclude_nchar = exclude_nchar,
exclude_values = exclude_values
),
SIMPLIFY = FALSE
)
}
names(out) <- prep$names
  if (length(out) == 1L && !always_list) out <- out[[1]]
return(out)
}
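## Illustrative usage sketch (kept as comments so nothing runs at load time). It assumes
## the package's bundled `ne_raw` and `ne_ref` example datasets, as in the roxygen examples:
##   res <- hmatch_split(ne_raw, ne_ref, type = "inner", always_list = TRUE)
##   names(res)         # one element per hierarchical column in `raw`
##   lapply(res, nrow)  # number of matched rows at each level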
#' @importFrom dplyr arrange group_by_all ungroup
split_raw <- function(raw, by, lev, all_levels = TRUE) {
out <- unique(raw[, by[1:lev], drop = FALSE])
all_na <- apply(out, 1, function(x) all(is.na(x)))
out <- out[!all_na, , drop = FALSE]
out <- out[!is.na(out[[by[lev]]]), , drop = FALSE]
if (all_levels) {
cols_missing <- setdiff(by, names(out))
for (j in cols_missing) {
out[[j]] <- rep(NA_character_, nrow(out))
}
}
out_order <- order(hcodes_int(out, by = by[1:lev]))
out[out_order, , drop = FALSE]
}
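## Minimal sketch of what the internal helper split_raw() does (hypothetical columns):
##   raw <- data.frame(adm1 = c("Canada", "Canada", "United States"),
##                     adm2 = c("Ontario", "Ontario", "New York"))
##   split_raw(raw, by = c("adm1", "adm2"), lev = 1)
##   ## -> the unique level-1 values ("Canada", "United States"), with the lower-level
##   ##    column `adm2` padded with NA because all_levels = TRUE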
split_ref <- function(ref, by, lev, lower_levels = FALSE) {
l <- max_levels(ref, by = by)
out <- if (lower_levels) {
ref[l <= lev, , drop = FALSE]
} else {
ref[l == lev, , drop = FALSE]
}
# if (lev < length(by)) {
# cols_excl <- by[(lev + 1):length(by)]
# out <- out[,!names(out) %in% cols_excl, drop = FALSE]
# }
return(out)
}
#' @noRd
ordered_split <- function(x, f, N) {
s <- split(x, f)
i <- as.numeric(names(s))
out <- vector("list", N)
out[i] <- s
return(out)
}
hmatch_split_prep_levels <- function(levels, by) {
if (is.null(levels)) {
valid <- TRUE
out <- seq_along(by)
} else if (is.numeric(levels)) {
valid <- levels %in% seq_along(by)
out <- levels
} else {
valid <- levels %in% by
out <- match(levels, by)
}
if (!all(valid)) {
stop("the following elements of `levels` could not be matched to a ",
"hierarchical column within `raw`: ",
paste(levels[!valid], collapse = "; "))
}
return(out)
}
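## e.g. (sketch): hmatch_split_prep_levels(c("adm1", "adm3"), by = c("adm1", "adm2", "adm3"))
## returns c(1L, 3L); integer input is validated against seq_along(by), and NULL means all levels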
#' @importFrom stats setNames
hmatch_split_prep <- function(raw,
ref,
pattern,
pattern_ref,
by,
by_ref = by_ref,
ref_prefix,
levels,
lower_levels = FALSE) {
prep <- prep_match_columns(
raw = raw,
ref = ref,
pattern = pattern,
pattern_ref = pattern_ref,
by = by,
by_ref = by_ref,
ref_prefix = ref_prefix
)
levels <- hmatch_split_prep_levels(levels, prep$by_raw)
raw_split <- lapply(
seq_along(prep$by_raw)[levels],
split_raw,
raw = raw,
by = prep$by_raw
)
ref_split <- lapply(
seq_along(prep$by_ref)[levels],
split_ref,
ref = ref,
by = prep$by_ref_orig,
lower_levels = lower_levels
)
return(list(raw_split = raw_split,
ref_split = ref_split,
by_raw_split = prep$by_raw,
by_ref_split = prep$by_ref_orig,
names = prep$by_raw[levels]))
}
|
/R/hmatch_split.R
|
no_license
|
ntncmch/hmatch
|
R
| false | false | 9,256 |
r
|
#' Hierarchical matching, separately at each hierarchical level
#'
#' @description
#' Implements hierarchical matching, separately at each hierarchical level
#' within the data. For a given level, the raw data that is matched includes
#' every unique combination of values at and below the level of interest. E.g.
#'
#' Level 1: \cr
#' `| Canada |` \cr
#' `| United States |` \cr
#'
#' Level 2: \cr
#' `| Canada | Ontario |` \cr
#' `| United States | New York |` \cr
#' `| United States | Pennsylvania |` \cr
#'
#' Level 3: \cr
#' `| Canada | Ontario | Ottawa |` \cr
#' `| Canada | Ontario | Toronto |` \cr
#' `| United States | New York | Bronx |` \cr
#' `| United States | New York | New York |` \cr
#' `| United States | Pennsylvania | Philadelphia |`
#'
#' @inheritParams hmatch_tokens
#' @inheritParams hmatch_composite
#'
#' @param fn which function to use for matching. Current options are
#' \code{\link{hmatch}}, \code{\link{hmatch_permute}},
#' \code{\link{hmatch_tokens}}, \code{\link{hmatch_settle}}, or
#' \code{\link{hmatch_composite}}. Defaults to "hmatch".
#'
#' Note that some subsequent arguments are only relevant for specific
#' functions (e.g. the `exclude_` arguments are only relevant if `fn =
#' "hmatch_tokens"`).
#' @param type type of join ("left", "inner", "anti", "resolve_left",
#' "resolve_inner", or "resolve_anti"). Defaults to "left". See
#' \link{join_types}.
#'
#' Note that the details of resolve joins vary somewhat among hmatch functions
#' (see documentation for the relevant function), and that function
#' \code{\link{hmatch_composite}} only allows resolve joins.
#' @param levels a vector of names or integer indices corresponding to one or
#' more of the hierarchical columns in `raw` to match at. Defaults to `NULL`
#' in which case matches are made at each hierarchical level.
#' @param always_list logical indicating whether to always return a list, even
#' when argument `levels` specifies a single match level. Defaults to `FALSE`.
#'
#' @return
#' A list of data frames, each returned by a call to `fn` on the unique
#' combination of hierarchical values at the given hierarchical level. The
#' number of elements in the list corresponds to the number of hierarchical
#' columns in `raw`, or, if specified, the number of elements in argument
#' `levels`.
#'
#' However, if `always_list = FALSE` and `length(levels) == 1`, a single data
#' frame is returned (i.e. not wrapped in a list).
#'
#' @examples
#' data(ne_raw)
#' data(ne_ref)
#'
#' # by default calls fn `hmatch` separately for each hierarchical level
#' hmatch_split(ne_raw, ne_ref)
#'
#' # can also specify other hmatch functions, and subsets of hierarchical levels
#' hmatch_split(ne_raw, ne_ref, fn = "hmatch_tokens", levels = 2:3)
#'
#' @export hmatch_split
hmatch_split <- function(raw,
ref,
pattern,
pattern_ref = pattern,
by,
by_ref = by,
fn = "hmatch",
type = "left",
allow_gaps = TRUE,
fuzzy = FALSE,
fuzzy_method = "osa",
fuzzy_dist = 1L,
dict = NULL,
ref_prefix = "ref_",
std_fn = string_std,
...,
levels = NULL,
always_list = FALSE,
man,
code_col,
always_tokenize = FALSE,
token_split = "_",
exclude_freq = 3,
exclude_nchar = 3,
exclude_values = NULL) {
## validate arg fn
fn_name <- as.character(substitute(fn))
fn <- match.fun(fn)
## identify hierarchical columns and split raw and ref by hierarchical level
prep <- hmatch_split_prep(
raw = raw,
ref = ref,
pattern = pattern,
pattern_ref = pattern_ref,
by = by,
by_ref = by_ref,
ref_prefix = ref_prefix,
levels = levels,
lower_levels = FALSE
)
## implement matching routines, split by hierarchical level
if (fn_name %in% c("hmatch", "hmatch_settle", "hmatch_permute")) {
out <- mapply(
fn,
raw = prep$raw_split,
ref = prep$ref_split,
MoreArgs = list(
by = prep$by_raw_split,
by_ref = prep$by_ref_split,
type = type,
allow_gaps = allow_gaps,
fuzzy = fuzzy,
fuzzy_method = fuzzy_method,
fuzzy_dist = fuzzy_dist,
dict = dict,
ref_prefix = ref_prefix,
std_fn = std_fn,
...
),
SIMPLIFY = FALSE
)
} else if (fn_name == "hmatch_composite") {
if (missing(man)) man <- NULL
if (missing(code_col)) code_col <- NULL
if (type %in% c("left", "inner", "anti")) {
type <- paste0("resolve_", type)
warning(
"`hmatch_composite` only implements resolve joins. Updating argument ",
"`type` to ", vec_paste_c(type), call. = FALSE
)
}
out <- mapply(
fn,
raw = prep$raw_split,
ref = prep$ref_split,
MoreArgs = list(
by = prep$by_raw_split,
by_ref = prep$by_ref_split,
type = type,
allow_gaps = allow_gaps,
fuzzy = fuzzy,
fuzzy_method = fuzzy_method,
fuzzy_dist = fuzzy_dist,
dict = dict,
ref_prefix = ref_prefix,
std_fn = std_fn,
...,
man = man, # TODO: probably need to split by level
code_col = code_col
),
SIMPLIFY = FALSE
)
} else if (fn_name == "hmatch_tokens") {
out <- mapply(
fn,
raw = prep$raw_split,
ref = prep$ref_split,
MoreArgs = list(
by = prep$by_raw_split,
by_ref = prep$by_ref_split,
type = type,
allow_gaps = allow_gaps,
fuzzy = fuzzy,
fuzzy_method = fuzzy_method,
fuzzy_dist = fuzzy_dist,
dict = dict,
ref_prefix = ref_prefix,
std_fn = std_fn,
...,
always_tokenize = always_tokenize,
token_split = token_split,
exclude_freq = exclude_freq,
exclude_nchar = exclude_nchar,
exclude_values = exclude_values
),
SIMPLIFY = FALSE
)
}
names(out) <- prep$names
  if (length(out) == 1L && !always_list) out <- out[[1]]
return(out)
}
#' @importFrom dplyr arrange group_by_all ungroup
split_raw <- function(raw, by, lev, all_levels = TRUE) {
out <- unique(raw[, by[1:lev], drop = FALSE])
all_na <- apply(out, 1, function(x) all(is.na(x)))
out <- out[!all_na, , drop = FALSE]
out <- out[!is.na(out[[by[lev]]]), , drop = FALSE]
if (all_levels) {
cols_missing <- setdiff(by, names(out))
for (j in cols_missing) {
out[[j]] <- rep(NA_character_, nrow(out))
}
}
out_order <- order(hcodes_int(out, by = by[1:lev]))
out[out_order, , drop = FALSE]
}
split_ref <- function(ref, by, lev, lower_levels = FALSE) {
l <- max_levels(ref, by = by)
out <- if (lower_levels) {
ref[l <= lev, , drop = FALSE]
} else {
ref[l == lev, , drop = FALSE]
}
# if (lev < length(by)) {
# cols_excl <- by[(lev + 1):length(by)]
# out <- out[,!names(out) %in% cols_excl, drop = FALSE]
# }
return(out)
}
#' @noRd
ordered_split <- function(x, f, N) {
s <- split(x, f)
i <- as.numeric(names(s))
out <- vector("list", N)
out[i] <- s
return(out)
}
hmatch_split_prep_levels <- function(levels, by) {
if (is.null(levels)) {
valid <- TRUE
out <- seq_along(by)
} else if (is.numeric(levels)) {
valid <- levels %in% seq_along(by)
out <- levels
} else {
valid <- levels %in% by
out <- match(levels, by)
}
if (!all(valid)) {
stop("the following elements of `levels` could not be matched to a ",
"hierarchical column within `raw`: ",
paste(levels[!valid], collapse = "; "))
}
return(out)
}
#' @importFrom stats setNames
hmatch_split_prep <- function(raw,
ref,
pattern,
pattern_ref,
by,
by_ref = by_ref,
ref_prefix,
levels,
lower_levels = FALSE) {
prep <- prep_match_columns(
raw = raw,
ref = ref,
pattern = pattern,
pattern_ref = pattern_ref,
by = by,
by_ref = by_ref,
ref_prefix = ref_prefix
)
levels <- hmatch_split_prep_levels(levels, prep$by_raw)
raw_split <- lapply(
seq_along(prep$by_raw)[levels],
split_raw,
raw = raw,
by = prep$by_raw
)
ref_split <- lapply(
seq_along(prep$by_ref)[levels],
split_ref,
ref = ref,
by = prep$by_ref_orig,
lower_levels = lower_levels
)
return(list(raw_split = raw_split,
ref_split = ref_split,
by_raw_split = prep$by_raw,
by_ref_split = prep$by_ref_orig,
names = prep$by_raw[levels]))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/big.municipios.R
\docType{data}
\name{big.municipios}
\alias{big.municipios}
\title{Big Municipios not part of a Metro Area}
\format{A data frame with 66 observations on the following 4 variables.}
\usage{
big.municipios
}
\description{
This dataset contains all municipios that were not part of a metro area in 2010 but had
a larger population than the smallest metro area (> 110,000).
\url{http://www.conapo.gob.mx/es/CONAPO/Delimitacion_de_Zonas_Metropolitanas}
}
\section{Variables}{
\itemize{
\item{\code{state_code}}{a numeric vector}
\item{\code{mun_code}}{a numeric vector}
\item{\code{population}}{a numeric vector}
\item{\code{name}}{a character vector}
}
}
\examples{
head(big.municipios)
}
|
/man/big.municipios.Rd
|
permissive
|
oscaramtz/mxmortalitydb
|
R
| false | true | 783 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/big.municipios.R
\docType{data}
\name{big.municipios}
\alias{big.municipios}
\title{Big Municipios not part of a Metro Area}
\format{A data frame with 66 observations on the following 4 variables.}
\usage{
big.municipios
}
\description{
This dataset contains all municipios that were not part of a metro area in 2010 but had
a larger population than the smallest metro area (> 110,000).
\url{http://www.conapo.gob.mx/es/CONAPO/Delimitacion_de_Zonas_Metropolitanas}
}
\section{Variables}{
\itemize{
\item{\code{state_code}}{a numeric vector}
\item{\code{mun_code}}{a numeric vector}
\item{\code{population}}{a numeric vector}
\item{\code{name}}{a character vector}
}
}
\examples{
head(big.municipios)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.installed.R
\name{check.installed}
\alias{check.installed}
\title{Checks whether the required libraries are installed on the system}
\usage{
check.installed(libraries)
}
\arguments{
\item{libraries}{A vector with the libraries that the project uses}
}
\description{
Checks whether the required libraries are installed on the system.
}
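\examples{
\dontrun{
# illustrative call; the package names here are placeholders
check.installed(c("dplyr", "ggplot2"))
}
}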
|
/man/check.installed.Rd
|
no_license
|
araupontones/gmdacr
|
R
| false | true | 408 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.installed.R
\name{check.installed}
\alias{check.installed}
\title{Checks whether the required libraries are installed on the system}
\usage{
check.installed(libraries)
}
\arguments{
\item{libraries}{A vector with the libraries that the project uses}
}
\description{
Checks whether the required libraries are installed on the system.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AzureVM.R
\name{azureListScaleSetVM}
\alias{azureListScaleSetVM}
\title{List VMs within a scale set}
\usage{
azureListScaleSetVM(azureActiveContext, scaleSet, resourceGroup, location,
subscriptionID, azToken, verbose = FALSE)
}
\arguments{
\item{azureActiveContext}{A container used for caching variables used by AzureSMR}
\item{scaleSet}{name of the scale set to refer to (see azureListScaleSets)}
\item{resourceGroup}{Name of the resource group}
\item{location}{Azure region, e.g. 'westeurope' or 'southcentralus'}
\item{subscriptionID}{Set the subscriptionID. This is obtained automatically by \code{\link{azureAuthenticate}} when only a single subscriptionID is available via Active Directory}
\item{azToken}{Azure authentication token, obtained by \code{\link{azureAuthenticate}}}
\item{verbose}{Print Tracing information (Default False)}
}
\description{
List VMs within a scale set
}
\seealso{
Other Virtual machine functions: \code{\link{azureDeleteVM}},
\code{\link{azureListScaleSetNetwork}},
\code{\link{azureListScaleSets}},
\code{\link{azureListVM}}, \code{\link{azureStartVM}},
\code{\link{azureStopVM}}, \code{\link{azureVMStatus}}
}
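\examples{
\dontrun{
# illustrative sketch only: assumes `sc` is an azureActiveContext returned by azureAuthenticate()
azureListScaleSetVM(sc, scaleSet = "myScaleSet", resourceGroup = "myResourceGroup")
}
}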
|
/man/azureListScaleSetVM.Rd
|
no_license
|
c3h3/AzureSMR
|
R
| false | true | 1,232 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AzureVM.R
\name{azureListScaleSetVM}
\alias{azureListScaleSetVM}
\title{List VMs within a scale set}
\usage{
azureListScaleSetVM(azureActiveContext, scaleSet, resourceGroup, location,
subscriptionID, azToken, verbose = FALSE)
}
\arguments{
\item{azureActiveContext}{A container used for caching variables used by AzureSMR}
\item{scaleSet}{name of the scale set to refer to (see azureListScaleSets)}
\item{resourceGroup}{Name of the resource group}
\item{location}{Azure region, e.g. 'westeurope' or 'southcentralus'}
\item{subscriptionID}{Set the subscriptionID. This is obtained automatically by \code{\link{azureAuthenticate}} when only a single subscriptionID is available via Active Directory}
\item{azToken}{Azure authentication token, obtained by \code{\link{azureAuthenticate}}}
\item{verbose}{Print Tracing information (Default False)}
}
\description{
List VMs within a scale set
}
\seealso{
Other Virtual machine functions: \code{\link{azureDeleteVM}},
\code{\link{azureListScaleSetNetwork}},
\code{\link{azureListScaleSets}},
\code{\link{azureListVM}}, \code{\link{azureStartVM}},
\code{\link{azureStopVM}}, \code{\link{azureVMStatus}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{my_try_catch}
\alias{my_try_catch}
\title{A better try catch}
\usage{
my_try_catch(expr)
}
\arguments{
\item{expr}{an expression}
}
\description{
A better try catch
}
|
/man/my_try_catch.Rd
|
no_license
|
Giappo/jap
|
R
| false | true | 271 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{my_try_catch}
\alias{my_try_catch}
\title{A better try catch}
\usage{
my_try_catch(expr)
}
\arguments{
\item{expr}{an expression}
}
\description{
A better try catch
}
|
library(testthat)
library(prof.tree)
test_check("prof.tree")
|
/tests/testthat.R
|
no_license
|
artemklevtsov/prof.tree
|
R
| false | false | 62 |
r
|
library(testthat)
library(prof.tree)
test_check("prof.tree")
|
testlist <- list(x = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.13260767614764e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance::fastdist,testlist)
str(result)
|
/multivariance/inst/testfiles/fastdist/AFL_fastdist/fastdist_valgrind_files/1613097210-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 303 |
r
|
testlist <- list(x = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.13260767614764e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance::fastdist,testlist)
str(result)
|
"set.cor" <-
function(y,x,data,z=NULL,n.obs=NULL,use="pairwise",std=TRUE,square=FALSE,main="Regression Models",plot=TRUE,show=FALSE,zero=TRUE) {
setCor(y=y,x=x,data=data,z=z,n.obs=n.obs,use=use,std=std,square=square,main=main,plot=plot,show=show)}
"setCor" <-
function(y,x,data,z=NULL,n.obs=NULL,use="pairwise",std=TRUE,square=FALSE,main="Regression Models",plot=TRUE,show=FALSE,zero=TRUE,alpha=.05) {
#a function to extract subsets of variables (a and b) from a correlation matrix m or data set m
#and find the multiple correlation beta weights + R2 of the a set predicting the b set
#seriously rewritten, March 24, 2009 to make much simpler
#minor additions, October 20, 2009 to allow for print and summary functions
#major addition in April, 2011 to allow for set correlation
#added calculation of residual matrix December 30, 2011
#added option to allow square data matrices
#modified December, 2014 to allow for covariances as well as to fix a bug with single x variable
#modified April, 2015 to handle data with non-numeric values in the data, which are not part of the analysis
#Modified November, 2017 to handle "lm" style input using my parse function.
#modified July 4, 2018 to add intercepts and confidence intervals (requested by Franz Strich)
cl <- match.call()
#convert names to locations
prod <- ex <- NULL #in case we do not have formula input
#first, see if they are in formula mode
if(class(y) == "formula") {
ps <- fparse(y)
y <- ps$y
x <- ps$x
med <- ps$m #but, mediation is not done here, so we just add this to x
# if(!is.null(med)) x <- c(x,med) #not necessary, because we automatically put this in
prod <- ps$prod
z <- ps$z #do we have any variable to partial out
ex <- ps$ex
}
# data <- char2numeric(data) #move to later (01/05/19)
if(is.numeric(y )) y <- colnames(data)[y]
if(is.numeric(x )) x <- colnames(data)[x]
if(is.numeric(z )) z <- colnames(data)[z]
#check for bad input
if(any( !(c(y,x,z,ex) %in% colnames(data)) )) {
print(c(y, x, z, ex)[which(!(c(y, x, z, ex) %in% colnames(data)))])
stop("Variable names are incorrect")}
if(!isCorrelation(data)) {
data <- data[,c(y,x,z,ex)]
data <- char2numeric(data)
if(!is.matrix(data)) data <- as.matrix(data)
if(!is.numeric(data)) stop("The data must be numeric to proceed")
if(!is.null(prod) | (!is.null(ex))) {#we want to find a product term
if(zero) data <- scale(data,scale=FALSE)
if(!is.null(prod)) {
prods <- matrix(NA,ncol=length(prod),nrow=nrow(data))
colnames(prods) <- prod
colnames(prods) <- paste0("V",1:length(prod))
for(i in 1:length(prod)) {
prods[,i] <- apply(data[,prod[[i]]],1,prod)
colnames(prods)[i] <- paste0(prod[[i]],collapse="*")
}
data <- cbind(data,prods)
x <- c(x,colnames(prods))
}
if(!is.null(ex)) {
     quads <- matrix(NA,ncol=length(ex),nrow=nrow(data)) #find the quadratic terms
colnames(quads) <- paste0(ex)
for(i in 1:length(ex)) {
quads[,i] <- data[,ex[i]] * data[,ex[i]]
colnames(quads)[i] <- paste0(ex[i],"^2")
}
data <- cbind(data,quads)
x <- c(x,colnames(quads))
}
}
means <- colMeans(data,na.rm=TRUE) #use these later to find the intercept
C <- cov(data,use=use)
if(std) {m <- cov2cor(C)
C <- m} else {m <- C}
raw <- TRUE
# n.obs=dim(data)[1] #this does not take into account missing data
n.obs <- max(pairwiseCount(data),na.rm=TRUE ) #this does
} else {
raw <- FALSE
if(!is.matrix(data)) data <- as.matrix(data)
C <- data
if(std) {m <- cov2cor(C)} else {m <- C}}
#We do all the calculations on the Covariance or correlation matrix (m)
#convert names to locations
nm <- dim(data)[1]
xy <- c(x,y)
numx <- length(x)
numy <- length(y)
numz <- 0
nxy <- numx+numy
m.matrix <- m[c(x,y),c(x,y)]
x.matrix <- m[x,x,drop=FALSE]
xc.matrix <- m[x,x,drop=FALSE] #fixed19/03/15
xy.matrix <- m[x,y,drop=FALSE]
xyc.matrix <- m[x,y,drop=FALSE] #fixed 19/03/15
y.matrix <- m[y,y,drop=FALSE]
if(!is.null(z)){numz <- length(z) #partial out the z variables
zm <- m[z,z,drop=FALSE]
za <- m[x,z,drop=FALSE]
zb <- m[y,z,drop=FALSE]
zmi <- solve(zm)
x.matrix <- x.matrix - za %*% zmi %*% t(za)
y.matrix <- y.matrix - zb %*% zmi %*% t(zb)
xy.matrix <- xy.matrix - za %*% zmi %*% t(zb)
m.matrix <- cbind(rbind(y.matrix,xy.matrix),rbind(t(xy.matrix),x.matrix))
#m.matrix is now the matrix of partialled covariances -- make sure we use this one!
}
 if(numx == 1 ) {beta <- matrix(xy.matrix,nrow=1)/x.matrix[1,1]   #this is the case of a single x
      rownames(beta) <- rownames(xy.matrix)
      colnames(beta) <- colnames(xy.matrix)
       } else    #two or more predictors
     { beta <- solve(x.matrix,xy.matrix)    #solve the equation bY~aX
beta <- as.matrix(beta)
}
if(raw) {if(numx ==1) {intercept <- means[y] - sum(means[x] * beta[x,y ])} else {if(numy > 1) { intercept <- means[y] - colSums(means[x] * beta[x,y ])} else {intercept <- means[y] - sum(means[x] * beta[x,y ])}} } else {intercept <- NA}
yhat <- t(xy.matrix) %*% solve(x.matrix) %*% (xy.matrix)
resid <- y.matrix - yhat
if (numy > 1 ) {
if(is.null(rownames(beta))) {rownames(beta) <- x}
if(is.null(colnames(beta))) {colnames(beta) <- y}
R2 <- colSums(beta * xy.matrix)/diag(y.matrix) } else {
colnames(beta) <- y
R2 <- sum(beta * xy.matrix)/y.matrix
R2 <- matrix(R2)
rownames(beta) <- x
rownames(R2) <- colnames(R2) <- y
}
VIF <- 1/(1-smc(x.matrix))
#now find the unit weighted correlations
#reverse items in X and Y so that they are all positive signed
#But this doesn't help in predicting y
#we need to weight by the sign of the xy,matrix
#this gives a different key for each y
#need to adjust this for y
# px <- principal(x.matrix)
# keys.x <- diag(as.vector(1- 2* (px$loadings < 0 )) )
# py <- principal(y.matrix)
# keys.y <- diag(as.vector(1- 2* (py$loadings < 0 ) ))
#
# Vx <- sum( t(keys.x) %*% x.matrix %*% t(keys.x))
# Vy <- sum( keys.y %*% y.matrix %*% t(keys.y))
#
# ruw <- colSums(abs(xy.matrix))/sqrt(Vx)
# Ruw <- sum(diag(keys.x) %*% xy.matrix %*% t(keys.y))/sqrt(Vx * Vy)
#end of old way of doing it
#new way (2/17/18)
keys.x <- sign(xy.matrix) #this converts zero order correlations into -1, 0, 1 weights for each y
Vx <- t(keys.x) %*% x.matrix %*% (keys.x) #diag are scale variances
#Vy <- t(keys.x) %*% y.matrix %*% keys.x #diag are y variances ?
Vy <- (y.matrix)
uCxy <- t(keys.x) %*% xy.matrix
ruw <- diag(uCxy)/sqrt(diag(Vx)) #these are the individual multiple Rs
   Ruw <- sum(uCxy)/sqrt(sum(Vx)*sum(Vy)) #the overall unit-weighted (matrix) correlation between the x and y sets
if(numy < 2) {Rset <- 1 - det(m.matrix)/(det(x.matrix) )
Myx <- solve(x.matrix) %*% xy.matrix %*% t(xy.matrix)
cc2 <- cc <- T <- NULL} else {if (numx < 2) {Rset <- 1 - det(m.matrix)/(det(y.matrix) )
Myx <- xy.matrix %*% solve(y.matrix) %*% t(xy.matrix)
cc2 <- cc <- T <- NULL} else {Rset <- 1 - det(m.matrix)/(det(x.matrix) * det(y.matrix))
if(numy > numx) {
Myx <- solve(x.matrix) %*% xy.matrix %*% solve(y.matrix) %*% t(xy.matrix)} else { Myx <- solve(y.matrix) %*% t(xy.matrix )%*% solve(x.matrix) %*% (xy.matrix)}
}
cc2 <- eigen(Myx)$values
cc <- sqrt(cc2)
T <- sum(cc2)/length(cc2)
}
if(!is.null(n.obs)) {k<- length(x)
# uniq <- (1-smc(x.matrix,covar=!std))
uniq <- (1-smc(x.matrix))
se.beta <- list()
ci.lower <- list()
ci.upper <- list()
for (i in 1:length(y)) {
df <- n.obs-k-1 #this is the n.obs - length(x)
se.beta[[i]] <- (sqrt((1-R2[i])/(df))*sqrt(1/uniq))}
se <- matrix(unlist(se.beta),ncol=length(y))
if(!is.null(z)) {colnames(beta) <- paste0(colnames(beta),"*") }
colnames(se) <- colnames(beta)
if(!is.null(z)) {rownames(beta) <- paste0(rownames(beta),"*")}
rownames(se) <- rownames(beta)
# se <- t(t(se) * sqrt(diag(C)[y]))/sqrt(diag(xc.matrix)) #need to use m.matrix
se <- t(t(se) * sqrt(diag(m.matrix)[y]))/sqrt(diag(x.matrix)) #corrected 11/29/18
for(i in 1:length(y)) {ci.lower[[i]] <- beta[,i] - qt(1-alpha/2,df)*se[,i]
ci.upper[[i]] <- beta[,i] + qt(1-alpha/2,df)*se[,i]}
ci.lower <- matrix(unlist(ci.lower),ncol=length(y))
ci.upper <- matrix(unlist(ci.upper),ncol=length(y))
colnames( ci.lower) <- colnames( ci.upper) <- colnames( beta)
rownames( ci.lower)<- rownames( ci.upper) <- rownames(beta)
confid.beta <- cbind(ci.lower,ci.upper)
tvalue <- beta/se
# prob <- 2*(1- pt(abs(tvalue),df))
prob <- -2 * expm1(pt(abs(tvalue),df,log.p=TRUE))
SE2 <- 4*R2*(1-R2)^2*(df^2)/((n.obs^2-1)*(n.obs+3))
SE =sqrt(SE2)
F <- R2*df/(k*(1-R2))
# pF <- 1 - pf(F,k,df)
pF <- -expm1(pf(F,k,df,log.p=TRUE))
shrunkenR2 <- 1-(1-R2)*(n.obs-1)/df
#find the shrunken R2 for set cor (taken from CCAW p 615)
u <- numx * numy
m1 <- n.obs - max(numy ,(numx+numz)) - (numx + numy +3)/2
s <- sqrt((numx ^2 * numy^2 -4)/(numx^2 + numy^2-5))
if(numx*numy ==4) s <- 1
v <- m1 * s + 1 - u/2
R2set.shrunk <- 1 - (1-Rset) * ((v+u)/v)^s
L <- 1-Rset
L1s <- L^(-1/s)
Rset.F <- (L1s-1)*(v/u)
df.m <- n.obs - max(numy ,(numx+numz)) -(numx + numy +3)/2
s1 <- sqrt((numx ^2 * numy^2 -4)/(numx^2 + numy^2-5)) #see cohen p 321
if(numx^2*numy^2 < 5) s1 <- 1
df.v <- df.m * s1 + 1 - numx * numy/2 #which is just v
# df.v <- (u+v) #adjusted for bias to match the CCAW results
#Rset.F <- Rset.F * (u+v)/v
Chisq <- -(n.obs - 1 -(numx + numy +1)/2)*log((1-cc2))
}
# if(numx == 1) {beta <- beta * sqrt(diag(C)[y])
# } else {beta <- t(t(beta) * sqrt(diag(C)[y]))/sqrt(diag(xc.matrix))} #this puts the betas into the raw units
# coeff <- data.frame(beta=beta,se = se,t=tvalue, Probabilty=prob)
# colnames(coeff) <- c("Estimate", "Std. Error" ,"t value", "Pr(>|t|)")
if(is.null(n.obs)) {set.cor <- list(beta=beta,R=sqrt(R2),R2=R2,Rset=Rset,T=T,intercept=intercept,cancor = cc, cancor2=cc2,raw=raw,residual=resid,ruw=ruw,Ruw=Ruw,x.matrix=x.matrix,y.matrix=y.matrix,VIF=VIF,Call = cl)} else {
set.cor <- list(beta=beta,se=se,t=tvalue,Probability = prob,intercept=intercept,ci=confid.beta,R=sqrt(R2),R2=R2,shrunkenR2 = shrunkenR2,seR2 = SE,F=F,probF=pF,df=c(k,df),Rset=Rset,Rset.shrunk=R2set.shrunk,Rset.F=Rset.F,Rsetu=u,Rsetv=df.v,T=T,cancor=cc,cancor2 = cc2,Chisq = Chisq,raw=raw,residual=resid,ruw=ruw,Ruw=Ruw,x.matrix=x.matrix,y.matrix=y.matrix,VIF=VIF,data=data,Call = cl)}
class(set.cor) <- c("psych","setCor")
if(plot) setCor.diagram(set.cor,main=main,show=show)
return(set.cor)
}
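#a worked sketch of the core computation (standardized regression from a correlation matrix),
#using the base `attitude` dataset for illustration; kept as comments, not package code:
#  R  <- cor(attitude[, c("complaints", "learning", "rating")])
#  xs <- c("complaints", "learning")
#  b  <- solve(R[xs, xs], R[xs, "rating", drop = FALSE])   #beta weights, as in solve(x.matrix, xy.matrix)
#  R2 <- sum(b * R[xs, "rating"])                          #R^2 = sum(beta * r_xy)
#  #setCor(rating ~ complaints + learning, data = attitude) returns the same standardized values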
#modified July 12,2007 to allow for NA in the overall matrix
#modified July 9, 2008 to give statistical tests
#modified yet again August 15 , 2008 to convert covariances to correlations
#modified January 3, 2011 to work in the case of a single predictor
#modified April 25, 2011 to add the set correlation (from Cohen)
#modified April 21, 2014 to allow for mixed names and locations in call
#modified February 19, 2015 to just find the covariances of the data that are used in the regression
#this gets around the problem that some users have large data sets, but only want a few variables in the regression
#corrected February 17, 2018 to correctly find the unweighted correlations
#modified Sept 22, 2018 to allow cex and l.cex to be set
#modified November, 2017 to allow an override of which way to draw the arrows
setCor.diagram <- function(sc,main="Regression model",digits=2,show=FALSE,cex=1,l.cex=1,...) {
if(missing(l.cex)) l.cex <- cex
beta <- round(sc$beta,digits)
x.matrix <- round(sc$x.matrix,digits)
y.matrix <- round(sc$y.matrix,digits)
y.resid <- round(sc$resid,digits)
x.names <- rownames(sc$beta)
y.names <- colnames(sc$beta)
nx <- length(x.names)
ny <- length(y.names)
top <- max(nx,ny)
xlim=c(-nx/3,10)
ylim=c(0,top)
top <- max(nx,ny)
x <- list()
y <- list()
x.scale <- top/(nx+1)
y.scale <- top/(ny+1)
plot(NA,xlim=xlim,ylim=ylim,main=main,axes=FALSE,xlab="",ylab="")
for(i in 1:nx) {x[[i]] <- dia.rect(3,top-i*x.scale,x.names[i],cex=cex,...) }
for (j in 1:ny) {y[[j]] <- dia.rect(7,top-j*y.scale,y.names[j],cex=cex,...) }
for(i in 1:nx) {
for (j in 1:ny) {
dia.arrow(x[[i]]$right,y[[j]]$left,labels = beta[i,j],adj=4-j,cex=l.cex,...)
}
}
if(nx >1) {
for (i in 2:nx) {
for (k in 1:(i-1)) {dia.curved.arrow(x[[i]]$left,x[[k]]$left,x.matrix[i,k],scale=-(abs(i-k)),both=TRUE,dir="u",cex = l.cex,...)}
#dia.curve(x[[i]]$left,x[[k]]$left,x.matrix[i,k],scale=-(abs(i-k))) }
} }
if(ny>1) {for (i in 2:ny) {
for (k in 1:(i-1)) {dia.curved.arrow(y[[i]]$right,y[[k]]$right,y.resid[i,k],scale=(abs(i-k)),dir="u",cex=l.cex, ...)}
}}
for(i in 1:ny) {dia.self(y[[i]],side=3,scale=.2,... )}
if(show) {text((10-nx/3)/2,0,paste("Unweighted matrix correlation = ",round(sc$Ruw,digits)))}
}
print.psych.setCor <- function(x,digits=2) {
cat("Call: ")
print(x$Call)
if(x$raw) {cat("\nMultiple Regression from raw data \n")} else {
cat("\nMultiple Regression from matrix input \n")}
ny <- NCOL(x$beta)
for(i in 1:ny) {cat("\n DV = ",colnames(x$beta)[i],"\n")
if(!is.na(x$intercept[i])) {cat(' intercept = ',round(x$intercept[i],digits=digits),"\n")}
if(!is.null(x$se)) {result.df <- data.frame( round(x$beta[,i],digits),round(x$se[,i],digits),round(x$t[,i],digits),signif(x$Probability[,i],digits),round(x$ci[,i],digits), round(x$ci[,(i +ny)],digits),round(x$VIF,digits))
colnames(result.df) <- c("slope","se", "t", "p","lower.ci","upper.ci", "VIF")
print(result.df)
result.df <- data.frame(R = round(x$R[i],digits), R2 = round(x$R2[i],digits), Ruw = round(x$ruw[i],digits),R2uw = round( x$ruw[i]^2,digits), round(x$shrunkenR2[i],digits),round(x$seR2[i],digits), round(x$F[i],digits),x$df[1],x$df[2], signif(x$probF[i],digits+1))
colnames(result.df) <- c("R","R2", "Ruw", "R2uw","Shrunken R2", "SE of R2", "overall F","df1","df2","p")
cat("\n Multiple Regression\n")
print(result.df) } else {
result.df <- data.frame( round(x$beta[,i],digits),round(x$VIF,digits))
colnames(result.df) <- c("slope", "VIF")
print(result.df)
result.df <- data.frame(R = round(x$R[i],digits), R2 = round(x$R2[i],digits), Ruw = round(x$Ruw[i],digits),R2uw = round( x$Ruw[i]^2,digits))
colnames(result.df) <- c("R","R2", "Ruw", "R2uw")
cat("\n Multiple Regression\n")
print(result.df)
}
}
}
|
/R/set.cor.R
|
no_license
|
Redhat44/psych
|
R
| false | false | 17,840 |
r
|
"set.cor" <-
function(y,x,data,z=NULL,n.obs=NULL,use="pairwise",std=TRUE,square=FALSE,main="Regression Models",plot=TRUE,show=FALSE,zero=TRUE) {
setCor(y=y,x=x,data=data,z=z,n.obs=n.obs,use=use,std=std,square=square,main=main,plot=plot,show=show)}
"setCor" <-
function(y,x,data,z=NULL,n.obs=NULL,use="pairwise",std=TRUE,square=FALSE,main="Regression Models",plot=TRUE,show=FALSE,zero=TRUE,alpha=.05) {
#a function to extract subsets of variables (a and b) from a correlation matrix m or data set m
#and find the multiple correlation beta weights + R2 of the a set predicting the b set
#seriously rewritten, March 24, 2009 to make much simpler
#minor additions, October 20, 2009 to allow for print and summary functions
#major addition in April, 2011 to allow for set correlation
#added calculation of residual matrix December 30, 2011
#added option to allow square data matrices
#modified December, 2014 to allow for covariances as well as to fix a bug with single x variable
#modified April, 2015 to handle data with non-numeric values in the data, which are not part of the analysis
#Modified November, 2017 to handle "lm" style input using my parse function.
#modified July 4, 2018 to add intercepts and confidence intervals (requested by Franz Strich)
cl <- match.call()
#convert names to locations
prod <- ex <- NULL #in case we do not have formula input
#first, see if they are in formula mode
if(class(y) == "formula") {
ps <- fparse(y)
y <- ps$y
x <- ps$x
med <- ps$m #but, mediation is not done here, so we just add this to x
# if(!is.null(med)) x <- c(x,med) #not necessary, because we automatically put this in
prod <- ps$prod
z <- ps$z #do we have any variable to partial out
ex <- ps$ex
}
# data <- char2numeric(data) #move to later (01/05/19)
if(is.numeric(y )) y <- colnames(data)[y]
if(is.numeric(x )) x <- colnames(data)[x]
if(is.numeric(z )) z <- colnames(data)[z]
#check for bad input
if(any( !(c(y,x,z,ex) %in% colnames(data)) )) {
print(c(y, x, z, ex)[which(!(c(y, x, z, ex) %in% colnames(data)))])
stop("Variable names are incorrect")}
if(!isCorrelation(data)) {
data <- data[,c(y,x,z,ex)]
data <- char2numeric(data)
if(!is.matrix(data)) data <- as.matrix(data)
if(!is.numeric(data)) stop("The data must be numeric to proceed")
if(!is.null(prod) | (!is.null(ex))) {#we want to find a product term
if(zero) data <- scale(data,scale=FALSE)
if(!is.null(prod)) {
prods <- matrix(NA,ncol=length(prod),nrow=nrow(data))
colnames(prods) <- prod
colnames(prods) <- paste0("V",1:length(prod))
for(i in 1:length(prod)) {
prods[,i] <- apply(data[,prod[[i]]],1,prod)
colnames(prods)[i] <- paste0(prod[[i]],collapse="*")
}
data <- cbind(data,prods)
x <- c(x,colnames(prods))
}
if(!is.null(ex)) {
     quads <- matrix(NA,ncol=length(ex),nrow=nrow(data)) #find the quadratic terms
colnames(quads) <- paste0(ex)
for(i in 1:length(ex)) {
quads[,i] <- data[,ex[i]] * data[,ex[i]]
colnames(quads)[i] <- paste0(ex[i],"^2")
}
data <- cbind(data,quads)
x <- c(x,colnames(quads))
}
}
means <- colMeans(data,na.rm=TRUE) #use these later to find the intercept
C <- cov(data,use=use)
if(std) {m <- cov2cor(C)
C <- m} else {m <- C}
raw <- TRUE
# n.obs=dim(data)[1] #this does not take into account missing data
n.obs <- max(pairwiseCount(data),na.rm=TRUE ) #this does
} else {
raw <- FALSE
if(!is.matrix(data)) data <- as.matrix(data)
C <- data
if(std) {m <- cov2cor(C)} else {m <- C}}
#We do all the calculations on the Covariance or correlation matrix (m)
#convert names to locations
nm <- dim(data)[1]
xy <- c(x,y)
numx <- length(x)
numy <- length(y)
numz <- 0
nxy <- numx+numy
m.matrix <- m[c(x,y),c(x,y)]
x.matrix <- m[x,x,drop=FALSE]
xc.matrix <- m[x,x,drop=FALSE] #fixed19/03/15
xy.matrix <- m[x,y,drop=FALSE]
xyc.matrix <- m[x,y,drop=FALSE] #fixed 19/03/15
y.matrix <- m[y,y,drop=FALSE]
if(!is.null(z)){numz <- length(z) #partial out the z variables
zm <- m[z,z,drop=FALSE]
za <- m[x,z,drop=FALSE]
zb <- m[y,z,drop=FALSE]
zmi <- solve(zm)
x.matrix <- x.matrix - za %*% zmi %*% t(za)
y.matrix <- y.matrix - zb %*% zmi %*% t(zb)
xy.matrix <- xy.matrix - za %*% zmi %*% t(zb)
m.matrix <- cbind(rbind(y.matrix,xy.matrix),rbind(t(xy.matrix),x.matrix))
#m.matrix is now the matrix of partialled covariances -- make sure we use this one!
}
 if(numx == 1 ) {beta <- matrix(xy.matrix,nrow=1)/x.matrix[1,1]   #this is the case of a single x
      rownames(beta) <- rownames(xy.matrix)
      colnames(beta) <- colnames(xy.matrix)
       } else    #two or more predictors
     { beta <- solve(x.matrix,xy.matrix)    #solve the equation bY~aX
beta <- as.matrix(beta)
}
if(raw) {if(numx ==1) {intercept <- means[y] - sum(means[x] * beta[x,y ])} else {if(numy > 1) { intercept <- means[y] - colSums(means[x] * beta[x,y ])} else {intercept <- means[y] - sum(means[x] * beta[x,y ])}} } else {intercept <- NA}
yhat <- t(xy.matrix) %*% solve(x.matrix) %*% (xy.matrix)
resid <- y.matrix - yhat
if (numy > 1 ) {
if(is.null(rownames(beta))) {rownames(beta) <- x}
if(is.null(colnames(beta))) {colnames(beta) <- y}
R2 <- colSums(beta * xy.matrix)/diag(y.matrix) } else {
colnames(beta) <- y
R2 <- sum(beta * xy.matrix)/y.matrix
R2 <- matrix(R2)
rownames(beta) <- x
rownames(R2) <- colnames(R2) <- y
}
VIF <- 1/(1-smc(x.matrix))
#now find the unit weighted correlations
#reverse items in X and Y so that they are all positive signed
#But this doesn't help in predicting y
#we need to weight by the sign of the xy,matrix
#this gives a different key for each y
#need to adjust this for y
# px <- principal(x.matrix)
# keys.x <- diag(as.vector(1- 2* (px$loadings < 0 )) )
# py <- principal(y.matrix)
# keys.y <- diag(as.vector(1- 2* (py$loadings < 0 ) ))
#
# Vx <- sum( t(keys.x) %*% x.matrix %*% t(keys.x))
# Vy <- sum( keys.y %*% y.matrix %*% t(keys.y))
#
# ruw <- colSums(abs(xy.matrix))/sqrt(Vx)
# Ruw <- sum(diag(keys.x) %*% xy.matrix %*% t(keys.y))/sqrt(Vx * Vy)
#end of old way of doing it
#new way (2/17/18)
keys.x <- sign(xy.matrix) #this converts zero order correlations into -1, 0, 1 weights for each y
Vx <- t(keys.x) %*% x.matrix %*% (keys.x) #diag are scale variances
#Vy <- t(keys.x) %*% y.matrix %*% keys.x #diag are y variances ?
Vy <- (y.matrix)
uCxy <- t(keys.x) %*% xy.matrix
ruw <- diag(uCxy)/sqrt(diag(Vx)) #these are the individual multiple Rs
   Ruw <- sum(uCxy)/sqrt(sum(Vx)*sum(Vy)) #the overall unit-weighted (matrix) correlation between the x and y sets
if(numy < 2) {Rset <- 1 - det(m.matrix)/(det(x.matrix) )
Myx <- solve(x.matrix) %*% xy.matrix %*% t(xy.matrix)
cc2 <- cc <- T <- NULL} else {if (numx < 2) {Rset <- 1 - det(m.matrix)/(det(y.matrix) )
Myx <- xy.matrix %*% solve(y.matrix) %*% t(xy.matrix)
cc2 <- cc <- T <- NULL} else {Rset <- 1 - det(m.matrix)/(det(x.matrix) * det(y.matrix))
if(numy > numx) {
Myx <- solve(x.matrix) %*% xy.matrix %*% solve(y.matrix) %*% t(xy.matrix)} else { Myx <- solve(y.matrix) %*% t(xy.matrix )%*% solve(x.matrix) %*% (xy.matrix)}
}
cc2 <- eigen(Myx)$values
cc <- sqrt(cc2)
T <- sum(cc2)/length(cc2)
}
if(!is.null(n.obs)) {k<- length(x)
# uniq <- (1-smc(x.matrix,covar=!std))
uniq <- (1-smc(x.matrix))
se.beta <- list()
ci.lower <- list()
ci.upper <- list()
for (i in 1:length(y)) {
df <- n.obs-k-1 #this is the n.obs - length(x)
se.beta[[i]] <- (sqrt((1-R2[i])/(df))*sqrt(1/uniq))}
se <- matrix(unlist(se.beta),ncol=length(y))
if(!is.null(z)) {colnames(beta) <- paste0(colnames(beta),"*") }
colnames(se) <- colnames(beta)
if(!is.null(z)) {rownames(beta) <- paste0(rownames(beta),"*")}
rownames(se) <- rownames(beta)
# se <- t(t(se) * sqrt(diag(C)[y]))/sqrt(diag(xc.matrix)) #need to use m.matrix
se <- t(t(se) * sqrt(diag(m.matrix)[y]))/sqrt(diag(x.matrix)) #corrected 11/29/18
for(i in 1:length(y)) {ci.lower[[i]] <- beta[,i] - qt(1-alpha/2,df)*se[,i]
ci.upper[[i]] <- beta[,i] + qt(1-alpha/2,df)*se[,i]}
ci.lower <- matrix(unlist(ci.lower),ncol=length(y))
ci.upper <- matrix(unlist(ci.upper),ncol=length(y))
colnames( ci.lower) <- colnames( ci.upper) <- colnames( beta)
rownames( ci.lower)<- rownames( ci.upper) <- rownames(beta)
confid.beta <- cbind(ci.lower,ci.upper)
tvalue <- beta/se
# prob <- 2*(1- pt(abs(tvalue),df))
prob <- -2 * expm1(pt(abs(tvalue),df,log.p=TRUE))
SE2 <- 4*R2*(1-R2)^2*(df^2)/((n.obs^2-1)*(n.obs+3))
SE =sqrt(SE2)
F <- R2*df/(k*(1-R2))
# pF <- 1 - pf(F,k,df)
pF <- -expm1(pf(F,k,df,log.p=TRUE))
shrunkenR2 <- 1-(1-R2)*(n.obs-1)/df
#find the shrunken R2 for set cor (taken from CCAW p 615)
u <- numx * numy
m1 <- n.obs - max(numy ,(numx+numz)) - (numx + numy +3)/2
s <- sqrt((numx ^2 * numy^2 -4)/(numx^2 + numy^2-5))
if(numx*numy ==4) s <- 1
v <- m1 * s + 1 - u/2
R2set.shrunk <- 1 - (1-Rset) * ((v+u)/v)^s
L <- 1-Rset
L1s <- L^(-1/s)
Rset.F <- (L1s-1)*(v/u)
df.m <- n.obs - max(numy ,(numx+numz)) -(numx + numy +3)/2
s1 <- sqrt((numx ^2 * numy^2 -4)/(numx^2 + numy^2-5)) #see cohen p 321
if(numx^2*numy^2 < 5) s1 <- 1
df.v <- df.m * s1 + 1 - numx * numy/2 #which is just v
# df.v <- (u+v) #adjusted for bias to match the CCAW results
#Rset.F <- Rset.F * (u+v)/v
Chisq <- -(n.obs - 1 -(numx + numy +1)/2)*log((1-cc2))
}
# if(numx == 1) {beta <- beta * sqrt(diag(C)[y])
# } else {beta <- t(t(beta) * sqrt(diag(C)[y]))/sqrt(diag(xc.matrix))} #this puts the betas into the raw units
# coeff <- data.frame(beta=beta,se = se,t=tvalue, Probabilty=prob)
# colnames(coeff) <- c("Estimate", "Std. Error" ,"t value", "Pr(>|t|)")
if(is.null(n.obs)) {set.cor <- list(beta=beta,R=sqrt(R2),R2=R2,Rset=Rset,T=T,intercept=intercept,cancor = cc, cancor2=cc2,raw=raw,residual=resid,ruw=ruw,Ruw=Ruw,x.matrix=x.matrix,y.matrix=y.matrix,VIF=VIF,Call = cl)} else {
set.cor <- list(beta=beta,se=se,t=tvalue,Probability = prob,intercept=intercept,ci=confid.beta,R=sqrt(R2),R2=R2,shrunkenR2 = shrunkenR2,seR2 = SE,F=F,probF=pF,df=c(k,df),Rset=Rset,Rset.shrunk=R2set.shrunk,Rset.F=Rset.F,Rsetu=u,Rsetv=df.v,T=T,cancor=cc,cancor2 = cc2,Chisq = Chisq,raw=raw,residual=resid,ruw=ruw,Ruw=Ruw,x.matrix=x.matrix,y.matrix=y.matrix,VIF=VIF,data=data,Call = cl)}
class(set.cor) <- c("psych","setCor")
if(plot) setCor.diagram(set.cor,main=main,show=show)
return(set.cor)
}
#modified July 12,2007 to allow for NA in the overall matrix
#modified July 9, 2008 to give statistical tests
#modified yet again August 15 , 2008 to convert covariances to correlations
#modified January 3, 2011 to work in the case of a single predictor
#modified April 25, 2011 to add the set correlation (from Cohen)
#modified April 21, 2014 to allow for mixed names and locations in call
#modified February 19, 2015 to just find the covariances of the data that are used in the regression
#this gets around the problem that some users have large data sets, but only want a few variables in the regression
#corrected February 17, 2018 to correctly find the unweighted correlations
#modified Sept 22, 2018 to allow cex and l.cex to be set
#modified November, 2017 to allow an override of which way to draw the arrows
setCor.diagram <- function(sc,main="Regression model",digits=2,show=FALSE,cex=1,l.cex=1,...) {
if(missing(l.cex)) l.cex <- cex
beta <- round(sc$beta,digits)
x.matrix <- round(sc$x.matrix,digits)
y.matrix <- round(sc$y.matrix,digits)
y.resid <- round(sc$resid,digits)
x.names <- rownames(sc$beta)
y.names <- colnames(sc$beta)
nx <- length(x.names)
ny <- length(y.names)
top <- max(nx,ny)
xlim=c(-nx/3,10)
ylim=c(0,top)
top <- max(nx,ny)
x <- list()
y <- list()
x.scale <- top/(nx+1)
y.scale <- top/(ny+1)
plot(NA,xlim=xlim,ylim=ylim,main=main,axes=FALSE,xlab="",ylab="")
for(i in 1:nx) {x[[i]] <- dia.rect(3,top-i*x.scale,x.names[i],cex=cex,...) }
for (j in 1:ny) {y[[j]] <- dia.rect(7,top-j*y.scale,y.names[j],cex=cex,...) }
for(i in 1:nx) {
for (j in 1:ny) {
dia.arrow(x[[i]]$right,y[[j]]$left,labels = beta[i,j],adj=4-j,cex=l.cex,...)
}
}
if(nx >1) {
for (i in 2:nx) {
for (k in 1:(i-1)) {dia.curved.arrow(x[[i]]$left,x[[k]]$left,x.matrix[i,k],scale=-(abs(i-k)),both=TRUE,dir="u",cex = l.cex,...)}
#dia.curve(x[[i]]$left,x[[k]]$left,x.matrix[i,k],scale=-(abs(i-k))) }
} }
if(ny>1) {for (i in 2:ny) {
for (k in 1:(i-1)) {dia.curved.arrow(y[[i]]$right,y[[k]]$right,y.resid[i,k],scale=(abs(i-k)),dir="u",cex=l.cex, ...)}
}}
for(i in 1:ny) {dia.self(y[[i]],side=3,scale=.2,... )}
if(show) {text((10-nx/3)/2,0,paste("Unweighted matrix correlation = ",round(sc$Ruw,digits)))}
}
print.psych.setCor <- function(x,digits=2) {
cat("Call: ")
print(x$Call)
if(x$raw) {cat("\nMultiple Regression from raw data \n")} else {
cat("\nMultiple Regression from matrix input \n")}
ny <- NCOL(x$beta)
for(i in 1:ny) {cat("\n DV = ",colnames(x$beta)[i],"\n")
if(!is.na(x$intercept[i])) {cat(' intercept = ',round(x$intercept[i],digits=digits),"\n")}
if(!is.null(x$se)) {result.df <- data.frame( round(x$beta[,i],digits),round(x$se[,i],digits),round(x$t[,i],digits),signif(x$Probability[,i],digits),round(x$ci[,i],digits), round(x$ci[,(i +ny)],digits),round(x$VIF,digits))
colnames(result.df) <- c("slope","se", "t", "p","lower.ci","upper.ci", "VIF")
print(result.df)
result.df <- data.frame(R = round(x$R[i],digits), R2 = round(x$R2[i],digits), Ruw = round(x$ruw[i],digits),R2uw = round( x$ruw[i]^2,digits), round(x$shrunkenR2[i],digits),round(x$seR2[i],digits), round(x$F[i],digits),x$df[1],x$df[2], signif(x$probF[i],digits+1))
colnames(result.df) <- c("R","R2", "Ruw", "R2uw","Shrunken R2", "SE of R2", "overall F","df1","df2","p")
cat("\n Multiple Regression\n")
print(result.df) } else {
result.df <- data.frame( round(x$beta[,i],digits),round(x$VIF,digits))
colnames(result.df) <- c("slope", "VIF")
print(result.df)
result.df <- data.frame(R = round(x$R[i],digits), R2 = round(x$R2[i],digits), Ruw = round(x$Ruw[i],digits),R2uw = round( x$Ruw[i]^2,digits))
colnames(result.df) <- c("R","R2", "Ruw", "R2uw")
cat("\n Multiple Regression\n")
print(result.df)
}
}
}
|
\name{getCounts}
\alias{getCounts}
\title{Number of CNVs per sample}
\description{
This function summarizes the number of samples that have an alteration in a given genomic region
}
\usage{
getCounts(x, group, id="sample")
}
\arguments{
\item{x}{an object of class 'GRanges' obtained after applying 'getCNVs' function}
\item{group}{the name of the grouping variable available in the object 'x'}
\item{id}{the name of the variable having sample id. The default is 'sample'}
}
\value{
This function returns a list having two elements. The first element, called 'counts', contains the number of samples of each segment by the variable 'group'. The second element, called 'n', contains the total number of CNVs by 'group' variable.
}
\references{
Pique-Regi R, Caceres A, Gonzalez JR. "R-Gada: a package for fast detection and visualization of copy number alterations on multiple samples". BMC Bioinformatics, 2010;11:380.
}
\seealso{\code{\link{parSBL}}, \code{\link{setupParGADA}}, \code{\link{parBE}}}
\examples{
\dontrun{
See the vignette
}
}
\keyword{utilities}
|
/man/getCounts.Rd
|
permissive
|
isglobal-brge/R-GADA
|
R
| false | false | 1,093 |
rd
|
\name{getCounts}
\alias{getCounts}
\title{Number of CNVs per sample}
\description{
This function summarizes the number of samples that have an alteration in a given genomic region
}
\usage{
getCounts(x, group, id="sample")
}
\arguments{
\item{x}{an object of class 'GRanges' obtained after applying 'getCNVs' function}
\item{group}{the name of the grouping variable available in the object 'x'}
\item{id}{the name of the variable having sample id. The default is 'sample'}
}
\value{
This function returns a list having two elements. The first element, called 'counts', contains the number of samples of each segment by the variable 'group'. The second element, called 'n', contains the total number of CNVs by 'group' variable.
}
\references{
Pique-Regi R, Caceres A, Gonzalez JR. "R-Gada: a package for fast detection and visualization of copy number alterations on multiple samples". BMC Bioinformatics, 2010;11:380.
}
\seealso{\code{\link{parSBL}}, \code{\link{setupParGADA}}, \code{\link{parBE}}}
\examples{
\dontrun{
See the vignette
}
}
\keyword{utilities}
|
# Jake Yeung
# Date of Creation: 2019-04-19
# File: ~/projects/scchic/scripts/scripts_analysis/tf_activity_debugging/explore_LDA_before_running_model.R
# Explore LDA
rm(list=ls())
library(topicmodels)
library(dplyr)
library(tidyr)       # gather(), replace_na()
library(ggplot2)
library(tidytext)
library(umap)        # umap()
library(data.table)  # fread()
jscale <- 10^7
jpseudo <- 0
source("scripts/Rfunctions/MaraDownstream.R")
source("scripts/Rfunctions/PlotFunctions.R")
# Load UMAPs and LDA ------------------------------------------------------
# jsuffix <- "_GeneTSS.Dedup"
jsuffix <- ""
inf.spring <- "/Users/yeung/data/scchic/robjs/trajectory_from_spring_2019-04-11.RData"
# inf <- "/Users/yeung/data/scchic/from_cluster/lda_outputs_peaks_exprs_merge_clusters_explore/lda_out_meanfilt.PZ-BM-H3K4me1.CountThres0.K-25_50.Robj"
inf <- paste0("/Users/yeung/data/scchic/from_cluster/lda_outputs_peaks_exprs_merge_clusters_explore/lda_out_meanfilt.PZ-BM-H3K4me1.CountThres0.K-25_50", jsuffix, ".Robj")
assertthat::assert_that(file.exists(inf))
assertthat::assert_that(file.exists(inf.spring))
load(inf.spring, v=T)
load(inf, v=T)
out.lda <- out.lda[[length(out.lda)]]
tm.result <- posterior(out.lda)
top.cells <- tidy(out.lda, matrix = "gamma") %>%
group_by(topic) %>%
arrange(desc(gamma)) %>%
mutate(rnk = seq(length(gamma))) %>%
mutate(gamma.zscore = scale(gamma, center = TRUE, scale = TRUE)) %>%
dplyr::rename(cell = document)
top.cells.sum <- top.cells %>%
group_by(topic) %>% # do entropy on 1 to 99% of cells
filter(gamma.zscore < quantile(gamma.zscore, 0.98)) %>%
mutate(zscore.prob = exp(gamma.zscore) / sum(exp(gamma.zscore))) %>%
summarise(entropy = -sum(zscore.prob * log(zscore.prob))) %>%
arrange(entropy)
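# The entropy above is computed on a softmax of the per-topic z-scores; a tiny helper
# (illustrative only, not used elsewhere in this script) makes the intent explicit:
softmax_entropy <- function(z) {
  p <- exp(z) / sum(exp(z))   # softmax turns z-scores into pseudo-probabilities
  -sum(p * log(p))            # Shannon entropy: low values = loading concentrated on few cells
}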
top.peaks <- tidytext::tidy(out.lda, matrix = "beta", log = FALSE) %>%
group_by(topic) %>%
arrange(desc(beta)) %>%
mutate(rnk = seq(length(beta))) %>%
mutate(beta.zscore = scale(beta, center = TRUE, scale = TRUE))
top.peaks.sum <- top.peaks %>%
group_by(term) %>%
mutate(topics.zscore = scale(beta, center = TRUE, scale = TRUE)) %>%
mutate(topics.zscore.prob = exp(topics.zscore) / sum(exp(topics.zscore))) %>%
  summarise(entropy = -sum(topics.zscore.prob * log(topics.zscore.prob))) %>%
arrange(entropy)
mat.impute <- t(tm.result$topics %*% tm.result$terms)
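# Quick sanity check (illustrative): rows of mat.impute are terms/peaks, columns are cells,
# and each column should sum to ~1, since topics %*% terms mixes the per-topic term
# distributions by each cell's topic weights.
stopifnot(nrow(mat.impute) == ncol(tm.result$terms),
          ncol(mat.impute) == nrow(tm.result$topics))
# summary(colSums(mat.impute))  # all approximately 1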
# filter top terms
ggplot(top.cells, aes(x = gamma.zscore)) + geom_density() + facet_wrap(~topic) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# get gene list
rnames <- rownames(mat.impute)
rnames.keep <- grepl(";", rnames)
# find tal1 peak
# rnames.tal1 <- grepl("chr4:1150[5-6][0-9]{4}", rnames)
# rnames.keep <- rnames.keep | rnames.tal1
cnames.old <- unname(colnames(mat.impute))
cnames.new <- SwitchColnames(cnames.old, jsplit = "-")
colnames(mat.impute) <- cnames.new
mat.impute.sub <- mat.impute[rnames.keep, ]
genes <- sapply(rownames(mat.impute.sub), function(x){
g <- tryCatch({
return(strsplit(x, ";")[[1]][[2]])
}, error = function(e){
return("Peak")
})
}, USE.NAMES = FALSE)
exprs.long <- data.frame(peak = rownames(mat.impute.sub), gene = genes, as.data.frame(mat.impute.sub)) %>%
tidyr::gather(key = "cell", value = "exprs", c(-peak, -gene))
# Plot UMAP and overlay gene expression -----------------------------------
exprs.long <- left_join(exprs.long, dat.trajs.long)
# # jgene <- "Ebf1"
# jgene <- "T"
# jsub <- subset(exprs.long, peak == "chr4:115065000-115066000") %>%
# mutate(exprs = log10(exprs * jscale + jpseudo))
# jpeak <- jsub$peak[[1]]
# m1 <- PlotXYWithColor(jsub, xvar = "X1", yvar = "X2", cname = "exprs", jtitle = paste(jgene, jpeak))
# print(m1)
#
# jgenes <- c("Hbb-bs", "Gata1", "Foxo1", "Inpp4b", "S100a8", "Hs3st5", "Il2ra", "Prf1", "Klf", "Sox6", "Gata2", "Pax5", "Ly6c2", "Gypa")
# jgenes <- c(jgenes, "Tal1", "Mbd2", "Bcl3", "Foxc1", "Nrf1", "Hmbox1", "Spi1", "Gfi1", "Ebf3", "Cebpd", "Cebpb", "Pax6", "Pou2f2", "Ebf1")
#
#
#
# pdf(paste0("/tmp/lda_check.", jsuffix, ".pdf"), useDingbats = FALSE)
# for (jgene in jgenes){
# jsub <- subset(exprs.long, gene == jgene) %>%
# mutate(exprs = log2(exprs * jscale + jpseudo))
# if (nrow(jsub) == 0){
# print(paste("Skipping", jgene))
# next
# }
# jpeak <- jsub$peak[[1]]
# m1 <- PlotXYWithColor(jsub, xvar = "X1", yvar = "X2", cname = "exprs", jtitle = paste(jgene, jpeak))
# print(m1)
# }
# dev.off()
#
# What are the top hits? --------------------------------------------------
# which peaks are most interesting?
# get UMAP coords
umap.out <- umap(tm.result$topics)
umap.long <- data.frame(cell = unname(rownames(umap.out$layout)), umap1 = umap.out$layout[, 1], umap2 = umap.out$layout[, 2], stringsAsFactors = FALSE)
ggplot(umap.long, aes(x = umap1, y = umap2)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
top.peaks$gene <- sapply(top.peaks$term, function(x) strsplit(x, ";")[[1]][[2]])
# plot interesting topics
top.cells <- left_join(top.cells, umap.long)
jtop <- 1 # bcells
jtop <- 4 # granulocytes
jtop <- 21 # eryths
m1 <- PlotXYWithColor(top.cells %>% filter(topic == jtop), xvar = "umap1", yvar = "umap2", cname = "gamma") + ggtitle(jtop)
print(m1)
# Do top genes look OK compared with bulk? ------------------------------
dat <- fread("/Users/yeung/data/scchic/public_data/E-MTAB-3079-query-results.fpkms.tsv", sep = "\t")
colnames(dat) <- gsub(" ", "_", colnames(dat))
dat.long <- gather(dat, key = "CellType", value = "FPKM", -c("Gene_ID", "Gene_Name")) %>%
group_by(Gene_ID) %>%
mutate(FPKM = replace_na(FPKM, 0)) %>%
mutate(logFPKM = log2(FPKM + 1),
zscore = scale(logFPKM, center = TRUE, scale = TRUE))
jtopic <- 21
topn <- 500
top.genes <- subset(top.peaks, topic == jtopic)$gene[1:topn]
jsub <- subset(dat.long, Gene_Name %in% top.genes)
jsub.sorted.summarised <- jsub %>% group_by(CellType) %>% summarise(zscore = median(zscore)) %>% arrange(desc(zscore)) %>% dplyr::select(CellType)
jlevels <- as.character(jsub.sorted.summarised$CellType)
jsub$CellType <- factor(jsub$CellType, levels = jlevels)
ggplot(jsub,
aes(x = CellType , y = zscore)) +
geom_boxplot() +
# geom_violin() +
geom_jitter(width = 0.1, size = 0.5) +
# geom_line() +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
ggtitle(jtopic)
top.genes[which(top.genes %in% jsub$Gene_Name)]
jgene <- "Gm21738"
jgene <- "Lalba"
jgene <- "Gm10801"
jgene <- "Gm10800"
jgene <- "Gm27940"
jgene <- "Gm44385"
jgene <- "Igkv1-110"
jgene <- "Kat2b"
jgene <- "Dennd2c"
jgene <- "Tnpo1"
jgene <- "Pcx"
ggplot(jsub %>% filter(Gene_Name == jgene), aes(x = CellType, y = zscore)) + geom_point() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1)) + ggtitle(jgene)
|
/scripts/scripts_analysis/tf_activity_debugging/explore_LDA_before_running_model.R
|
no_license
|
jakeyeung/scChIC-analysis
|
R
| false | false | 6,839 |
r
|
# Jake Yeung
# Date of Creation: 2019-04-19
# File: ~/projects/scchic/scripts/scripts_analysis/tf_activity_debugging/explore_LDA_before_running_model.R
# Explore LDA
rm(list=ls())
library(topicmodels)
library(dplyr)
library(ggplot2)
library(tidytext)
jscale <- 10^7
jpseudo <- 0
source("scripts/Rfunctions/MaraDownstream.R")
source("scripts/Rfunctions/PlotFunctions.R")
# Load UMAPs and LDA ------------------------------------------------------
# jsuffix <- "_GeneTSS.Dedup"
jsuffix <- ""
inf.spring <- "/Users/yeung/data/scchic/robjs/trajectory_from_spring_2019-04-11.RData"
# inf <- "/Users/yeung/data/scchic/from_cluster/lda_outputs_peaks_exprs_merge_clusters_explore/lda_out_meanfilt.PZ-BM-H3K4me1.CountThres0.K-25_50.Robj"
inf <- paste0("/Users/yeung/data/scchic/from_cluster/lda_outputs_peaks_exprs_merge_clusters_explore/lda_out_meanfilt.PZ-BM-H3K4me1.CountThres0.K-25_50", jsuffix, ".Robj")
assertthat::assert_that(file.exists(inf))
assertthat::assert_that(file.exists(inf.spring))
load(inf.spring, v=T)
load(inf, v=T)
out.lda <- out.lda[[length(out.lda)]]
tm.result <- posterior(out.lda)
top.cells <- tidy(out.lda, matrix = "gamma") %>%
group_by(topic) %>%
arrange(desc(gamma)) %>%
mutate(rnk = seq(length(gamma))) %>%
mutate(gamma.zscore = scale(gamma, center = TRUE, scale = TRUE)) %>%
dplyr::rename(cell = document)
top.cells.sum <- top.cells %>%
group_by(topic) %>% # do entropy on 1 to 99% of cells
filter(gamma.zscore < quantile(gamma.zscore, 0.98)) %>%
mutate(zscore.prob = exp(gamma.zscore) / sum(exp(gamma.zscore))) %>%
summarise(entropy = -sum(zscore.prob * log(zscore.prob))) %>%
arrange(entropy)
top.peaks <- tidytext::tidy(out.lda, matrix = "beta", log = FALSE) %>%
group_by(topic) %>%
arrange(desc(beta)) %>%
mutate(rnk = seq(length(beta))) %>%
mutate(beta.zscore = scale(beta, center = TRUE, scale = TRUE))
top.peaks.sum <- top.peaks %>%
group_by(term) %>%
mutate(topics.zscore = scale(beta, center = TRUE, scale = TRUE)) %>%
mutate(topics.zscore.prob = exp(topics.zscore) / sum(exp(topics.zscore))) %>%
  summarise(entropy = -sum(topics.zscore.prob * log(topics.zscore.prob))) %>%
arrange(entropy)
mat.impute <- t(tm.result$topics %*% tm.result$terms)
# filter top terms
ggplot(top.cells, aes(x = gamma.zscore)) + geom_density() + facet_wrap(~topic) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# get gene list
rnames <- rownames(mat.impute)
rnames.keep <- grepl(";", rnames)
# find tal1 peak
# rnames.tal1 <- grepl("chr4:1150[5-6][0-9]{4}", rnames)
# rnames.keep <- rnames.keep | rnames.tal1
cnames.old <- unname(colnames(mat.impute))
cnames.new <- SwitchColnames(cnames.old, jsplit = "-")
colnames(mat.impute) <- cnames.new
mat.impute.sub <- mat.impute[rnames.keep, ]
genes <- sapply(rownames(mat.impute.sub), function(x){
g <- tryCatch({
return(strsplit(x, ";")[[1]][[2]])
}, error = function(e){
return("Peak")
})
}, USE.NAMES = FALSE)
exprs.long <- data.frame(peak = rownames(mat.impute.sub), gene = genes, as.data.frame(mat.impute.sub)) %>%
tidyr::gather(key = "cell", value = "exprs", c(-peak, -gene))
# Plot UMAP and overlay gene expression -----------------------------------
exprs.long <- left_join(exprs.long, dat.trajs.long)
# # jgene <- "Ebf1"
# jgene <- "T"
# jsub <- subset(exprs.long, peak == "chr4:115065000-115066000") %>%
# mutate(exprs = log10(exprs * jscale + jpseudo))
# jpeak <- jsub$peak[[1]]
# m1 <- PlotXYWithColor(jsub, xvar = "X1", yvar = "X2", cname = "exprs", jtitle = paste(jgene, jpeak))
# print(m1)
#
# jgenes <- c("Hbb-bs", "Gata1", "Foxo1", "Inpp4b", "S100a8", "Hs3st5", "Il2ra", "Prf1", "Klf", "Sox6", "Gata2", "Pax5", "Ly6c2", "Gypa")
# jgenes <- c(jgenes, "Tal1", "Mbd2", "Bcl3", "Foxc1", "Nrf1", "Hmbox1", "Spi1", "Gfi1", "Ebf3", "Cebpd", "Cebpb", "Pax6", "Pou2f2", "Ebf1")
#
#
#
# pdf(paste0("/tmp/lda_check.", jsuffix, ".pdf"), useDingbats = FALSE)
# for (jgene in jgenes){
# jsub <- subset(exprs.long, gene == jgene) %>%
# mutate(exprs = log2(exprs * jscale + jpseudo))
# if (nrow(jsub) == 0){
# print(paste("Skipping", jgene))
# next
# }
# jpeak <- jsub$peak[[1]]
# m1 <- PlotXYWithColor(jsub, xvar = "X1", yvar = "X2", cname = "exprs", jtitle = paste(jgene, jpeak))
# print(m1)
# }
# dev.off()
#
# What are the top hits? --------------------------------------------------
# which peaks are most interesting?
# get UMAP coords
umap.out <- umap(tm.result$topics)
umap.long <- data.frame(cell = unname(rownames(umap.out$layout)), umap1 = umap.out$layout[, 1], umap2 = umap.out$layout[, 2], stringsAsFactors = FALSE)
ggplot(umap.long, aes(x = umap1, y = umap2)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
top.peaks$gene <- sapply(top.peaks$term, function(x) strsplit(x, ";")[[1]][[2]])
# plot interesting topics
top.cells <- left_join(top.cells, umap.long)
jtop <- 1 # bcells
jtop <- 4 # granulocytes
jtop <- 21 # eryths
m1 <- PlotXYWithColor(top.cells %>% filter(topic == jtop), xvar = "umap1", yvar = "umap2", cname = "gamma") + ggtitle(jtop)
print(m1)
# Do top genes look OK comparing with bulk? ------------------------------
dat <- fread("/Users/yeung/data/scchic/public_data/E-MTAB-3079-query-results.fpkms.tsv", sep = "\t")
colnames(dat) <- gsub(" ", "_", colnames(dat))
dat.long <- gather(dat, key = "CellType", value = "FPKM", -c("Gene_ID", "Gene_Name")) %>%
group_by(Gene_ID) %>%
mutate(FPKM = replace_na(FPKM, 0)) %>%
mutate(logFPKM = log2(FPKM + 1),
zscore = scale(logFPKM, center = TRUE, scale = TRUE))
jtopic <- 21
topn <- 500
top.genes <- subset(top.peaks, topic == jtopic)$gene[1:topn]
jsub <- subset(dat.long, Gene_Name %in% top.genes)
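# rank cell types by the median z-score of the topic's top genes; this ordering is used for the boxplot below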
jsub.sorted.summarised <- jsub %>% group_by(CellType) %>% summarise(zscore = median(zscore)) %>% arrange(desc(zscore)) %>% dplyr::select(CellType)
jlevels <- as.character(jsub.sorted.summarised$CellType)
jsub$CellType <- factor(jsub$CellType, levels = jlevels)
ggplot(jsub,
aes(x = CellType , y = zscore)) +
geom_boxplot() +
# geom_violin() +
geom_jitter(width = 0.1, size = 0.5) +
# geom_line() +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
ggtitle(jtopic)
top.genes[which(top.genes %in% jsub$Gene_Name)]
jgene <- "Gm21738"
jgene <- "Lalba"
jgene <- "Gm10801"
jgene <- "Gm10800"
jgene <- "Gm27940"
jgene <- "Gm44385"
jgene <- "Igkv1-110"
jgene <- "Kat2b"
jgene <- "Dennd2c"
jgene <- "Tnpo1"
jgene <- "Pcx"
ggplot(jsub %>% filter(Gene_Name == jgene), aes(x = CellType, y = zscore)) + geom_point() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1)) + ggtitle(jgene)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_detach_principal_policy}
\alias{iot_detach_principal_policy}
\title{Removes the specified policy from the specified certificate}
\usage{
iot_detach_principal_policy(policyName, principal)
}
\arguments{
\item{policyName}{[required] The name of the policy to detach.}
\item{principal}{[required] The principal.
If the principal is a certificate, specify the certificate ARN. If the
principal is an Amazon Cognito identity, specify the identity ID.}
}
\description{
Removes the specified policy from the specified certificate.
}
\details{
\strong{Note:} This API is deprecated. Please use DetachPolicy instead.
}
\section{Request syntax}{
\preformatted{svc$detach_principal_policy(
policyName = "string",
principal = "string"
)
}
}
\keyword{internal}
|
/cran/paws.internet.of.things/man/iot_detach_principal_policy.Rd
|
permissive
|
johnnytommy/paws
|
R
| false | true | 860 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_detach_principal_policy}
\alias{iot_detach_principal_policy}
\title{Removes the specified policy from the specified certificate}
\usage{
iot_detach_principal_policy(policyName, principal)
}
\arguments{
\item{policyName}{[required] The name of the policy to detach.}
\item{principal}{[required] The principal.
If the principal is a certificate, specify the certificate ARN. If the
principal is an Amazon Cognito identity, specify the identity ID.}
}
\description{
Removes the specified policy from the specified certificate.
}
\details{
\strong{Note:} This API is deprecated. Please use DetachPolicy instead.
}
\section{Request syntax}{
\preformatted{svc$detach_principal_policy(
policyName = "string",
principal = "string"
)
}
}
\keyword{internal}
|
downloadData <- function(dataDirectory = "./data") {
    sourceDataURL = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
sourceDataZipFilePath = file.path(dataDirectory, "exdata_data_NEI_data.zip")
if (!file.exists(sourceDataZipFilePath)) {
if (!file.exists(dataDirectory)) {
dir.create(dataDirectory)
}
download.file(sourceDataURL, sourceDataZipFilePath, method = "auto")
}
unzip(sourceDataZipFilePath, exdir = dataDirectory)
}
|
/downloadData.R
|
no_license
|
dominiklanger/ExData_Plotting2
|
R
| false | false | 572 |
r
|
downloadData <- function(dataDirectory = "./data") {
    sourceDataURL = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
sourceDataZipFilePath = file.path(dataDirectory, "exdata_data_NEI_data.zip")
if (!file.exists(sourceDataZipFilePath)) {
if (!file.exists(dataDirectory)) {
dir.create(dataDirectory)
}
download.file(sourceDataURL, sourceDataZipFilePath, method = "auto")
}
unzip(sourceDataZipFilePath, exdir = dataDirectory)
}
|
## load data and packages
library(dplyr)
library(tm)
library(wordcloud)
library(data.table)
library(ggplot2)
## theme function
theme_new <- function(base_size = 24, base_family = "Helvetica"){
theme_bw(base_size = base_size, base_family = base_family) %+replace%
theme(
panel.grid = element_blank(),
panel.border = element_rect(fill = NA, colour = "white", size=1),
panel.background = element_rect(fill = "white", colour = "black"),
strip.background = element_rect(fill = NA),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black")
)
}
## read the data
dtm_briefs = fread('/Users/raj2/Dropbox/endrew_amicusbriefs/output/endrew_amicus_dtm.csv')
## separate
new_colnames = make.unique(colnames(dtm_briefs))
colnames(dtm_briefs) = new_colnames
dtm_briefs_parents = dtm_briefs %>%
filter(who_filed_for == 'parent')
dtm_briefs_district = dtm_briefs %>%
filter(who_filed_for == 'district')
## find colsums and sort by frequency
dtm_briefs_colsums_parents = colSums(dtm_briefs_parents[,setdiff(colnames(dtm_briefs_parents),
c('org',
'who_filed_for'))])
dtm_briefs_sorted_p = data.frame(terms = names(sort(dtm_briefs_colsums_parents, decreasing = TRUE)),
freq = sort(dtm_briefs_colsums_parents, decreasing = TRUE)) %>%
mutate(source = 'parents',
prop = freq/sum((dtm_briefs_colsums_parents)))
head(dtm_briefs_sorted_p)
## find colsums and sort by frequency
dtm_briefs_colsums_district = colSums(dtm_briefs_district[,setdiff(colnames(dtm_briefs_district),
c('org',
'who_filed_for'))])
dtm_briefs_sorted_d = data.frame(terms = names(sort(dtm_briefs_colsums_district, decreasing = TRUE)),
freq = sort(dtm_briefs_colsums_district, decreasing = TRUE)) %>%
mutate(source = 'district',
prop = freq/sum((dtm_briefs_colsums_district)))
## add and then do ranks/scatterplot
dtm_briefs_both = rbind.data.frame(dtm_briefs_sorted_p, dtm_briefs_sorted_d) %>%
arrange(desc(prop)) %>%
dplyr::select(-freq) %>%
dcast(terms ~ source, value.var = 'prop') %>%
mutate(average_both = (parents + district)/2) %>%
arrange(desc(average_both))
dtm_briefs_top100 = dtm_briefs_both %>%
slice(2:100) %>%
mutate(text_size = average_both/2)
head(dtm_briefs_both)
## Three plots
### plot one: blank
ggplot(dtm_briefs_top100, aes(x = district, y = parents)) +
#geom_point(color = 'white') +
#geom_text(aes(label=terms,
# size = text_size,
# color = 'white'),
# alpha = 0.8,
# hjust=1, vjust=1) +
theme_new() +
guides(size = FALSE,
color = FALSE) +
geom_abline(intercept = 0, slope = 1,
linetype = 'dashed',
color = 'red') +
xlab('Briefs in favor of the district') +
ylab('Briefs in favor of the parents') +
theme(axis.text = element_blank(),
axis.ticks = element_blank(),
axis.line = element_line(color = 'black'))
ggsave('/Users/raj2/Dropbox/endrew_amicusbriefs/output/blank_amicus.pdf',
plot = last_plot(),
device = 'pdf',
width = 12,
height = 8)
### plot two: all words
ggplot(dtm_briefs_top100, aes(x = district, y = parents)) +
geom_point(color = 'white') +
geom_text(aes(label=terms),
alpha = 0.8,
hjust=1, vjust=1,
size = 6) +
theme_new() +
guides(size = FALSE,
color = FALSE) +
geom_abline(intercept = 0, slope = 1,
linetype = 'dashed',
color = 'red') +
xlab('Briefs in favor of the district') +
ylab('Briefs in favor of the parents') +
theme(axis.text = element_blank(),
axis.ticks = element_blank(),
axis.line = element_line(color = 'black'))
ggsave('/Users/raj2/Dropbox/endrew_amicusbriefs/output/amicus_allwords.pdf',
plot = last_plot(),
device = 'pdf',
width = 12,
height = 8)
## focus on lower ranked words
dtm_briefs_ratio = dtm_briefs_both %>%
mutate(text_size = average_both/2,
ratio_district = district/parents) %>%
filter(!terms %in% c('http', 'whether',
'would',
'must',
'also',
'use', 'et',
'html', 'al', 'cation', 'tri', 'pet', 'rep',
'br')) %>%
filter(district !=0 & parents !=0) %>%
arrange(desc(ratio_district)) %>%
mutate(ratio_district_rank = dense_rank(desc(ratio_district)),
ratio_parent_rank = dense_rank(ratio_district),
topword_parent = ifelse(ratio_parent_rank < 20, 1, 0),
topword_district = ifelse(ratio_district_rank < 15, 1, 0),
similar_both = ifelse(abs(ratio_district-1) < 0.03, 1, 0),
category_ratio = ifelse(topword_district == 1, 'Over-represented in briefs favoring district',
ifelse(topword_parent == 1,
'Over-represented in briefs favoring parents',
ifelse(similar_both == 1,
'Similar representation', 'Other')))) %>%
mutate(term_ordered = factor(terms, unique(terms))) %>%
dplyr::select(term_ordered, ratio_district,
category_ratio) %>%
filter(category_ratio != 'Other')
write.csv(dtm_briefs_ratio, '../endrew_amicus_visualize/data_todisplay/dtm_ratio.csv',
row.names = FALSE)
## do filtering based on slider in shiny
## reorganize
theme_ratio <- function(base_size = 16, base_family = "Helvetica"){
theme_bw(base_size = base_size, base_family = base_family) %+replace%
theme(
panel.grid = element_blank(),
panel.border = element_rect(fill = NA, colour = "white", size=1),
panel.background = element_rect(fill = "white", colour = "black"),
strip.background = element_rect(fill = NA),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black")
)
}
ggplot(dtm_briefs_ratio %>% filter(topword_district == 1),
aes(x = term_ordered, y = ratio_district)) +
geom_bar(stat = 'identity', fill = 'wheat4', alpha = 0.3) +
geom_hline(yintercept = 1, linetype = 'dashed', color = 'red') +
coord_flip() +
xlab('') +
theme_ratio() +
ylab('Representation of word in briefs supporting district\nrelative to briefs supporting parents\n(> 1 = over-represented in district;\n< 1 = over-represented in parents)')
ggplot(dtm_briefs_lower, aes(x = district, y = parents)) +
geom_point(color = 'white') +
geom_text(aes(label=terms),
alpha = 0.7,
hjust=1, vjust=1,
size = 6) +
theme_new() +
guides(size = FALSE,
color = FALSE) +
geom_abline(intercept = 0, slope = 1,
linetype = 'dashed',
color = 'red') +
xlab('Briefs in favor of the district') +
ylab('Briefs in favor of the parents') +
theme(axis.text = element_blank(),
axis.ticks = element_blank(),
axis.line = element_line(color = 'black'))
ggsave('/Users/raj2/Dropbox/endrew_amicusbriefs/output/amicus_zoom.pdf',
plot = last_plot(),
device = 'pdf',
width = 12,
height = 8)
|
/code/visualize_dtm.R
|
no_license
|
rebeccajohnson88/endrew_amicusbriefs
|
R
| false | false | 7,673 |
r
|
## load data and packages
library(dplyr)
library(tm)
library(wordcloud)
library(data.table)
library(ggplot2)
## theme function
theme_new <- function(base_size = 24, base_family = "Helvetica"){
theme_bw(base_size = base_size, base_family = base_family) %+replace%
theme(
panel.grid = element_blank(),
panel.border = element_rect(fill = NA, colour = "white", size=1),
panel.background = element_rect(fill = "white", colour = "black"),
strip.background = element_rect(fill = NA),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black")
)
}
## read the data
dtm_briefs = fread('/Users/raj2/Dropbox/endrew_amicusbriefs/output/endrew_amicus_dtm.csv')
## separate
new_colnames = make.unique(colnames(dtm_briefs))
colnames(dtm_briefs) = new_colnames
dtm_briefs_parents = dtm_briefs %>%
filter(who_filed_for == 'parent')
dtm_briefs_district = dtm_briefs %>%
filter(who_filed_for == 'district')
## find colsums and sort by frequency
dtm_briefs_colsums_parents = colSums(dtm_briefs_parents[,setdiff(colnames(dtm_briefs_parents),
c('org',
'who_filed_for'))])
dtm_briefs_sorted_p = data.frame(terms = names(sort(dtm_briefs_colsums_parents, decreasing = TRUE)),
freq = sort(dtm_briefs_colsums_parents, decreasing = TRUE)) %>%
mutate(source = 'parents',
prop = freq/sum((dtm_briefs_colsums_parents)))
head(dtm_briefs_sorted_p)
## find colsums and sort by frequency
dtm_briefs_colsums_district = colSums(dtm_briefs_district[,setdiff(colnames(dtm_briefs_district),
c('org',
'who_filed_for'))])
dtm_briefs_sorted_d = data.frame(terms = names(sort(dtm_briefs_colsums_district, decreasing = TRUE)),
freq = sort(dtm_briefs_colsums_district, decreasing = TRUE)) %>%
mutate(source = 'district',
prop = freq/sum((dtm_briefs_colsums_district)))
## add and then do ranks/scatterplot
dtm_briefs_both = rbind.data.frame(dtm_briefs_sorted_p, dtm_briefs_sorted_d) %>%
arrange(desc(prop)) %>%
dplyr::select(-freq) %>%
dcast(terms ~ source, value.var = 'prop') %>%
mutate(average_both = (parents + district)/2) %>%
arrange(desc(average_both))
dtm_briefs_top100 = dtm_briefs_both %>%
slice(2:100) %>%
mutate(text_size = average_both/2)
head(dtm_briefs_both)
## Three plots
### plot one: blank
ggplot(dtm_briefs_top100, aes(x = district, y = parents)) +
#geom_point(color = 'white') +
#geom_text(aes(label=terms,
# size = text_size,
# color = 'white'),
# alpha = 0.8,
# hjust=1, vjust=1) +
theme_new() +
guides(size = FALSE,
color = FALSE) +
geom_abline(intercept = 0, slope = 1,
linetype = 'dashed',
color = 'red') +
xlab('Briefs in favor of the district') +
ylab('Briefs in favor of the parents') +
theme(axis.text = element_blank(),
axis.ticks = element_blank(),
axis.line = element_line(color = 'black'))
ggsave('/Users/raj2/Dropbox/endrew_amicusbriefs/output/blank_amicus.pdf',
plot = last_plot(),
device = 'pdf',
width = 12,
height = 8)
### plot two: all words
ggplot(dtm_briefs_top100, aes(x = district, y = parents)) +
geom_point(color = 'white') +
geom_text(aes(label=terms),
alpha = 0.8,
hjust=1, vjust=1,
size = 6) +
theme_new() +
guides(size = FALSE,
color = FALSE) +
geom_abline(intercept = 0, slope = 1,
linetype = 'dashed',
color = 'red') +
xlab('Briefs in favor of the district') +
ylab('Briefs in favor of the parents') +
theme(axis.text = element_blank(),
axis.ticks = element_blank(),
axis.line = element_line(color = 'black'))
ggsave('/Users/raj2/Dropbox/endrew_amicusbriefs/output/amicus_allwords.pdf',
plot = last_plot(),
device = 'pdf',
width = 12,
height = 8)
## focus on lower ranked words
dtm_briefs_ratio = dtm_briefs_both %>%
mutate(text_size = average_both/2,
ratio_district = district/parents) %>%
filter(!terms %in% c('http', 'whether',
'would',
'must',
'also',
'use', 'et',
'html', 'al', 'cation', 'tri', 'pet', 'rep',
'br')) %>%
filter(district !=0 & parents !=0) %>%
arrange(desc(ratio_district)) %>%
mutate(ratio_district_rank = dense_rank(desc(ratio_district)),
ratio_parent_rank = dense_rank(ratio_district),
topword_parent = ifelse(ratio_parent_rank < 20, 1, 0),
topword_district = ifelse(ratio_district_rank < 15, 1, 0),
similar_both = ifelse(abs(ratio_district-1) < 0.03, 1, 0),
category_ratio = ifelse(topword_district == 1, 'Over-represented in briefs favoring district',
ifelse(topword_parent == 1,
'Over-represented in briefs favoring parents',
ifelse(similar_both == 1,
'Similar representation', 'Other')))) %>%
mutate(term_ordered = factor(terms, unique(terms))) %>%
dplyr::select(term_ordered, ratio_district,
category_ratio) %>%
filter(category_ratio != 'Other')
write.csv(dtm_briefs_ratio, '../endrew_amicus_visualize/data_todisplay/dtm_ratio.csv',
row.names = FALSE)
## do filtering based on slider in shiny
## reorganize
theme_ratio <- function(base_size = 16, base_family = "Helvetica"){
theme_bw(base_size = base_size, base_family = base_family) %+replace%
theme(
panel.grid = element_blank(),
panel.border = element_rect(fill = NA, colour = "white", size=1),
panel.background = element_rect(fill = "white", colour = "black"),
strip.background = element_rect(fill = NA),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black")
)
}
ggplot(dtm_briefs_ratio %>% filter(topword_district == 1),
aes(x = term_ordered, y = ratio_district)) +
geom_bar(stat = 'identity', fill = 'wheat4', alpha = 0.3) +
geom_hline(yintercept = 1, linetype = 'dashed', color = 'red') +
coord_flip() +
xlab('') +
theme_ratio() +
ylab('Representation of word in briefs supporting district\nrelative to briefs supporting parents\n(> 1 = over-represented in district;\n< 1 = over-represented in parents)')
ggplot(dtm_briefs_lower, aes(x = district, y = parents)) +
geom_point(color = 'white') +
geom_text(aes(label=terms),
alpha = 0.7,
hjust=1, vjust=1,
size = 6) +
theme_new() +
guides(size = FALSE,
color = FALSE) +
geom_abline(intercept = 0, slope = 1,
linetype = 'dashed',
color = 'red') +
xlab('Briefs in favor of the district') +
ylab('Briefs in favor of the parents') +
theme(axis.text = element_blank(),
axis.ticks = element_blank(),
axis.line = element_line(color = 'black'))
ggsave('/Users/raj2/Dropbox/endrew_amicusbriefs/output/amicus_zoom.pdf',
plot = last_plot(),
device = 'pdf',
width = 12,
height = 8)
|
GAMSERVER<- function(input, output, session){
ns<-session$ns
####################################BACILLUS DIFFERENCE
#################define ui
plotbaca<-reactiveValues(plot=NULL)
reactSampleBacd <- reactive({
###set conditions
#species
bacillus<-subset(bacillus, Species %in% input$SpeciesQb)
bacillusOnem<-subset(bacillusOnem,Species %in% input$SpeciesQb)
drall<-subset(drall,Species %in% input$SpeciesQb)
drallonem<-subset(drallonem,Species %in% input$SpeciesQb)
#dominance
bacillus<-subset(bacillus, Dominance %in% input$Dominanceb)
bacillusOnem<-subset(bacillusOnem, Dominance %in% input$Dominanceb)
drall<-subset(drall, Dominance %in% input$Dominanceb)
drallonem<-subset(drallonem, Dominance %in% input$Dominanceb)
#Depth of Sequencing
bacillus<-subset(bacillus, `Depth of Sequencing` %in% input$DeepSb)
bacillusOnem<-subset(bacillusOnem, `Depth of Sequencing` %in% input$DeepSb)
drall<-subset(drall,`Depth of Sequencing` %in% input$DeepSb)
drallonem<-subset(drallonem,`Depth of Sequencing` %in% input$DeepSb)
#read length
bacillus<-subset(bacillus, Read.Length == input$ReadLb)
bacillusOnem<-subset(bacillusOnem, Read.Length == input$ReadLb)
#software
bacillus<-subset(bacillus,Software %in% input$Softwareb)
bacillusOnem<-subset(bacillusOnem,Software %in% input$Softwareb)
bacillus<-rbind(bacillus,drall)
bacillusOnem<-rbind(bacillusOnem,drallonem)
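    # coerce the three Bacillus count columns to numeric (they may come in as factors/characters after subsetting)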
bacillus$Bacillus.anthracis<-as.numeric(as.character(bacillus$Bacillus.anthracis))
bacillusOnem$Bacillus.anthracis<-as.numeric(as.character(bacillusOnem$Bacillus.anthracis))
bacillus$Bacillus.subtilis<-as.numeric(as.character(bacillus$Bacillus.subtilis))
bacillusOnem$Bacillus.subtilis<-as.numeric(as.character(bacillusOnem$Bacillus.subtilis))
bacillus$Bacillus.clausii<-as.numeric(as.character(bacillus$Bacillus.clausii))
bacillusOnem$Bacillus.clausii<-as.numeric(as.character(bacillusOnem$Bacillus.clausii))
colnames(bacillus)[7]<-"Bacillus anthracis"
colnames(bacillus)[8]<-"Bacillus clausii"
colnames(bacillus)[9]<-"Bacillus subtilis"
colnames(bacillusOnem)[7]<-"Bacillus anthracis"
colnames(bacillusOnem)[8]<-"Bacillus clausii"
colnames(bacillusOnem)[9]<-"Bacillus subtilis"
bacillus["Database"]<-"All genomes present"
bacillusOnem["Database"]<-"Bacillus cereus not in database"
df<-merge(bacillus,bacillusOnem,all=T)
df.m<-melt(df)
colnames(df.m)<-c("Kingdom","Species","Depth of Sequencing","Dominance",
"Read Length","Method","Database","Organism","value")
return(df.m)
})
output$Bacd <- renderPlot({
if(is.null(reactSampleBacd())){
return()
}
plotbaca$plot<-ggplot(reactSampleBacd(),
aes(x=Method, y=value, color=Organism,shape=Database, size=`Depth of Sequencing`)) +
geom_point() + scale_shape(solid = FALSE) + scale_y_log10() +
xlab("\nMethod") + ylab("Read Count (Log10 scale)\n") +
theme(plot.title = element_text(colour = "darkred",size=14,face = "bold",hjust = 0.5),
axis.title.x = element_text(colour = "darkred", size=14,face = "bold"),
axis.title.y = element_text(colour = "darkred", size=14,face = "bold"),
legend.title = element_text(colour = "darkred", size=14,face = "bold")) +
labs(size="Sequencing depth", shape="Dataset", color="Bacillus") +
theme(strip.text.x = element_text(colour="darkred",face="bold",size=15)) +
facet_wrap(~ Dominance)
plotbaca$plot
})
output$downloadBacillus <- downloadHandler(
filename = function() {paste("BacillusDiff-",Sys.Date(),".pdf",sep="")},
content = function(file) {
ggsave(file,plot=plotbaca$plot,device = "pdf",width = 15,height = 10)
}
)
}
|
/Modules/gamodule_server.R
|
no_license
|
microgenomics/HumanMicrobiomeAnalysis
|
R
| false | false | 3,689 |
r
|
GAMSERVER<- function(input, output, session){
ns<-session$ns
####################################BACILLUS DIFFERENCE
#################define ui
plotbaca<-reactiveValues(plot=NULL)
reactSampleBacd <- reactive({
###set conditions
#species
bacillus<-subset(bacillus, Species %in% input$SpeciesQb)
bacillusOnem<-subset(bacillusOnem,Species %in% input$SpeciesQb)
drall<-subset(drall,Species %in% input$SpeciesQb)
drallonem<-subset(drallonem,Species %in% input$SpeciesQb)
#dominance
bacillus<-subset(bacillus, Dominance %in% input$Dominanceb)
bacillusOnem<-subset(bacillusOnem, Dominance %in% input$Dominanceb)
drall<-subset(drall, Dominance %in% input$Dominanceb)
drallonem<-subset(drallonem, Dominance %in% input$Dominanceb)
#Depth of Sequencing
bacillus<-subset(bacillus, `Depth of Sequencing` %in% input$DeepSb)
bacillusOnem<-subset(bacillusOnem, `Depth of Sequencing` %in% input$DeepSb)
drall<-subset(drall,`Depth of Sequencing` %in% input$DeepSb)
drallonem<-subset(drallonem,`Depth of Sequencing` %in% input$DeepSb)
#read length
bacillus<-subset(bacillus, Read.Length == input$ReadLb)
bacillusOnem<-subset(bacillusOnem, Read.Length == input$ReadLb)
#software
bacillus<-subset(bacillus,Software %in% input$Softwareb)
bacillusOnem<-subset(bacillusOnem,Software %in% input$Softwareb)
bacillus<-rbind(bacillus,drall)
bacillusOnem<-rbind(bacillusOnem,drallonem)
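    # coerce the three Bacillus count columns to numeric (they may come in as factors/characters after subsetting)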
bacillus$Bacillus.anthracis<-as.numeric(as.character(bacillus$Bacillus.anthracis))
bacillusOnem$Bacillus.anthracis<-as.numeric(as.character(bacillusOnem$Bacillus.anthracis))
bacillus$Bacillus.subtilis<-as.numeric(as.character(bacillus$Bacillus.subtilis))
bacillusOnem$Bacillus.subtilis<-as.numeric(as.character(bacillusOnem$Bacillus.subtilis))
bacillus$Bacillus.clausii<-as.numeric(as.character(bacillus$Bacillus.clausii))
bacillusOnem$Bacillus.clausii<-as.numeric(as.character(bacillusOnem$Bacillus.clausii))
colnames(bacillus)[7]<-"Bacillus anthracis"
colnames(bacillus)[8]<-"Bacillus clausii"
colnames(bacillus)[9]<-"Bacillus subtilis"
colnames(bacillusOnem)[7]<-"Bacillus anthracis"
colnames(bacillusOnem)[8]<-"Bacillus clausii"
colnames(bacillusOnem)[9]<-"Bacillus subtilis"
bacillus["Database"]<-"All genomes present"
bacillusOnem["Database"]<-"Bacillus cereus not in database"
df<-merge(bacillus,bacillusOnem,all=T)
df.m<-melt(df)
colnames(df.m)<-c("Kingdom","Species","Depth of Sequencing","Dominance",
"Read Length","Method","Database","Organism","value")
return(df.m)
})
output$Bacd <- renderPlot({
if(is.null(reactSampleBacd())){
return()
}
plotbaca$plot<-ggplot(reactSampleBacd(),
aes(x=Method, y=value, color=Organism,shape=Database, size=`Depth of Sequencing`)) +
geom_point() + scale_shape(solid = FALSE) + scale_y_log10() +
xlab("\nMethod") + ylab("Read Count (Log10 scale)\n") +
theme(plot.title = element_text(colour = "darkred",size=14,face = "bold",hjust = 0.5),
axis.title.x = element_text(colour = "darkred", size=14,face = "bold"),
axis.title.y = element_text(colour = "darkred", size=14,face = "bold"),
legend.title = element_text(colour = "darkred", size=14,face = "bold")) +
labs(size="Sequencing depth", shape="Dataset", color="Bacillus") +
theme(strip.text.x = element_text(colour="darkred",face="bold",size=15)) +
facet_wrap(~ Dominance)
plotbaca$plot
})
output$downloadBacillus <- downloadHandler(
filename = function() {paste("BacillusDiff-",Sys.Date(),".pdf",sep="")},
content = function(file) {
ggsave(file,plot=plotbaca$plot,device = "pdf",width = 15,height = 10)
}
)
}
|
#' code created in class for extracting the `rick_and_morty` data, as proposed by professor William Amorim
library(magrittr)
url <- "https://en.wikipedia.org/wiki/List_of_Rick_and_Morty_episodes"
res <- httr::GET(url)
wiki_page <- httr::content(res)
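# pull the four season tables from the page, convert them to data frames and clean up the column names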
lista_tab <- wiki_page %>%
xml2::xml_find_all(".//table") %>%
magrittr::extract(2:5) %>%
rvest::html_table(fill = TRUE) %>%
purrr::map(janitor::clean_names) %>%
purrr::map(~dplyr::rename_with(.x, ~stringr::str_remove(.x, "_37")))
num_temporadas <- 1:length(lista_tab)
tab <- lista_tab %>%
purrr::map2(num_temporadas, ~dplyr::mutate(.x, no_season = .y)) %>%
dplyr::bind_rows()
# tidy
rick_and_morty <- tab %>%
dplyr::relocate(no_season, .before = no_inseason) %>%
dplyr::mutate(
title = stringr::str_remove_all(title, '\\"'),
title = stringr::str_remove(title,"\\[.*\\]"),
u_s_viewers_millions = stringr::str_remove(
u_s_viewers_millions,
"\\[.*\\]"
),
u_s_viewers_millions = as.numeric(u_s_viewers_millions),
original_air_date = stringr::str_extract(
original_air_date,
"\\([0-9-]*\\)"
),
original_air_date = stringr::str_remove_all(
original_air_date,
"\\(|\\)"
),
original_air_date = lubridate::as_date(original_air_date)
) %>%
dplyr::select(
num_episodio = no_overall,
num_temporada = no_season,
num_dentro_temporada = no_inseason,
titulo = title,
direcao = directed_by,
roteiro = written_by,
data_transmissao_original = original_air_date,
qtd_espectadores_EUA = u_s_viewers_millions
) %>%
tibble::as_tibble()
readr::write_rds(tab, "data/rick_and_morty_raw.rds")
readr::write_rds(rick_and_morty, "data/rick_and_morty.rds")
|
/R/1-rickandmorty.R
|
no_license
|
alexnuneszo/03_R-para-Ci-ncia-de-Dados-II
|
R
| false | false | 1,721 |
r
|
#' code created in class for extracting the `rick_and_morty` data, as proposed by professor William Amorim
library(magrittr)
url <- "https://en.wikipedia.org/wiki/List_of_Rick_and_Morty_episodes"
res <- httr::GET(url)
wiki_page <- httr::content(res)
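# pull the four season tables from the page, convert them to data frames and clean up the column names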
lista_tab <- wiki_page %>%
xml2::xml_find_all(".//table") %>%
magrittr::extract(2:5) %>%
rvest::html_table(fill = TRUE) %>%
purrr::map(janitor::clean_names) %>%
purrr::map(~dplyr::rename_with(.x, ~stringr::str_remove(.x, "_37")))
num_temporadas <- 1:length(lista_tab)
tab <- lista_tab %>%
purrr::map2(num_temporadas, ~dplyr::mutate(.x, no_season = .y)) %>%
dplyr::bind_rows()
# tidy
rick_and_morty <- tab %>%
dplyr::relocate(no_season, .before = no_inseason) %>%
dplyr::mutate(
title = stringr::str_remove_all(title, '\\"'),
title = stringr::str_remove(title,"\\[.*\\]"),
u_s_viewers_millions = stringr::str_remove(
u_s_viewers_millions,
"\\[.*\\]"
),
u_s_viewers_millions = as.numeric(u_s_viewers_millions),
original_air_date = stringr::str_extract(
original_air_date,
"\\([0-9-]*\\)"
),
original_air_date = stringr::str_remove_all(
original_air_date,
"\\(|\\)"
),
original_air_date = lubridate::as_date(original_air_date)
) %>%
dplyr::select(
num_episodio = no_overall,
num_temporada = no_season,
num_dentro_temporada = no_inseason,
titulo = title,
direcao = directed_by,
roteiro = written_by,
data_transmissao_original = original_air_date,
qtd_espectadores_EUA = u_s_viewers_millions
) %>%
tibble::as_tibble()
readr::write_rds(tab, "data/rick_and_morty_raw.rds")
readr::write_rds(rick_and_morty, "data/rick_and_morty.rds")
|
require("jsonlite")
require("RCurl")
require("ggplot2")
require("dplyr")
df2 <- select(df, ID, DIST, CON, ABSMAG, MAG) %>% filter(MAG > 0, CON != "null") %>% arrange(CON, DIST)
tbl_df(df2)
df3 <- df2 %>% group_by(CON) %>% summarise(n = n()) %>% arrange(n)
df3
require(extrafont)
ggplot() +
coord_cartesian() +
scale_x_discrete() +
scale_y_continuous() +
labs(title='Stars') +
labs(x="Constellation", y=paste("# of Stars")) +
layer(data=df3,
mapping=aes(as.character(CON), y=as.numeric(as.character(n))),
stat="identity",
stat_params=list(),
geom="histogram",
geom_params=list(),
position=position_identity()
#position=position_jitter(width=0.3, height=0)
)
|
/02 Data Wrangling/Workflow 3.R
|
no_license
|
abarrett714/DV_RProject2
|
R
| false | false | 735 |
r
|
require("jsonlite")
require("RCurl")
require("ggplot2")
require("dplyr")
df2 <- select(df, ID, DIST, CON, ABSMAG, MAG) %>% filter(MAG > 0, CON != "null") %>% arrange(CON, DIST)
tbl_df(df2)
df3 <- df2 %>% group_by(CON) %>% summarise(n = n()) %>% arrange(n)
df3
require(extrafont)
ggplot() +
coord_cartesian() +
scale_x_discrete() +
scale_y_continuous() +
labs(title='Stars') +
labs(x="Constellation", y=paste("# of Stars")) +
layer(data=df3,
mapping=aes(as.character(CON), y=as.numeric(as.character(n))),
stat="identity",
stat_params=list(),
geom="histogram",
geom_params=list(),
position=position_identity()
#position=position_jitter(width=0.3, height=0)
)
|
#' Apply the same function to all chunks
#' @param .x a disk.frame
#' @param .f a function to apply to each of the chunks
#' @param outdir the output directory
#' @param keep the columns to keep from the input
#' @param chunks The number of chunks to output
#' @param lazy if TRUE then do this lazily
#' @param compress 0-100 fst compression ratio
#' @param overwrite if TRUE removes any existing chunks in the data
#' @param ... for compatibility with `purrr::map`
#' @import fst
#' @importFrom purrr as_mapper map
#' @importFrom future.apply future_lapply
#' @export
map <- function(.x, .f, ...) {
UseMethod("map")
}
#' @export
map.default <- function(.x, .f, ...) {
purrr::map(.x, .f, ...)
}
#' @rdname map
#' @export
map.disk.frame <- function(.x, .f, ..., outdir = NULL, keep = NULL, chunks = nchunks(.x), compress = 50, lazy = T, overwrite = F) {
.f = purrr::as_mapper(.f)
if(lazy) {
attr(.x, "lazyfn") = c(attr(.x, "lazyfn"), .f)
return(.x)
}
if(!is.null(outdir)) {
overwrite_check(outdir, overwrite)
}
stopifnot(is_ready(.x))
keep1 = attr(.x,"keep")
if(is.null(keep)) {
keep = keep1
}
path <- attr(.x, "path")
files <- list.files(path, full.names = T)
files_shortname <- list.files(path)
keep_future = keep
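  # apply .f to each chunk in parallel; write each result to outdir as fst if given, otherwise collect results in a list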
res = future.apply::future_lapply(1:length(files), function(ii) {
ds = disk.frame::get_chunk(.x, ii, keep=keep_future)
res = .f(ds)
if(!is.null(outdir)) {
fst::write_fst(res, file.path(outdir, files_shortname[ii]), compress)
return(ii)
} else {
return(res)
}
})
if(!is.null(outdir)) {
return(disk.frame(outdir))
} else {
return(res)
}
}
#' imap.disk.frame accepts a two argument function where the first argument is a disk.frame and the
#' second is the chunk ID
#' @export
#' @rdname map
imap.disk.frame <- function(.x, .f, outdir = NULL, keep = NULL, chunks = nchunks(.x), compress = 50, lazy = T, overwrite = F) {
##browser
.f = purrr::as_mapper(.f)
if(lazy) {
attr(.x, "lazyfn") = c(attr(.x, "lazyfn"), .f)
return(.x)
}
if(!is.null(outdir)) {
overwrite_check(outdir, overwrite)
}
stopifnot(is_ready(.x))
keep1 = attr(.x,"keep")
if(is.null(keep)) {
keep = keep1
}
path <- attr(.x, "path")
files <- list.files(path, full.names = T)
files_shortname <- list.files(path)
keep_future = keep
res = future.apply::future_lapply(1:length(files), function(ii) {
ds = disk.frame::get_chunk(.x, ii, keep=keep_future)
res = .f(ds, ii)
if(!is.null(outdir)) {
fst::write_fst(res, file.path(outdir, files_shortname[ii]), compress)
return(ii)
} else {
return(res)
}
})
if(!is.null(outdir)) {
return(disk.frame(outdir))
} else {
return(res)
}
}
#' `lazy` is a convenience function to apply `.f` to every chunk
#' @export
#' @rdname map
lazy <- function(.x, .f, ...) {
UseMethod("lazy")
}
#' @rdname map
lazy.disk.frame <- function(.x, .f, ...) {
map.disk.frame(.x, .f, ..., lazy = T)
}
#' Lazy chunk_lapply wrapper
#' @export
#' @rdname map
delayed <- function(.x, .f, ...) {
UseMethod("delayed")
}
#' @export
#' @rdname map
delayed.disk.frame <- function(.x, .f, ...) {
map.disk.frame(.x, .f, ..., lazy = T)
}
#' @export
#' @rdname map
chunk_lapply <- function (...) {
warning("chunk_lapply is deprecated in favour of map.disk.frame")
map.disk.frame(...)
}
|
/R/map.r
|
permissive
|
jingmouren/disk.frame
|
R
| false | false | 3,425 |
r
|
#' Apply the same function to all chunks
#' @param .x a disk.frame
#' @param .f a function to apply to each of the chunks
#' @param outdir the output directory
#' @param keep the columns to keep from the input
#' @param chunks The number of chunks to output
#' @param lazy if TRUE then do this lazily
#' @param compress 0-100 fst compression ratio
#' @param overwrite if TRUE removes any existing chunks in the data
#' @param ... for compatibility with `purrr::map`
#' @import fst
#' @importFrom purrr as_mapper map
#' @importFrom future.apply future_lapply
#' @export
map <- function(.x, .f, ...) {
UseMethod("map")
}
#' @export
map.default <- function(.x, .f, ...) {
purrr::map(.x, .f, ...)
}
#' @rdname map
#' @export
map.disk.frame <- function(.x, .f, ..., outdir = NULL, keep = NULL, chunks = nchunks(.x), compress = 50, lazy = T, overwrite = F) {
.f = purrr::as_mapper(.f)
if(lazy) {
attr(.x, "lazyfn") = c(attr(.x, "lazyfn"), .f)
return(.x)
}
if(!is.null(outdir)) {
overwrite_check(outdir, overwrite)
}
stopifnot(is_ready(.x))
keep1 = attr(.x,"keep")
if(is.null(keep)) {
keep = keep1
}
path <- attr(.x, "path")
files <- list.files(path, full.names = T)
files_shortname <- list.files(path)
keep_future = keep
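  # apply .f to each chunk in parallel; write each result to outdir as fst if given, otherwise collect results in a list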
res = future.apply::future_lapply(1:length(files), function(ii) {
ds = disk.frame::get_chunk(.x, ii, keep=keep_future)
res = .f(ds)
if(!is.null(outdir)) {
fst::write_fst(res, file.path(outdir, files_shortname[ii]), compress)
return(ii)
} else {
return(res)
}
})
if(!is.null(outdir)) {
return(disk.frame(outdir))
} else {
return(res)
}
}
#' imap.disk.frame accepts a two argument function where the first argument is a disk.frame and the
#' second is the chunk ID
#' @export
#' @rdname map
imap.disk.frame <- function(.x, .f, outdir = NULL, keep = NULL, chunks = nchunks(.x), compress = 50, lazy = T, overwrite = F) {
##browser
.f = purrr::as_mapper(.f)
if(lazy) {
attr(.x, "lazyfn") = c(attr(.x, "lazyfn"), .f)
return(.x)
}
if(!is.null(outdir)) {
overwrite_check(outdir, overwrite)
}
stopifnot(is_ready(.x))
keep1 = attr(.x,"keep")
if(is.null(keep)) {
keep = keep1
}
path <- attr(.x, "path")
files <- list.files(path, full.names = T)
files_shortname <- list.files(path)
keep_future = keep
res = future.apply::future_lapply(1:length(files), function(ii) {
ds = disk.frame::get_chunk(.x, ii, keep=keep_future)
res = .f(ds, ii)
if(!is.null(outdir)) {
fst::write_fst(res, file.path(outdir, files_shortname[ii]), compress)
return(ii)
} else {
return(res)
}
})
if(!is.null(outdir)) {
return(disk.frame(outdir))
} else {
return(res)
}
}
#' `lazy` is a convenience function to apply `.f` to every chunk
#' @export
#' @rdname map
lazy <- function(.x, .f, ...) {
UseMethod("lazy")
}
#' @rdname map
lazy.disk.frame <- function(.x, .f, ...) {
map.disk.frame(.x, .f, ..., lazy = T)
}
#' Lazy chunk_lapply wrapper
#' @export
#' @rdname map
delayed <- function(.x, .f, ...) {
UseMethod("delayed")
}
#' @export
#' @rdname map
delayed.disk.frame <- function(.x, .f, ...) {
map.disk.frame(.x, .f, ..., lazy = T)
}
#' @export
#' @rdname map
chunk_lapply <- function (...) {
warning("chunk_lapply is deprecated in favour of map.disk.frame")
map.disk.frame(...)
}
|
#*******************************************************************************
#
# ------------------- LSD tools for sensitivity analysis ---------------------
#
# Written by Marcelo C. Pereira, University of Campinas
#
# Copyright Marcelo C. Pereira
# Distributed under the GNU General Public License
#
#*******************************************************************************
# ==== Do sensitivity analysis of a fitted model ====
sobol.decomposition.lsd <- function( data, model = NULL, krig.sa = FALSE,
sa.samp = 1000 ) {
if( ! inherits( data, "doe.lsd" ) )
stop( "Invalid data (not from read.doe.lsd())" )
if( is.null( krig.sa ) || ! is.logical( krig.sa ) )
stop( "Invalid Kriging algorithm switch (krig.sa)" )
if( is.null( sa.samp ) || ! is.finite( sa.samp ) || round( sa.samp ) < 1 )
stop( "Invalid number of samples (sa.samp)" )
sa.samp <- round( sa.samp )
if( is.null( model ) ) {
out <- data.sensitivity( data )
} else {
if( inherits( model, "kriging.model.lsd" ) ) {
out <- kriging.sensitivity( data, model, krig.sa = krig.sa,
sa.samp = sa.samp )
} else {
if( inherits( model, "polynomial.model.lsd" ) )
out <- polynomial.sensitivity( data, model, sa.samp = sa.samp )
else
stop( "Invalid model (not from polynomial or kriging.model.lsd())" )
}
}
return( out )
}
# ==== Perform sensitivity analysis directly over data ====
data.sensitivity <- function( data, tries = 5 ) {
# ---- Sensitivity analysis using a B-spline smoothing interpolation model ----
metamodel <- try( sensitivity::sobolSmthSpl( as.matrix( data$resp[ , 1 ] ), data$doe ),
silent = TRUE )
# try a few times, as it usually succeeds...
while( inherits( metamodel, "try-error" ) && tries > 0 ) {
metamodel <- try( sensitivity::sobolSmthSpl( as.matrix( data$resp[ , 1 ] ), data$doe ),
silent = TRUE )
tries <- tries - 1
if( ! inherits( metamodel, "try-error" ) )
break
}
if( inherits( metamodel, "try-error" ) )
return( NULL )
mainEffect <- function( x ) x$S[ , 1 ]
  # algorithm provides only the main effects, so distribute the indirect effects evenly (approx.)
totalEffect <- ( 1 - sum( mainEffect( metamodel ) ) )
sa <- cbind( mainEffect( metamodel ),
mainEffect( metamodel ) * totalEffect / sum( mainEffect( metamodel ) ) )
rownames( sa ) <- colnames( data$doe )
colnames( sa ) <- c( "Direct effects", "Interactions" )
sa <- as.data.frame( sa )
sa <- sa[ order( - rowSums( sa, na.rm = TRUE ) ), ]
max.index <- function( x, pos = 1 )
as.integer( sapply( sort( x, index.return = TRUE ), `[`,
length( x ) - pos + 1 )[ 2 ] )
topEffect <- c( max.index( mainEffect( metamodel ), 1 ),
max.index( mainEffect( metamodel ), 2 ),
max.index( mainEffect( metamodel ), 3 ) )
cat( "Top parameters influencing response surface:\n" )
cat( " First:", colnames( data$doe )[ topEffect[ 1 ] ], "\n" )
cat( " Second:", colnames( data$doe )[ topEffect[ 2 ] ], "\n" )
cat( " Third:", colnames( data$doe )[ topEffect[ 3 ] ], "\n\n" )
sa <- list( metamodel = metamodel, sa = sa, topEffect = topEffect )
class( sa ) <- "spline.sensitivity.lsd"
return( sa )
}
|
/R/sobol.R
|
no_license
|
cran/LSDsensitivity
|
R
| false | false | 3,479 |
r
|
#*******************************************************************************
#
# ------------------- LSD tools for sensitivity analysis ---------------------
#
# Written by Marcelo C. Pereira, University of Campinas
#
# Copyright Marcelo C. Pereira
# Distributed under the GNU General Public License
#
#*******************************************************************************
# ==== Do sensitivity analysis of a fitted model ====
sobol.decomposition.lsd <- function( data, model = NULL, krig.sa = FALSE,
sa.samp = 1000 ) {
if( ! inherits( data, "doe.lsd" ) )
stop( "Invalid data (not from read.doe.lsd())" )
if( is.null( krig.sa ) || ! is.logical( krig.sa ) )
stop( "Invalid Kriging algorithm switch (krig.sa)" )
if( is.null( sa.samp ) || ! is.finite( sa.samp ) || round( sa.samp ) < 1 )
stop( "Invalid number of samples (sa.samp)" )
sa.samp <- round( sa.samp )
if( is.null( model ) ) {
out <- data.sensitivity( data )
} else {
if( inherits( model, "kriging.model.lsd" ) ) {
out <- kriging.sensitivity( data, model, krig.sa = krig.sa,
sa.samp = sa.samp )
} else {
if( inherits( model, "polynomial.model.lsd" ) )
out <- polynomial.sensitivity( data, model, sa.samp = sa.samp )
else
stop( "Invalid model (not from polynomial or kriging.model.lsd())" )
}
}
return( out )
}
# ==== Perform sensitivity analysis directly over data ====
data.sensitivity <- function( data, tries = 5 ) {
# ---- Sensitivity analysis using a B-spline smoothing interpolation model ----
metamodel <- try( sensitivity::sobolSmthSpl( as.matrix( data$resp[ , 1 ] ), data$doe ),
silent = TRUE )
# try a few times, as it usually succeeds...
while( inherits( metamodel, "try-error" ) && tries > 0 ) {
metamodel <- try( sensitivity::sobolSmthSpl( as.matrix( data$resp[ , 1 ] ), data$doe ),
silent = TRUE )
tries <- tries - 1
if( ! inherits( metamodel, "try-error" ) )
break
}
if( inherits( metamodel, "try-error" ) )
return( NULL )
mainEffect <- function( x ) x$S[ , 1 ]
  # algorithm provides only the main effects, so distribute the indirect effects evenly (approx.)
totalEffect <- ( 1 - sum( mainEffect( metamodel ) ) )
sa <- cbind( mainEffect( metamodel ),
mainEffect( metamodel ) * totalEffect / sum( mainEffect( metamodel ) ) )
rownames( sa ) <- colnames( data$doe )
colnames( sa ) <- c( "Direct effects", "Interactions" )
sa <- as.data.frame( sa )
sa <- sa[ order( - rowSums( sa, na.rm = TRUE ) ), ]
max.index <- function( x, pos = 1 )
as.integer( sapply( sort( x, index.return = TRUE ), `[`,
length( x ) - pos + 1 )[ 2 ] )
topEffect <- c( max.index( mainEffect( metamodel ), 1 ),
max.index( mainEffect( metamodel ), 2 ),
max.index( mainEffect( metamodel ), 3 ) )
cat( "Top parameters influencing response surface:\n" )
cat( " First:", colnames( data$doe )[ topEffect[ 1 ] ], "\n" )
cat( " Second:", colnames( data$doe )[ topEffect[ 2 ] ], "\n" )
cat( " Third:", colnames( data$doe )[ topEffect[ 3 ] ], "\n\n" )
sa <- list( metamodel = metamodel, sa = sa, topEffect = topEffect )
class( sa ) <- "spline.sensitivity.lsd"
return( sa )
}
|
library(shiny)
# Define UI for the PvE curve-fitting application
shinyUI(fluidPage(
# Application title
titlePanel("PvE Online"),
# Main layout: data entry, model selection, initial values, and fit output
fluidRow(
column(3,
strong("Step 1: Your Data"),
br(),
checkboxInput("has_header", "My data has a header row", value = FALSE),
HTML('<textarea id="raw_dat" rows="15" cols="30"
placeholder="Paste your data here"></textarea>'),
br(),
helpText("NB: Even if you have a header row, irradiance (E) must be in the first column and photosynthesis (P) must be in the second column.")
),
column(3,
selectInput("model_type", "Step 2: Select a model",
c("Jassby and Platt 1976 (tanh)" = "tanh",
"Linear Model" = "linear")),
uiOutput("model_formula"),
hr(),
strong("Step 3: Estimate Initial Values"),
br(), br(),
uiOutput("coef_guess_ui")
),
column(6,
strong("Step 4: Fit the model"),
br(),
checkboxInput("optimise", "Check here to optimise.", FALSE),
br(), br(),
imageOutput("dat_plot", height = "300px"),
conditionalPanel(
condition = "input.optimise",
helpText(strong("Fitted coefficients")),
helpText(htmlOutput("fit_print"))
))
),
fluidRow(
column(6,
h4("About This Page"),
HTML("This page was created by <a href='http://www.pritchard.co/research'>Daniel Pritchard</a> to support undergraduate teaching. It uses, <a href='http://www.r-project.org'>R</a>, <a href='http://www.rstudio.com'>RStudio</a> and <a href='http://shiny.rstudio.com'>Shiny</a> to present a simplified interface for non-linear curve fitting, with specific applications in algal ecophysiology. It is not intended for use in research applications. For that, please <a href='http://www.pritchard.co/contact'>contact Daniel directly</a>.")
),
column(6,
h4("Useful References"),
HTML("Jassby, A. and Platt, T. 1976. Mathematical formulation of relationship between photosynthesis and light for phytoplankton. <em>Limnology and Oceanography</em>, 21: 540--547.<br>")
)
)
))
|
/ui.R
|
no_license
|
dpritchard/pve_fit_online
|
R
| false | false | 2,452 |
r
|
library(shiny)
# Define UI for the PvE curve-fitting application
shinyUI(fluidPage(
# Application title
titlePanel("PvE Online"),
# Main layout: data entry, model selection, initial values, and fit output
fluidRow(
column(3,
strong("Step 1: Your Data"),
br(),
checkboxInput("has_header", "My data has a header row", value = FALSE),
HTML('<textarea id="raw_dat" rows="15" cols="30"
placeholder="Paste your data here"></textarea>'),
br(),
helpText("NB: Even if you have a header row, irradiance (E) must be in the first column and photosynthesis (P) must be in the second column.")
),
column(3,
selectInput("model_type", "Step 2: Select a model",
c("Jassby and Platt 1976 (tanh)" = "tanh",
"Linear Model" = "linear")),
uiOutput("model_formula"),
hr(),
strong("Step 3: Estimate Initial Values"),
br(), br(),
uiOutput("coef_guess_ui")
),
column(6,
strong("Step 4: Fit the model"),
br(),
checkboxInput("optimise", "Check here to optimise.", FALSE),
br(), br(),
imageOutput("dat_plot", height = "300px"),
conditionalPanel(
condition = "input.optimise",
helpText(strong("Fitted coefficients")),
helpText(htmlOutput("fit_print"))
))
),
fluidRow(
column(6,
h4("About This Page"),
HTML("This page was created by <a href='http://www.pritchard.co/research'>Daniel Pritchard</a> to support undergraduate teaching. It uses, <a href='http://www.r-project.org'>R</a>, <a href='http://www.rstudio.com'>RStudio</a> and <a href='http://shiny.rstudio.com'>Shiny</a> to present a simplified interface for non-linear curve fitting, with specific applications in algal ecophysiology. It is not intended for use in research applications. For that, please <a href='http://www.pritchard.co/contact'>contact Daniel directly</a>.")
),
column(6,
h4("Useful References"),
HTML("Jassby, A. and Platt, T. 1976. Mathematical formulation of relationship between photosynthesis and light for phytoplankton. <em>Limnology and Oceanography</em>, 21: 540--547.<br>")
)
)
))
|
#---build timeseries geojson for map
library(data.table)
library(jsonlite)
library(geojsonio)
library(sp)
library(RSQLite)
library(DBI)
library(dbplyr)
library(dplyr)
covid_db <- dbConnect(RSQLite::SQLite(), '/home/ubuntu/cov_api/data/covid_db.sqlite')
dat <- tbl(covid_db, 'counties') %>%
select(countyFIPS, date, case_count, delta, per_delta, r_t, deaths, cases_per_10k, doubling, r_t_three) %>%
distinct() %>%
collect()
dat <- data.table(dat)
dat <- dat[as.Date(dat$date) >= '2020-03-01',]
# dat <- jsonlite::fromJSON(paste0('http://160.1.89.242/alldata?min_date=20200301&max_date=', gsub('-', '', Sys.Date() - 1)))
# dat <- data.table(dat)
dat[, r_t := round(r_t, 2)]
dat[, r_t_three := round(r_t_three, 2)]
dat[, per_delta := round(per_delta* 100, 2) ]
#---make wide timeseries data - every variable/date combo gets a column
u_id <- unique(dat$countyFIPS)
out <- list()
pb <- txtProgressBar(max = length(u_id), style = 3)
for(i in 1:length(u_id)){
sub <- dat[countyFIPS == u_id[i]]
sub <- unique(sub, by=c("countyFIPS", "date"))
out_tmp <- list()
for(j in 1:nrow(sub)){
cols <- paste0(colnames(sub)[3:ncol(sub)],'_', gsub('-', '', sub$date[j]))
tmp <- data.frame(sub[j, 3:ncol(sub)])
colnames(tmp) <- cols
out_tmp[[j]] <- tmp
}
z <- cbind(sub[1, 1], do.call('cbind', out_tmp))
out[[i]] <- z
setTxtProgressBar(pb, i)
}
final <- rbindlist(out)
#merge into county shapes
county_shapes <- readRDS('~/working/cov_api/data/all_counties.RDS')
rn <- row.names(county_shapes@data)
county_shapes$STATE <- as.character(county_shapes$STATE)
county_shapes$COUNTY <- as.character(county_shapes$COUNTY)
county_shapes$FIPS <- paste0(county_shapes$STATE, county_shapes$COUNTY)
county_shapes <- sp::merge(county_shapes, final, by.x = 'FIPS', by.y = 'countyFIPS')
row.names(county_shapes) <- rn
geojsonio::geojson_write(county_shapes, file = "~/working/bigmap/ts.geojson")
#servr::httw('~/working/bigmap/', port = '8000', daemon = FALSE)
setwd('~/working/bigmap')
system('git add --all')
system('git commit -m "update"')
system('git push')
|
/create_geojson_ts.R
|
no_license
|
iankloo/cov_api
|
R
| false | false | 2,085 |
r
|
#---build timeseries geojson for map
library(data.table)
library(jsonlite)
library(geojsonio)
library(sp)
library(RSQLite)
library(DBI)
library(dbplyr)
library(dplyr)
covid_db <- dbConnect(RSQLite::SQLite(), '/home/ubuntu/cov_api/data/covid_db.sqlite')
dat <- tbl(covid_db, 'counties') %>%
select(countyFIPS, date, case_count, delta, per_delta, r_t, deaths, cases_per_10k, doubling, r_t_three) %>%
distinct() %>%
collect()
dat <- data.table(dat)
dat <- dat[as.Date(dat$date) >= '2020-03-01',]
# dat <- jsonlite::fromJSON(paste0('http://160.1.89.242/alldata?min_date=20200301&max_date=', gsub('-', '', Sys.Date() - 1)))
# dat <- data.table(dat)
dat[, r_t := round(r_t, 2)]
dat[, r_t_three := round(r_t_three, 2)]
dat[, per_delta := round(per_delta* 100, 2) ]
#---make wide timeseries data - every variable/date combo gets a column
u_id <- unique(dat$countyFIPS)
out <- list()
pb <- txtProgressBar(max = length(u_id), style = 3)
for(i in 1:length(u_id)){
sub <- dat[countyFIPS == u_id[i]]
sub <- unique(sub, by=c("countyFIPS", "date"))
out_tmp <- list()
for(j in 1:nrow(sub)){
cols <- paste0(colnames(sub)[3:ncol(sub)],'_', gsub('-', '', sub$date[j]))
tmp <- data.frame(sub[j, 3:ncol(sub)])
colnames(tmp) <- cols
out_tmp[[j]] <- tmp
}
z <- cbind(sub[1, 1], do.call('cbind', out_tmp))
out[[i]] <- z
setTxtProgressBar(pb, i)
}
final <- rbindlist(out)
#merge into county shapes
county_shapes <- readRDS('~/working/cov_api/data/all_counties.RDS')
rn <- row.names(county_shapes@data)
county_shapes$STATE <- as.character(county_shapes$STATE)
county_shapes$COUNTY <- as.character(county_shapes$COUNTY)
county_shapes$FIPS <- paste0(county_shapes$STATE, county_shapes$COUNTY)
county_shapes <- sp::merge(county_shapes, final, by.x = 'FIPS', by.y = 'countyFIPS')
row.names(county_shapes) <- rn
geojsonio::geojson_write(county_shapes, file = "~/working/bigmap/ts.geojson")
#servr::httw('~/working/bigmap/', port = '8000', daemon = FALSE)
setwd('~/working/bigmap')
system('git add --all')
system('git commit -m "update"')
system('git push')
|
library(tidyverse)
have <- list.files(path='G://My Drive/DHS New', pattern='.zip|ZIP$') %>%
substr(1, 8) %>%
toupper
avail <- read.csv('C://Users/matt/mortality/scope/available_files_20190522.txt',
col.names='filename') %>%
mutate(file=substr(filename, 75, 82),
type=substr(file, 3, 4),
have=toupper(file) %in% have)
to.download <- avail %>%
filter(!have) %>%
select(filename)
write.csv(to.download, 'C://Users/matt/Desktop/todownload.txt', row.names=F)
|
/scope/Select_to_Download.R
|
no_license
|
mcooper/mortality
|
R
| false | false | 504 |
r
|
library(tidyverse)
have <- list.files(path='G://My Drive/DHS New', pattern='.zip|ZIP$') %>%
substr(1, 8) %>%
toupper
avail <- read.csv('C://Users/matt/mortality/scope/available_files_20190522.txt',
col.names='filename') %>%
mutate(file=substr(filename, 75, 82),
type=substr(file, 3, 4),
have=toupper(file) %in% have)
to.download <- avail %>%
filter(!have) %>%
select(filename)
write.csv(to.download, 'C://Users/matt/Desktop/todownload.txt', row.names=F)
|
##### General R heatmaps for KO counts tables #########
library("ape")
library("RColorBrewer")
library("gplots")
library("ggplot2")
#read the table (INSERT THE TABLE THAT YOU WANT TO READ IN HERE!!!!!)
a <- read.table("<your_counts_table>", sep="\t", header=T, row.names=1)
#remove the last column that contains no data (this is still an error that my current script generates. haven't worked out how to fix it yet)
a <- a[, 1:(dim(a)[2]-1)]
#IF YOU ARE ORDERING BY ROWS (KO#s), PASTE THE RBIND LINE HERE AND UNCOMMENT
#a <- rbind(a['K01424',], a['K00926',], a['K01953',], a['K01744',], a['K01674',], a['K01725',], a['K01673',], a['K00285',], a['K01455',], a['K00605',], a['K00262',], a['K01915',], a['K01425',], a['K00264',], a['K00265',], a['K00266',], a['K00284',], a['K00261',], a['K01745',], a['K05601',], a['K01916',], a['K00367',], a['K00370',], a['K00371',], a['K00373',], a['K02585',], a['K02586',], a['K02587',], a['K02588',], a['K02589',], a['K02590',], a['K02591',], a['K02593',], a['K02594',], a['K02595',], a['K02596',], a['K02597',], a['K00366',], a['K00362',], a['K00363',], a['K00372',], a['K01501',], a['K00368',], a['K00459',], a['K04561',], a['K00376',], a['K04015',], a['K14155',], a['K10775',], a['K01668',])
#IF YOU ARE ORDERING BASED ON YOUR PHYLO TREE, PASTE THE CBIND LINE HERE AND UNCOMMENT
#b <- cbind(a$A00000079, a$A00001271, a$A00000604, a$A00000579, a$A00000836, a$A00001041, a$C00000092, a$C00000008, a$A00000108, a$A00000527, a$A00000287, a$A00000286, a$A00000291, a$A00000292, a$A00000593, a$A00000299, a$A00000793, a$A00000679, a$A00000294, a$A00001644, a$A00001049, a$A00000923, a$A00000794, a$A00000648, a$A00000792, a$A00001208, a$A00000665, a$A00000186, a$A00001053, a$A00000731, a$A00001188, a$A00000308, a$A00001377, a$A00000197, a$A00000466, a$A00000620, a$A00000199, a$A00000198, a$A00000468, a$A00000573, a$A00000465, a$A00000464, a$A00000196, a$A00000200, a$A00000467, a$A00000289, a$A00000290, a$A00000293, a$A00000288, a$A00000528)
#read in the tree to grab names for columns
g <- read.tree("<your_newick.tree>")
#add back the row and column names
colnames(b) <- g$tip.label
rownames(b) <- rownames(a)
#generate Col side colors from levels in list
#Currently takes the third column from the table you used to define the order of the columns.
#If you are self ordering, comment out and remove ColSideColors=csc from the heatmap line
col.levels <- read.table("<your_filename_here>", header=F, sep="\t")
col.levels.list <- as.vector(col.levels[,3])
#define palette of colors to be used for the ColSideColors
#the number in this line is the number of categories you have. it needs to be modified manually
z <- brewer.pal(3, "Purples")
csc <- c(z)[col.levels.list+1]
#define the colors that will be used for the RowSideColors
#To add a colour bar for the genome names, you need to come up with a vector based on your tree that determines which group each falls into.
#these lines define what I did for Rochelle's project.
#if you don't want pretty colors, remove "RowSideColors=rsc" from the heatmap line
#row.levels <- read.table("<your_list_of_categories_here>", header=F, sep="\t")
#row.levels.list <- as.vector(row.levels[,1])
#rsc <- c(Chloroflexi="#EDF8E9", '4C0d-2'="#C7E9C0", Gloeobacterales="#A1D99B", Synechococcales="#74C476", Chroococcales="#41AB5D", Nostocales="#238B45", Oscillatoriales="#005A32")[row.levels.list]
#change the output to include the gene name instead of the KO number. for this to work, your list of KO numbers must have the gene names in the second column (R runs on a 1 index, just to be confusing)
col.labels <- as.vector(col.levels[,2])
##actually do the grunt work and make your heatmap
##convert the object to a matrix, and transpose it (otherwise your tree will be the wrong way around)
c <- as.matrix(t(b))
#define the number of breaks and color palette for the heatmap
#I use rcolorbrewer. pretty colours are only available for values up to 9.
#this line tests that, and if we have any values over 9, it log2 converts the table so we can still use pretty colors.
if (max(c) > 8) c <- log2(c + 1)
d <- max(c)
#define the breaks for the heatmap. this essentially puts each real number in the middle of a break. Should be modified for log2 converted tables, but I haven't done it yet.
e <- seq(-0.5,d+0.5,1)
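#One possible adjustment for log2-converted tables (untested sketch; the bin count of 9
#is an arbitrary choice, not something from the original script):
#if (max(c) > 8) e <- seq(-0.01, d + 0.01, length.out = 9)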
#Define the colors
y <- brewer.pal(length(e)-1, "Blues")
pdf("<any_name_you_like.pdf")
#PICK THE LINE YOU WANT AND UNCOMMENT IT
#draw the heatmap. self ordered KO numbers (always my first choice when I have no idea what it will look like)
#x <- heatmap.2(c, Rowv=FALSE, Colv=TRUE, dendrogram='column', scale='none', density='none', breaks=c(e), col=y, trace='none', cexRow=0.6, RowSideColors=rsc, ColSideColors=csc, labCol=col.labels)
#draw the heatmap. no dendrograms
#x <- heatmap.2(c, Rowv=FALSE, Colv=FALSE, dendrogram='none', scale='none', density='none', breaks=c(e), col=y, trace='none', cexRow=0.6, RowSideColors=rsc, ColSideColors=csc, labCol=col.labels)
#draw the heatmap. self order on both axes
#x <- heatmap.2(c, Rowv=TRUE, Colv=TRUE, dendrogram='both', scale='none', density='none', breaks=c(e), col=y, trace='none', cexRow=0.6, RowSideColors=rsc, ColSideColors=csc, labCol=col.labels)
dev.off()
#####################
#if you're having trouble following along at home, the following script works beautifully on the files in ./R_test_files
##### General R heatmaps for KO counts tables #########
library("ape")
library("RColorBrewer")
library("gplots")
library("ggplot2")
#in this example, I combined the two photosynthesis categories, and removed the second header line beforehand
a <- read.table("all_photosynthesis_counts.txt", sep="\t", header=T, row.names=1)
a <- a[ , -ncol(a)]
a <- rbind(a['K02111',], a['K02108',], a['K02114',], a['K02112',], a['K02110',], a['K02109',], a['K02115',], a['K02113',], a['K02634',], a['K02635',], a['K02636',], a['K02637',], a['K02638',], a['K02639',], a['K02640',], a['K02641',], a['K08906',], a['K02642',], a['K02643',], a['K03689',], a['K02689',], a['K02690',], a['K02691',], a['K02692',], a['K02693',], a['K02694',], a['K02696',], a['K02697',], a['K02698',], a['K02699',], a['K02700',], a['K02702',], a['K08902',], a['K08903',], a['K08904',], a['K02703',], a['K02704',], a['K02705',], a['K02706',], a['K02707',], a['K02708',], a['K02709',], a['K02710',], a['K02711',], a['K02712',], a['K02713',], a['K02714',], a['K02716',], a['K02717',], a['K02718',], a['K02719',], a['K02720',], a['K02722',], a['K02723',], a['K02724',], a['K02092',], a['K02093',], a['K02094',], a['K02095',], a['K02096',], a['K02097',], a['K02284',], a['K02285',], a['K02286',], a['K02287',], a['K02288',], a['K02289',], a['K02290',], a['K05376',], a['K05377',], a['K05378',], a['K05379',], a['K05380',], a['K05381',], a['K05382',], a['K05383',], a['K05384',], a['K05385',], a['K05386',], a['K02628',], a['K02629',], a['K02630',], a['K02631',], a['K02632',])
b <- cbind(a$A00000079, a$A00001271, a$A00000604, a$A00000579, a$A00000836, a$A00001041, a$C00000092, a$C00000008, a$A00000108, a$A00000527, a$A00000287, a$A00000286, a$A00000291, a$A00000292, a$A00000593, a$A00000299, a$A00000793, a$A00000679, a$A00000294, a$A00001644, a$A00001049, a$A00000923, a$A00000794, a$A00000648, a$A00000792, a$A00001208, a$A00000665, a$A00000186, a$A00001053, a$A00000731, a$A00001188, a$A00000308, a$A00001377, a$A00000197, a$A00000466, a$A00000620, a$A00000199, a$A00000198, a$A00000468, a$A00000573, a$A00000465, a$A00000464, a$A00000196, a$A00000200, a$A00000467, a$A00000289, a$A00000290, a$A00000293, a$A00000288, a$A00000528)
g <- read.tree("cyanowoYS2phylo.tree")
colnames(b) <- g$tip.label
rownames(b) <- rownames(a)
col.levels <- read.table("Photo_KO_gene_category.txt", header=F, sep="\t")
col.levels.list <- as.vector(col.levels[,3])
z <- brewer.pal(3, "Purples")
csc <- c(z)[col.levels.list+1]
row.levels <- read.table("cyano_tree_categories.txt", header=F, sep="\t")
row.levels.list <- as.vector(row.levels[,1])
rsc <- c(Chloroflexi="#EDF8E9", '4C0d-2'="#C7E9C0", Gloeobacterales="#A1D99B", Synechococcales="#74C476", Chroococcales="#41AB5D", Nostocales="#238B45", Oscillatoriales="#005A32")[row.levels.list]
col.labels <- as.vector(col.levels[,2])
##actually do the grunt work and make your heatmap
##convert the object to a matrix, and transpose it (otherwise your tree will be the wrong way around)
c <- as.matrix(t(b))
#define the number of breaks and color palette for the heatmap
#I use rcolorbrewer. pretty colours are only available for values up to 9.
#this line tests that, and if we have any values over 9, it log2 converts the table so we can still use pretty colors.
if (max(c) > 8) c <- log2(c + 1)
d <- max(c)
#define the breaks for the heatmap. this essentially puts each real number in the middle of a break. Should be modified for log2 converted tables, but I haven't done it yet.
e <- seq(-0.5,d+0.5,1)
#Define the colors
y <- brewer.pal(length(e)-1, "Blues")
#comment out if you are using RStudio and want to view the graph directly
#pdf("test_photosynthesis.pdf")
#draw the heatmap. self ordered KO numbers (always my first choice when I have no idea what it will look like)
#x <- heatmap.2(c, Rowv=FALSE, Colv=TRUE, dendrogram='column', scale='none', density='none', breaks=c(e), col=y, trace='none', cexRow=0.6, RowSideColors=rsc, ColSideColors=csc, labCol=col.labels)
#draw the heatmap. no dendrograms
x <- heatmap.2(c, Rowv=FALSE, Colv=FALSE, dendrogram='none', scale='none', density='none', breaks=c(e), col=y, trace='none', cexRow=0.6, RowSideColors=rsc, ColSideColors=csc, labCol=col.labels)
#draw the heatmap. self order on both axes
#x <- heatmap.2(c, Rowv=TRUE, Colv=TRUE, dendrogram='both', scale='none', density='none', breaks=c(e), col=y, trace='none', cexRow=0.6, RowSideColors=rsc, ColSideColors=csc, labCol=col.labels)
#comment out if you are using RStudio and you want to view the graph directly
#dev.off()
|
/generic_R_heatmaps_for_KO_tables.R
|
no_license
|
jasteen/KO_counting
|
R
| false | false | 9,934 |
r
|
|
library(shiny)
library(shinyjs)
shinyServer(function(input, output, session) {
# add or remove circles when clicking the radio button
observeEvent(input$modelingtype, {
if (input$modelingtype == "bestop") {
print("1")
} else if (input$modelingtype == "customop") {
print("2")
}
}, ignoreInit = FALSE)
})
# shinyServer(function(input, output,session) {
# observeEvent(input$modelingtype, {
# if (input$modelingtype == "bestop") {
# print("1")
# } else if (input$modelingtype == "customop") {
# print("2")
# }
# }, ignoreInit = TRUE)
#
# })
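# A possible next step (illustrative sketch only; the element ids "bestop_circles" and
# "customop_circles" are assumptions, not ids that exist in this app's ui.R):
# shinyServer(function(input, output, session) {
#   observeEvent(input$modelingtype, {
#     shinyjs::toggle(id = "bestop_circles", condition = input$modelingtype == "bestop")
#     shinyjs::toggle(id = "customop_circles", condition = input$modelingtype == "customop")
#   }, ignoreInit = FALSE)
# })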
|
/dump/server.R
|
no_license
|
samsark1996/DevDataProd3
|
R
| false | false | 688 |
r
|
|
class_SymbolHistorical <- function(
ticker,
start.date,
end.date,
envir = .GlobalEnv,
...
){
############################################################################
# PUBLIC CLASS
############################################################################
# PublicClass enables historical functionality for a symbol
if(!is.environment(envir))
if(is.character(envir))
envir <- eval(parse(text = envir))
else
stop("Please specify an available environment")
# Defines an instrument to be traded
self <- new.env() # 'instance' of the object
self$.data <- self$.instrument <- NULL # attributes stored here
self$methods <- self$help <- function(){
# returns all available methods for the symbol
return(
cat("
query_csrp(...):
queries CRSP database from wrds
get_daily_crsp_data(wrds.db, start.date, end.date):
pulls daily bars from CRSP
make_as_stock(ticker, permno=NULL, NAICS=NULL):
assigns attributes, defines as FinancialInstrument
align_symbol_to_days(align.to.dates):
aligns all evaluated symbols across time
set_quantstrat_pos_limit(portfolio, timestamp, min.pos, max.pos):
        sets position limits for quantstrat
      store_data_on_disk(): // NOT YET SUPPORTED
removes data from RAM and sets into hard disk
filter_by_market_cap(filter, timestamp):
returns logic if symbol meets market cap filter")
)
}
############################################################################
### PRIVATE METHODS ####
############################################################################
self$make_as_stock <- function(
ticker,
permno = NULL,
NAICS = NULL,
...
)
{
# Method defines self with Financial Instrument package, and assigns
# it definitions and attributes
name <- if(!is.null(permno))
paste0(ticker, ":", permno)
else
ticker
FinancialInstrument::stock(
primary_id = name, # not useful because ':' is replaced with '.'
identifiers = list(
ticker = ticker,
permno = permno,
NAICS = NAICS
),
currency = "USD",
multiplier = 1L,
assign_i = TRUE
)
self$.contract <<- IBrokers::twsSTK(ticker)
self$.instrument <<- FinancialInstrument::getInstrument(name)
}
self$query_csrp <- function(
wrds.db,
start.date,
end.date)
{
if(is.null(self$.instrument))
stop(paste0("You must specify '", self$.instrument$identifiers$ticker, "' as a FinancialInstrument first."))
if(is.null(self$.instrument$identifiers$permno))
stop(paste0("You must specify a PERMNO code for '", self$.instrument$identifiers$ticker, "' first."))
start.date <- to.POSIXct(start.date)
end.date <- to.POSIXct(end.date)
# Method returns SQL query for daily bars
DT <- setDT(RJDBC::dbGetQuery(wrds.db,
strwrap(paste0("
SELECT
DATE, HCOMNAM, HTICK, HTSYMBOL,
ABS(PRC) as CLOSE,
ABS(OPENPRC) as OPEN,
BID, ASK, ASKHI, BIDLO, VOL, SHROUT, HNAICS,
CFACPR, HPRIMEXC
FROM
CRSPQ.DSF
JOIN
CRSPQ.DSFHDR
ON
CRSPQ.DSF.PERMNO = CRSPQ.DSFHDR.PERMNO
WHERE
CRSPQ.DSFHDR.PERMNO=", self$.instrument$identifiers$permno),
width = 1000L, simplify = TRUE)
))
colnames(DT) <- c("DATE", "HCOMNAM", "HTICK", "HTSYMBOL",
"CLOSE", "OPEN", "BID", "ASK", "ASKHI",
"BIDLO", "VOL", "SHROUT", "HNAICS",
"CFACPR", "HPRIMEXC")
DT <- DT[DATE %between% c(start.date, end.date)]
YEAR.MKTCAP = DT[ ,mean(SHROUT) * mean(CLOSE), by = zoo::as.yearmon(DATE)]
setnames(YEAR.MKTCAP, c("Year", "Market.Cap"))
self$.instrument$exchange <<- convert_crsp_exchange_code(last(DT[["HPRIMEXC"]]))
self$.instrument$Market.Cap <<- YEAR.MKTCAP # assign attributes
self$.instrument$identifiers$NAICS <<- unique(DT[["HNAICS"]])
return(DT)
}
self$get_daily_crsp_data <- function(...)
{
# Method returns daily bars as XTS
DT = self$query_csrp(...) # quick name reference
if(nrow(DT) == 0){
print(paste0(self$.instrument$identifiers$ticker, " did not trade during specified range; skipping symbol"))
return()
}
stopifnot(!any(duplicated(DT[["DATE"]])))
impute_cols <-
c("CLOSE", "OPEN", "ASKHI", "BIDLO", "VOL", "SHROUT") # "BID", "ASK",
for(j in impute_cols)
set(DT, j = j, value =
if(!all(is.na(DT[[j]]))) # only those that are not all NA
imputeTS::na.locf(
x = DT[[j]],
option = 'locf',
na.remaining = 'keep')
else DT[[j]] # else return the column
)
if(all(unlist(DT[ ,lapply(.SD, # check if all columns are NA
function(x) length(which(is.na(x))) == .N)
,.SDcols = impute_cols]))){
print(paste0(self$.instrument$identifiers$ticker, " table contains all NAs; skipping symbol"))
return()
}
# Remaining NAs will be at the beginning // drop these obs
DT <- DT[complete.cases(DT[ ,c(impute_cols), with = FALSE])]
if(any(is.na(DT[ ,impute_cols, with = FALSE])) || nrow(DT) == 0){
print(paste0("Missing data detected in ", self$.instrument$identifiers$raw_id , " table; skipping symbol"))
return()
}
XTS <- as.xts.data.table(
DT[ ,Date := to.POSIXct(DATE)
][ ,.(Date,
OPEN / CFACPR,
ASKHI / CFACPR,
BIDLO / CFACPR,
CLOSE / CFACPR,
VOL * CFACPR,
SHROUT * CLOSE
)])
colnames(XTS) <- paste0(self$.instrument$identifiers$raw_id ,
c(".Open",".High", ".Low",".Close", ".Volume", ".Market.Cap"))
self$.data <<- XTS
}
self$align_symbol_to_days <- function(align.to.dates)
{
# aligns symbol to specified trading.dates
dates.NA <- align.to.dates[!align.to.dates %in% index(self$.data)]
if(length(dates.NA)){
XTS <- rbind.xts(
cbind(Trading = TRUE, self$.data), # old data
cbind(FALSE, as.xts( # first a row of NAs, the all 0s
do.call(rbind,
replicate(
n = length(dates.NA),
expr = t(rep(as.numeric(NA), 6L)),
simplify = FALSE)),
order.by = dates.NA
))
)
      last.obs <- last(index(XTS)[which(XTS[ ,1] == 1)]) # last date with real (traded) data
# temporarily fill the gaps with obs, last obs will be
# carried forward, but will be cutoff by 'last.obs'
# *NOTE* no actual imputation is done here!
XTS <- as.xts(apply(XTS, 2, function(j)
imputeTS::na.locf(
x = j,
option = 'locf',
na.remaining = 'keep'
)), order.by = index(XTS))
block.NA <- which(XTS[ ,'Trading'] %in% 0 &
XTS[ ,ncol(XTS)] %in% NA)
XTS <- rbind.xts(
XTS[block.NA, ], # combine periods of no trading
XTS[XTS[ ,"Trading"] == 1, ] # gaps removed
)[paste0("::", last.obs)]
} else
XTS <- cbind(TRUE, self$.data)
colnames(XTS)[1] <- paste(
gsub(".Close","", colnames(Cl(XTS))), "Trading", sep = ".")
    stopifnot(ncol(XTS) == 7L) # safety check
self$.data <<- XTS # place new data into symbols env
}
self$set_quantstrat_pos_limit <- function(
portfolio,
timestamp,
max.pos,
min.pos)
{
quantstrat::addPosLimit(
portfolio = portfolio,
symbol = self$.instrument$identifiers$raw_id ,
timestamp = timestamp,
maxpos = max.pos,
minpos = min.pos
)
}
self$store_data_on_disk <- function()
{
    # TO DO, needs to go to one pointer, and draws from indices instead
# take out of memory and store off disk
# pattern <- gsub("[[:punct:]]", "", symbol)
# if(!hasArg(temp.dir))
# temp.dir <- paste0(dir, "temp/", pattern)
# # else use the provided temp.dir
# dir.create(temp.dir, showWarnings = FALSE, recursive = TRUE)
# temp.file <- tempfile(pattern = "", tmpdir = temp.dir)
# assign(
# x = symbol, # get.mmap
# value = as.mmap.xts(get(symbol), file = temp.file),
# envir = envir)
}
self$filter_by_market_cap <- function(filter, timestamp)
{
operation <- stringr::str_extract(filter, ">=|<=|>|<|=|!=")
value <- stringr::str_extract(filter, "[[:digit:]]+$")
market.value <- as.numeric( # Market.Cap at first trading observation
Mc(self$.data)[Td(self$.data) == 1][timestamp])
if(!length(market.value) || is.na(market.value))
return(TRUE)
else if(!eval(parse(text = paste(market.value, operation, value))))
return(FALSE) # passes ## 256000 > 5000 # if fail then not pass
else if(eval(parse(text = paste(market.value, operation, value))))
return(TRUE)
else
stop(paste0("Invalid logic; please check your filter"))
}
class(self) <- c("Symbol")
invisible(self)
}
### TO DO, ADD TICK METHOD TO CLASS SYMBOL
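### Illustrative usage sketch (commented out): 'wrds' is assumed to be an open RJDBC
### connection to WRDS and the PERMNO is only an example value; neither comes from this file.
# sym <- class_SymbolHistorical(ticker = "IBM", start.date = "2010-01-01", end.date = "2015-12-31")
# sym$make_as_stock(ticker = "IBM", permno = 12490)
# sym$get_daily_crsp_data(wrds.db = wrds, start.date = "2010-01-01", end.date = "2015-12-31")
# head(sym$.data)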
|
/R/symbol.R
|
permissive
|
Nourdine2015/quanttools
|
R
| false | false | 10,895 |
r
|
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 14206
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 14206
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/Model_instances/ci.e#1.a#3.E#40.A#60.c#296.w#4.s#7.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4835
c no.of clauses 14206
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 14206
c
c QBFLIB/Amendola-Ricca-Truszczynski/Model_instances/ci.e#1.a#3.E#40.A#60.c#296.w#4.s#7.asp.qdimacs 4835 14206 E1 [] 0 40 4795 14206 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/Model_instances/ci.e#1.a#3.E#40.A#60.c#296.w#4.s#7.asp/ci.e#1.a#3.E#40.A#60.c#296.w#4.s#7.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 731 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bind.R
\name{vec_bind}
\alias{vec_bind}
\alias{vec_rbind}
\alias{vec_cbind}
\title{Combine multiple data frames into a single data frame}
\usage{
vec_rbind(..., .type = NULL)
vec_cbind(..., .type = NULL, .nrow = NULL)
}
\arguments{
\item{...}{Data frames or vectors.
\code{vec_rbind()} ignores names. \code{vec_cbind()} preserves outer names,
combining with inner names if also present.
\code{NULL} inputs are silently ignored. Empty (e.g. zero row) inputs
will not appear in the output, but will affect the derived \code{.type}.}
\item{.type}{If \code{NULL}, the default, the output type is determined by
computing the common type across all inputs.
Alternatively, you can supply \code{.type} to force the output to have known
type, or to die trying. \code{.type = character()} and \code{.type = list()} will
succeed for all vectors. See \code{\link[=vec_cast]{vec_cast()}} for more details.}
\item{.nrow}{If \code{NULL}, the default, will determine the number of
rows in \code{vec_cbind()} output by using the standard recycling rules.
Alternatively, specify the desired number of rows, and any inputs
of length 1 will be recycled appropriately.}
}
\value{
A data frame, or subclass of data frame.
If \code{...} is a mix of different data frame subclasses, \code{vec_type2()}
will be used to determine the output type. For \code{vec_rbind()}, this
will determine the type of the container and the type of each column;
for \code{vec_cbind()} it only determines the type of the output container.
If there are no non-\code{NULL} inputs, the result will be \code{data.frame()}.
}
\description{
This pair of functions binds together data frames (and vectors), either
row-wise or column-wise. Row-binding creates a data frame with common type
across all arguments. Column-binding creates a data frame with common length
across all arguments.
}
\examples{
# row binding -----------------------------------------
# common columns are coerced to common type
vec_rbind(
data.frame(x = 1),
data.frame(x = FALSE)
)
# unique columns are filled with NAs
vec_rbind(
data.frame(x = 1),
data.frame(y = "x")
)
# null inputs are ignored
vec_rbind(
data.frame(x = 1),
NULL,
data.frame(x = 2)
)
# bare vectors are treated as rows
vec_rbind(
c(x = 1, y = 2),
c(x = 3)
)
# default names will be supplied if arguments are not named
vec_rbind(
1:2,
1:3,
1:4
)
# column binding --------------------------------------
# each input is recycled to have common length
vec_cbind(
data.frame(x = 1),
data.frame(y = 1:3)
)
# bare vectors are treated as columns
vec_cbind(
data.frame(x = 1),
y = letters[1:3]
)
# outer names are combined with inner names
vec_cbind(
x = data.frame(a = 1, b = 2),
y = 1
)
# duplicate names are flagged
vec_cbind(x = 1, x = 2)
}
\seealso{
\code{\link[=vec_c]{vec_c()}} for combining 1d vectors.
}
|
/man/vec_bind.Rd
|
no_license
|
njtierney/vctrs
|
R
| false | true | 2,930 |
rd
|
|
#' Download GFDL CMIP5 outputs for a single grid point using OPeNDAP and convert to CF
#'
#' @export
#' @param outfolder Directory for storing output
#' @param start_date Start date for met (will be converted via [base::as.POSIXlt])
#' @param end_date End date for met (will be converted via [base::as.POSIXlt])
#' @param lat.in Latitude coordinate for met
#' @param lon.in Longitude coordinate for met
#' @param overwrite Logical: Download a fresh version even if a local file with
#' the same name already exists?
#' @param verbose Logical, passed on to \code{\link[ncdf4]{ncvar_def}} and
#' \code{\link[ncdf4]{nc_create}} to control printing of debug info
#' @param model Which GFDL model to run (options are CM3, ESM2M, ESM2G)
#' @param scenario Which scenario to run (options are rcp26, rcp45, rcp60, rcp85)
#' @param ensemble_member Which ensemble_member to initialize the run (options are r1i1p1, r3i1p1, r5i1p1)
#' @author James Simkins, Alexey Shiklomanov, Ankur Desai
download.GFDL <- function(outfolder, start_date, end_date, lat.in, lon.in,
overwrite = FALSE, verbose = FALSE,
model = "CM3", scenario = "rcp45", ensemble_member = "r1i1p1", ...) {
if(is.null(model)) model <- "CM3"
if(is.null(scenario)) scenario <- "rcp45"
if(is.null(ensemble_member)) ensemble_member <- "r1i1p1"
start_year <- lubridate::year(start_date)
end_year <- lubridate::year(end_date)
obs_per_year <- 365 * 24 /3 # 3-hr intervals, leap days ignored
#Fix Outfolder to include model and scenario
folder_name <- paste0("GFDL_", model, "_", scenario, "_", ensemble_member)
source_id_foldername <- basename(outfolder)
source_all_foldername <- gsub("GFDL", folder_name, source_id_foldername)
outfolder <- file.path(paste0(outfolder, source_all_foldername))
lat.in <- as.numeric(lat.in)
lat_floor <- floor(lat.in)
lon.in <- as.numeric(lon.in)
lon_floor <- floor(lon.in)
if (lon_floor < 0) {
lon_floor <- 360 + lon_floor
}
lat_GFDL <- lat_floor * (0.5) + 45
lat_GFDL <- floor(lat_GFDL) + 1
lon_GFDL <- lon_floor / 2.5
lon_GFDL <- floor(lon_GFDL) + 1
dap_base <- "http://nomads.gfdl.noaa.gov:9192/opendap/CMIP5/output1/NOAA-GFDL/GFDL"
dir.create(outfolder, showWarnings = FALSE, recursive = TRUE)
ylist <- seq(start_year, end_year, by = 1)
rows <- length(ylist)
results <- data.frame(
file = character(rows),
host = character(rows),
mimetype = character(rows),
formatname = character(rows),
startdate = character(rows),
enddate = character(rows),
dbfile.name = paste("GFDL", model, scenario, ensemble_member, sep = "."), # 'GFDL',
stringsAsFactors = FALSE
)
var <- tibble::tribble(
~DAP.name, ~CF.name, ~units,
"tas", "air_temperature", "Kelvin",
"rlds", "surface_downwelling_longwave_flux_in_air", "W/m2",
"ps", "air_pressure", "Pascal",
"rsds", "surface_downwelling_shortwave_flux_in_air", "W/m2",
"uas", "eastward_wind", "m/s",
"vas", "northward_wind", "m/s",
"huss", "specific_humidity", "g/g",
"pr", "precipitation_flux", "kg/m2/s"
)
for (i in seq_len(rows)) {
year <- ylist[i]
# find start position of currently-wanted year in the 5-year DAP file
time_offset <- 1 + ((year-1) %% 5) * obs_per_year
PEcAn.logger::logger.debug(
sprintf(
"Downloading GFDL year %d (%d of %d)",
year, i, rows
)
)
loc.file <- file.path(
outfolder,
paste("GFDL", model, scenario, ensemble_member, year, "nc", sep = ".")
)
results$file[i] <- loc.file
results$host[i] <- PEcAn.remote::fqdn()
results$startdate[i] <- paste0(year, "-01-01 00:00:00")
results$enddate[i] <- paste0(year, "-12-31 23:59:59")
results$mimetype[i] <- "application/x-netcdf"
results$formatname[i] <- "CF Meteorology"
if (file.exists(loc.file) && !isTRUE(overwrite)) {
PEcAn.logger::logger.error("File already exists. Skipping to next year")
next
}
met_start <- 2006
met_block <- 5
url_year <- met_start + floor((year - met_start) / met_block) * met_block
start_url <- paste0(url_year, "0101")
end_url <- paste0(url_year + met_block - 1, "1231")
## Create dimensions
lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE)
lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE)
time <- ncdf4::ncdim_def(
name = "time",
units = paste("seconds since", results$startdate[i]),
vals = (1:obs_per_year) * 10800, # 3 hr interval * 3600 sec/hr
create_dimvar = TRUE,
unlim = TRUE
)
dim <- list(lat = lat, lon = lon, time = time)
var.list <- list()
dat.list <- list()
## get data off OpenDAP
for (j in seq_len(nrow(var))) {
PEcAn.logger::logger.debug(
sprintf(
"Downloading GFDL var %s (%d of %d)",
var$DAP.name[j], j, nrow(var)
)
)
dap_end <- paste0(
"-", model, "/",
scenario, "/3hr/atmos/3hr/",
ensemble_member, "/v20110601/",
var$DAP.name[j], "/",
var$DAP.name[j], "_3hr_GFDL-",
model, "_",
scenario, "_",
ensemble_member, "_",
start_url, "00-", end_url, "23.nc"
)
dap_file <- paste0(dap_base, dap_end)
dap <- ncdf4::nc_open(dap_file, suppress_dimvals = TRUE)
# Sanity check:
# We're saving the data with timestamps at the end of the interval,
# while GFDL-supplied timestamps vary slightly -- some vars are
# timestamped in middle of interval, others at end.
# But if these disagree by more than 3 hours, we have a problem.
raw_time <- ncdf4::ncvar_get(dap, "time", start = time_offset, count = obs_per_year)
converted_time <- udunits2::ud.convert(raw_time, dap$dim$time$units, dim$time$units)
if(!all(diff(converted_time) == 3 * 60 * 60)){
PEcAn.logger::logger.error(
"Expected timestamps at 3-hour intervals, got",
paste(range(diff(converted_time)), collapse = "-"),
"seconds")
}
if(!all(abs(dim$time$vals - converted_time) < (3 * 60 * 60))){
PEcAn.logger::logger.error(
"Timestamps in GFDL source file differ from expected by more than 3 hours:",
"Expected", paste(range(dim$time$vals), collapse = "-"),
dim$time$units,
", got", paste(range(converted_time), collapse = "-"),
". Greatest difference from expected:",
max(abs(dim$time$vals - converted_time)), "seconds")
}
dat.list[[j]] <- ncdf4::ncvar_get(dap, as.character(var$DAP.name[j]),
start = c(lon_GFDL, lat_GFDL, time_offset),
count = c(1, 1, obs_per_year))
var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]),
units = as.character(var$units[j]),
dim = dim,
missval = -999,
verbose = verbose)
ncdf4::nc_close(dap)
}
## put data in new file
loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose)
for (j in seq_len(nrow(var))) {
ncdf4::ncvar_put(nc = loc, varid = as.character(var$CF.name[j]), vals = dat.list[[j]])
}
ncdf4::nc_close(loc)
}
return(invisible(results))
} # download.GFDL
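## Example call (illustrative sketch only; the output folder and coordinates are made-up
## values, and a working connection to the GFDL OPeNDAP server is assumed):
# res <- download.GFDL(outfolder = "/tmp/met/GFDL_site_0-1",
#                      start_date = "2026-01-01", end_date = "2026-12-31",
#                      lat.in = 45.5, lon.in = -90.1,
#                      model = "CM3", scenario = "rcp45", ensemble_member = "r1i1p1")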
|
/modules/data.atmosphere/R/download.GFDL.R
|
permissive
|
ashiklom/pecan
|
R
| false | false | 7,555 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R, R/gcd.R
\name{gcd}
\alias{gcd}
\alias{scm}
\alias{lcm}
\alias{coprime}
\alias{Rgcd}
\alias{Rscm}
\title{Find the Greatest Common Divisor, Smallest Common Multiple, or Coprimality}
\usage{
gcd(m, n)
scm(m, n)
coprime(m, n)
Rgcd(...)
Rscm(...)
}
\arguments{
\item{m, n, ...}{integer vectors.}
}
\value{
The functions \code{gcd}, \code{scm}, and \code{coprime} return a vector of the
length of longest input vector. If one vector is shorter, it will be
recycled. The \code{gcd} and \code{scm} functions return an integer vector while
\code{coprime} returns a logical vector. The reduction functions \code{Rgcd} and
\code{Rscm} return a single integer.
}
\description{
These functions provide vectorized computations for the greatest common
divisor (\code{gcd}), smallest common multiple (\code{scm}), and coprimality. Coprime
numbers are also called \emph{mutually prime} or \emph{relatively prime} numbers.
The smallest common multiple is often called the \emph{least common multiple}.
}
\details{
The greatest common divisor uses Euclid's algorithm, a fast and widely
used method. The smallest common multiple and coprimality are computed using
the gcd, where \eqn{scm = \frac{a}{gcd(a, b)} \times b}{scm = a / gcd(a, b) * b}
and two numbers are coprime when \eqn{gcd = 1}.
The \code{gcd}, \code{scm}, and \code{coprime} functions perform element-wise computation.
The \code{Rgcd} and \code{Rscm} functions perform \code{gcd} and \code{scm} over multiple values
using reduction. That is, they compute the greatest common divisor and least
common multiple for an arbitrary number of integers based on the properties
\eqn{gcd(a_1, a_2, ..., a_n) = gcd(gcd(a_1, a_2, ...), a_n)} and
\eqn{scm(a_1, a_2, ..., a_n) = scm(scm(a_1, a_2, ...), a_n)}. The binary
operation is applied to two elements; then the result is used as the first
operand in a call with the next element. This is done iteratively until all
elements are used. It is idiomatically equivalent to \code{Reduce(gcd, x)} or
\code{Reduce(scm, x)}, where \code{x} is a vector of integers, but much faster.
}
\examples{
gcd(c(18, 22, 49, 13), 42)
## [1] 6 2 7 1
Rgcd(18, 24, 36, 12)
## [1] 6
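# As the Details section notes, Rgcd is idiomatically equivalent to reducing
# the binary gcd over the inputs (it is just much faster):
Reduce(gcd, c(18, 24, 36, 12))
## [1] 6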
scm(60, 90)
## [1] 180
Rscm(1:10)
## [1] 2520
coprime(60, c(77, 90))
## [1] TRUE FALSE
}
\author{
Paul Egeler, MS
}
|
/man/gcd.Rd
|
permissive
|
cran/primes
|
R
| false | true | 2,364 |
rd
|
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly
## Below are a pair of functions that cache the inverse of a matrix.
## This first function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## This second function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then cacheSolve should retrieve the
## inverse from the cache.
##
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
return(inv)
}
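
## Illustrative usage (kept as comments so sourcing the file has no side effects):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)  # computes and caches the inverse (0.5 on the diagonal)
## cacheSolve(m)  # prints "getting cached data." and returns the cached inverse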
|
/cachematrix.R
|
no_license
|
roydwilliams/ProgrammingAssignment2
|
R
| false | false | 1,079 |
r
|
library(gWidgets)
library(ggplot2)
options("guiToolkit"="RGtk2")
win <- gwindow("Density Graphic Module")
big <- ggroup(container=win)
g <- ggroup(horizontal=FALSE, container=big, expand=TRUE)
lbl1 <- glabel("Density Graphic Module")  # created without a container; added once to g below
font(lbl1) <- list(weight = "bold", family = "normal", size = 12)
add(g, lbl1, anchor = c(0, 0))
addSpace(g, 20)
lv1 <- glabel("Attribute of Interest:", cont = g)
dplist <- gcombobox(c("gender", "weight"), cont = g)
lv2 <- gcheckbox("Graphic fill", cont = g)
lv3 <- glabel("Transparency:", cont = g)
transp <- gslider(0, 1, by = 0.1, cont = g)
plotData = function(h,...)
{
df <- data.frame(
gender=factor(rep(c("F", "M"), each=200)),
weight=round(c(rnorm(200, mean=55, sd=5),
rnorm(200, mean=65, sd=5)))
)
head(df)
if (svalue(lv2, index = TRUE) == TRUE){
# "Graphic fill" checked: fill the density curves by gender, with slider-controlled transparency
print(ggplot(data = df, aes(x = weight, color = gender, fill = gender)) + ggtitle("Weight by gender") + geom_density(alpha = svalue(transp)))
} else {
print(ggplot(data = df, aes(x = weight, color = gender)) + ggtitle("Weight by gender") + geom_density())
}
}
plot_button <- gbutton(
text = "Plot data",
container = g,
handler = plotData
)
parg <- ggroup(horizontal=FALSE, cont=big, label="plot args")
qpg <- ggraphics(container=big, label="plot")
|
/first window.R
|
permissive
|
pabloluna/modulos
|
R
| false | false | 1,352 |
r
|
#' Automatic group-wise Anomaly Detection by STL Decomposition
#'
#' `tk_anomaly_diagnostics()` is the preprocessor for `plot_anomaly_diagnostics()`.
#' It performs automatic anomaly detection for one or more time series groups.
#'
#' @param .data A `tibble` or `data.frame` with a time-based column
#' @param .date_var A column containing either date or date-time values
#' @param .value A column containing numeric values
#' @param .frequency Controls the seasonal adjustment (removal of seasonality).
#' Input can be either "auto", a time-based definition (e.g. "2 weeks"),
#' or a numeric number of observations per frequency (e.g. 10).
#' Refer to [tk_get_frequency()].
#' @param .trend Controls the trend component.
#' For STL, trend controls the sensitivity of the LOESS smoother used to estimate the trend component; what remains after removing season and trend becomes the remainder.
#' Refer to [tk_get_trend()].
#' @param .alpha Controls the width of the "normal" range. Lower values are more conservative
#' while higher values are less prone to incorrectly classifying "normal" observations.
#' @param .max_anomalies The maximum percent of anomalies permitted to be identified.
#' @param .message A boolean. If `TRUE`, will output information related to automatic frequency
#' and trend selection (if applicable).
#'
#' @return A `tibble` or `data.frame` with STL Decomposition Features
#' (observed, season, trend, remainder, seasadj) and
#' Anomaly Features (remainder_l1, remainder_l2, anomaly, recomposed_l1, and recomposed_l2)
#'
#' @details
#'
#' The `tk_anomaly_diagnostics()` method implements a 2-step process to
#' detect outliers in time series.
#'
#' __Step 1: Detrend & Remove Seasonality using STL Decomposition__
#'
#' The decomposition separates the "season" and "trend" components from the "observed" values
#' leaving the "remainder" for anomaly detection.
#'
#' The user can control two parameters: frequency and trend.
#'
#' 1. `.frequency`: Adjusts the "season" component that is removed from the "observed" values.
#' 2. `.trend`: Adjusts the trend window (the t.window parameter from [stats::stl()]) that is used.
#'
#' The user may supply both `.frequency` and `.trend` as time-based durations (e.g. "6 weeks") or
#' numeric values (e.g. 180) or "auto", which predetermines the frequency and/or trend based on
#' the scale of the time series using the [tk_time_scale_template()].
#'
#' __Step 2: Anomaly Detection__
#'
#' Once "trend" and "season" (seasonality) is removed, anomaly detection is performed on the "remainder".
#' Anomalies are identified, and boundaries (recomposed_l1 and recomposed_l2) are determined.
#'
#' The anomaly detection method is based on the inner quartile range (IQR): the band between the
#' 25th and 75th percentiles of the remainder is expanded to form the "normal" limits.
#'
#' _IQR Adjustment, alpha parameter_
#'
#' With the default `alpha = 0.05`, the limits are established by expanding
#' the 25/75 baseline by an IQR Factor of 3 (3X).
#' The _IQR Factor = 0.15 / alpha_ (hence 3X with alpha = 0.05):
#'
#' - To increase the IQR Factor controlling the limits, decrease the alpha,
#' which makes it more difficult to be an outlier.
#' - Increase alpha to make it easier to be an outlier.
#'
#'
#' - The IQR outlier detection method is used in `forecast::tsoutliers()`.
#' - A similar outlier detection method is used by Twitter's `AnomalyDetection` package.
#' - Both Twitter and Forecast tsoutliers methods have been implemented in Business Science's `anomalize`
#' package.
#'
#' @seealso
#' - [plot_anomaly_diagnostics()]: Visual anomaly detection
#'
#' @references
#' 1. CLEVELAND, R. B., CLEVELAND, W. S., MCRAE, J. E., AND TERPENNING, I.
#' STL: A Seasonal-Trend Decomposition Procedure Based on Loess.
#' Journal of Official Statistics, Vol. 6, No. 1 (1990), pp. 3-73.
#'
#' 2. Owen S. Vallis, Jordan Hochenbaum and Arun Kejariwal (2014).
#' A Novel Technique for Long-Term Anomaly Detection in the Cloud. Twitter Inc.
#'
#' @examples
#' library(dplyr)
#' library(timetk)
#'
#' walmart_sales_weekly %>%
#' filter(id %in% c("1_1", "1_3")) %>%
#' group_by(id) %>%
#' tk_anomaly_diagnostics(Date, Weekly_Sales)
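#'
#' # Illustrative variation: lowering .alpha raises the IQR factor (0.15 / .alpha),
#' # so fewer observations are flagged as anomalies.
#' walmart_sales_weekly %>%
#'     filter(id == "1_1") %>%
#'     tk_anomaly_diagnostics(Date, Weekly_Sales, .alpha = 0.025, .max_anomalies = 0.1)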
#'
#' @name tk_anomaly_diagnostics
#' @export
tk_anomaly_diagnostics <- function(.data, .date_var, .value,
.frequency = "auto", .trend = "auto",
.alpha = 0.05, .max_anomalies = 0.2,
.message = TRUE) {
# Tidyeval Setup
date_var_expr <- rlang::enquo(.date_var)
value_expr <- rlang::enquo(.value)
# Checks
if (!is.data.frame(.data)) {
stop(call. = FALSE, "tk_anomaly_diagnostics(.data) is not a data-frame or tibble. Please supply a data.frame or tibble.")
}
if (rlang::quo_is_missing(date_var_expr)) {
stop(call. = FALSE, "tk_anomaly_diagnostics(.date_var) is missing. Please supply a date or date-time column.")
}
if (rlang::quo_is_missing(value_expr)) {
stop(call. = FALSE, "tk_anomaly_diagnostics(.value) is missing. Please supply a numeric column.")
}
UseMethod("tk_anomaly_diagnostics", .data)
}
#' @export
tk_anomaly_diagnostics.data.frame <- function(.data, .date_var, .value,
.frequency = "auto", .trend = "auto",
.alpha = 0.05, .max_anomalies = 0.2,
.message = TRUE) {
# STL Decomposition (observed, season, trend, remainder, seasadj)
ret <- .data %>%
tk_stl_diagnostics(
.date_var = !! rlang::enquo(.date_var),
.value = !! rlang::enquo(.value),
.frequency = .frequency,
.trend = .trend,
.message = .message
)
# Detect Anomalies (remainder_l1, remainder_l2, anomaly)
ret <- ret %>%
mutate_anomalies(
target = remainder,
alpha = .alpha,
max_anoms = .max_anomalies
)
# Recomposition
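    #   recomposed_l1 / recomposed_l2 are the lower / upper bounds of the "normal"
    #   range on the original scale (season + trend + remainder limits)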
ret <- ret %>%
dplyr::mutate(
recomposed_l1 = season + trend + remainder_l1,
recomposed_l2 = season + trend + remainder_l2
)
return(ret)
}
#' @export
tk_anomaly_diagnostics.grouped_df <- function(.data, .date_var, .value,
.frequency = "auto", .trend = "auto",
.alpha = 0.05, .max_anomalies = 0.2,
.message = TRUE) {
# Tidy Eval Setup
value_expr <- rlang::enquo(.value)
date_var_expr <- rlang::enquo(.date_var)
group_names <- dplyr::group_vars(.data)
# Process groups individually
.data %>%
tidyr::nest() %>%
dplyr::mutate(nested.col = purrr::map(
.x = data,
.f = function(df) tk_anomaly_diagnostics(
.data = df,
.date_var = !! date_var_expr,
.value = !! value_expr,
.frequency = .frequency,
.trend = .trend,
.alpha = .alpha,
.max_anomalies = .max_anomalies,
.message = .message
)
)) %>%
dplyr::select(-data) %>%
tidyr::unnest(cols = nested.col) %>%
dplyr::group_by_at(.vars = group_names)
}
# UTILS ----
# This is anomalize::anomalize()
mutate_anomalies <- function(data, target, alpha = 0.05, max_anoms = 0.20) {
# Checks
if (missing(target)) stop('Error in anomalize(): argument "target" is missing, with no default', call. = FALSE)
# Setup
target_expr <- rlang::enquo(target)
x <- data %>% dplyr::pull(!! target_expr)
# Explicitly call functions
outlier_list <- iqr_vec(x = x, alpha = alpha, max_anoms = max_anoms, verbose = TRUE)
outlier <- outlier_list$outlier
limit_lower <- outlier_list$critical_limits[[1]]
limit_upper <- outlier_list$critical_limits[[2]]
# Returns
ret <- data %>%
dplyr::mutate(!! paste0(dplyr::quo_name(target_expr), "_l1") := limit_lower,
!! paste0(dplyr::quo_name(target_expr), "_l2") := limit_upper) %>%
tibble::add_column(anomaly = outlier)
return(ret)
}
# This is anomalize::iqr()
iqr_vec <- function(x, alpha = 0.05, max_anoms = 0.2, verbose = FALSE) {
quantile_x <- stats::quantile(x, prob = c(0.25, 0.75), na.rm = TRUE)
iq_range <- quantile_x[[2]] - quantile_x[[1]]
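  # Expand the 25/75 quantile band by the IQR factor (0.15 / alpha);
  # at the default alpha = 0.05 this is 3X the IQR on either side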
limits <- quantile_x + (0.15 / alpha) * iq_range * c(-1, 1)
outlier_idx <- ((x < limits[1]) | (x > limits[2]))
outlier_vals <- x[outlier_idx]
outlier_response <- ifelse(outlier_idx == TRUE, "Yes", "No")
vals_tbl <- tibble::tibble(value = x) %>%
tibble::rownames_to_column(var = "index") %>%
# Establish limits and assess if outside of limits
dplyr::mutate(
limit_lower = limits[1],
limit_upper = limits[2],
abs_diff_lower = ifelse(value <= limit_lower, abs(value - limit_lower), 0),
abs_diff_upper = ifelse(value >= limit_upper, abs(value - limit_upper), 0),
max_abs_diff = ifelse(abs_diff_lower > abs_diff_upper, abs_diff_lower, abs_diff_upper)
) %>%
dplyr::select(index, dplyr::everything()) %>%
dplyr::select(-c(abs_diff_lower, abs_diff_upper)) %>%
# Sort by absolute distance from centerline of limits
dplyr::mutate(
centerline = (limit_upper + limit_lower) / 2,
sorting = abs(value - centerline)
) %>%
dplyr::arrange(dplyr::desc(sorting)) %>%
dplyr::select(-c(centerline, sorting)) %>%
tibble::rownames_to_column(var = "rank") %>%
dplyr::mutate(
rank = as.numeric(rank),
index = as.numeric(index)
) %>%
# Identify outliers
dplyr::arrange(dplyr::desc(max_abs_diff)) %>%
dplyr::mutate(
outlier = ifelse(max_abs_diff > 0, "Yes", "No"),
below_max_anoms = ifelse(dplyr::row_number() / dplyr::n() > max_anoms,
"No", "Yes"
),
outlier_reported = ifelse(outlier == "Yes" & below_max_anoms == "Yes",
"Yes", "No"
),
direction = dplyr::case_when(
(outlier_reported == "Yes") & (value > limit_upper) ~ "Up",
(outlier_reported == "Yes") & (value < limit_lower) ~ "Down",
TRUE ~ "NA"
),
direction = ifelse(direction == "NA", NA, direction)
)
vals_tbl_filtered <- vals_tbl %>%
dplyr::filter(below_max_anoms == "Yes") %>%
dplyr::select(-c(max_abs_diff:below_max_anoms)) %>%
dplyr::rename(outlier = outlier_reported)
# Critical Limits
if (any(vals_tbl$outlier == "No")) {
# Non outliers identified, pick first limit
limit_tbl <- vals_tbl %>%
dplyr::filter(outlier == "No") %>%
dplyr::slice(1)
limits_vec <- c(
limit_lower = limit_tbl$limit_lower,
limit_upper = limit_tbl$limit_upper
)
} else {
# All outliers, pick last limits
limit_tbl <- vals_tbl %>%
dplyr::slice(n())
limits_vec <- c(
limit_lower = limit_tbl$limit_lower,
limit_upper = limit_tbl$limit_upper
)
}
# Return results
if (verbose) {
outlier_list <- list(
outlier = vals_tbl %>% dplyr::arrange(index) %>% dplyr::pull(outlier_reported),
outlier_idx = vals_tbl %>% dplyr::filter(outlier_reported == "Yes") %>% dplyr::pull(index),
outlier_vals = vals_tbl %>% dplyr::filter(outlier_reported == "Yes") %>% dplyr::pull(value),
outlier_direction = vals_tbl %>% dplyr::filter(outlier_reported == "Yes") %>% dplyr::pull(direction),
critical_limits = limits_vec,
outlier_report = vals_tbl_filtered
)
return(outlier_list)
} else {
return(vals_tbl %>% dplyr::arrange(index) %>% dplyr::pull(outlier_reported))
}
}
|
/R/diagnostics-tk_anomaly_diagnostics.R
|
no_license
|
business-science/timetk
|
R
| false | false | 12,153 |
r
|