# install.packages("tidyverse")
library(tidyverse)
#mpg2 <- transmute(mpg,
# manufacturer = manufacturer,
#model = model,
#year = year,
#
#)
View(mpg)
mpg_ <- select(mpg, year, model, manufacturer)
View(mpg_)
# works like the pipe in bash
# %>% => the pipe operator
mpg %>%
select (
year, model
) %>%
View
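# The chain above is equivalent to the nested call below (same result, no pipe):
View(select(mpg, year, model))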
# preview the dataset documentation
?mpg
# sorting
mpg %>%
select (
year, model
) %>%
arrange(-year) %>%
View
mpg %>%
count(manufacturer) %>%
arrange(n)
?arrange
mpg %>%
count(manufacturer, year) %>%
arrange(n)
mpg %>%
count(manufacturer, year) %>%
arrange(n) %>%
spread(year, n) %>%
View()
?spread
?mpg
# summarise, create a new column with the difference, then filter
mpg %>%
group_by(manufacturer, year) %>%
summarise(
media_cidade = mean(cty),
media_via = mean(hwy)
) %>%
mutate(
diff_media = media_via - media_cidade
) %>%
arrange(diff_media) %>%
filter(
media_cidade > 20,
media_cidade < 24
) %>%
View()
mpg %>%
ggplot(aes(x = manufacturer)) +
geom_bar() +
coord_flip()
mpg %>%
group_by(manufacturer) %>%
summarise(
media_cidade = mean(cty)
) %>%
ggplot(aes(x = manufacturer, y = media_cidade)) +
geom_bar(stat = "identity") +
coord_flip() +
labs(
x = "Fabricante",
y = "Média",
title = "Consumo médio por fabricante"
)
?geom_bar
?right_join
colnames(mpg)
?mpg
ggplot(mpg, aes(x = cty)) +
geom_histogram(aes(color = drv), fill = 'white', bins = 30) # 'identify' is not a valid colour name; a plain white fill is assumed here
mpg %>%
ggplot(aes(cty)) +
geom_histogram(bins = 30) +
facet_wrap(~drv)
?mpg
mpg %>%
ggplot(aes(cty)) +
geom_histogram(bins = 30) +
facet_grid(fl ~drv)
mpg %>%
ggplot(aes(cty)) +
geom_density() + # geom_density() does not take a 'bins' argument
facet_grid(fl ~drv)
mpg %>%
ggplot(aes(displ, cty)) +
geom_point(alpha=0.1) +
geom_smooth()
mpg %>%
ggplot(aes(displ, cty)) +
geom_point(aes(color = drv), alpha=0.1) +
geom_smooth(se = FALSE)
mpg %>%
ggplot(aes(displ, cty)) +
geom_point(aes(color = drv), alpha=0.1) +
geom_smooth(se = FALSE) +
facet_wrap(~drv)
modelo <- lm(
cty ~ displ,
mpg
)
modelo$coefficients
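# Sanity check of the fitted line (same idea as add_predictions below): a manual
# prediction of city mpg for a hypothetical 2.0-litre engine from the coefficients.
coef(modelo)[1] + coef(modelo)[2] * 2.0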
library(modelr)
mpg <- mpg %>%
add_predictions(modelo)
mpg$pred
mpg %>%
ggplot(aes(displ)) +
geom_point(aes(y = cty), color = "red") +
geom_point(aes(y = pred), color = "blue")
?mpg
mpg <- mpg %>%
add_residuals(modelo)
mpg %>%
ggplot(aes(cty, resid)) +
geom_point(aes(color = drv))
| path: /recuperacao_dados/r.R | license: no_license | repo: lucamaral/pos | language: R |
#' Swiss banknotes data
#'
#' The data set contains six measurements made on 100 genuine and 100 counterfeit old-Swiss 1000-franc bank notes.
#' The data frame and the documentation are a copy of [mclust::banknote].
#'
#' @format
#' A data frame with 200 rows and 7 columns:
#' \describe{
#' \item{Status}{the status of the banknote: `genuine` or `counterfeit`}
#' \item{Length}{Length of bill (mm)}
#' \item{Left}{Width of left edge (mm)}
#' \item{Right}{Width of right edge (mm)}
#' \item{Bottom}{Bottom margin width (mm)}
#' \item{Top}{Top margin width (mm)}
#' \item{Diagonal}{Length of diagonal (mm)}
#' }
#' @source Flury, B. and Riedwyl, H. (1988). Multivariate Statistics: A practical approach. London: Chapman & Hall, Tables 1.1 and 1.2, pp. 5-8.
"banknote"
| path: /R/banknote.R | license: no_license | repo: cran/andrews | language: R |
### dshw method from the R forecast package to predict electricity consumption.
## I tried to use it on the taylor dataset available in the package.
### I used the parameters of the model reported in the paper (Table 2):
### Taylor, J.W. (2003) Short-term electricity demand forecasting using double seasonal exponential smoothing. Journal of the Operational Research Society, 54, 799-805.
### But when I evaluated the MAPE error for the next 48 half-hours I got different values than those reported in the paper (the bottom curve in Figure 4).
### Here is my code and a comparison of MAPE errors. What am I doing wrong? How did Taylor calculate the MAPE error? Why does the figure caption say the results are for the 4-week post-sample period when there are only 48 half-hours in the figure? Thanks for any help.
library("forecast")
# first 8 weeks as training set
train <- msts(taylor[1:2688], seasonal.periods=c(48,336), ts.frequency=48)
# the rest - 4 weeks - is the test set; it starts on day 57
test <- msts(taylor[2689:4032], seasonal.periods=c(48,336), ts.frequency=48, start=57)
model <- dshw(train, alpha=0.01, beta=0.00, gamma=0.21, omega=0.24, phi=0.92)
# plot of MAPE errors for different horizons
MAPE <- c()
for(i in 1:48) {
MAPE <- c(MAPE, accuracy(f=model,x=test[1:i])[2,5])
}
plot(MAPE~c(1:48), ylim=c(0,3))
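# A possible explanation (an assumption, not confirmed by the paper text above): the
# accuracy() call in the loop gives the cumulative MAPE over horizons 1..i, whereas a
# per-horizon curve would use the APE at each individual lead time. Minimal sketch of
# the per-horizon errors from this single forecast origin:
ape <- 100 * abs(as.numeric(test[1:48]) - as.numeric(model$mean[1:48])) /
  as.numeric(test[1:48])
plot(ape ~ c(1:48), ylim = c(0, 3), xlab = "horizon (half-hours)", ylab = "APE (%)")
# Taylor's figure probably averages such per-horizon errors over every forecast origin in
# the full 4-week post-sample period (a rolling-origin evaluation), not just the first one.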
| path: /training8.R | license: no_license | repo: ahmeduncc/Some-R-scripts | language: R |
#****************************************************************************************************************************************************
load("LR.RData")
data <- reactive({
switch(input$edata,
"Birth weight" = LR)
})
DF0 = reactive({
inFile = input$file
if (is.null(inFile)){
x<-data()
}
else{
if(!input$col){
csv <- read.csv(inFile$datapath, header = input$header, sep = input$sep, quote=input$quote, stringsAsFactors=TRUE)
}
else{
csv <- read.csv(inFile$datapath, header = input$header, sep = input$sep, quote=input$quote, row.names=1, stringsAsFactors=TRUE)
}
validate( need(ncol(csv)>1, "Please check your data (nrow>1, ncol>1), valid row names, column names, and separators") )
validate( need(nrow(csv)>1, "Please check your data (nrow>1, ncol>1), valid row names, column names, and separators") )
x <- as.data.frame(csv)
}
if(input$transform) {
x <- as.data.frame(t(x))
names(x)<- make.names(names(x), unique = TRUE, allow_ = FALSE)
}
class <- var.class(x)
b.names <- colnames(x[,class[,1] %in% "binary",drop=FALSE])
x[b.names] <- lapply(x[b.names], as.factor) # lapply keeps the factor class of each binary column; sapply would simplify it away
return(x)
})
## raw variable type
var.type.list0 <- reactive({
var.class(DF0())
})
type.int <- reactive({
colnames(DF0()[,var.type.list0()[,1] %in% "integer", drop=FALSE])
})
output$factor1 = renderUI({
selectInput(
'factor1',
HTML('1. 整数変数をカテゴリ変数に変換する'), # "1. Convert integer variables to categorical variables"
selected = NULL,
choices = type.int(),
multiple = TRUE
)
})
DF1 <- reactive({
df <-DF0()
df[input$factor1] <- as.data.frame(lapply(df[input$factor1], factor))
return(df)
})
var.type.list1 <- reactive({
var.class(DF1())
})
type.fac1 <- reactive({
colnames(DF1()[,var.type.list1()[,1] %in% c("factor", "binary"),drop=FALSE])
})
output$factor2 = renderUI({
selectInput(
'factor2',
HTML('2. カテゴリ変数を実数値の数値変数に変換する'), # "2. Convert categorical variables to real-valued numeric variables"
selected = NULL,
choices = type.fac1(),
multiple = TRUE
)
})
DF2 <- reactive({
df <-DF1()
df[input$factor2] <- as.data.frame(lapply(df[input$factor2], as.numeric))
return(df)
})
type.fac2 <- reactive({
colnames(DF2()[unlist(lapply(DF2(), is.factor))])
})
output$lvl = renderUI({
selectInput(
'lvl',
HTML('1. カテゴリ変数を選択する'), # "1. Select categorical variables"
selected = NULL,
choices = type.fac2(),
multiple = TRUE
)
})
output$rmrow = renderUI({
shinyWidgets::pickerInput(
'rmrow',
h4(tags$b('いくつかのサンプル/外れ値を削除する')), # "Remove some samples/outliers"
selected = NULL,
choices = rownames(DF2()),
multiple = TRUE,
options = shinyWidgets::pickerOptions(
actionsBox=TRUE,
size=5)
)
})
DF2.1 <- reactive({
if(length(input$rmrow)==0) {df <- DF2()}
else{
df <- DF2()[-which(rownames(DF2()) %in% c(input$rmrow)),]
}
return(df)
})
DF3 <- reactive({
if (length(input$lvl)==0 || length(unlist(strsplit(input$ref, "[\n]")))==0 ||length(input$lvl)!=length(unlist(strsplit(input$ref, "[\n]")))){
df <- DF2.1()
}
else{
df <- DF2.1()
x <- input$lvl
y <- unlist(strsplit(input$ref, "[\n]"))
for (i in 1:length(x)){
#df[,x[i]] <- as.factor(as.numeric(df[,x[i]]))
df[,x[i]] <- relevel(df[,x[i]], ref= y[i])
}
}
return(df)
})
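# Minimal illustration (toy data, not part of the app) of the relevel() step inside DF3():
# changing a factor's reference level, which changes the baseline used by later models.
local({
  f <- factor(c("low", "high", "high", "low")) # default reference level is "high" (alphabetical)
  levels(relevel(f, ref = "low"))              # "low" now comes first and is the reference
})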
output$Xdata <- DT::renderDT(DF3(),
extensions = list(
'Buttons'=NULL,
'Scroller'=NULL),
options = list(
dom = 'Bfrtip',
buttons = c('copy', 'csv', 'excel'),
deferRender = TRUE,
scrollX = TRUE,
scrollY = 300,
scroller = TRUE))
type.num3 <- reactive({
colnames(DF3()[unlist(lapply(DF3(), is.numeric))])
})
type.fac3 <- reactive({
colnames(DF3()[unlist(lapply(DF3(), is.factor))])
})
#output$strnum <- renderPrint({str(DF3()[,type.num3()])})
#output$strfac <- renderPrint({Filter(Negate(is.null), lapply(DF3(),levels))})
## final variable type
var.type.list3 <- reactive({
var.class(DF3())
})
output$var.type <- DT::renderDT(var.type.list3(),
extensions = list(
'Buttons'=NULL,
'Scroller'=NULL),
options = list(
dom = 'Bfrtip',
buttons = c('copy', 'csv', 'excel'),
deferRender = TRUE,
scrollX = TRUE,
scrollY = 200,
scroller = TRUE))
#sum <- reactive({
# desc.numeric(DF3())
# })
output$sum <- DT::renderDT({desc.numeric(DF3())},
extensions = list(
'Buttons'=NULL,
'Scroller'=NULL),
options = list(
dom = 'Bfrtip',
buttons = c('copy', 'csv', 'excel'),
deferRender = TRUE,
scrollX = TRUE,
scrollY = 200,
scroller = TRUE))
output$fsum = DT::renderDT({desc.factor(DF3())},
extensions = list(
'Buttons'=NULL,
'Scroller'=NULL),
options = list(
dom = 'Bfrtip',
buttons = c('copy', 'csv', 'excel'),
deferRender = TRUE,
scrollX = TRUE,
scrollY = 200,
scroller = TRUE))
output$tx = renderUI({
selectInput(
'tx',
tags$b('1. x軸の数値変数を選択する'), # "1. Select the numeric variable for the x-axis"
selected=type.num3()[2],
choices = type.num3())
})
output$ty = renderUI({
selectInput(
'ty',
tags$b('2. y軸の数値変数を選択する'), # "2. Select the numeric variable for the y-axis"
selected = type.num3()[1],
choices = type.num3())
})
## scatter plot
output$p1 = plotly::renderPlotly({
validate(need(input$tx, "Loading variable"))
validate(need(input$ty, "Loading variable"))
p<- plot_scat(DF3(), input$tx, input$ty, input$xlab, input$ylab)
plotly::ggplotly(p)
})
## histogram
output$hx = renderUI({
selectInput(
'hx',
tags$b('数値変数を選択する'), # "Select a numeric variable"
selected = type.num3()[1],
choices = type.num3())
})
output$p2 = plotly::renderPlotly({
validate(need(input$hx, "Loading variable"))
p<-plot_hist1(DF3(), input$hx, input$bin)
plotly::ggplotly(p)
})
output$p21 = plotly::renderPlotly({
validate(need(input$hx, "Loading variable"))
p<-plot_density1(DF3(), input$hx)
plotly::ggplotly(p)
})
| path: /7_1MFSlr_jp/server_data.R | license: permissive | repo: mephas/mephas_web | language: R |
# equating/linking HSGPA and Econ EOCT to "college-ready" SAT and ACT scores
# will be used to examine thresholds from CollegeBoard and ACT research
## answers the question: for graduates, what HSGPA, Econ EOCT, SAT, and ACT values
## give students an 80% chance of enrolling in college and of persisting in college for 2 years?
# created on 2014.03.21 by James Appleton
# last updated 2015.01.06 by James Appleton
require(ggplot2)
require(grid)
require(RODBC)
require(plyr)
require(foreign)
require(reshape2)
rm(list=ls())
path <- readLines("c:\\current_path.txt")
# set directories
setwd (paste(path,
"\\Research Projects\\RaisngAchClsngGap",sep=""))
maindir <- paste(path,
"\\Research Projects\\RaisngAchClsngGap",sep="")
dir ()
# function
vplayout <- function(x, y) {
viewport(layout.pos.row = x, layout.pos.col = y)
}
# convert factor variable to numeric
factorconvert <- function(f){as.numeric (levels (f))[f]}
# trim extra preceding and following characters
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
# change variable case; df name in quotations to be accepted
case.cols <- function(x) {
x.df <- get(x)
colnames(x.df) <- tolower(names(x.df))
assign(x,x.df, env = .GlobalEnv)
}
# set years for graduation data
cohortYear_shrt <- c(2010, 2011, 2012) # b/c 2013 doesn't have 4 semesters of time yet
yrs <- length(cohortYear_shrt) # number of years set below
startYear1 <- "2006-07" # for 2011 grads
startYear_shrt1 <- "2007"
startYear2 <- "2007-08" # for 2011 grads
startYear_shrt2 <- "2008"
startYear3 <- "2008-09" # for 2012 grads
startYear_shrt3 <- "2009"
startYear <- c(startYear1, startYear2, startYear3)
startYear_shrt <- c(startYear_shrt1, startYear_shrt2, startYear_shrt3)
###################################################
### Load the NSC data
###################################################
nsc <- read.csv(paste0(path, "\\Research Projects\\NSC Student Tracker\\",
"NSC StudentTracker_2014.10_2014Graduates\\received\\",
"1302550hs_10001139-28963-DETAIL-EFFDT-20141126-RUNDT-20141204.csv"),
sep = ",", header = TRUE)
nsc <- case.cols("nsc")
# change NA enrollment begin and end dates so can't count within enrollment periods
nsc[is.na(nsc$enrollment_begin), "enrollment_begin" ] <- 0
nsc[is.na(nsc$enrollment_end), "enrollment_end" ] <- 0
# keep students graduating in cohort years and assign cohort
nsc$cohort <- NA
for (i in 1:yrs) {
nsc[nsc$high_school_grad_date > (cohortYear_shrt[i] - 1)*10000 + 0801 &
nsc$high_school_grad_date < cohortYear_shrt[i]*10000 + 0731, dim(nsc)[2]] <- cohortYear_shrt[i]
}
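# (Note: the grad/enrollment dates above are yyyymmdd integers, so e.g. 2010*10000 + 801
#  is 20100801, i.e. 1 Aug 2010 -- a quick sanity check of that encoding:)
stopifnot(2010 * 10000 + 801 == 20100801)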
# (F)ull-time, (H)alf-time, (L)ess than half-time, (Q) 3/4 time,
# (A) Leave of absence, (W)ithdrawn, (D)eceased
# from: http://www.studentclearinghouse.org/colleges/files/ST_DetailReportGuide.pdf
# create gcps id
nsc[,1] <- as.character(nsc[,1])
nsc$id <- as.numeric(substr(nsc[,1], 1, nchar(nsc[,1]) - 1))
nsc <- nsc[!is.na(nsc$cohort), ]
# create immed.transition and persist.enroll variables
nsc$i.t <- FALSE
nsc$p.e1 <- FALSE
nsc$p.e2 <- FALSE
nsc$p.e3 <- FALSE
for (i in 1:yrs) {
nsc[nsc$i.t == FALSE, "i.t"] <- nsc[nsc$i.t == FALSE, "enrollment_begin"] < cohortYear_shrt[i]*10000 + 1101 &
nsc[nsc$i.t == FALSE, "enrollment_end"] > cohortYear_shrt[i]*10000 + 915 &
nsc[nsc$i.t == FALSE, "cohort"] == cohortYear_shrt[i] #&
#nsc[nsc$i.t == FALSE, "enrollment_status"] == "F"
# nsc[nsc$i.t == FALSE, "i.t"] <- as.numeric(nsc[nsc$i.t == FALSE, "enrollment_begin"]) < cohortYear_shrt[i]*10000 + 1231 &
# as.numeric(nsc[nsc$i.t == FALSE, "enrollment_begin"]) > cohortYear_shrt[i]*10000 + 0801
# nsc[nsc$i.t == FALSE, "cohort"] == cohortYear_shrt[i]
#
it <- ddply(nsc[, c("id", "i.t")], "id", summarise,
immed.t = sum(i.t))
it$i.t <- it$immed.t > 0
nsc <- nsc[, -(which(names(nsc) %in% c("i.t")))]
nsc <- merge(nsc, it[, c(1, 3)], by.x = "id", by.y = "id", all.x = TRUE)
nsc[nsc$p.e1 == FALSE, "p.e1"] <- nsc[nsc$p.e1 == FALSE, "i.t"] == TRUE &
nsc[nsc$p.e1 == FALSE, "enrollment_begin"] < (cohortYear_shrt[i] + 1)*10000 + 501 &
nsc[nsc$p.e1 == FALSE, "enrollment_end"] > (cohortYear_shrt[i] + 1)*10000 + 301 &
nsc[nsc$p.e1 == FALSE, "cohort"] == cohortYear_shrt[i] &
nsc[nsc$p.e1 == FALSE, "enrollment_status"] %in% c("F", "Q")
nsc[nsc$p.e2 == FALSE, "p.e2"] <- nsc[nsc$p.e2 == FALSE, "i.t"] == TRUE &
nsc[nsc$p.e2 == FALSE, "enrollment_begin"] < (cohortYear_shrt[i] + 1)*10000 + 1101 &
nsc[nsc$p.e2 == FALSE, "enrollment_end"] > (cohortYear_shrt[i] + 1)*10000 + 915 &
nsc[nsc$p.e2 == FALSE, "cohort"] == cohortYear_shrt[i] &
nsc[nsc$p.e2 == FALSE, "enrollment_status"] %in% c("F", "Q")
nsc[nsc$p.e3 == FALSE, "p.e3"] <- nsc[nsc$p.e3 == FALSE, "i.t"] == TRUE &
nsc[nsc$p.e3 == FALSE, "enrollment_begin"] < (cohortYear_shrt[i] + 2)*10000 + 501 &
nsc[nsc$p.e3 == FALSE, "enrollment_end"] > (cohortYear_shrt[i] + 2)*10000 + 301 &
nsc[nsc$p.e3 == FALSE, "cohort"] == cohortYear_shrt[i] &
nsc[nsc$p.e3 == FALSE, "enrollment_status"] %in% c("F", "Q")
}
mrg <- ddply(nsc[, c("id", "p.e1", "p.e2", "p.e3", "i.t")], "id", summarise,
pe1 = sum(p.e1),
pe2 = sum(p.e2),
pe3 = sum(p.e3),
i.t = sum(i.t))
mrg$p.e <- mrg$pe1 == 1 & mrg$pe2 == 1 & mrg$pe3 == 1
nsc <- merge(nsc, mrg[, c("id", "i.t", "p.e")], by.x = "id", by.y = "id", all.x = TRUE)
nsc <- unique(nsc[, c(1, 3:5, 10, 25, 29, 31)])
colnames(nsc)[which(names(nsc) == "i.t.x")] <- "i.t"
nsc.model <- nsc[, c("id", "cohort", "i.t", "p.e")]
ma_ch <- odbcConnect("ODS_Prod_MA", uid = "Research", pwd = "Research")
##################################
## get ACT data
##################################
act <- sqlQuery(ma_ch, paste0(
" SELECT [STUNUMB]
,[SCHOOL_YEAR]
,[TEST_KEY]
,[EXAM_ADMIN_DATE]
,[SUBJECT]
,[SCALE_SCORE]
FROM [Assessment].[dbo].[TEST_STU_ACT]
WHERE SCHOOL_YEAR >= 2008 and
SCHOOL_YEAR <= 2012 and
SCALE_SCORE is not null and
SCALE_SCORE != 0
"))
act <- case.cols("act")
names(act)[which(names(act) == "stunumb")] <- "id"
# filter down to average scale score by kid
actStu <- ddply(act[, c(1, 5:6)], c("id", "subject"), summarise,
actSS = mean(scale_score))
stopifnot(anyDuplicated(actStu[, 1:2])==0)
actStu$actSS <- round(actStu$actSS)
stopifnot(actStu$actSS >= 1 & actStu$actSS <= 36)
##################################
## get EOCT econ data
##################################
econECT <- sqlQuery(ma_ch, paste0(
" SELECT [SCHOOL_YR]
,[LOC]
,[EXAM_ADMIN_DATE]
,[GRADE]
,[STUNUMB]
,[SUBJECT]
,[TOTAL_SCALE_SCORE]
FROM [Assessment].[dbo].[TEST_STU_ECT]
WHERE SUBJECT = 'ECO' and
SCHOOL_YR in ('2010', '2011', '2012') and
TOTAL_SCALE_SCORE is not null and
TOTAL_SCALE_SCORE != 0
"))
#close(ma_ch)
# filter down to average scale score by kid
econECT <- ddply(econECT[, c(5, 7)], "STUNUMB", summarise,
econSS = mean(TOTAL_SCALE_SCORE))
stopifnot(anyDuplicated(econECT$STUNUMB)==0)
econECT$econSS <- round(econECT$econSS)
stopifnot(econECT$econSS >= 200 & econECT$econSS <= 650)
##################################
## get GPA data
##################################
for (i in 1:length(cohortYear_shrt)) {
gpa <- sqlQuery(ma_ch, paste0(
"SELECT *
FROM [Predictive_Analytics].[PAVIEW2].[v_Student_Course_History_DETAIL]
WHERE SchoolYear = ", cohortYear_shrt[i], " and
Grade in ('03', '04', '05', '06', '07', '08', '09', '10', '11', '12')
"))
gpa <- case.cols("gpa")
#gpa[grepl("Science", gpa$coresubjectcode), "coreind"] <- 1
assign(paste0("gpa.", cohortYear_shrt[i]), gpa)
}
df <- get(paste0("gpa.", cohortYear_shrt[1]))
for (j in 2:length(cohortYear_shrt)) {
df2 <- get(paste0("gpa.", cohortYear_shrt[i]))
df <- rbind(df, df2)
}
gpa <- df
rm(df, df2, list = ls(pattern = "gpa."))
#format GPA
# generate weighted core GPA
gpa.core <- gpa[gpa$coreind == 1, ]
# for 12th grade keep only 1st semester
gpa.core.12th <- gpa.core[gpa.core$calendarmonth > 7 & gpa.core$grade == 12, ]
# aggregate
gc12.agg <- ddply(gpa.core.12th[, c("permnum", "schoolyear", "creditsattempted",
"creditweightedmark", "coresubjectcode")],
c("permnum", "schoolyear", "coresubjectcode"), summarise,
N = length(permnum),
ca = sum(creditsattempted),
cw = sum(creditweightedmark))
gc12.aggm <- melt(gc12.agg[, c(1:3, 5:6)], id.vars = c(1:3))
gc12.aggr <- dcast(gc12.aggm, permnum + schoolyear ~ coresubjectcode + variable)
gc12.aggr$sem1.gpa.la <- round(gc12.aggr[, "LA_cw"] / gc12.aggr[, "LA_ca"], 1)
gc12.aggr$sem1.gpa.ma <- round(gc12.aggr[, "MA_cw"] / gc12.aggr[, "MA_ca"], 1)
gc12.aggr$sem1.gpa.sc <- round(gc12.aggr[, "SC_cw"] / gc12.aggr[, "SC_ca"], 1)
gc12.aggr$sem1.gpa.ss <- round(gc12.aggr[, "SS_cw"] / gc12.aggr[, "SS_ca"], 1)
gc12.aggr$sem1.gpa.core <- round(apply(gc12.aggr[, c("LA_cw", "MA_cw", "SC_cw", "SS_cw")],
1, function(x) sum(x, na.rm = TRUE)) /
apply(gc12.aggr[, c("LA_ca", "MA_ca", "SC_ca", "SS_ca")],
1, function(x) sum(x, na.rm = TRUE)), 1)
gc12.aggf <- gc12.aggr[, c("permnum", "schoolyear", "sem1.gpa.la", "sem1.gpa.ma",
"sem1.gpa.sc", "sem1.gpa.ss", "sem1.gpa.core")]
rm(gc12.agg, gc12.aggm, gc12.aggr, list=ls(pattern = "gpa"))
gc()
##################################
## get SAT data
##################################
sat <- sqlQuery(ma_ch, paste0(
" SELECT [STUNUMB]
,[TEST_KEY]
,[EXAM_ADMIN_DATE]
,[NONSTAND_IND]
,[SUBJECT]
,[SCORE]
FROM [Assessment].[dbo].[TEST_STU_SAT]
WHERE EXAM_ADMIN_DATE >= ", cohortYear_shrt[1], "0531 and
EXAM_ADMIN_DATE <= ", cohortYear_shrt[length(cohortYear_shrt)], "0531 and
NONSTAND_IND = '' and
SUBJECT in ('MA', 'VE')
"))
close(ma_ch)
sat <- case.cols("sat")
# filter down to average scale score by kid
sat <- ddply(sat[, c(1, 5:6)], c("stunumb", "subject"), summarise,
satSS = mean(score))
stopifnot(anyDuplicated(sat[, 1:2])==0)
sat$satSS <- round(sat$satSS)
stopifnot(sat$satSS >= 200 & sat$satSS <= 800)
# restructure
sat <- dcast(sat, stunumb ~ subject)
###########################
# load the graduation data
###########################
for (i in 2:3) {
fileLoc <- paste0(path,
"\\RBES\\Graduation Rate\\Cohort Graduation Rate Data\\ClassOfSY", cohortYear_shrt[i])
df <- read.csv(paste0("..\\RaisngAchClsngGap\\data\\prep\\DOECohortData_", startYear[i],
"_jja.csv"), sep = ",", header = TRUE)
df <- case.cols("df")
names(df)[40] <- "update.diploma.type"
df <- df[df$grad.rate.type == 4 & df$school.id == "ALL" &
df$update.diploma.type %in% c("G", "C", "B", "V"), ]
df <- merge(df, econECT, by.x = "id", by.y = "STUNUMB", all.x = TRUE)
# remove NAs
a.e <- as.data.frame(df[complete.cases(df[, c(2, 4, 8, 11:13)]), c(2, 4, 8, 11:13)])
colnames(a.e) <- c("loc", "ELA.GPA", "eng.ACT", "school", "gr11", "econSS")
a.m <- as.data.frame(df[complete.cases(df[, c(2, 5, 9, 11:13)]), c(2, 5, 9, 11:13)])
colnames(a.m) <- c("loc", "math.GPA", "math.ACT", "school", "gr11", "econSS")
a.r <- as.data.frame(df[complete.cases(df[, c(2, 4, 10, 11:13)]), c(2, 4, 10, 11:13)])
colnames(a.r) <- c("loc", "ELA.GPA", "rdg.ACT", "school", "gr11", "econSS")
s.m <- as.data.frame(df[complete.cases(df[, c(2, 5, 6, 11:13)]), c(2, 5, 6, 11:13)])
colnames(s.m) <- c("loc", "math.GPA", "math.SAT", "school", "gr11", "econSS")
s.v <- as.data.frame(df[complete.cases(df[, c(2, 4, 7, 11:13)]), c(2, 4, 7, 11:13)])
colnames(s.v) <- c("loc", "ELA.GPA", "verbal.SAT", "school", "gr11", "econSS")
q.titles <- c("Mathematics: GPA and ACT\n(r = ",
"E/LA: GPA and ACT\n(r = ",
"E/LA GPA and Reading ACT\n(r = ",
"Mathematics: GPA and SAT\n(r = ",
"E/LA GPA and SAT Verbal\n(r = ")
q.objects <- cbind(c("aMath", "aEng", "aRD", "sMath", "sVerb"),
c("a.m", "a.e", "a.r", "s.m", "s.v"))
q.labels <- cbind(c("Mathematics GPA",
"English/Language Arts GPA",
"English/Language Arts GPA",
"Mathematics GPA",
"English/Language Arts GPA"),
c("Mathematics ACT Score",
"English ACT Score",
"Reading ACT Score",
"Mathematics SAT Score",
"Verbal SAT Score"))
q <- cbind(q.titles, q.objects, q.labels)
rm(q.titles, q.objects, q.labels)
######################################*
schlTstGPA <- as.data.frame(matrix(rep(NA, 7), nrow = 1))
colnames(schlTstGPA) <- c("N", "perc.11th", "prior.perf", "school", "test", "gpa", "r")
df[, 11] <- lapply(df[, 11], as.character)
modelGPA <- function(x, y) { # y is location code
model <- lm(x[, 3] ~ x[, 6], na.action = "na.omit", x)
gpa <- round((line-summary(model)$coefficients[1, 1])/
summary(model)$coefficients[2, 1], 0)
r <- round(cor(x[, 3], x[, 6]), 2)
#assign(paste0("gpa.", q[i, 2], ".", y), gpa, envir = .GlobalEnv)
newDF <- rbind(schlTstGPA, c(length(model$residuals),
round(length(model$residuals)/mean(x[, 5])*100, 1),
median(x[, 6]), paste0(unique(x[, 4])), q[i, 2], get("gpa"),
get("r")))
assign("schlTstGPA", newDF, envir = .GlobalEnv)
}
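# A minimal sketch (hypothetical numbers, not district data) of the inverse-prediction step
# used in modelGPA(): fit score = b0 + b1*gpa, then solve for the GPA at which the fitted
# line crosses the "college-ready" score, i.e. (line - b0) / b1.
local({
  demo <- data.frame(gpa = c(2.0, 2.5, 3.0, 3.5, 4.0),
                     act = c(16, 18, 21, 24, 27))
  fit <- lm(act ~ gpa, data = demo)
  target <- 22                             # hypothetical college-ready ACT line
  (target - coef(fit)[1]) / coef(fit)[2]   # equated GPA
})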
schls <- unique(df[, 2])
for (i in 1:5) {
assign("df1", get(paste(q[i, 3], sep = "")))
if (i %in% (4:5)) {
line <- 520
} else if (i == 3) {
line <- 18
} else {
line <- 22
}
for (l in 1:length(schls)) {
df2 <- df1[df1$loc == schls[l], ]
if(sum(complete.cases(df2)) >= 10) { # number of complete rows, not just the row count
modelGPA(df2, df2[1, 4])
}
}
}
schlTstGPA[, c(1:3, 6:7)] <- lapply(schlTstGPA[, c(1:3, 6:7)], as.numeric)
schlTstGPA <- schlTstGPA[schlTstGPA$N >= 20 & !is.na(schlTstGPA$N), ]
schlTstGPA <- schlTstGPA[order(schlTstGPA$test, schlTstGPA$gpa), ]
write.table(schlTstGPA,
file = paste0("..//student.success.factor//data//metadata//",
"equating//gpa_to_ACT_SAT_by_School.csv"),
sep = ",",
row.names = FALSE,
col.names = TRUE)
###################################################################*
schlTstECT <- as.data.frame(matrix(rep(NA, 6), nrow = 1))
colnames(schlTstECT) <- c("N", "perc.11th", "school", "test", "eoct", "r")
modelECT <- function(x, y) { # y is location code
model <- lm(x[, 3] ~ x[, 6], na.action = "na.omit", x)
gpa <- round((line-summary(model)$coefficients[1, 1])/
summary(model)$coefficients[2, 1], 0)
r <- round(cor(x[, 3], x[, 6]), 2)
#assign(paste0("gpa.", q[i, 2], ".", y), gpa, envir = .GlobalEnv)
newDF <- rbind(schlTstECT, c(length(model$residuals),
round(length(model$residuals)/mean(x[, 5])*100, 1),
paste0(unique(x[, 4])), q[i, 2], get("gpa"),
get("r")))
assign("schlTstECT", newDF, envir = .GlobalEnv)
}
schls <- unique(df[, 2])
for (i in 1:5) {
assign("df1", get(paste(q[i, 3], sep = "")))
if (i %in% (4:5)) {
line <- 520
} else if (i == 2) {
line <- 18
} else {
line <- 22
}
for (l in 1:length(schls)) {
df2 <- df1[df1$loc == schls[l], ]
if(sum(complete.cases(df2)) >= 10) { # number of complete rows, not just the row count
modelECT(df2, df2[1, 4])
}
}
}
schlTstECT[, c(1:2, 5:6)] <- lapply(schlTstECT[, c(1:2, 5:6)], as.numeric)
schlTstECT <- schlTstECT[schlTstECT$N >= 20 & !is.na(schlTstECT$N), ]
schlTstECT <- schlTstECT[order(schlTstECT$test, schlTstECT$eoct), ]
write.table(schlTstECT,
file = paste0("..//student.success.factor//data//metadata//",
"equating//eoct_to_ACT_SAT_by_School.csv"),
sep = ",",
row.names = FALSE,
col.names = TRUE)
| path: /hs_data_pred_NSC/equating_ACT_SAT_eoct_NSC_2.R | license: no_license | repo: rrichard-gcps/wsa2pt0 | language: R |
stack_stm<-function(stm.list){
M<-lapply(stm.list, function(x) x$maps)
M<-lapply(M, function(x) lapply(x, function(y) names(y)))
M<-Reduce(stack2, M)
M.out<-mapply(function(x,y)
{setNames(x, y) },
x=stm.list[[1]]$maps, y=M )
out<-stm.list[[1]]
out$maps<-M.out
return(out)
}
#### stack two discrete stm's lists; x,y are the list of state names (i.e. maps)
stack2<-function(x,y){
mapply(function(x,y)
{paste(x,y, sep="") },
x=x, y=y )
}
# Final stack of maps
# cc chars id to stack
# ntrees number of trees to stack
# dirW directory for zip file
paramo<-function(cc, ntrees=10, dirW=c("") )
{
tr<-vector("list", ntrees)
for (i in 1:ntrees){
fl<-paste0(cc, "_", i, ".rds")
stack.L<-vector("list", length(fl))
for (j in 1:length(fl)){
print(paste0("Reading ", paste0(cc[j], ".zip"), " and ", fl[j]))
con<-unz(paste0(dirW, cc[j], ".zip"), filename=paste0(dirW, fl[j]) )
con2 <- gzcon(con)
stack.L[[j]] <- readRDS(con2)
close(con)
}
tr[[i]]<- stack_stm(stack.L)
}
return(tr)
}
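# A toy illustration (hypothetical state names, not course data) of what stack2() does
# inside stack_stm(): state labels from two characters are pasted elementwise within each
# map segment, e.g. c("0","1") and c("A","B") become the combined states c("0A","1B").
stack2(list(c("0", "1"), c("1")), list(c("A", "B"), c("B")))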
| path: /R/Functions_Stack_maps.R | license: permissive | repo: ellenroufs/scate-shortcourse | language: R |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Data.R
\docType{data}
\name{icd10cm_mdc_regex}
\alias{icd10cm_mdc_regex}
\title{Major Diagnostic Categories (MDC) and ICD-10-CM.}
\format{
Data frame
}
\source{
\url{https://www.cms.gov/icd10m/version39-fullcode-cms/fullcode_cms/P0001.html}
}
\usage{
icd10cm_mdc_regex
}
\description{
Dataset of 65696 rows and 4 variables.
}
\examples{
tail(icd10cm_mdc_regex)
}
\keyword{datasets}
| path: /man/icd10cm_mdc_regex.Rd | license: permissive | repo: epinotes/useicd10cm | language: R |
library(tidyr)
## Read the pageview and pagecount CSV files and convert their timestamps to Date
pv_mob_web <- read.csv('pageviews_mobile-web_201507-201709.csv')
pv_mob_web$DATE <- as.Date(as.character(pv_mob_web$timestamp), format='%Y%m%d')
pv_mob_app <- read.csv('pageviews_mobile-app_201507-201709.csv')
pv_mob_app$DATE <- as.Date(as.character(pv_mob_app$timestamp), format='%Y%m%d')
pv_desktop <- read.csv('pageviews_desktop_201507-201709.csv')
pv_desktop$DATE <- as.Date(as.character(pv_desktop$timestamp), format='%Y%m%d')
pc_mobile <- read.csv('pagecounts_mobile_200801-201607.csv')
pc_mobile$DATE <- as.Date(as.character(pc_mobile$timestamp), format='%Y%m%d')
pc_desktop <- read.csv('pagecounts_desktop_200801-201607.csv')
pc_desktop$DATE <- as.Date(as.character(pc_desktop$timestamp), format='%Y%m%d')
pageviews <- merge(pv_mob_app[,c(2,7,8)], pv_mob_web[,c(2,7,8)], by = 'DATE')
pv_mobile <- data.frame('DATE' = pageviews$DATE, 'mobileviews' = pageviews$views.x+pageviews$views.y)
pageviews <- merge(pv_desktop[,c(7,8)], pv_mobile, by = 'DATE')
colnames(pageviews) <- c('Date', 'pageview_desktop_views', 'pageview_mobile_views')
pagecounts <- merge(pc_desktop[,c(2,3,7)], pc_mobile[,c(2,3,7)], by = 'DATE', all.x = TRUE)
pagecounts[is.na(pagecounts)] <- 0
pagecounts <- pagecounts[,c(1,3,5)]
colnames(pagecounts) <- c('Date', 'pagecount_desktop_views', 'pagecount_mobile_views')
finaldf <- merge(pagecounts, pageviews, all = T)
finaldf$pagecount_all_views <- finaldf$pagecount_desktop_views + finaldf$pagecount_mobile_views
finaldf$pageview_all_views <- finaldf$pageview_desktop_views + finaldf$pageview_mobile_views
finaldf$DATE <- finaldf$Date
finaldf <- separate(finaldf, 'Date', c('year', 'month', 'day'), sep = '-')
## to write the dataframe into a csv and save it to local.
write.csv(finaldf, file = "en-wikipedia_traffic_200801-201709.csv",row.names=FALSE)
## to save the plot to local
png(filename="plot.png", width = 580, height = 480, units = 'px')
plot(finaldf$DATE, finaldf$pagecount_desktop_views/1000000, type = 'l', lty = 2, lwd = 2,
col = 'red', ylim = c(800,12000),
main = 'Page Views on English Wikipedia (x 1,000,000)', xlab = '', ylab = '')
legend("topleft",legend=c('pagecount_desktop_views', 'pagecount_mobile_views',
'pagecount_all_views', 'pageview_desktop_views',
'pageview_mobile_views', 'pageview_all_views'),
lty=c(2,2,1,2,2,1),col=c("red","red","brown", "blue", "blue", "black" ),lwd=2, bg = 'white',
y = 10000, ncol = 2, cex = 0.75)
lines(finaldf$DATE, finaldf$pagecount_mobile_views/1000000, type = 'l', lty = 2, lwd = 2,
col = 'red')
lines(finaldf$DATE, finaldf$pagecount_all_views/1000000, type = 'l', lty = 1, lwd = 2,
col = 'brown')
lines(finaldf$DATE, finaldf$pageview_desktop_views/1000000, type = 'l', lty = 2, lwd = 2,
col = 'blue')
lines(finaldf$DATE, finaldf$pageview_mobile_views/1000000, type = 'l', lty = 2, lwd = 2,
col = 'blue')
lines(finaldf$DATE, finaldf$pageview_all_views/1000000, type = 'l', lty = 1, lwd = 2,
col = 'black')
grid(lty = 1, lwd = 0.5)
dev.off()
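# Minimal illustration (toy data) of the tidyr::separate() call used above: a Date string
# is split into year/month/day columns on the "-" separator.
local({
  toy <- data.frame(Date = c("2015-07-01", "2015-08-01"), views = c(10, 20))
  separate(toy, 'Date', c('year', 'month', 'day'), sep = '-')
})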
| path: /plot_pageviews_pagecounts.R | license: permissive | repo: jahnavijasti/data-512-a1 | language: R |
"plot.TwoWaySurvfit" <-
function(x,...)
{
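#Plots the estimated time-varying effects alpha(t) (duration time) and alpha(b) (entry time),
#with grey +/- deviation bands, first for the baseline and then for each covariate in factor.names.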
factor.names<-x$factor.names
grid.frame<-x$grid.frame
varying.frame<-x$varying.frame
deviation.frame<-x$deviation.frame
p<-x$p
attach(varying.frame);attach(deviation.frame)
las<-1;cex.main<-0.9;tcl<- -0.1;cex.lab<-0.7;cex.axis<-0.7;lwd<-1
#plot t
y.range.t.baseline<-range(c(varying.frame$alpha.t.Baseline-deviation.frame$deviation.t.Baseline,varying.frame$alpha.t.Baseline+deviation.frame$deviation.t.Baseline))
plot(grid.frame$grid.t,varying.frame$alpha.t.Baseline,xlab="Duration time (t)",ylab="",cex=0.1,main="Baseline",ylim=y.range.t.baseline,...)
vector.minus<-varying.frame$alpha.t.Baseline-deviation.frame$deviation.t.Baseline
vector.plus<-varying.frame$alpha.t.Baseline+deviation.frame$deviation.t.Baseline
polygon(cbind(c(grid.frame$grid.t,grid.frame$grid.t[length(grid.frame$grid.t):1]),c(vector.minus,vector.plus[length(vector.plus):1])),col="grey")
lines(grid.frame$grid.t,varying.frame$alpha.t.Baseline,lwd=lwd)
lines(grid.frame$grid.t,varying.frame$alpha.t.Baseline-deviation.frame$deviation.t.Baseline,cex=0.08,col=3)
lines(grid.frame$grid.t,varying.frame$alpha.t.Baseline+deviation.frame$deviation.t.Baseline,cex=0.08,col=3)
abline(h=0,lty=3,cex=0.05)
par(new=TRUE)
plot(grid.frame$grid.t,varying.frame$alpha.t.Baseline,type="n",xlab="",ylab="",bty="o",xaxt="n",yaxt="n",...)
#plot b
y.range.b.baseline<-range(c(varying.frame$alpha.b.Baseline-deviation.frame$deviation.b.Baseline,varying.frame$alpha.b.Baseline+deviation.frame$deviation.b.Baseline))
plot(grid.frame$grid.b,varying.frame$alpha.b.Baseline,xlab="Entry time (b)",ylab="",cex=0.1,main="Baseline",axes=TRUE,ylim=y.range.b.baseline,...)
vector.minus<-varying.frame$alpha.b.Baseline-deviation.frame$deviation.b.Baseline
vector.plus<-varying.frame$alpha.b.Baseline+deviation.frame$deviation.b.Baseline
polygon(cbind(c(grid.frame$grid.b,grid.frame$grid.b[length(grid.frame$grid.b):1]),c(vector.minus,vector.plus[length(vector.plus):1])),col="grey")
lines(grid.frame$grid.b,varying.frame$alpha.b.Baseline,lwd=lwd)
lines(grid.frame$grid.b,varying.frame$alpha.b.Baseline-deviation.frame$deviation.b.Baseline,cex=0.08,col=3)
lines(grid.frame$grid.b,varying.frame$alpha.b.Baseline+deviation.frame$deviation.b.Baseline,cex=0.08,col=3)
abline(h=0,lty=3,cex=0.05)
par(new=TRUE)
plot(grid.frame$grid.b,varying.frame$alpha.b.Baseline,type="n",xlab="",ylab="",bty="o",xaxt="n",yaxt="n",...)
if (dim(x$varying.frame)[2] > 2)
{
#ranges for y in plots
y.range.t<-range(c(unlist(mget(paste("alpha.t.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("varying.frame")))-unlist(mget(paste("deviation.t.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("deviation.frame"))),unlist(mget(paste("alpha.t.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("varying.frame")))+unlist(mget(paste("deviation.t.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("deviation.frame")))))
y.range.b<-range(c(unlist(mget(paste("alpha.b.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("varying.frame")))-unlist(mget(paste("deviation.b.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("deviation.frame"))),unlist(mget(paste("alpha.b.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("varying.frame")))+unlist(mget(paste("deviation.b.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("deviation.frame")))))
y.range<-range(y.range.t,y.range.b)
for (k in 1:p)
{
#plot.t
plot(grid.frame$grid.t,get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame"),xlab="Duration time (t)",ylab="",cex=0.1,main=factor.names[k],ylim=y.range,...)
vector.minus<-get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame")-get(paste("deviation.t.",factor.names[k],sep=""),pos="deviation.frame")
vector.plus<-get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame")+get(paste("deviation.t.",factor.names[k],sep=""),pos="deviation.frame")
polygon(cbind(c(grid.frame$grid.t,grid.frame$grid.t[length(grid.frame$grid.t):1]),c(vector.minus,vector.plus[length(vector.plus):1])),col="grey")
lines(grid.frame$grid.t,get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame"),lwd=lwd)
lines(grid.frame$grid.t,get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame")-get(paste("deviation.t.",factor.names[k],sep=""),pos="deviation.frame"),cex=0.08,col=3)
lines(grid.frame$grid.t,get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame")+get(paste("deviation.t.",factor.names[k],sep=""),pos="deviation.frame"),cex=0.08,col=3)
abline(h=0,lty=3,cex=0.05)
par(new=TRUE)
plot(grid.frame$grid.t,alpha.t.Baseline,type="n",xlab="",ylab="",bty="o",xaxt="n",yaxt="n",...)
#plot.b
plot(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame"),xlab="Entry time (b)",ylab="",cex=0.1,main=factor.names[k],axes=TRUE,ylim=y.range,...)
vector.minus<-get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame")-get(paste("deviation.b.",factor.names[k],sep=""),pos="deviation.frame")
vector.plus<-get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame")+get(paste("deviation.b.",factor.names[k],sep=""),pos="deviation.frame")
polygon(cbind(c(grid.frame$grid.b,grid.frame$grid.b[length(grid.frame$grid.b):1]),c(vector.minus,vector.plus[length(vector.plus):1])),col="grey")
lines(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame"),lwd=lwd)
lines(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame")-get(paste("deviation.b.",factor.names[k],sep=""),pos="deviation.frame"),cex=0.08,col=3)
lines(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame")+get(paste("deviation.b.",factor.names[k],sep=""),pos="deviation.frame"),cex=0.08,col=3)
abline(h=0,lty=3,cex=0.05)
par(new=TRUE)
plot(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame"),type="n",xlab="",ylab="",bty="o",xaxt="n",yaxt="n",...)
}
}
detach(varying.frame);detach(deviation.frame)
}
|
/R/plot.TwoWaySurvfit.function.R
|
no_license
|
cran/TwoWaySurvival
|
R
| false | false | 6,060 |
r
|
"plot.TwoWaySurvfit" <-
function(x,...)
{
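#Plots the estimated time-varying effects alpha(t) (duration time) and alpha(b) (entry time),
#with grey +/- deviation bands, first for the baseline and then for each covariate in factor.names.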
factor.names<-x$factor.names
grid.frame<-x$grid.frame
varying.frame<-x$varying.frame
deviation.frame<-x$deviation.frame
p<-x$p
attach(varying.frame);attach(deviation.frame)
las<-1;cex.main<-0.9;tcl<- -0.1;cex.lab<-0.7;cex.axis<-0.7;lwd<-1
#plot t
y.range.t.baseline<-range(c(varying.frame$alpha.t.Baseline-deviation.frame$deviation.t.Baseline,varying.frame$alpha.t.Baseline+deviation.frame$deviation.t.Baseline))
plot(grid.frame$grid.t,varying.frame$alpha.t.Baseline,xlab="Duration time (t)",ylab="",cex=0.1,main="Baseline",ylim=y.range.t.baseline,...)
vector.minus<-varying.frame$alpha.t.Baseline-deviation.frame$deviation.t.Baseline
vector.plus<-varying.frame$alpha.t.Baseline+deviation.frame$deviation.t.Baseline
polygon(cbind(c(grid.frame$grid.t,grid.frame$grid.t[length(grid.frame$grid.t):1]),c(vector.minus,vector.plus[length(vector.plus):1])),col="grey")
lines(grid.frame$grid.t,varying.frame$alpha.t.Baseline,lwd=lwd)
lines(grid.frame$grid.t,varying.frame$alpha.t.Baseline-deviation.frame$deviation.t.Baseline,cex=0.08,col=3)
lines(grid.frame$grid.t,varying.frame$alpha.t.Baseline+deviation.frame$deviation.t.Baseline,cex=0.08,col=3)
abline(h=0,lty=3,cex=0.05)
par(new=TRUE)
plot(grid.frame$grid.t,varying.frame$alpha.t.Baseline,type="n",xlab="",ylab="",bty="o",xaxt="n",yaxt="n",...)
#plot b
y.range.b.baseline<-range(c(varying.frame$alpha.b.Baseline-deviation.frame$deviation.b.Baseline,varying.frame$alpha.b.Baseline+deviation.frame$deviation.b.Baseline))
plot(grid.frame$grid.b,varying.frame$alpha.b.Baseline,xlab="Entry time (b)",ylab="",cex=0.1,main="Baseline",axes=TRUE,ylim=y.range.b.baseline,...)
vector.minus<-varying.frame$alpha.b.Baseline-deviation.frame$deviation.b.Baseline
vector.plus<-varying.frame$alpha.b.Baseline+deviation.frame$deviation.b.Baseline
polygon(cbind(c(grid.frame$grid.b,grid.frame$grid.b[length(grid.frame$grid.b):1]),c(vector.minus,vector.plus[length(vector.plus):1])),col="grey")
lines(grid.frame$grid.b,varying.frame$alpha.b.Baseline,lwd=lwd)
lines(grid.frame$grid.b,varying.frame$alpha.b.Baseline-deviation.frame$deviation.b.Baseline,cex=0.08,col=3)
lines(grid.frame$grid.b,varying.frame$alpha.b.Baseline+deviation.frame$deviation.b.Baseline,cex=0.08,col=3)
abline(h=0,lty=3,cex=0.05)
par(new=TRUE)
plot(grid.frame$grid.b,varying.frame$alpha.b.Baseline,type="n",xlab="",ylab="",bty="o",xaxt="n",yaxt="n",...)
if (dim(x$varying.frame)[2] > 2)
{
#ranges for y in plots
y.range.t<-range(c(unlist(mget(paste("alpha.t.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("varying.frame")))-unlist(mget(paste("deviation.t.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("deviation.frame"))),unlist(mget(paste("alpha.t.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("varying.frame")))+unlist(mget(paste("deviation.t.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("deviation.frame")))))
y.range.b<-range(c(unlist(mget(paste("alpha.b.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("varying.frame")))-unlist(mget(paste("deviation.b.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("deviation.frame"))),unlist(mget(paste("alpha.b.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("varying.frame")))+unlist(mget(paste("deviation.b.",factor.names[1:length(factor.names)],sep=""),envir=as.environment("deviation.frame")))))
y.range<-range(y.range.t,y.range.b)
for (k in 1:p)
{
#plot.t
plot(grid.frame$grid.t,get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame"),xlab="Duration time (t)",ylab="",cex=0.1,main=factor.names[k],ylim=y.range,...)
vector.minus<-get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame")-get(paste("deviation.t.",factor.names[k],sep=""),pos="deviation.frame")
vector.plus<-get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame")+get(paste("deviation.t.",factor.names[k],sep=""),pos="deviation.frame")
polygon(cbind(c(grid.frame$grid.t,grid.frame$grid.t[length(grid.frame$grid.t):1]),c(vector.minus,vector.plus[length(vector.plus):1])),col="grey")
lines(grid.frame$grid.t,get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame"),lwd=lwd)
lines(grid.frame$grid.t,get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame")-get(paste("deviation.t.",factor.names[k],sep=""),pos="deviation.frame"),cex=0.08,col=3)
lines(grid.frame$grid.t,get(paste("alpha.t.",factor.names[k],sep=""),pos="varying.frame")+get(paste("deviation.t.",factor.names[k],sep=""),pos="deviation.frame"),cex=0.08,col=3)
abline(h=0,lty=3,cex=0.05)
par(new=TRUE)
plot(grid.frame$grid.t,alpha.t.Baseline,type="n",xlab="",ylab="",bty="o",xaxt="n",yaxt="n",...)
#plot.b
plot(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame"),xlab="Entry time (b)",ylab="",cex=0.1,main=factor.names[k],axes=TRUE,ylim=y.range,...)
vector.minus<-get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame")-get(paste("deviation.b.",factor.names[k],sep=""),pos="deviation.frame")
vector.plus<-get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame")+get(paste("deviation.b.",factor.names[k],sep=""),pos="deviation.frame")
polygon(cbind(c(grid.frame$grid.b,grid.frame$grid.b[length(grid.frame$grid.b):1]),c(vector.minus,vector.plus[length(vector.plus):1])),col="grey")
lines(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame"),lwd=lwd)
lines(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame")-get(paste("deviation.b.",factor.names[k],sep=""),pos="deviation.frame"),cex=0.08,col=3)
lines(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame")+get(paste("deviation.b.",factor.names[k],sep=""),pos="deviation.frame"),cex=0.08,col=3)
abline(h=0,lty=3,cex=0.05)
par(new=TRUE)
plot(grid.frame$grid.b,get(paste("alpha.b.",factor.names[k],sep=""),pos="varying.frame"),type="n",xlab="",ylab="",bty="o",xaxt="n",yaxt="n",...)
}
}
detach(varying.frame);detach(deviation.frame)
}
|
###################
# plot.recons #
###################
plotRECON <- function(phy, likelihoods, piecolors=NULL, cex=0.5, pie.cex=0.25, file=NULL, height=11, width=8.5, show.tip.label=TRUE, title=NULL, ...){
#plotRECON <- function(phy, likelihoods, piecolors=NULL, cex=0.5, file=NULL, height=11, width=8.5, show.tip.label=TRUE, title=NULL, ...){
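    # Plots the tree, overlays pie charts of the per-state likelihoods at the internal nodes,
    # and adds a legend of the states; if 'file' is given the figure is written to a PDF.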
if(is.null(piecolors)){
piecolors=c("white","black","red","yellow","forestgreen","blue","coral","aquamarine","darkorchid","gold","grey","yellow","#3288BD","#E31A1C")
}
if(!is.null(file)){
pdf(file, height=height, width=width,useDingbats=FALSE)
}
plot(phy, cex=cex, show.tip.label=show.tip.label, ...)
if(!is.null(title)){
title(main=title)
}
# nodelabels(pie=likelihoods,piecol=piecolors, cex=.25)
nodelabels(pie=likelihoods,piecol=piecolors, cex=pie.cex)
states <- colnames(likelihoods)
legend(x="topleft", states, cex=0.8, pt.bg=piecolors,col="black",pch=21);
if(!is.null(file)){
dev.off()
}
}
|
/corHMM/R/plotRECON.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 968 |
r
|
###################
# plot.recons #
###################
plotRECON <- function(phy, likelihoods, piecolors=NULL, cex=0.5, pie.cex=0.25, file=NULL, height=11, width=8.5, show.tip.label=TRUE, title=NULL, ...){
#plotRECON <- function(phy, likelihoods, piecolors=NULL, cex=0.5, file=NULL, height=11, width=8.5, show.tip.label=TRUE, title=NULL, ...){
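    # Plots the tree, overlays pie charts of the per-state likelihoods at the internal nodes,
    # and adds a legend of the states; if 'file' is given the figure is written to a PDF.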
if(is.null(piecolors)){
piecolors=c("white","black","red","yellow","forestgreen","blue","coral","aquamarine","darkorchid","gold","grey","yellow","#3288BD","#E31A1C")
}
if(!is.null(file)){
pdf(file, height=height, width=width,useDingbats=FALSE)
}
plot(phy, cex=cex, show.tip.label=show.tip.label, ...)
if(!is.null(title)){
title(main=title)
}
# nodelabels(pie=likelihoods,piecol=piecolors, cex=.25)
nodelabels(pie=likelihoods,piecol=piecolors, cex=pie.cex)
states <- colnames(likelihoods)
legend(x="topleft", states, cex=0.8, pt.bg=piecolors,col="black",pch=21);
if(!is.null(file)){
dev.off()
}
}
|
setwd("data")
OK=read.csv("OK.csv")
OK$State="Oklahoma"
OK=subset(OK,select=c("County","State","Fatalities.Percent"))
OK=aggregate(.~County+State,data=OK,FUN="mean")
data=read.csv("Distance.csv")
data$County=toupper(data$County)
OK$County=toupper(OK$County)
dta=merge(OK,data,by=c("County","State"))
write.csv(dta,file="Corr_OK.csv")
|
/R_codes/Corr/Create_OK_correlation.R
|
no_license
|
dtmlinh/Car-Crash-Fatalities-Exploration-Tool
|
R
| false | false | 334 |
r
|
setwd("data")
OK=read.csv("OK.csv")
OK$State="Oklahoma"
OK=subset(OK,select=c("County","State","Fatalities.Percent"))
OK=aggregate(.~County+State,data=OK,FUN="mean")
data=read.csv("Distance.csv")
data$County=toupper(data$County)
OK$County=toupper(OK$County)
dta=merge(OK,data,by=c("County","State"))
write.csv(dta,file="Corr_OK.csv")
|
#' Wrapper of goodpractice gp
#'
#' @param path Path to a data analysis root.
#' @param checks Character vector, the checks to run. Defaults to
#' all checks.
#' @param extra_preps Custom preparation functions. See
#' \code{\link[goodpractice]{make_prep}} on creating preparation functions.
#' @param extra_checks Custom checks.
#' @param quiet Whether to suppress output from the preparation
#' functions. Note that not all preparation functions produce output,
#' even if this option is set to \code{FALSE}.
#' @return A checkers object that you can query
#' with a simple API. See \code{\link{results}} to start.
#' @export
#' @importFrom goodpractice gp
#' @importFrom goodpractice all_checks
#' @examples
#' check_results <- gp_check(path=system.file("scripts", package="checkers"),
#' checks = "comments",
#' extra_preps = list(scripts = prep_scripts),
#' extra_checks = list(comments = check_well_commented))
#' check_results
gp_check <- function(path = ".", checks = all_checks(), extra_preps = NULL,
extra_checks = NULL, quiet = TRUE){
if(is.null(options()$checker)){
load_config()
}
gp_out <- gp(path = path,
checks = checks,
extra_preps = extra_preps,
extra_checks = extra_checks,
quiet = quiet)
return(gp_out)
}
|
/R/gp_check.R
|
permissive
|
nistara/checkers
|
R
| false | false | 1,357 |
r
|
#' Wrapper of goodpractice gp
#'
#' @param path Path to a data analysis root.
#' @param checks Character vector, the checks to run. Defaults to
#' all checks.
#' @param extra_preps Custom preparation functions. See
#' \code{\link[goodpractice]{make_prep}} on creating preparation functions.
#' @param extra_checks Custom checks.
#' @param quiet Whether to suppress output from the preparation
#' functions. Note that not all preparation functions produce output,
#' even if this option is set to \code{FALSE}.
#' @return A checkers object that you can query
#' with a simple API. See \code{\link{results}} to start.
#' @export
#' @importFrom goodpractice gp
#' @importFrom goodpractice all_checks
#' @examples
#' check_results <- gp_check(path=system.file("scripts", package="checkers"),
#' checks = "comments",
#' extra_preps = list(scripts = prep_scripts),
#' extra_checks = list(comments = check_well_commented))
#' check_results
gp_check <- function(path = ".", checks = all_checks(), extra_preps = NULL,
extra_checks = NULL, quiet = TRUE){
if(is.null(options()$checker)){
load_config()
}
gp_out <- gp(path = path,
checks = checks,
extra_preps = extra_preps,
extra_checks = extra_checks,
quiet = quiet)
return(gp_out)
}
|
library(caTools)
library(data.table)
library(tidyverse)
library(dplyr)
library(magrittr)
#Getting the Data
train_dat= fread("balanced_data_new.csv", stringsAsFactors = T)
names(train_dat)
####### PRE-PROCESSING OF TEST DATA ########
#Putting the names from train data into array
reqd_col = colnames(train_dat)
#Getting the test data
test_dat = fread("test.csv" , stringsAsFactors = T)
cat_col = grep("_cat", names(test_dat), value = T)
bin_col = grep("_bin", names(test_dat), value = T)
#Converting the data into required datatype
test_dat %<>% mutate_at(bin_col, funs(factor(.)))
test_dat %<>% mutate_at(cat_col, funs(factor(.)))
#Storing the id for future use
t_id = as.integer(as.numeric(test_dat$id))
#Removing id
test_dat$id = NULL
#Getting the same columns as train data
reqd_col = reqd_col[2:length(reqd_col)]
test_dat = subset(test_dat, select = reqd_col)
########### HANDLING THE MISSING VALUES ############
#(Here -1 corresponds to missing values)
#Getting the columns containing the missing values
col_miss=colSums(test_dat == -1)
col_miss_nam = names(col_miss[col_miss>0])
#Mode calculation
Mode <- function(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
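#e.g. Mode(c(1, 2, 2, 3)) returns 2; for a factor column it returns the most frequent value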
#Replacing the missing values with the mode or mean(for continuous data) of the column
mode_val = numeric(length(col_miss_nam))
mode_val[1] = Mode(test_dat$ps_ind_05_cat)
mode_val[2] = mean(test_dat$ps_reg_03[which(test_dat$ps_reg_03 != -1)])
mode_val[3] = Mode(test_dat$ps_car_01_cat)
mode_val[4] = Mode(test_dat$ps_car_07_cat)
mode_val[5] = mean(test_dat$ps_car_11[which(test_dat$ps_car_11 != -1)])
mode_val[6] = mean(test_dat$ps_car_14[which(test_dat$ps_car_14 != -1)])
test_dat$ps_ind_05_cat[which(test_dat$ps_ind_05_cat == -1)] = mode_val[1]
test_dat$ps_reg_03[which(test_dat$ps_reg_03 == -1)] = mode_val[2]
test_dat$ps_car_01_cat[which(test_dat$ps_car_01_cat == -1)] = mode_val[3]
test_dat$ps_car_07_cat[which(test_dat$ps_car_07_cat == -1)] = mode_val[4]
test_dat$ps_car_11[which(test_dat$ps_car_11 == -1)] = mode_val[5]
test_dat$ps_car_14[which(test_dat$ps_car_14 == -1)] = mode_val[6]
###### CREATING THE MODEL ######
#Training the Model
log_model= glm (target ~ ., data = train_dat, family = binomial)
#Prediction using the model
prediction_lr= predict(log_model, newdata = test_dat, type = "response")
print(prediction_lr)
#Creating the submission file
submission = cbind(t_id, prediction_lr)
submission = as.data.frame(submission)
names(submission)= c("id", "target")
fwrite(submission , "LR_submission_file.csv")
#Uncomment the below code if you want to test the accuracy of the model
#### OPTIONAL SECTION ######
#test_dat = fread("balanced_test_new.csv", stringsAsFactors = T)
#test_id = test_dat$id
#tar = test_dat$target
#tar = as.factor(tar)
#test_dat$target = NULL
#Training the Model
#log_model= glm (target ~ ., data = train_dat, family = binomial)
#Prediction using the model
#prediction_lr= predict(log_model, newdata = test_dat, type = "response")
#print(prediction_lr)
#Testing the accuracy of the model
# new_lab= numeric(length(prediction_lr))
# new_lab = ifelse(prediction_lr>median(prediction_lr),1,0)
# sum(new_lab == tar)/length(prediction_lr) #Gives the accuracy
# new_lab
|
/logistic_regression.R
|
no_license
|
DrRoad/Porto-Seguro-Safe-Driver-Prediction
|
R
| false | false | 3,272 |
r
|
library(caTools)
library(data.table)
library(tidyverse)
library(dplyr)
library(magrittr)
#Getting the Data
train_dat= fread("balanced_data_new.csv", stringsAsFactors = T)
names(train_dat)
####### PRE-PROCESSING OF TEST DATA ########
#Putting the names from train data into array
reqd_col = colnames(train_dat)
#Getting the test data
test_dat = fread("test.csv" , stringsAsFactors = T)
cat_col = grep("_cat", names(test_dat), value = T)
bin_col = grep("_bin", names(test_dat), value = T)
#Converting the data into required datatype
test_dat %<>% mutate_at(bin_col, funs(factor(.)))
test_dat %<>% mutate_at(cat_col, funs(factor(.)))
#Storing the id for future use
t_id = as.integer(as.numeric(test_dat$id))
#Removing id
test_dat$id = NULL
#Getting the same columns as train data
reqd_col = reqd_col[2:length(reqd_col)]
test_dat = subset(test_dat, select = reqd_col)
########### HANDLING THE MISSING VALUES ############
#(Here -1 corresponds to missing values)
#Getting the columns containing the missing values
col_miss=colSums(test_dat == -1)
col_miss_nam = names(col_miss[col_miss>0])
#Mode calculation
Mode <- function(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
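#e.g. Mode(c(1, 2, 2, 3)) returns 2; for a factor column it returns the most frequent value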
#Replacing the missing values with the mode or mean(for continuous data) of the column
mode_val = numeric(length(col_miss_nam))
mode_val[1] = Mode(test_dat$ps_ind_05_cat)
mode_val[2] = mean(test_dat$ps_reg_03[which(test_dat$ps_reg_03 != -1)])
mode_val[3] = Mode(test_dat$ps_car_01_cat)
mode_val[4] = Mode(test_dat$ps_car_07_cat)
mode_val[5] = mean(test_dat$ps_car_11[which(test_dat$ps_car_11 != -1)])
mode_val[6] = mean(test_dat$ps_car_14[which(test_dat$ps_car_14 != -1)])
test_dat$ps_ind_05_cat[which(test_dat$ps_ind_05_cat == -1)] = mode_val[1]
test_dat$ps_reg_03[which(test_dat$ps_reg_03 == -1)] = mode_val[2]
test_dat$ps_car_01_cat[which(test_dat$ps_car_01_cat == -1)] = mode_val[3]
test_dat$ps_car_07_cat[which(test_dat$ps_car_07_cat == -1)] = mode_val[4]
test_dat$ps_car_11[which(test_dat$ps_car_11 == -1)] = mode_val[5]
test_dat$ps_car_14[which(test_dat$ps_car_14 == -1)] = mode_val[6]
###### CREATING THE MODEL ######
#Training the Model
log_model= glm (target ~ ., data = train_dat, family = binomial)
#Prediction using the model
prediction_lr= predict(log_model, newdata = test_dat, type = "response")
print(prediction_lr)
#Creating the submission file
submission = cbind(t_id, prediction_lr)
submission = as.data.frame(submission)
names(submission)= c("id", "target")
fwrite(submission , "LR_submission_file.csv")
#Uncomment the below code if you want to test the accuracy of the model
#### OPTIONAL SECTION ######
#test_dat = fread("balanced_test_new.csv", stringsAsFactors = T)
#test_id = test_dat$id
#tar = test_dat$target
#tar = as.factor(tar)
#test_dat$target = NULL
#Training the Model
#log_model= glm (target ~ ., data = train_dat, family = binomial)
#Prediction using the model
#prediction_lr= predict(log_model, newdata = test_dat, type = "response")
#print(prediction_lr)
#Testing the accuracy of the model
# new_lab= numeric(length(prediction_lr))
# new_lab = ifelse(prediction_lr>median(prediction_lr),1,0)
# sum(new_lab == tar)/length(prediction_lr) #Gives the accuracy
# new_lab
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transmute_se.R
\name{transmute_se}
\alias{transmute_se}
\title{transmute standard interface.}
\usage{
transmute_se(.data, transmuteTerms)
}
\arguments{
\item{.data}{data.frame}
\item{transmuteTerms}{character vector of column expressions to transmute by.}
}
\value{
.data transmuted by the column expressions in transmuteTerms
}
\description{
transmute a data frame by the transmuteTerms. Accepts arbitrary text as
transmuteTerms to allow forms such as "Sepal.Length >= 2 * Sepal.Width".
}
\examples{
datasets::iris \%>\%
transmute_se(c(Sepal_Long = "Sepal.Length >= 2 * Sepal.Width",
Petal_Short = "Petal.Length <= 3.5")) \%>\%
summary()
}
\seealso{
\code{\link[dplyr]{transmute}}, \code{\link[dplyr]{transmute_at}}
}
|
/man/transmute_se.Rd
|
no_license
|
xtmgah/seplyr
|
R
| false | true | 806 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transmute_se.R
\name{transmute_se}
\alias{transmute_se}
\title{transmute standard interface.}
\usage{
transmute_se(.data, transmuteTerms)
}
\arguments{
\item{.data}{data.frame}
\item{transmuteTerms}{character vector of column expressions to transmute by.}
}
\value{
.data transmuted by the column expressions in transmuteTerms
}
\description{
transmute a data frame by the transmuteTerms. Accepts arbitrary text as
transmuteTerms to allow forms such as "Sepal.Length >= 2 * Sepal.Width".
}
\examples{
datasets::iris \%>\%
transmute_se(c(Sepal_Long = "Sepal.Length >= 2 * Sepal.Width",
Petal_Short = "Petal.Length <= 3.5")) \%>\%
summary()
}
\seealso{
\code{\link[dplyr]{transmute}}, \code{\link[dplyr]{transmute_at}}
}
|
#' Download the Human DLPFC Visium data from LIBD
#'
#' This function downloads from `ExperimentHub` the dorsolateral prefrontal
#' cortex (DLPFC) human Visium data and results analyzed by LIBD. If
#' `ExperimentHub` is not available, it will download the files from Dropbox
#' using [utils::download.file()] unless the files are present already at
#' `destdir`. Note that `ExperimentHub` will cache the data and automatically
#' detect if you have previously downloaded it, thus making it the preferred
#' way to interact with the data.
#'
#' @param type A `character(1)` specifying which file you want to download. It
#' can either be: `sce` for the
#' \linkS4class{SingleCellExperiment}
#' object containing the spot-level data that includes the information for
#' visualizing the clusters/genes on top of the Visium histology, `sce_layer`
#' for the
#' \linkS4class{SingleCellExperiment}
#' object containing the layer-level data (pseudo-bulked from the spot-level),
#' or `modeling_results` for the list of tables with the `enrichment`,
#' `pairwise`, and `anova` model results from the layer-level data. It can also
#' be `sce_example` which is a reduced version of `sce` just for example
#' purposes. As of BioC version 3.13 `spe` downloads a
#' [SpatialExperiment-class][SpatialExperiment::SpatialExperiment-class] object.
#'
#' @param destdir The destination directory to where files will be downloaded
#' to in case the `ExperimentHub` resource is not available. If you already
#' downloaded the files, you can set this to the current path where the files
#' were previously downloaded to avoid re-downloading them.
#' @param eh An `ExperimentHub` object
#' [ExperimentHub-class][ExperimentHub::ExperimentHub-class].
#' @param bfc A `BiocFileCache` object
#' [BiocFileCache-class][BiocFileCache::BiocFileCache-class]. Used when
#' `eh` is not available.
#'
#' @return The requested object: `sce`, `sce_layer`, `spe` or `modeling_results` that
#' you have to assign to an object. If you didn't, you can still avoid
#' re-loading the object by using `.Last.value`.
#'
#' @export
#' @import ExperimentHub
#' @importFrom AnnotationHub query
#' @importFrom methods is
#' @details The data was initially prepared by scripts at
#' https://github.com/LieberInstitute/HumanPilot and further refined by
#' https://github.com/LieberInstitute/spatialLIBD/blob/master/inst/scripts/make-data_spatialLIBD.R.
#'
#' @examples
#'
#' ## Download the SingleCellExperiment object
#' ## at the layer-level
#' if (!exists("sce_layer")) sce_layer <- fetch_data("sce_layer")
#'
#' ## Explore the data
#' sce_layer
fetch_data <-
function(type = c("sce", "sce_layer", "modeling_results", "sce_example", "spe"),
destdir = tempdir(),
eh = ExperimentHub::ExperimentHub(),
bfc = BiocFileCache::BiocFileCache()) {
## Some variables
sce <- sce_layer <- modeling_results <- sce_sub <- spe <- NULL
## Check inputs
stopifnot(methods::is(eh, "ExperimentHub"))
if (!type %in% c("sce", "sce_layer", "modeling_results", "sce_example", "spe")) {
stop(
paste(
"Other 'type' values are not supported.",
"Please use either 'sce', 'sce_layer',",
"'modeling_results', 'sce_example' or 'spe'."
),
call. = FALSE
)
}
## Deal with the special case of VisiumExperiment first
if (type == "spe") {
spe <- sce_to_spe(fetch_data("sce", destdir = destdir, eh = eh))
return(spe)
}
## Other pre-BioC 3.12 regular files
if (type == "sce") {
if (!enough_ram()) {
warning(paste(
"Your system might not have enough memory available.",
"Try with a machine that has more memory",
"or use the 'sce_example'."
))
}
hub_title <- "Human_Pilot_DLPFC_Visium_spatialLIBD_spot_level_SCE"
## While EH is not set-up
file_name <-
"Human_DLPFC_Visium_processedData_sce_scran_spatialLIBD.Rdata"
url <-
"https://www.dropbox.com/s/f4wcvtdq428y73p/Human_DLPFC_Visium_processedData_sce_scran_spatialLIBD.Rdata?dl=1"
} else if (type == "sce_layer") {
hub_title <- "Human_Pilot_DLPFC_Visium_spatialLIBD_layer_level_SCE"
## While EH is not set-up
file_name <-
"Human_DLPFC_Visium_processedData_sce_scran_sce_layer_spatialLIBD.Rdata"
url <-
"https://www.dropbox.com/s/bg8xwysh2vnjwvg/Human_DLPFC_Visium_processedData_sce_scran_sce_layer_spatialLIBD.Rdata?dl=1"
} else if (type == "modeling_results") {
hub_title <- "Human_Pilot_DLPFC_Visium_spatialLIBD_modeling_results"
## While EH is not set-up
file_name <- "Human_DLPFC_Visium_modeling_results.Rdata"
url <-
"https://www.dropbox.com/s/se6rrgb9yhm5gfh/Human_DLPFC_Visium_modeling_results.Rdata?dl=1"
} else if (type == "sce_example") {
hub_title <- "Human_DLPFC_Visium_sce_example.Rdata"
## While EH is not set-up
file_name <- "sce_sub_for_vignette.Rdata"
url <-
"https://www.dropbox.com/s/5ra9o8ku9iyyf70/sce_sub_for_vignette.Rdata?dl=1"
}
file_path <- file.path(destdir, file_name)
## Use local data if present
if (!file.exists(file_path)) {
q <-
AnnotationHub::query(eh,
pattern = c("Human_Pilot_DLPFC_Visium_spatialLIBD", hub_title)
)
if (length(q) == 1) {
## ExperimentHub has the data =)
res <- q[[1]]
if (type %in% c("sce", "sce_example")) {
res <- .update_sce(res)
} else if (type == "sce_layer") {
res <- .update_sce_layer(res)
}
return(res)
} else {
## ExperimentHub backup: download from Dropbox
file_path <- BiocFileCache::bfcrpath(bfc, url)
}
}
## Now load the data
message(Sys.time(), " loading file ", file_path)
load(file_path, verbose = FALSE)
if (type == "sce") {
return(.update_sce(sce))
} else if (type == "sce_layer") {
return(.update_sce_layer(sce_layer))
} else if (type == "modeling_results") {
return(modeling_results)
} else if (type == "sce_example") {
return(.update_sce(sce_sub))
}
}
.update_sce <- function(sce) {
## Rename here the default cluster we want to show in the shiny app
sce$spatialLIBD <- sce$layer_guess_reordered_short
## Add ManualAnnotation which was formerly called Layer, then drop Layer
sce$ManualAnnotation <- sce$Layer
sce$Layer <- NULL
return(sce)
}
.update_sce_layer <- function(sce_layer) {
## Rename here the default cluster we want to show in the shiny app
sce_layer$spatialLIBD <- sce_layer$layer_guess_reordered_short
return(sce_layer)
}
|
/R/fetch_data.R
|
no_license
|
bigfacebig/spatialLIBD
|
R
| false | false | 7,237 |
r
|
#' Download the Human DLPFC Visium data from LIBD
#'
#' This function downloads from `ExperimentHub` the dorsolateral prefrontal
#' cortex (DLPFC) human Visium data and results analyzed by LIBD. If
#' `ExperimentHub` is not available, it will download the files from Dropbox
#' using [utils::download.file()] unless the files are present already at
#' `destdir`. Note that `ExperimentHub` will cache the data and automatically
#' detect if you have previously downloaded it, thus making it the preferred
#' way to interact with the data.
#'
#' @param type A `character(1)` specifying which file you want to download. It
#' can either be: `sce` for the
#' \linkS4class{SingleCellExperiment}
#' object containing the spot-level data that includes the information for
#' visualizing the clusters/genes on top of the Visium histology, `sce_layer`
#' for the
#' \linkS4class{SingleCellExperiment}
#' object containing the layer-level data (pseudo-bulked from the spot-level),
#' or `modeling_results` for the list of tables with the `enrichment`,
#' `pairwise`, and `anova` model results from the layer-level data. It can also
#' be `sce_example` which is a reduced version of `sce` just for example
#' purposes. As of BioC version 3.13 `spe` downloads a
#' [SpatialExperiment-class][SpatialExperiment::SpatialExperiment-class] object.
#'
#' @param destdir The destination directory to where files will be downloaded
#' to in case the `ExperimentHub` resource is not available. If you already
#' downloaded the files, you can set this to the current path where the files
#' were previously downloaded to avoid re-downloading them.
#' @param eh An `ExperimentHub` object
#' [ExperimentHub-class][ExperimentHub::ExperimentHub-class].
#' @param bfc A `BiocFileCache` object
#' [BiocFileCache-class][BiocFileCache::BiocFileCache-class]. Used when
#' `eh` is not available.
#'
#' @return The requested object: `sce`, `sce_layer`, `spe` or `modeling_results` that
#' you have to assign to an object. If you didn't, you can still avoid
#' re-loading the object by using `.Last.value`.
#'
#' @export
#' @import ExperimentHub
#' @importFrom AnnotationHub query
#' @importFrom methods is
#' @details The data was initially prepared by scripts at
#' https://github.com/LieberInstitute/HumanPilot and further refined by
#' https://github.com/LieberInstitute/spatialLIBD/blob/master/inst/scripts/make-data_spatialLIBD.R.
#'
#' @examples
#'
#' ## Download the SingleCellExperiment object
#' ## at the layer-level
#' if (!exists("sce_layer")) sce_layer <- fetch_data("sce_layer")
#'
#' ## Explore the data
#' sce_layer
fetch_data <-
function(type = c("sce", "sce_layer", "modeling_results", "sce_example", "spe"),
destdir = tempdir(),
eh = ExperimentHub::ExperimentHub(),
bfc = BiocFileCache::BiocFileCache()) {
## Some variables
sce <- sce_layer <- modeling_results <- sce_sub <- spe <- NULL
## Check inputs
stopifnot(methods::is(eh, "ExperimentHub"))
if (!type %in% c("sce", "sce_layer", "modeling_results", "sce_example", "spe")) {
stop(
paste(
"Other 'type' values are not supported.",
"Please use either 'sce', 'sce_layer',",
"'modeling_results', 'sce_example' or 'spe'."
),
call. = FALSE
)
}
## Deal with the special case of VisiumExperiment first
if (type == "spe") {
spe <- sce_to_spe(fetch_data("sce", destdir = destdir, eh = eh))
return(spe)
}
## Other pre-BioC 3.12 regular files
if (type == "sce") {
if (!enough_ram()) {
warning(paste(
"Your system might not have enough memory available.",
"Try with a machine that has more memory",
"or use the 'sce_example'."
))
}
hub_title <- "Human_Pilot_DLPFC_Visium_spatialLIBD_spot_level_SCE"
## While EH is not set-up
file_name <-
"Human_DLPFC_Visium_processedData_sce_scran_spatialLIBD.Rdata"
url <-
"https://www.dropbox.com/s/f4wcvtdq428y73p/Human_DLPFC_Visium_processedData_sce_scran_spatialLIBD.Rdata?dl=1"
} else if (type == "sce_layer") {
hub_title <- "Human_Pilot_DLPFC_Visium_spatialLIBD_layer_level_SCE"
## While EH is not set-up
file_name <-
"Human_DLPFC_Visium_processedData_sce_scran_sce_layer_spatialLIBD.Rdata"
url <-
"https://www.dropbox.com/s/bg8xwysh2vnjwvg/Human_DLPFC_Visium_processedData_sce_scran_sce_layer_spatialLIBD.Rdata?dl=1"
} else if (type == "modeling_results") {
hub_title <- "Human_Pilot_DLPFC_Visium_spatialLIBD_modeling_results"
## While EH is not set-up
file_name <- "Human_DLPFC_Visium_modeling_results.Rdata"
url <-
"https://www.dropbox.com/s/se6rrgb9yhm5gfh/Human_DLPFC_Visium_modeling_results.Rdata?dl=1"
} else if (type == "sce_example") {
hub_title <- "Human_DLPFC_Visium_sce_example.Rdata"
## While EH is not set-up
file_name <- "sce_sub_for_vignette.Rdata"
url <-
"https://www.dropbox.com/s/5ra9o8ku9iyyf70/sce_sub_for_vignette.Rdata?dl=1"
}
file_path <- file.path(destdir, file_name)
## Use local data if present
if (!file.exists(file_path)) {
q <-
AnnotationHub::query(eh,
pattern = c("Human_Pilot_DLPFC_Visium_spatialLIBD", hub_title)
)
if (length(q) == 1) {
## ExperimentHub has the data =)
res <- q[[1]]
if (type %in% c("sce", "sce_example")) {
res <- .update_sce(res)
} else if (type == "sce_layer") {
res <- .update_sce_layer(res)
}
return(res)
} else {
## ExperimentHub backup: download from Dropbox
file_path <- BiocFileCache::bfcrpath(bfc, url)
}
}
## Now load the data
message(Sys.time(), " loading file ", file_path)
load(file_path, verbose = FALSE)
if (type == "sce") {
return(.update_sce(sce))
} else if (type == "sce_layer") {
return(.update_sce_layer(sce_layer))
} else if (type == "modeling_results") {
return(modeling_results)
} else if (type == "sce_example") {
return(.update_sce(sce_sub))
}
}
.update_sce <- function(sce) {
## Rename here the default cluster we want to show in the shiny app
sce$spatialLIBD <- sce$layer_guess_reordered_short
## Add ManualAnnotation which was formerly called Layer, then drop Layer
sce$ManualAnnotation <- sce$Layer
sce$Layer <- NULL
return(sce)
}
.update_sce_layer <- function(sce_layer) {
## Rename here the default cluster we want to show in the shiny app
sce_layer$spatialLIBD <- sce_layer$layer_guess_reordered_short
return(sce_layer)
}
|
setwd("figs/")
xrng = c(-0.4,1.1)
yrng = c(0,2)
pdf("MonteCarlo1a.pdf")
hist(lMonteCarloSamples$adDraws,100,freq=F,main='',xlab='',ylab='',ylim=yrng,xlim=xrng,border='blue')
dev.off()
pdf("MonteCarlo1b.pdf")
hist(lMonteCarloSamples$adDraws,100,freq=F,main='',xlab='',ylab='',ylim=yrng,xlim=xrng,border='blue')
lines(xx<-seq(min(xrng),max(xrng),by=.01),yy<-dnorm(xx,dM1,sqrt(dC1)),lwd=2)
dev.off()
pdf("MonteCarlo1c.pdf")
hist(lMonteCarloSamples$adDraws,100,freq=F,main='',xlab='',ylab='',ylim=yrng,xlim=xrng,border='blue')
lines(lMonteCarloDensity,lwd=2,col='blue')
lines(xx,yy,lwd=2)
dev.off()
pdf("MonteCarlo1d.pdf")
plot(xx,yy,type='l',lwd=2,main='',xlab='',ylab='',ylim=yrng,xlim=xrng)
lines(lMonteCarloDensity,lwd=2,col='blue')
dev.off()
pdf("ImportanceSampling1a.pdf")
plot(xx,yy,type='l',lwd=2,main='',xlab='',ylab='',ylim=yrng,xlim=xrng)
lines(lMonteCarloDensity,lwd=2,col='blue')
dev.off()
pdf("ImportanceSampling1b.pdf")
plot(xx,yy,type='l',lwd=2,main='',xlab='',ylab='',ylim=yrng,xlim=xrng)
lines(lMonteCarloDensity,lwd=2,col='blue')
lines(lImportanceSamplingDensity,col='red',lwd=2)
dev.off()
pdf("ImportanceSamplingWeights.pdf",height=240)
anOrderDraws = order(lImportanceSamplingSamples$adDraws)[floor(seq(1,nSamples,length.out=200))]
plot(lImportanceSamplingSamples$adDraws[anOrderDraws],
lImportanceSamplingSamples$adWeights[anOrderDraws],xlab='',ylab='',main='',
xlim=xrng)
dev.off()
nParticles <- 20
dResolutionMultiple = 1.5
#par(bg="white")
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lSequentialImportanceSampling$mdParticles[1:10,]))
points(rep(1,nParticles),lSequentialImportanceSampling$mdParticles[1,],pch=19,
cex=(lSequentialImportanceSampling$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
legend("topright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19), pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file="sis-0.pdf")
#dev.off()
#readline("Hit enter:")
for (i in 2:10) {
#plot(0,0,type='n',main='',xlab='',ylab='',xlim=c(0,10), axes=F,
# ylim=range(lSequentialImportanceSampling$mdParticles[1:10,]))
points(rep(i,nParticles),lSequentialImportanceSampling$mdParticles[i,],pch=19,
cex=(lSequentialImportanceSampling$mdWeights[i,])^.5*2)
for (j in 1:nParticles) {
segments(i-1,lSequentialImportanceSampling$mdParticles[i-1,j],
i ,lSequentialImportanceSampling$mdParticles[i ,j],
lwd=(lSequentialImportanceSampling$mdWeights[i,j])^.3*2)
}
points(i-.25,adY[i],pch=23,bg='green',col=NA)
points(i-.2,lKalmanFilter$vdPosteriorMean[i],pch=23,bg='red',col=NA)
dev.copy2pdf(file=paste("sis-",i-1,".pdf",sep=''))
#dev.off()
#readline("Hit enter:")
}
# Bootstrap filter plot
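# At each time i the full genealogy is redrawn: point sizes scale with the square root of the
# particle weights, and segments follow mnResampledIndices back to each particle's ancestor.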
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lAuxiliaryParticleFilter$mdParticles[1:10,]))
points(rep(1,nParticles),lAuxiliaryParticleFilter$mdParticles[1,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
legend("bottomright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19),
pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file="sir-0.pdf")
dev.off()
#readline("Hit enter:")
adParticleIndices = 1:nParticles
for (i in 2:10) {
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lAuxiliaryParticleFilter$mdParticles[1:10,]))
points(rep(1,nParticles),lAuxiliaryParticleFilter$mdParticles[1,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
adParticleIndices = 1:nParticles
for (ii in i:2) {
points(rep(ii,nParticles),lAuxiliaryParticleFilter$mdParticles[ii,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[ii,])^.5*2)
for (j in adParticleIndices) {
segments(ii-1,lAuxiliaryParticleFilter$mdParticles[ii-1,lAuxiliaryParticleFilter$mnResampledIndices[ii,j]],
ii ,lAuxiliaryParticleFilter$mdParticles[ii ,j],
lwd=(lAuxiliaryParticleFilter$mdWeights[i,j])^.3*2)
}
points(ii-.25,adY[ii],pch=23,bg='green',col=NA)
points(ii-.2,lKalmanFilter$vdPosteriorMean[ii],pch=23,bg='red',col=NA)
adParticleIndices = unique(lAuxiliaryParticleFilter$mnResampledIndices[ii,adParticleIndices])
}
legend("bottomright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19),
pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file=paste("sir-",i-1,".pdf",sep=''))
dev.off()
#readline("Hit enter:")
# dev.copy2pdf
}
# APF plot
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lAuxiliaryParticleFilter$mdParticles[1:10,]))
points(rep(1,nParticles),lAuxiliaryParticleFilter$mdParticles[1,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
legend("bottomright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19),
pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file="apf-0.pdf")
dev.off()
#readline("Hit enter:")
adParticleIndices = 1:nParticles
for (i in 2:10) {
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lAuxiliaryParticleFilter$mdParticles[1:10,]))
points(rep(1,nParticles),lAuxiliaryParticleFilter$mdParticles[1,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
adParticleIndices = 1:nParticles
for (ii in i:2) {
points(rep(ii,nParticles),lAuxiliaryParticleFilter$mdParticles[ii,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[ii,])^.5*2)
for (j in adParticleIndices) {
segments(ii-1,lAuxiliaryParticleFilter$mdParticles[ii-1,lAuxiliaryParticleFilter$mnResampledIndices[ii,j]],
ii ,lAuxiliaryParticleFilter$mdParticles[ii ,j],
lwd=(lAuxiliaryParticleFilter$mdWeights[i,j])^.3*2)
}
points(ii-.25,adY[ii],pch=23,bg='green',col=NA)
points(ii-.2,lKalmanFilter$vdPosteriorMean[ii],pch=23,bg='red',col=NA)
adParticleIndices = unique(lAuxiliaryParticleFilter$mnResampledIndices[ii,adParticleIndices])
}
legend("bottomright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19),
pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file=paste("apf-",i-1,".pdf",sep=''))
dev.off()
#readline("Hit enter:")
# dev.copy2pdf
}
# # Multivariate bootstrap filter genealogy plots (commented out)
# for (i in 2:10) {
# plot(0,0,type='n',main='',xlab='',ylab='',xlim=c(0,10),
# ylim=range(lMultivariateBootstrapFilter$adParticles[1:10,]))
# points(rep(1,nParticles),lMultivariateBootstrapFilter$adParticles[1,],pch=19,
# cex=(lMultivariateBootstrapFilter$mdWeights[1,])^.5*2)
# points(1-.2,adY[1],pch=23,bg='green',col=NA)
# points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
# adParticleIndices = 1:nParticles
# #dev.copy(pdf,filename=paste("MBF",1,".pdf",sep=''),bg="white",
# # width=480*dResolutionMultiple,height=480*dResolutionMultiple)
# #dev.off()
# for (ii in i:2) {
# points(rep(ii,nParticles),lMultivariateBootstrapFilter$mdParticles[ii,],pch=19,
# cex=(lMultivariateBootstrapFilter$mdWeights[ii,])^.5*2)
# for (j in adParticleIndices) {
# segments(ii-1,lMultivariateBootstrapFilter$adParticles[ii-1,lMultivariateBootstrapFilter$mnResampledIndices[ii,j]],
# ii ,lMultivariateBootstrapFilter$adParticles[ii ,j],
# lwd=(lMultivariateBootstrapFilter$mdWeights[i,j])^.3*2)
# }
# points(ii-.2,adY[ii],pch=23,bg='green',col=NA)
# points(ii-.2,lKalmanFilter$vdPosteriorMean[ii],pch=23,bg='red',col=NA)
# adParticleIndices = unique(lMultivariateBootstrapFilter$mnResampledIndices[ii,adParticleIndices])
# }
# #dev.copy(pdf,filename=paste("MBF",i,".pdf",sep=''),bg="white",
# # width=480*dResolutionMultiple,height=480*dResolutionMultiple)
# #dev.off()
# readline("Hit enter:")
# # dev.copy2pdf
# }
# SIR with fixed parameters
nParticles <- 30
dResolutionMultiple = 1
#par(bg="white")
par(mfrow=c(2,2))
for (nParam in 2:5) {
plot(0,0,type='n',main='',xlab='t',ylab='',xlim=c(0,10),
ylim=range(lMultivariateBootstrapFilter$adParticles[1:10,,nParam]))
if (nParam != 3) { abline(h=0.05,col='red') } else { abline(h=0.95,col='red') }
points(rep(1,nParticles),lMultivariateBootstrapFilter$adParticles[1,,nParam],pch=19,
cex=(lMultivariateBootstrapFilter$mdWeights[1,])^.5*2)
}
dev.copy2pdf(file="MBF-0.pdf")
dev.off()
adParticleIndices = 1:nParticles
for (i in 2:10) {
par(mfrow=c(2,2))
for (nParam in 2:5) {
plot(0,0,type='n',main='',xlab='t',ylab='',xlim=c(0,10),
ylim=range(lMultivariateBootstrapFilter$adParticles[1:10,,nParam]))
if (nParam != 3) { abline(h=0.05,col='red') } else { abline(h=0.95,col='red') }
for (ii in i:2) {
points(rep(ii,nParticles),lMultivariateBootstrapFilter$adParticles[ii,,nParam],pch=19,
cex=(lMultivariateBootstrapFilter$mdWeights[ii,])^.5*2)
for (j in adParticleIndices) {
segments(ii-1,lMultivariateBootstrapFilter$adParticles[ii-1,lMultivariateBootstrapFilter$mnResampledIndices[ii,j],nParam],
ii ,lMultivariateBootstrapFilter$adParticles[ii ,j,nParam],
lwd=(lMultivariateBootstrapFilter$mdWeights[i,j])^.3*2)
}
adParticleIndices = unique(lMultivariateBootstrapFilter$mnResampledIndices[ii,adParticleIndices])
}
#readline("Hit enter:")
}
dev.copy2pdf(file=paste("MBF-",i-1,".pdf",sep=''))
dev.off()
}
# Kernel density example
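# Liu & West style kernel shrinkage (attribution inferred from the formulas below):
# h^2 = 1 - ((3*delta - 1)/(2*delta))^2 and a = sqrt(1 - h^2); each weighted point contributes a
# normal kernel centred at the shrunk mean a*x_i + (1 - a)*xbar with variance h^2 * Var(x).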
adRandomPoints = rnorm(10)
adRandomPointWeights = fRenormalizeWeights(runif(10))
dDelta = 0.99
dH2 = 1-((3*dDelta-1)/(2*dDelta))^2
dA = sqrt(1-dH2)
dMean = weighted.mean(adRandomPoints,adRandomPointWeights)
dVar = 0
for (i in 1:10) dVar= dVar+adRandomPointWeights[i]*(adRandomPoints[i]-dMean)^2
adShrunkMean = dA*adRandomPoints+(1-dA)*dMean
xx = seq(min(adRandomPoints)-0.5,max(adRandomPoints)+0.5,by=0.01)
plot(0,0,type='n',xlim=range(xx),ylim=c(0,max(adRandomPointWeights)+0.1),
xlab='',ylab='',main='',axes=F,frame.plot=T)
segments(adRandomPoints,rep(0,10),adRandomPoints,adRandomPointWeights)
adDensity = rep(0,length(xx))
for (i in 1:10) {
points(adRandomPoints[i],adRandomPointWeights[i])
points(adShrunkMean[i],adRandomPointWeights[i],col='red')
adThisDensity = 0.2*adRandomPointWeights[i]*dnorm(xx,adShrunkMean[i],sqrt(dH2*dVar))
lines(xx,adThisDensity,col='red')
adDensity = adDensity+adThisDensity
}
lines(xx,adDensity,lwd=2,col='red')
dev.copy2pdf(file="KernelDensity1.pdf"); dev.off()
dDelta = 0.85
dH2 = 1-((3*dDelta-1)/(2*dDelta))^2
dA = sqrt(1-dH2)
dMean = weighted.mean(adRandomPoints,adRandomPointWeights)
dVar = 0
for (i in 1:10) dVar= dVar+adRandomPointWeights[i]*(adRandomPoints[i]-dMean)^2
adShrunkMean = dA*adRandomPoints+(1-dA)*dMean
xx = seq(min(adRandomPoints)-0.5,max(adRandomPoints)+0.5,by=0.01)
plot(0,0,type='n',xlim=range(xx),ylim=c(0,max(adRandomPointWeights)+0.1),
xlab='',ylab='',main='',axes=F,frame.plot=T)
segments(adRandomPoints,rep(0,10),adRandomPoints,adRandomPointWeights)
adDensity = rep(0,length(xx))
for (i in 1:10) {
points(adRandomPoints[i],adRandomPointWeights[i])
points(adShrunkMean[i],adRandomPointWeights[i],col='red')
adThisDensity = 0.2*adRandomPointWeights[i]*dnorm(xx,adShrunkMean[i],sqrt(dH2*dVar))
lines(xx,adThisDensity,col='red')
adDensity = adDensity+adThisDensity
}
lines(xx,adDensity,lwd=2,col='red')
dev.copy2pdf(file="KernelDensity2.pdf"); dev.off()
setwd("../")
|
/courses/stat615/slides/SMC/figures2.R
|
no_license
|
jarad/jarad.github.com
|
R
| false | false | 12,229 |
r
|
setwd("figs/")
xrng = c(-0.4,1.1)
yrng = c(0,2)
pdf("MonteCarlo1a.pdf")
hist(lMonteCarloSamples$adDraws,100,freq=F,main='',xlab='',ylab='',ylim=yrng,xlim=xrng,border='blue')
dev.off()
pdf("MonteCarlo1b.pdf")
hist(lMonteCarloSamples$adDraws,100,freq=F,main='',xlab='',ylab='',ylim=yrng,xlim=xrng,border='blue')
lines(xx<-seq(min(xrng),max(xrng),by=.01),yy<-dnorm(xx,dM1,sqrt(dC1)),lwd=2)
dev.off()
pdf("MonteCarlo1c.pdf")
hist(lMonteCarloSamples$adDraws,100,freq=F,main='',xlab='',ylab='',ylim=yrng,xlim=xrng,border='blue')
lines(lMonteCarloDensity,lwd=2,col='blue')
lines(xx,yy,lwd=2)
dev.off()
pdf("MonteCarlo1d.pdf")
plot(xx,yy,type='l',lwd=2,main='',xlab='',ylab='',ylim=yrng,xlim=xrng)
lines(lMonteCarloDensity,lwd=2,col='blue')
dev.off()
pdf("ImportanceSampling1a.pdf")
plot(xx,yy,type='l',lwd=2,main='',xlab='',ylab='',ylim=yrng,xlim=xrng)
lines(lMonteCarloDensity,lwd=2,col='blue')
dev.off()
pdf("ImportanceSampling1b.pdf")
plot(xx,yy,type='l',lwd=2,main='',xlab='',ylab='',ylim=yrng,xlim=xrng)
lines(lMonteCarloDensity,lwd=2,col='blue')
lines(lImportanceSamplingDensity,col='red',lwd=2)
dev.off()
pdf("ImportanceSamplingWeights.pdf",height=240)
anOrderDraws = order(lImportanceSamplingSamples$adDraws)[floor(seq(1,nSamples,length.out=200))]
plot(lImportanceSamplingSamples$adDraws[anOrderDraws],
lImportanceSamplingSamples$adWeights[anOrderDraws],xlab='',ylab='',main='',
xlim=xrng)
dev.off()
nParticles <- 20
dResolutionMultiple = 1.5
#par(bg="white")
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lSequentialImportanceSampling$mdParticles[1:10,]))
points(rep(1,nParticles),lSequentialImportanceSampling$mdParticles[1,],pch=19,
cex=(lSequentialImportanceSampling$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
legend("topright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19), pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file="sis-0.pdf")
#dev.off()
#readline("Hit enter:")
for (i in 2:10) {
#plot(0,0,type='n',main='',xlab='',ylab='',xlim=c(0,10), axes=F,
# ylim=range(lSequentialImportanceSampling$mdParticles[1:10,]))
points(rep(i,nParticles),lSequentialImportanceSampling$mdParticles[i,],pch=19,
cex=(lSequentialImportanceSampling$mdWeights[i,])^.5*2)
for (j in 1:nParticles) {
segments(i-1,lSequentialImportanceSampling$mdParticles[i-1,j],
i ,lSequentialImportanceSampling$mdParticles[i ,j],
lwd=(lSequentialImportanceSampling$mdWeights[i,j])^.3*2)
}
points(i-.25,adY[i],pch=23,bg='green',col=NA)
points(i-.2,lKalmanFilter$vdPosteriorMean[i],pch=23,bg='red',col=NA)
dev.copy2pdf(file=paste("sis-",i-1,".pdf",sep=''))
#dev.off()
#readline("Hit enter:")
}
# Bootstrap filter plot
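# At each time i the full genealogy is redrawn: point sizes scale with the square root of the
# particle weights, and segments follow mnResampledIndices back to each particle's ancestor.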
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lAuxiliaryParticleFilter$mdParticles[1:10,]))
points(rep(1,nParticles),lAuxiliaryParticleFilter$mdParticles[1,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
legend("bottomright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19),
pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file="sir-0.pdf")
dev.off()
#readline("Hit enter:")
adParticleIndices = 1:nParticles
for (i in 2:10) {
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lAuxiliaryParticleFilter$mdParticles[1:10,]))
points(rep(1,nParticles),lAuxiliaryParticleFilter$mdParticles[1,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
adParticleIndices = 1:nParticles
for (ii in i:2) {
points(rep(ii,nParticles),lAuxiliaryParticleFilter$mdParticles[ii,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[ii,])^.5*2)
for (j in adParticleIndices) {
segments(ii-1,lAuxiliaryParticleFilter$mdParticles[ii-1,lAuxiliaryParticleFilter$mnResampledIndices[ii,j]],
ii ,lAuxiliaryParticleFilter$mdParticles[ii ,j],
lwd=(lAuxiliaryParticleFilter$mdWeights[i,j])^.3*2)
}
points(ii-.25,adY[ii],pch=23,bg='green',col=NA)
points(ii-.2,lKalmanFilter$vdPosteriorMean[ii],pch=23,bg='red',col=NA)
adParticleIndices = unique(lAuxiliaryParticleFilter$mnResampledIndices[ii,adParticleIndices])
}
legend("bottomright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19),
pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file=paste("sir-",i-1,".pdf",sep=''))
dev.off()
#readline("Hit enter:")
# dev.copy2pdf
}
# APF plot
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lAuxiliaryParticleFilter$mdParticles[1:10,]))
points(rep(1,nParticles),lAuxiliaryParticleFilter$mdParticles[1,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
legend("bottomright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19),
pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file="apf-0.pdf")
dev.off()
#readline("Hit enter:")
adParticleIndices = 1:nParticles
for (i in 2:10) {
plot(0,0,type='n',main='',xlab='t',ylab=expression(x[t]),xlim=c(0,10),
ylim=range(lAuxiliaryParticleFilter$mdParticles[1:10,]))
points(rep(1,nParticles),lAuxiliaryParticleFilter$mdParticles[1,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[1,])^.5*2)
points(1-.25,adY[1],pch=23,bg='green',col=NA)
points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
adParticleIndices = 1:nParticles
for (ii in i:2) {
points(rep(ii,nParticles),lAuxiliaryParticleFilter$mdParticles[ii,],pch=19,
cex=(lAuxiliaryParticleFilter$mdWeights[ii,])^.5*2)
for (j in adParticleIndices) {
segments(ii-1,lAuxiliaryParticleFilter$mdParticles[ii-1,lAuxiliaryParticleFilter$mnResampledIndices[ii,j]],
ii ,lAuxiliaryParticleFilter$mdParticles[ii ,j],
lwd=(lAuxiliaryParticleFilter$mdWeights[i,j])^.3*2)
}
points(ii-.25,adY[ii],pch=23,bg='green',col=NA)
points(ii-.2,lKalmanFilter$vdPosteriorMean[ii],pch=23,bg='red',col=NA)
adParticleIndices = unique(lAuxiliaryParticleFilter$mnResampledIndices[ii,adParticleIndices])
}
legend("bottomright",inset=0.01, c("Data","Truth","Particles"), pch=c(23,23,19),
pt.bg=c("green","red","black"), col=c("green","red","black"))
dev.copy2pdf(file=paste("apf-",i-1,".pdf",sep=''))
dev.off()
#readline("Hit enter:")
# dev.copy2pdf
}
# # Multivariate bootstrap filter genealogy plots (commented out)
# for (i in 2:10) {
# plot(0,0,type='n',main='',xlab='',ylab='',xlim=c(0,10),
# ylim=range(lMultivariateBootstrapFilter$adParticles[1:10,]))
# points(rep(1,nParticles),lMultivariateBootstrapFilter$adParticles[1,],pch=19,
# cex=(lMultivariateBootstrapFilter$mdWeights[1,])^.5*2)
# points(1-.2,adY[1],pch=23,bg='green',col=NA)
# points(1-.2,lKalmanFilter$vdPosteriorMean[1],pch=23,bg='red',col=NA)
# adParticleIndices = 1:nParticles
# #dev.copy(pdf,filename=paste("MBF",1,".pdf",sep=''),bg="white",
# # width=480*dResolutionMultiple,height=480*dResolutionMultiple)
# #dev.off()
# for (ii in i:2) {
# points(rep(ii,nParticles),lMultivariateBootstrapFilter$mdParticles[ii,],pch=19,
# cex=(lMultivariateBootstrapFilter$mdWeights[ii,])^.5*2)
# for (j in adParticleIndices) {
# segments(ii-1,lMultivariateBootstrapFilter$adParticles[ii-1,lMultivariateBootstrapFilter$mnResampledIndices[ii,j]],
# ii ,lMultivariateBootstrapFilter$adParticles[ii ,j],
# lwd=(lMultivariateBootstrapFilter$mdWeights[i,j])^.3*2)
# }
# points(ii-.2,adY[ii],pch=23,bg='green',col=NA)
# points(ii-.2,lKalmanFilter$vdPosteriorMean[ii],pch=23,bg='red',col=NA)
# adParticleIndices = unique(lMultivariateBootstrapFilter$mnResampledIndices[ii,adParticleIndices])
# }
# #dev.copy(pdf,filename=paste("MBF",i,".pdf",sep=''),bg="white",
# # width=480*dResolutionMultiple,height=480*dResolutionMultiple)
# #dev.off()
# readline("Hit enter:")
# # dev.copy2pdf
# }
# SIR with fixed parameters
nParticles <- 30
dResolutionMultiple = 1
#par(bg="white")
par(mfrow=c(2,2))
for (nParam in 2:5) {
plot(0,0,type='n',main='',xlab='t',ylab='',xlim=c(0,10),
ylim=range(lMultivariateBootstrapFilter$adParticles[1:10,,nParam]))
if (nParam != 3) { abline(h=0.05,col='red') } else { abline(h=0.95,col='red') }
points(rep(1,nParticles),lMultivariateBootstrapFilter$adParticles[1,,nParam],pch=19,
cex=(lMultivariateBootstrapFilter$mdWeights[1,])^.5*2)
}
dev.copy2pdf(file="MBF-0.pdf")
dev.off()
adParticleIndices = 1:nParticles
for (i in 2:10) {
par(mfrow=c(2,2))
for (nParam in 2:5) {
plot(0,0,type='n',main='',xlab='t',ylab='',xlim=c(0,10),
ylim=range(lMultivariateBootstrapFilter$adParticles[1:10,,nParam]))
if (nParam != 3) { abline(h=0.05,col='red') } else { abline(h=0.95,col='red') }
for (ii in i:2) {
points(rep(ii,nParticles),lMultivariateBootstrapFilter$adParticles[ii,,nParam],pch=19,
cex=(lMultivariateBootstrapFilter$mdWeights[ii,])^.5*2)
for (j in adParticleIndices) {
segments(ii-1,lMultivariateBootstrapFilter$adParticles[ii-1,lMultivariateBootstrapFilter$mnResampledIndices[ii,j],nParam],
ii ,lMultivariateBootstrapFilter$adParticles[ii ,j,nParam],
lwd=(lMultivariateBootstrapFilter$mdWeights[i,j])^.3*2)
}
adParticleIndices = unique(lMultivariateBootstrapFilter$mnResampledIndices[ii,adParticleIndices])
}
#readline("Hit enter:")
}
dev.copy2pdf(file=paste("MBF-",i-1,".pdf",sep=''))
dev.off()
}
# Kernel density example
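# The weighted sample is smoothed with a shrinkage kernel: for a discount factor
# dDelta, dH2 = 1 - ((3*dDelta - 1)/(2*dDelta))^2 and dA = sqrt(1 - dH2), so each
# component is a normal centred at dA*x_i + (1 - dA)*xbar with variance dH2*Var
# (a Liu-West style smoothing kernel). The 0.2 factor below only rescales the
# plotted component curves relative to the weight spikes.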
adRandomPoints = rnorm(10)
adRandomPointWeights = fRenormalizeWeights(runif(10))
dDelta = 0.99
dH2 = 1-((3*dDelta-1)/(2*dDelta))^2
dA = sqrt(1-dH2)
dMean = weighted.mean(adRandomPoints,adRandomPointWeights)
dVar = 0
for (i in 1:10) dVar= dVar+adRandomPointWeights[i]*(adRandomPoints[i]-dMean)^2
adShrunkMean = dA*adRandomPoints+(1-dA)*dMean
xx = seq(min(adRandomPoints)-0.5,max(adRandomPoints)+0.5,by=0.01)
plot(0,0,type='n',xlim=range(xx),ylim=c(0,max(adRandomPointWeights)+0.1),
xlab='',ylab='',main='',axes=F,frame.plot=T)
segments(adRandomPoints,rep(0,10),adRandomPoints,adRandomPointWeights)
adDensity = rep(0,length(xx))
for (i in 1:10) {
points(adRandomPoints[i],adRandomPointWeights[i])
points(adShrunkMean[i],adRandomPointWeights[i],col='red')
adThisDensity = 0.2*adRandomPointWeights[i]*dnorm(xx,adShrunkMean[i],sqrt(dH2*dVar))
lines(xx,adThisDensity,col='red')
adDensity = adDensity+adThisDensity
}
lines(xx,adDensity,lwd=2,col='red')
dev.copy2pdf(file="KernelDensity1.pdf"); dev.off()
dDelta = 0.85
dH2 = 1-((3*dDelta-1)/(2*dDelta))^2
dA = sqrt(1-dH2)
dMean = weighted.mean(adRandomPoints,adRandomPointWeights)
dVar = 0
for (i in 1:10) dVar= dVar+adRandomPointWeights[i]*(adRandomPoints[i]-dMean)^2
adShrunkMean = dA*adRandomPoints+(1-dA)*dMean
xx = seq(min(adRandomPoints)-0.5,max(adRandomPoints)+0.5,by=0.01)
plot(0,0,type='n',xlim=range(xx),ylim=c(0,max(adRandomPointWeights)+0.1),
xlab='',ylab='',main='',axes=F,frame.plot=T)
segments(adRandomPoints,rep(0,10),adRandomPoints,adRandomPointWeights)
adDensity = rep(0,length(xx))
for (i in 1:10) {
points(adRandomPoints[i],adRandomPointWeights[i])
points(adShrunkMean[i],adRandomPointWeights[i],col='red')
adThisDensity = 0.2*adRandomPointWeights[i]*dnorm(xx,adShrunkMean[i],sqrt(dH2*dVar))
lines(xx,adThisDensity,col='red')
adDensity = adDensity+adThisDensity
}
lines(xx,adDensity,lwd=2,col='red')
dev.copy2pdf(file="KernelDensity2.pdf"); dev.off()
setwd("../")
|
#' @export
#'
#' @title
#' Two different variance estimators for the Horvitz-Thompson estimator
#' @description
#' This function estimates the variance of the Horvitz-Thompson estimator.
#' Two different variance estimators are computed: the original one, due to Horvitz and Thompson,
#' and the one due to Sen (1953) and Yates and Grundy (1953).
#' Both approaches yield unbiased estimators under fixed-size sampling schemes.
#' @return
#' This function returns a data frame with every possible sample
#' within the sampling support and its corresponding variance estimates.
#' @details
#' The function returns two variance estimators for every possible sample
#' within a fixed-size sampling support.
#' The first estimator is due to Horvitz and Thompson and is given by the following expression:
#' \deqn{\widehat{Var}_1(\hat{t}_{y,\pi}) = \sum_{k \in s}\sum_{l\in s}\frac{\Delta_{kl}}{\pi_{kl}}\frac{y_k}{\pi_k}\frac{y_l}{\pi_l}}
#' The second estimator is due to Sen (1953) and Yates and Grundy (1953). It is given by the following expression:
#' \deqn{\widehat{Var}_2(\hat{t}_{y,\pi}) = -\frac{1}{2}\sum_{k \in s}\sum_{l\in s}\frac{\Delta_{kl}}{\pi_{kl}}(\frac{y_k}{\pi_k} - \frac{y_l}{\pi_l})^2}
#' @author Hugo Andres Gutierrez Rojas <hagutierrezro at gmail.com>
#' @param y Vector containing the information of the characteristic of interest
#' for every unit in the population.
#' @param N Population size.
#' @param n Sample size.
#' @param p A vector containing the selection probabilities of a fixed size without replacement sampling design.
#' The sum of the values of this vector must be one.
#'
#' @references
#' Sarndal, C-E. and Swensson, B. and Wretman, J. (1992), \emph{Model Assisted Survey Sampling}. Springer.\cr
#' Gutierrez, H. A. (2009), \emph{Estrategias de muestreo: Diseno de encuestas
#' y estimacion de parametros}. Editorial Universidad Santo Tomas.
#'
#' @examples
#'
#' # Example 1
#' # Without replacement sampling
#' # Vector U contains the label of a population of size N=5
#' U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
#' # Vector y1 and y2 are the values of the variables of interest
#' y1<-c(32, 34, 46, 89, 35)
#' y2<-c(1,1,1,0,0)
#' # The population size is N=5
#' N <- length(U)
#' # The sample size is n=2
#' n <- 2
#' # p is the probability of selection of every possible sample
#' p <- c(0.13, 0.2, 0.15, 0.1, 0.15, 0.04, 0.02, 0.06, 0.07, 0.08)
#'
#' # Calculates the estimated variance for the HT estimator
#' VarSYGHT(y1, N, n, p)
#' VarSYGHT(y2, N, n, p)
#'
#' # Unbiasedness holds in the estimator of the total
#' sum(y1)
#' sum(VarSYGHT(y1, N, n, p)$p * VarSYGHT(y1, N, n, p)$Est.HT)
#' sum(y2)
#' sum(VarSYGHT(y2, N, n, p)$p * VarSYGHT(y2, N, n, p)$Est.HT)
#'
#' # Unbiasedness also holds in the two variances
#' VarHT(y1, N, n, p)
#' sum(VarSYGHT(y1, N, n, p)$p * VarSYGHT(y1, N, n, p)$Est.Var1)
#' sum(VarSYGHT(y1, N, n, p)$p * VarSYGHT(y1, N, n, p)$Est.Var2)
#'
#' VarHT(y2, N, n, p)
#' sum(VarSYGHT(y2, N, n, p)$p * VarSYGHT(y2, N, n, p)$Est.Var1)
#' sum(VarSYGHT(y2, N, n, p)$p * VarSYGHT(y2, N, n, p)$Est.Var2)
#'
#' # Example 2: negative variance estimates
#'
#' x = c(2.5, 2.0, 1.1, 0.5)
#' N = 4
#' n = 2
#' p = c(0.31, 0.20, 0.14, 0.03, 0.01, 0.31)
#'
#' VarSYGHT(x, N, n, p)
#'
#' # Unbiasedness holds in the estimator of the total
#' sum(x)
#' sum(VarSYGHT(x, N, n, p)$p * VarSYGHT(x, N, n, p)$Est.HT)
#'
#' # Unbiasedness also holds in the two variances
#' VarHT(x, N, n, p)
#' sum(VarSYGHT(x, N, n, p)$p * VarSYGHT(x, N, n, p)$Est.Var1)
#' sum(VarSYGHT(x, N, n, p)$p * VarSYGHT(x, N, n, p)$Est.Var2)
VarSYGHT <- function (y, N, n, p)
{
Ind <- Ik(N, n)
pi1 <- as.matrix(Pik(p, Ind))
pi2 <- Pikl(N, n, p)
Delta <- Deltakl(N, n, p)
y <- t(as.matrix(y))
ykylexp <- t(y/pi1) %*% (y/pi1)
A <- (Delta/pi2) * (ykylexp)
Q <- nrow(Ind)
MatDif <- matrix(NA, nrow = N, ncol = N)
for(k in 1:N){
for(l in 1:N){
MatDif[k, l] <- (y[k]/pi1[k] - y[l]/pi1[l])^2
}
}
B <- (Delta/pi2) * MatDif
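  # Summing A over the units of a given sample yields the Horvitz-Thompson form
  # (Est.Var1); -1/2 times the sum of B yields the Sen-Yates-Grundy form
  # (Est.Var2), matching the two expressions documented above.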
Est.Var1 = Est.Var2 = Est.HT =NULL
for(i in 1:Q){
index = which(Ind[i,] != 0)
Est.HT[i] = HT(y[index], pi1[index])
Est.Var1[i] = sum(A[index, index])
Est.Var2[i] = - (1/2) * sum(B[index, index])
}
Resultado <- data.frame(I = Ind, p = p, Est.HT = Est.HT, Est.Var1 = Est.Var1, Est.Var2 = Est.Var2)
return(Resultado)
}
|
/R/VarSYGHT.R
|
no_license
|
psirusteam/TeachingSampling
|
R
| false | false | 4,342 |
r
|
|
################################################################
# Examining Electronidex
# Market basket analysis
# Discover Associations Between Products
# Created by Eirik Espe
################################################################
#Calling on packages. Install the packages if you do not have them already.
library(arules)
library(arulesViz)
library(ggplot2)
#Upload the dataset
Tr <- read.transactions("ElectronidexTransactions2017.csv",
format = "basket",
header = FALSE, sep = ",",
rm.duplicates = TRUE)
#Summary statistics
inspect(head(Tr)) #View the first six transactions
length(Tr) #Number of transactions
size(head(Tr)) #Number of items per transactions
#for the first six transactions
#Count of the number of items per transaction
summary(factor(size(Tr)))
#Most and least frequently purchased items
head(sort(itemFrequency(Tr, type="absolute"), decreasing = TRUE), n = 10)
tail(sort(itemFrequency(Tr, type="absolute"), decreasing = TRUE), n = 10)
# 10 most frequently bought items, including support
freq_itemsets <- eclat(Tr)
inspect(freq_itemsets)
# Finding items that were purchased alone
oneItem <- Tr[which(size(Tr) == 1), ]
# In how many transactions is this the case
length(oneItem)
# 2163 items are purchased alone.
# That's in accordance with the summary statistics.
# Which items are most frequently purchased alone
head(sort(itemFrequency(oneItem, type = "absolute"), decreasing = TRUE), n = 10)
#--- Visualization ----
# Frequency plot
itemFrequencyPlot(Tr, topN = 10, type = "absolute", main = "Item Frequency")
image(sample(Tr, 10))
# Plot of items purchased alone
itemFrequencyPlot(oneItem, topN = 10, type = "absolute",
main = "Item Frequency - one item transactions")
# Set up for creating a plot that will help in deciding support and confidence
# for the rules we are creating.
# Support and confidence values
supportLevels <- c(0.1, 0.05, 0.01, 0.005)
confidenceLevels <- c(0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1)
# Empty integers
rules_sup10 <- integer(length = 9)
rules_sup5 <- integer(length = 9)
rules_sup1 <- integer(length = 9)
rules_sup0.5 <- integer(length = 9)
# Apriori algorithm with a support level of 10%
for (i in 1:length(confidenceLevels)) {
rules_sup10[i] <- length(apriori(Tr, parameter = list(sup = supportLevels[1],
conf = confidenceLevels[i],
target = "rules")))
}
# Apriori algorithm with a support level of 5%
for (i in 1:length(confidenceLevels)) {
rules_sup5[i] <- length(apriori(Tr, parameter = list(sup = supportLevels[2],
conf = confidenceLevels[i],
target = "rules")))
}
# Apriori algorithm with a support level of 1%
for (i in 1:length(confidenceLevels)) {
rules_sup1[i] <- length(apriori(Tr, parameter=list(sup=supportLevels[3],
conf=confidenceLevels[i],
target="rules")))
}
# Apriori algorithm with a support level of 0.5%
for (i in 1:length(confidenceLevels)) {
rules_sup0.5[i] <- length(apriori(Tr, parameter=list(sup=supportLevels[4],
conf=confidenceLevels[i],
target="rules")))
}
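# (The four loops above could be collapsed into a single pass over both grids --
# a sketch, assuming Tr, supportLevels and confidenceLevels as defined above;
# it recomputes the same rule counts as a confidence-by-support matrix:)
# rule_counts <- sapply(supportLevels, function(s)
#   sapply(confidenceLevels, function(conf)
#     length(apriori(Tr, parameter = list(sup = s, conf = conf, target = "rules")))))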
# Making a plot to see number of rules for different support and confidence levels
# Data frame
num_rules <- data.frame(rules_sup10, rules_sup5, rules_sup1,
rules_sup0.5, confidenceLevels)
# Number of rules found with a support level of 10%, 5%, 1% and 0.5%
ggplot(num_rules, aes(x = confidenceLevels)) +
# Plot line and points (support level of 10%)
geom_line(aes(y = rules_sup10, colour = "Support level of 10%")) +
geom_point(aes(y = rules_sup10, colour = "Support level of 10%")) +
# Plot line and points (support level of 5%)
geom_line(aes(y = rules_sup5, colour = "Support level of 5%")) +
geom_point(aes(y = rules_sup5, colour = "Support level of 5%")) +
# Plot line and points (support level of 1%)
geom_line(aes(y = rules_sup1, colour="Support level of 1%")) +
geom_point(aes(y = rules_sup1, colour="Support level of 1%")) +
# Plot line and points (support level of 0.5%)
geom_line(aes(y = rules_sup0.5, colour = "Support level of 0.5%")) +
geom_point(aes(y = rules_sup0.5, colour = "Support level of 0.5%")) +
# Labs and theme
labs(x = "Confidence levels", y = "Number of rules found",
title = "Apriori algorithm with different support levels") +
theme_bw() +
theme(legend.title=element_blank())
#---Algorithm----
# Using apriori() function to find association rules
rules <- apriori(Tr, parameter = list(supp = 0.01, conf = 0.3))
# Define 'High-confidence' rules
rules_conf <- sort(rules, by = "confidence", decreasing=TRUE)
# Show the support, confidence and lift for the 6 rules with the highest confidence
inspect(head(rules_conf))
#Plot
plot(apriori(Tr, parameter = list(supp = 0.01, conf = 0.3)))
#Plot the top 10 rules measured by lift
top10rules <- head(rules, n = 10, by = "lift")
plot(top10rules, method = "graph", engine = "htmlwidget")
# Looking into some smart home devices, as these are products that Blackwell
# Electronics does not have in its current portfolio
# Get rules that lead to buying 'Google Home'
googhome <- apriori(data = Tr, parameter = list(supp = 0.01, conf = 0.3),
appearance = list(default = "lhs",rhs = "Google Home"),
control = list(verbose = F))
# No rules found at these support and confidence levels
# Get rules that lead to buying 'Apple TV'
apptv <- apriori(data = Tr, parameter = list(supp = 0.0001, conf = 0.1),
appearance = list(default = "lhs",rhs = "Apple TV"),
control = list(verbose = F))
# First 6 rules
inspect(head(apptv))
# Count of appearances of Apple TV and Google Home in the transactions
crossTable(Tr)['Apple TV', 'Apple TV']
crossTable(Tr)['Google Home', 'Google Home']
# 151 transactions contained Apple TV and 84 transactions contained Google Home
#--- Product types ----
# Looking at the items that Electronidex are selling
colnames(Tr)
# Creating a list of product types for the different items, to be able to
# compare with Blackwell's product types.
#Assign product types to the items
# list of the product types in the same order as the items in colnames(Tr)
ListProducts <- c("External Hardrives",
"External Hardrives",
"Computer Mice",
"External Hardrives",
"External Hardrives",
"Laptops",
"Desktop",
"Monitors",
"Computer Headphones",
"Laptops",
"Monitors",
"Active Headphones",
"Active Headphones",
"Laptops",
"Laptops",
"Keyboard",
"Smart Home Devices",
"Keyboard",
"Keyboard",
"Monitors",
"Laptops",
"Desktop",
"Monitors",
"Computer Cords",
"Keyboard",
"Accessories",
"Speakers",
"Printers",
"Printer Ink",
"Speakers",
"Printer Ink",
"Printers",
"Accessories",
"Speakers",
"Desktop",
"Desktop",
"Desktop",
"Mouse and Keyboard Combo",
"Laptops",
"Monitors",
"Keyboard",
"Speakers",
"Printers",
"Printer Ink", "Mouse and Keyboard Combo", "Laptops",
"Printer Ink", "Printers", "Computer Cords", "Computer Cords",
"Computer Tablets", "Smart Home Devices", "Computer Stands",
"Computer Mice", "Computer Mice", "Smart Home Devices",
"Computer Stands", "Computer Stands", "Computer Cords",
"Computer Cords", "Computer Stands", "Laptops", "Printer Ink",
"Desktop", "Monitors", "Laptops", "Keyboard", "Computer Mice",
"Printers", "Desktop", "Desktop", "Computer Tablets", "Computer Tablets",
"Computer Cords", "Speakers", "Computer Headphones", "Computer Tablets",
"Computer Headphones", "Accessories", "Desktop", "Monitors", "Laptops",
"Computer Mice", "Computer Headphones", "Mouse and Keyboard Combo",
"Keyboard", "Mouse and Keyboard Combo", "Mouse and Keyboard Combo",
"Mouse and Keyboard Combo", "Speakers", "Computer Headphones", "Keyboard",
"Computer Mice", "Speakers", "Computer Mice", "Computer Headphones",
"Accessories", "Mouse and Keyboard Combo", "Mouse and Keyboard Combo",
"Active Headphones", "Computer Stands", "Active Headphones",
"Active Headphones", "Computer Headphones", "Computer Headphones",
"Active Headphones", "Computer Mice", "Mouse and Keyboard Combo",
"Keyboard", "Speakers", "Smart Home Devices", "Computer Cords",
"Computer Tablets", "Monitors", "Monitors", "External Hardrives",
"Computer Mice", "Smart Home Devices", "Speakers", "Computer Cords",
"Computer Cords", "Monitors", "Computer Mice", "Computer Headphones",
"Computer Headphones")
# Number of transactions and items
Tr
Tr@itemInfo$Producttype <- ListProducts
# Assign product types to the items
Tr <- aggregate(Tr, by= Tr@itemInfo$Producttype)
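# (arules' aggregate() collapses all items sharing the same Producttype label
# into a single aggregated item per transaction, so the analysis below works at
# product-type rather than item level)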
#Summary statistics
inspect(head(Tr)) #View the first six transactions
length(Tr) #Number of transactions
size(head(Tr)) #Number of items per transactions
#for the first six transactions
summary(Tr) #Summary
# Finding product types that were purchased alone
oneItem <- Tr[which(size(Tr) == 1), ]
# In how many transactions is this the case
length(oneItem)
# Which product types are most frequently purchased alone
head(sort(itemFrequency(oneItem, type = "absolute"), decreasing = TRUE), n = 10)
#Plot with product types, instead of items
plot(head(apriori(Tr, parameter = list(supp = 0.01, conf = 0.75)),
n = 10, by = "lift"), method = "graph", engine = "htmlwidget")
#Rules for smart home devices
smart_home <- apriori(data = Tr, parameter = list(supp = 0.0006, conf = 0.60),
appearance = list(default = "lhs", rhs = "Smart Home Devices"),
control = list(verbose = F))
# Inspect the rules
inspect(smart_home)
#The 5 rules with highest lift
top5rules <- head(smart_home, n = 5, by = "lift")
# Plot
plot(top5rules, method = "paracoord",
control=list(reorder = TRUE,
main= "Top 5 rules for Smart Home Devices"))
|
/Market basket analysis.R
|
no_license
|
EirikEspe/Market-Basket-Analysis
|
R
| false | false | 11,436 |
r
|
|
rm(list=ls())
# Load Libraries
library("dplyr")
library("readr")
library("stringr")    # str_replace_all() is used below
library("tidytable")  # as_tidytable() / tidytable::mutate.() are used below
# getting topics classified in dictionary
topics_df <- read_csv("~/github_topics_classified_070721.csv")
topics <- topics_df %>%
filter(main_type=="Database")
topics <- topics$term
topics <- paste(topics, collapse="|")
# run this to print terms
str_replace_all(topics,"'", "")
# Read in readme data
path_for_data = "/project/class/bii_sdad_dspg/uva_2021/dspg21oss/"
setwd(path_for_data)
readme_raw_data <- read_csv("oss_readme_data_071221.csv") %>%
filter(status == "Done") %>%
distinct(slug, readme_text, batch, as_of, status)
# load function
source("~/git/dspg21oss/scripts/detect_sw_co.R")
# using function to classify
chk_sys <- readme_raw_data %>%
top_n(1000, slug) %>%
detect_system_sw(slug, readme_text)
# check utility
chk_utility <- readme_raw_data %>%
top_n(1000, slug) %>%
detect_utility_sw(slug, readme_text)
# check application
chk_app <- readme_raw_data %>%
top_n(1000, slug) %>%
detect_application_sw(slug, readme_text)
# check database
chk_db <- readme_raw_data %>%
top_n(1000, slug) %>%
detect_database_sw(slug, readme_text)
# check ai
chk_ai <- readme_raw_data %>%
top_n(1000, slug) %>%
detect_ai_sw(slug, readme_text)
# check viz
chk_viz <- readme_raw_data %>%
top_n(1000, slug) %>%
detect_viz_sw(slug, readme_text)
# 425 have at least 1, 299 over 5
sys_true <- chk_sys %>%
filter(system_all > 5)
# 84 have at least 1, 8 have over 5
util_true <- chk_utility %>%
filter(utility_all > 5)
# 487 have at least 1, 188 over 5
app_true <- chk_app %>%
filter(app_all > 0)
# 30 have over 0, 12 have over 5
ai_true <- chk_ai %>%
filter(ai > 5)
# 27 have over 0, 1 has over 5
viz_true <- chk_viz %>%
filter(viz > 5)
# only one column at a time
# if you only want to develop certain categories
system_terms <- get_dictionary_terms(summary_type = "System")
sys_os <- get_dictionary_terms(main_type = "Operating Systems")
windows_terms <- get_dictionary_terms(sub_type = "Windows")
chk <- readme_raw_data %>%
top_n(25, slug) %>%
as_tidytable() %>%
tidytable::mutate.(readme_text = tolower(readme_text)) %>%
detect_types(slug, readme_text, windows_terms)
|
/src/02_classify_readmes/03_new_classification_co.R
|
permissive
|
DSPG-Young-Scholars-Program/dspg21oss
|
R
| false | false | 2,206 |
r
|
|
\name{Species.T50.comp}
\alias{Species.T50.comp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Species.T50.comp
%% ~~function to do ... ~~
}
\description{Compare the T50 percentage for two species. The results are plotted as boxplots; different letters indicate significant differences among tests and species. The figure is automatically saved in 16:9 at 300 dpi.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Species.T50.comp(Germ.Analysis.exp_sp1, Germ.Analysis.exp_sp2, sp_name=NULL , colour="yes", Test.int=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Germination.Analysis.output_sp1}{output from Germination.Analysis function for species 1}
\item{Germination.Analysis.output_sp2}{output from Germination.Analysis function for species 2}
\item{colour}{can be "yes" (coloured by test.type) or "no" (B/W output) or a vector that specify the groups of tests (length of vector must be equal to n° of petri)}
\item{Test.int}{character vector where are indicated the types of tests that would be compared}
%% ~~Describe \code{x} here~~
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
\item{Anova }{anova results, summary(aov)}
\item{Tukey }{Post-hoc Tukey test output}
\item{Test }{Test compared}
\item{T50}{T50 percentage for each test analysed}
\item{Boxplot_T50 }{ggplot output}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{Michele Di Musciano (michele.dimusciano@graduate.univaq.it)
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
file_germ_sp1<-read.csv("your_germination_file_sp1.csv", sep=";", header=T)
file_germ_sp2<-read.csv("your_germination_file_sp2.csv", sep=";", header=T)
Germination.Analysis.output_sp1<-Germination.Analysis(file_germ_sp1, Nv.seed = NULL, n.seed=20, cv=1.5)
Germination.Analysis.output_sp2<-Germination.Analysis(file_germ_sp2, Nv.seed = NULL, n.seed=20, cv=1.5)
Species.T50.comp(Germination.Analysis.output_sp1, Germination.Analysis.output_sp2, colour = "yes")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/Species.T50.comp.Rd
|
no_license
|
micdimu/ecoseeds
|
R
| false | false | 2,493 |
rd
|
|
# popMisfit: population misfit measures between a parameter set and a misspecified set;
# F0 is the population discrepancy and sqrt(F0/df) gives the population RMSEA
setMethod("popMisfit", signature(param = "matrix", misspec = "matrix"), definition = function(param, misspec, dfParam = NULL, fit.measures = "all") {
p <- nrow(param)
blankM <- rep(0, p)
result <- popMisfitMACS(blankM, param, blankM, misspec, dfParam = dfParam, fit.measures = fit.measures)
return(result)
})
setMethod("popMisfit", signature(param = "list", misspec = "list"), definition = function(param, misspec, dfParam = NULL, fit.measures = "all") {
paramCM <- NULL
paramM <- NULL
misspecCM <- NULL
misspecM <- NULL
if (is(param[[1]], "matrix")) {
paramCM <- param[[1]]
p <- nrow(paramCM)
paramM <- rep(0, p)
if (is(param[[2]], "vector")) {
paramM <- param[[2]]
}
} else if (is(param[[2]], "matrix")) {
paramCM <- param[[2]]
if (is(param[[1]], "vector")) {
paramM <- param[[1]]
} else {
stop("Cannot find the mean vector of the parameter values.")
}
} else {
stop("Cannot find covariance matrix in the parameter values")
}
if (is(misspec[[1]], "matrix")) {
misspecCM <- misspec[[1]]
p <- nrow(misspecCM)
misspecM <- rep(0, p)
if (is(misspec[[2]], "vector")) {
misspecM <- misspec[[2]]
}
} else if (is(misspec[[2]], "matrix")) {
misspecCM <- misspec[[2]]
        if (is(misspec[[1]], "vector")) {
misspecM <- misspec[[1]]
} else {
stop("Cannot find the mean vector of the misspecification values.")
}
} else {
stop("Cannot find covariance matrix in the misspecification values")
}
result <- popMisfitMACS(paramM, paramCM, misspecM, misspecCM, dfParam = dfParam, fit.measures = fit.measures)
return(result)
})
setMethod("popMisfit", signature(param = "SimRSet", misspec = "SimRSet"), definition = function(param, misspec, dfParam = NULL, fit.measures = "all") {
paramMacs <- createImpliedMACS(param)
misspecMacs <- createImpliedMACS(misspec)
if (!(all(is.finite(misspecMacs$CM)) && all(eigen(misspecMacs$CM)$values > 0)))
stop("The misspecification set is not valid.")
if (!(all(is.finite(paramMacs$CM)) && all(eigen(paramMacs$CM)$values > 0)))
stop("The real parameter set is not valid")
return(popMisfitMACS(paramMacs$M, paramMacs$CM, misspecMacs$M, misspecMacs$CM, dfParam = dfParam, fit.measures = fit.measures))
})
setMethod("popMisfit", signature(param = "MatrixSet", misspec = "MatrixSet"), definition = function(param, misspec, dfParam = NULL, fit.measures = "all") {
if (!validateObject(param))
stop("The set of actual parameters is not valid.")
if (!validateObject(misspec))
stop("The set of misspecified paramters is not valid.")
param <- reduceMatrices(param)
misspec <- reduceMatrices(misspec)
return(popMisfit(param, misspec, dfParam = dfParam, fit.measures = fit.measures))
})
setMethod("popMisfit", signature(param = "SimSet", misspec = "SimMisspec"), definition = function(param, misspec, dfParam = NULL, fit.measures = "all", equalCon = new("NullSimEqualCon")) {
Output <- runMisspec(param, misspec, equalCon)
param2 <- Output$param
misspec <- Output$misspec
if (is.null(dfParam)) {
p <- length(createImpliedMACS(param2)$M)
nElements <- p + (p * (p + 1)/2)
nFree <- countFreeParameters(param)
if (!isNullObject(equalCon))
nFree <- nFree + countFreeParameters(equalCon)
dfParam <- nElements - nFree
}
return(popMisfit(param2, misspec, dfParam = dfParam, fit.measures = fit.measures))
})
popMisfitMACS <- function(paramM, paramCM, misspecM, misspecCM, dfParam = NULL, fit.measures = "all") {
if (fit.measures == "all") {
fit.measures <- getKeywords()$usedFitPop
if (is.null(dfParam))
fit.measures <- fit.measures[c(1, 3)]
}
p <- length(paramM)
fit.measures <- tolower(fit.measures)
f0 <- popDiscrepancy(paramM, paramCM, misspecM, misspecCM)
rmsea <- NULL
srmr <- NULL
result <- NULL
if (any(fit.measures %in% "f0")) {
result <- c(result, f0)
}
if (any(fit.measures %in% "rmsea")) {
rmsea <- sqrt(f0/dfParam)
result <- c(result, rmsea)
}
if (any(fit.measures %in% "srmr")) {
disSquared <- (cov2cor(misspecCM) - cov2cor(paramCM))^2
numerator <- 2 * sum(disSquared[lower.tri(disSquared, diag = TRUE)])
srmr <- sqrt(numerator/(p * (p + 1)))
result <- c(result, srmr)
}
result <- as.vector(result)
names(result) <- fit.measures
return(result)
}
# F0 in population: The discrepancy due to approximation (Browne & Cudeck, 1992)
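# For mean vectors mu, mu_m and covariance matrices Sigma, Sigma_m it returns the
# ML discrepancy F0 = tr(Sigma_m Sigma^-1) - log|Sigma_m Sigma^-1| - p
#                     + (mu_m - mu)' Sigma^-1 (mu_m - mu)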
popDiscrepancy <- function(paramM, paramCM, misspecM, misspecCM) {
p <- length(misspecM)
inv <- solve(paramCM)
dis.CM <- misspecCM %*% inv
t.1 <- sum(diag(dis.CM))
t.1.1 <- det(dis.CM)
if (t.1.1 < 0)
return(NULL)
t.2 <- log(t.1.1)
dis.M <- as.matrix(misspecM - paramM)
t.3 <- t(dis.M) %*% inv %*% dis.M
discrepancy <- t.1 - t.2 - p + t.3
return(discrepancy)
}
|
/simsem/R/popMisfit-methods.R
|
no_license
|
pairach/simsem
|
R
| false | false | 5,285 |
r
|
|
#' @title Add Phenotype data to ExpressionSet
#'
#' @param x \code{ExpressionSet} ExpressionSet to which add phenotype information
#' @param pheno \code{data.frame} Table with the new phenotypes
#' @param identifier \code{character} Name of the ID column on the phenotypes data.frame
#' @param complete_cases \code{bool} If \code{TRUE} only the matching individuals
#' between the ExpressionSet and the phenotypes table will be included on the resulting ExpressionSet. If
#' \code{FALSE} all the individuals on the input ExpressionSet will be on the output ExpressionSet
#'
#' @return \code{ExpressionSet} with the new phenotype variables added to its \code{pData}
#' @export
#'
#' @examples
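#' \dontrun{
#' # Hypothetical call: 'eset' is an ExpressionSet and 'new_pheno' a data.frame
#' # whose "id" column matches sampleNames(eset)
#' new_eset <- addPhenoDataDS(eset, new_pheno, identifier = "id",
#'                            complete_cases = TRUE)
#' }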
addPhenoDataDS <- function(x, pheno, identifier, complete_cases){
if(!(any(identifier %in% colnames(pheno)))){
stop("Identifier [", identifier, "] is not on the phenotypes table")
}
og_pheno <- Biobase::pData(x)
og_pheno_md <- Biobase::varMetadata(x)
new_variables <- colnames(pheno)[!(identifier == colnames(pheno))]
old_variables <- colnames(og_pheno)
og_individuals <- rownames(og_pheno)
new_individuals <- pheno[,identifier]
common_individuals <- new_individuals %in% og_individuals
new_pheno <- pheno[common_individuals,]
og_pheno <- cbind(og_pheno, og_individuals_id = og_individuals)
if(complete_cases == TRUE){
new_pheno <- dplyr::right_join(og_pheno, new_pheno, by = c("og_individuals_id" = identifier))
assay_data <- Biobase::exprs(x)[,colnames(Biobase::exprs(x)) %in% new_individuals]
}
else{
new_pheno <- dplyr::left_join(og_pheno, new_pheno, by = c("og_individuals_id" = identifier))
assay_data <- Biobase::exprs(x)
}
rownames(new_pheno) <- new_pheno$og_individuals_id
new_pheno$og_individuals_id <- NULL
if(any(new_variables %in% old_variables)){stop("Variables conflict between ExpressionSet and new PhenoData")}
for(i in new_variables){
og_pheno_md <- eval(str2expression(paste0("rbind(og_pheno_md, ", i, " = NA)")))
}
new_pheno <- new("AnnotatedDataFrame", data=new_pheno, varMetadata=og_pheno_md)
eset <- Biobase::ExpressionSet(assayData = assay_data,
phenoData = new_pheno,
featureData = Biobase::featureData(x),
annotation = Biobase::annotation(x))
return(eset)
}
|
/R/addPhenoDataDS.R
|
permissive
|
das2000sidd/dsOmics
|
R
| false | false | 2,302 |
r
|
|
# load libraries
library(readxl)
library(writexl)
library(ggplot2)
library(phyloseq)
library(vegan)
# load meta / sample data
meta <- data.frame(read_excel('data/table_sample_stats.xlsx', skip=1), stringsAsFactors = F)
# load rarefied
rare <- data.frame(read_excel('data/rarefied_3000.xlsx'))
# check 0 rows
sum(apply(rare,1,sum)==0)
# do a first plot to get a feeling for the data
plot_richness(phyloseq(otu_table(rare,taxa_are_rows = T)))
# calculate alpha diversity indices (just once)
#aDiv <- estimate_richness(phyloseq(otu_table(rare,taxa_are_rows = T)))
#aDiv <- cbind(rownames(aDiv),aDiv)
# merge with sample data (just once)
#merged <- merge(aDiv,meta[,7:12], by.x=1, by.y=1)
#write_xlsx(data.frame(merged, stringsAsFactors = F),'data/alphaDiv_indices.xlsx')
# load
merged <- data.frame(read_excel('data/alphaDiv_indices.xlsx'))
# without aquarium (algal samples only)
merged_2 <- merged[merged$Location!='Aquarium',]
merged_2 <- merged_2[merged_2$Tissue_water=='Algal',]
merged_2$sampling_location <- paste(merged_2$Sampling,'-',merged_2$Location)
merged_3 <- merged[merged$Location!='Aquarium',]
merged_3$sampling_location <- paste(merged_3$Sampling,'-',merged_3$Location)
merged_3$sampling_location_tissue <- paste(merged_3$Tissue_water,'-',merged_3$Sampling,'-',merged_3$Location)
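# merged   = all samples; merged_2 = algal samples without the aquarium;
# merged_3 = algal + water samples without the aquarium, with combined
# sampling/location (and tissue) grouping variables used in the plots below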
# plots -------------------------------------------------------------------
# path to store
plotPath = 'rarefied/' # path to store plot files
# for each alpha diversity index
for(i in c('Shannon','Observed','Chao1','ACE','Simpson','InvSimpson','Fisher')){
# all + colored
ggplot(merged,aes_string(x='Tissue_water',y=i)) +
geom_boxplot(lwd = 0.9, outlier.shape = NA) +
geom_jitter(aes(color = Location, shape = Sampling),position=position_jitter(0.1), size = 4) +
xlab('') +
ylab(i) +
theme_light()
ggsave(paste0(plotPath,'/boxplot_algal_water_all_',i,'.svg'),width = 5, height = 4)
ggsave(paste0(plotPath,'/boxplot_algal_water_all_',i,'.png'),width = 5, height = 4)
# no aquarium
ggplot(merged_3,aes_string(x='Tissue_water',y=i)) +
geom_boxplot(lwd = 0.9, outlier.shape = NA) +
geom_jitter(shape=16, position=position_jitter(0.1), size = 4, color = '#00000080') +
xlab('') +
ylab(i) +
theme_light()
ggsave(paste0(plotPath,'/boxplot_algal_water_noAquarium_',i,'.svg'),width = 4, height = 4)
ggsave(paste0(plotPath,'/boxplot_algal_water_noAquarium_',i,'.png'),width = 4, height = 4)
# no aquarium + colored
ggplot(merged_3,aes_string(x='Tissue_water',y=i)) +
geom_boxplot(lwd = 0.9, outlier.shape = NA) +
geom_jitter(aes(color = Location, shape = Sampling),position=position_jitter(0.1), size = 4) +
xlab('') +
ylab(i) +
theme_light()
ggsave(paste0(plotPath,'/boxplot_algal_water_noAquarium_color_',i,'.svg'),width = 5, height = 4)
ggsave(paste0(plotPath,'/boxplot_algal_water_noAquarium_color_',i,'.png'),width = 5, height = 4)
# only algal and sampling
ggplot(merged_2,aes_string(x='Sampling',y=i)) +
geom_boxplot(lwd = 0.9, outlier.shape = NA) +
geom_jitter(shape=16, position=position_jitter(0.1), size = 4, color = '#00000080') +
xlab('') +
ylab(i) +
theme_light()
ggsave(paste0(plotPath,'/boxplot_algal_sampling_',i,'.svg'),width = 4, height = 4)
ggsave(paste0(plotPath,'/boxplot_algal_sampling_',i,'.png'),width = 4, height = 4)
# only algal and location
ggplot(merged_2,aes_string(x='Location',y=i)) +
geom_boxplot(lwd = 0.9, outlier.shape = NA) +
geom_jitter(shape=16, position=position_jitter(0.1), size = 4, color = '#00000080') +
xlab('') +
ylab(i) +
theme_light()
ggsave(paste0(plotPath,'/boxplot_algal_location_',i,'.svg'),width = 4, height = 4)
ggsave(paste0(plotPath,'/boxplot_algal_location_',i,'.png'),width = 4, height = 4)
# tide pool vs edge in each sampling
ggplot(merged_2,aes_string(x='sampling_location',y=i)) +
geom_boxplot(lwd = 0.9, outlier.shape = NA) +
geom_jitter(shape=16, position=position_jitter(0.1), size = 4, color = '#00000080') +
xlab('') +
ylab(i) +
theme_light() +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1))
ggsave(paste0(plotPath,'/boxplot_algal_sampling_location_',i,'.svg'),width = 6, height = 5)
ggsave(paste0(plotPath,'/boxplot_algal_sampling_location_',i,'.png'),width = 6, height = 5)
# algal and water with tide pool vs edge in each sampling
ggplot(merged_3,aes_string(x='sampling_location_tissue',y=i)) +
geom_boxplot(lwd = 0.9, outlier.shape = NA) +
geom_jitter(shape=16, position=position_jitter(0.1), size = 4, color = '#00000080') +
xlab('') +
ylab(i) +
theme_light() +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
ggsave(paste0(plotPath,'/boxplot_algal_water_sampling_location_',i,'.svg'),width = 7, height = 5)
ggsave(paste0(plotPath,'/boxplot_algal_water_sampling_location_',i,'.png'),width = 7, height = 5)
}
# statistical tests --------------------------------------------------------
# perform statistical tests
summary(aov(merged_3$Shannon~merged_3$Tissue_water)) # ANOVA algal vs water
summary(aov(merged_2$Shannon~merged_2$Location)) # ANOVA tide pool vs edge (algal only)
summary(aov(merged_2$Shannon~merged_2$Sampling)) # ANOVA sampling1 vs sampling2 (algal only)
# more detailed test
TukeyHSD(aov(merged_2$Shannon~merged_2$sampling_location)) # sampling & location (algal only)
TukeyHSD(aov(merged_3$Shannon~merged_3$Tissue_water))
TukeyHSD(aov(merged_3$Shannon~merged_3$Tissue_water*merged_3$Sampling))
TukeyHSD(aov(merged_3$Shannon~merged_3$Tissue_water*merged_3$Location))
TukeyHSD(aov(merged_3$Shannon~merged_3$Tissue_water*merged_3$Location*merged_3$Sampling))
# remove winter tide
merged_3_1 <- merged_3[merged_3$sampling_location_tissue!='Algal - Winter - Edge',]
TukeyHSD(aov(merged_3_1$Shannon~merged_3_1$Tissue_water))
# add winter tide pool to water
merged_3_2 <- merged_3
merged_3_2$Tissue_water[merged_3_2$sampling_location_tissue == 'Algal - Winter - Edge'] <- 'Water'
TukeyHSD(aov(merged_3_2$Shannon~merged_3_2$Tissue_water))
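# Robustness check (an addition, not part of the original analysis): ANOVA assumes
# roughly normal residuals, so a non-parametric Kruskal-Wallis test on the same
# Shannon values is a useful cross-check; 'merged_2'/'merged_3' are the data frames built above.
kruskal.test(Shannon ~ as.factor(Tissue_water), data = merged_3) # algal vs water
kruskal.test(Shannon ~ as.factor(Location), data = merged_2) # tide pool vs edge (algal only)
# pairwise Wilcoxon tests with Holm correction for the combined sampling/location groups
pairwise.wilcox.test(merged_2$Shannon, merged_2$sampling_location, p.adjust.method = "holm")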
|
/alpha_diversity.R
|
no_license
|
AlexanderBartholomaeus/ReefBuilderMicrobiome
|
R
| false | false | 6,090 |
r
|
|
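# Hedged description of the fields below (inferred from the file name
# one_comp_mm_elim_abs: a one-compartment PK model with absorption and
# Michaelis-Menten elimination; not taken from package documentation):
# t0 = initial time, C0 = initial concentration state, D = dose, V = volume,
# times = observation times, N_t = number of observations,
# C_hat = observed/simulated concentrations at 'times'.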
t0 <- 0
C0 <-
structure(c(0),
.Dim = c(1))
D <- 30
V <- 2
times <-
c(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6,
6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10)
N_t <- 20
C_hat <-
c(5.70812264865215, 7.10126075072086,
8.38520678426651, 9.79008249883381, 13.4390409239245,
11.4478987597702, 11.2124282696837, 11.4269217682577,
12.2432859438401, 13.8201804108938, 13.8408670746042,
11.422291744251, 10.5031943081843, 11.9121452242965,
14.0849980781312, 10.5505145523917, 10.1905539351877,
12.2232272590821, 11.7290653047821, 12.2719396535996)
|
/benchmarks/pkpd/one_comp_mm_elim_abs.data.R
|
permissive
|
stan-dev/stat_comp_benchmarks
|
R
| false | false | 533 |
r
|
|
#server
server <- function(input, output) {
load('./datas.RData')
  #Provide a download link so analysts can export the communications that cannot be answered automatically.
output$descarga_no_estandar <- downloadHandler(
filename = function() {
paste("comunicacionesNoEstandar.csv", sep="")
},
content = function(file) {
data_descarga <- data %>% filter(estandar == FALSE, respondida == 'No respondida', !is.na(codigo_proyecto))
write.csv(data_descarga, file)
}
)
output$descarga_evaluados <- downloadHandler(
filename = function() {
paste("comunicacionesEvaluadosYA.csv", sep="")
},
content = function(file) {
codigos <- data %>% filter(respondida == 'No respondida') %>% pull(codigo_proyecto)
data_descarga <- comparador %>% filter(code %in% codigos, state == 'En evaluación')
write.csv(data_descarga, file)
}
)
  #Show the counts of unanswered and answered communications.
output$estatus <- renderUI({
HTML(paste('<b>No Respondidas</b>:',
as.numeric(resumen_respondida[resumen_respondida$respondida == 'No respondida', 'Proyectos']),
'</br>','<b>Respondidas</b>:',
as.numeric(resumen_respondida[resumen_respondida$respondida == 'Respondida', 'Proyectos'])))
})
  #Show the counts of standardized (and non-standardized) communications.
output$estandar <- renderUI({
HTML(paste('<b>No estandar</b>:',
as.numeric(resumen_estandar[resumen_estandar$estandar == FALSE, 'Proyectos']),
'</br>','<b>Estandar</b>:',
as.numeric(resumen_estandar[resumen_estandar$estandar == TRUE, 'Proyectos'])))
})
  #Show on screen the number of projects in each status.
output$estatus_proyecto <- renderUI({
data_aux <- data %>% filter(respondida != 'Respondida') %>% pull(codigo_proyecto)
data_aux <- data_aux[!is.na(data_aux)]
resumen_estatus <- comparador %>% filter(code %in% data_aux) %>% group_by(state) %>% tally()
HTML(paste('<b>No Borrador</b>:',
resumen_estatus[resumen_estatus$state == 'Borrador', 'n'],'</br>',
'<b>En Evaluacion</b>:',
resumen_estatus[resumen_estatus$state == 'En evaluación', 'n'],'</br>',
'<b>En Preevaluacion</b>:',
resumen_estatus[resumen_estatus$state == 'Pre-evaluación', 'n']
))
})
  #Start with the HTML heading for the presentation.
output$titulo <- renderUI({
HTML('<h1 style="color: red">TEXTO MODELO, RESPUESTAS PARA “ESTATUS BORRADOR”.</h1>')
})
  #Render the SINCO communication, using our variables to show the corresponding reasons for each project.
output$comunicacion <- renderUI({
data_aux <- data %>% filter(clave == input$in3, respondida == 'No respondida')
nombre_obpp <- comparador %>% filter(code %in% data_aux$codigo_proyecto) %>% pull(obpp_name)
nombre_codigo <- comparador %>% filter(code %in% data_aux$codigo_proyecto) %>% pull(obpp_situr_code)
estatus_proyecto <- comparador %>% filter(code %in% data_aux$codigo_proyecto) %>% pull(state)
data <- data %>% filter(clave == input$in3, estandar, codigo_valido, respondida == 'No respondida')
motivo_1 <- comparadorjuntos %>% filter(asunto == data$motivo_1) %>% pull(asuntos_origen)
motivo_2 <- comparadorjuntos %>% filter(asunto == data$motivo_2) %>% pull(asuntos_origen)
motivo_3 <- comparadorjuntos %>% filter(asunto == data$motivo_3) %>% pull(asuntos_origen)
motivo_4 <- comparadorjuntos %>% filter(asunto == data$motivo_4) %>% pull(asuntos_origen)
motivo_5 <- comparadorjuntos %>% filter(asunto == data$motivo_5) %>% pull(asuntos_origen)
#if(is.na(motivo_2))
# motivo_2 <- ''
#if(is.na(motivo_3))
# motivo_3 <- ''
#if(is.na(motivo_4))
# motivo_4 <- ''
#if(is.na(motivo_5))
# motivo_5 <- ''
informacion_1 <- comparadorjuntos %>% filter(asunto == data$motivo_1) %>% pull(informacion)
informacion_2 <- comparadorjuntos %>% filter(asunto == data$motivo_2) %>% pull(informacion)
informacion_3 <- comparadorjuntos %>% filter(asunto == data$motivo_3) %>% pull(informacion)
informacion_4 <- comparadorjuntos %>% filter(asunto == data$motivo_4) %>% pull(informacion)
informacion_5 <- comparadorjuntos %>% filter(asunto == data$motivo_5) %>% pull(informacion)
texto_html <- paste('<p>
<b><i>Título de las comunicaciones:</i></b><br><br>
Indicaciones para corregir proyecto en estatus borrador –' ,nombre_obpp, '(' ,nombre_codigo, ').<br>
<hr> <br>
<i><b><b1>Texto de las comunicaciones:</b1></b></i><br><br>
<i>Estimados voceros y voceras del Poder Popular ante todo reciban un cordial saludo.</i><br><br>
Por medio de la presente, y posterior a la revisión y evaluación realizada al proyecto cargado en SINCO para su financiamiento, cumplimos con informarle que el proyecto se encuentra en
<b>“Estatus Borrador”</b>, por la(s) siguiente(s) razón(es):<br><br>
<u><i><b>',motivo_1,'</b></i></u><p>',informacion_1,'</p><br>
<u><i><b>',motivo_2,'</b></i></u><p>',informacion_2,'</p><br>
<u><i><b>',motivo_3,'</b></i></u><p>',informacion_3,'</p><br>
<u><i><b>',motivo_4,'</b></i></u><p>',informacion_4,'</p><br>
<u><i><b>',motivo_5,'</b></i></u><p>',informacion_5,'</p><br>
Una vez realizada las correcciones correspondientes, haga clic nuevamente en el botón finalizar del paso 5 para que su proyecto sea evaluado nuevamente.
Recuerde que en caso de presentar inconvenientes puede enviar una comunicación a través del módulo del sistema.<br><br>
<b>EN SINCO, Creamos condiciones para el beneficio colectivo.</b><hr>
<hr>
<b>Estatus del proyecto:</b><p>',estatus_proyecto,'</p></h1>
</p>')
HTML(texto_html)
})
output$respuestas_estandar <- renderUI({
opciones <- data %>% filter(respondida == 'No respondida', codigo_valido == TRUE, estatus_valido == TRUE, estandar == TRUE) %>% pull(clave)
selectInput('in3', '', opciones , multiple=TRUE, selectize=FALSE, selected = opciones[1] )
})
}
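# Usage sketch (an assumption, not part of this file): in a standard Shiny layout this
# server function is paired with a companion ui.R in the same app directory, e.g.
#   library(shiny)
#   runApp("comunicaciones_borrador")   # hypothetical path to the app folder
# or, with a ui object available in the session, shinyApp(ui = ui, server = server).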
|
/Trabajo/x/comunicaciones_borrador/server.R
|
no_license
|
Moscdota2/Archivos
|
R
| false | false | 6,229 |
r
|
|
## The two functions below cache the inverse of a matrix so that repeated
## requests do not recompute it.
## makeCacheMatrix: wraps a matrix in a list of getter/setter closures that
## also store its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
## Initialize the inverse property
i <- NULL
## Method to set the matrix
set <- function( matrix ) {
x <<- matrix
i <<- NULL
}
## Method the get the matrix
get <- function() {
x
}
## Method to set the inverse of the matrix
setInverse <- function(inverse) {
i <<- inverse
}
## Method to get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
i
}
## Return a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix,
## computing it only once and serving the cached copy on subsequent calls.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Just return the inverse if its already set
if( !is.null(m) ) {
message("getting cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
  ## Calculate the inverse; solve() with a single square-matrix argument returns its inverse
  m <- solve(data, ...)
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
## Test
#a <-rbind(c(1, -1/4), c(-1/4, 1))
#cacheSolve(makeCacheMatrix(a))
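## Worked example (a sketch; the values are arbitrary):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # computes the inverse (a diagonal matrix of 0.5) and caches it
## cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse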
|
/week3/cachematrix.R
|
no_license
|
RavenTress/datasciencecoursera
|
R
| false | false | 1,387 |
r
|
|
################################################################################
##
## Likelihood Function
##
################################################################################
##
##
##
##
#'@noRd
LikeF <- function(Yt,Xt,Zt=NULL,Event=NULL,Break=NULL,na.action="na.omit",
model="Poisson",StaPar=NULL,a0=0.01,b0=0.01,amp=FALSE){
# DataFrame:
#dataf<-data
#dataf<-dataf[all.vars(formula)]
#Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
#attach(dataf)
oldoptions <-options(warn=-1)
on.exit(options(oldoptions))
#if(model=="PEM"){
##Event=get(names(dataf)[2])
#dataf<-data
#dataf<-dataf[c(all.vars(formula)[1],colnames(data)[2],all.vars(formula)[-1])]
##Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
##dataf<-dataf[all.vars(formula)]
##Yt=get(names(dataf)[1])
#Ytdd=dataf[[colnames(dataf)[1]]]
#Eventdd=dataf[[colnames(dataf)[2]]]
#Breakdd=GridP(Ytdd, Eventdd, nT = nBreaks)
#iik=2
#Event<-Eventdd
#Break<-Breakdd
#Xtdd=NULL
#Ztdd=NULL
#if(is.null(pz)){
#if(dim(dataf)[2]>2){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-iik
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+2])
#Xtdd[,i]=dataf[[names(dataf)[i+iik]]]
#}
#}
#}
# if(is.null(pz)!=TRUE){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-2-pz
#if(ppd>=1){
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
#Xt[,i]=get(names(dataf)[i+2])
#Xtdd[,i]=dataf[[names(dataf)[i+2]]]
#}
#}
#if(pz>=1){
#Ztdd=matrix(0,nnnd,pz)
#for(j in 1:pz){
##Zt[,j]=get(names(dataf)[j+ppd+2])
#Ztdd[,j]=dataf[[names(dataf)[j+ppd+2]]]
#}
#}
#}
#}
#if(model!="PEM"){
##dataf<-data
##dataf<-dataf[all.vars(formula)]
#Event<-NULL
#Break<-NULL
#Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
#Ytdd=dataf[[colnames(dataf)[1]]]
#Xtdd=NULL
#Ztdd=NULL
#if(is.null(pz)){
#if(dim(dataf)[2]>1){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-1
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+1])
#Xtdd[,i]=dataf[[names(dataf)[i+1]]]
#}
#}
#}
#if(is.null(pz)!=TRUE){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-1-pz
#if(ppd>=1){
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+1])
#Xtdd[,i]=dataf[[names(dataf)[i+1]]]
#}
#}
#if(pz>=1){
#Ztdd=matrix(0,nnnd,pz)
#for(j in 1:pz){
#Zt[,j]=get(names(dataf)[j+ppd+1])
#Ztdd[,j]=dataf[[names(dataf)[j+ppd+1]]]
#}
#}
#}
#}
#Yt<-Ytdd
#Xt<-Xtdd
#Zt<-Ztdd
##########################################################
#detach(dataf)
#print(Yt)
#print(Xt)
#print(Zt)
if (a0 <= 0) stop("Bad input value for a0")
if (b0 <= 0) stop("Bad input value for b0")
if (is.null(Yt))stop("Bad input Yt")
if (is.vector(Yt)==FALSE)stop("Bad input for Yt")
if (is.vector(Xt))stop("Bad input for Xt. Put as a matrix.")
if (is.null(StaPar))stop("Bad input for StaPar")
if (is.data.frame(StaPar))stop("Bad input for StaPar")
if (is.vector(StaPar)==FALSE)stop("Bad input for StaPar")
# if (model!="Poisson" && model!="Normal"&& model!="Laplace"&&model!="GED"&&
# model!="Gamma"&& model!="GGamma"&& model!="Weibull")stop("Bad input for model")
if (sum(length(which(is.na(Yt))))>0)stop("Bad input Yt")
if(is.null(Xt)==FALSE){if (sum(length(which(is.na(Xt))))>0)stop("Bad input Xt")}
if(is.null(Xt)==FALSE){if(is.matrix(Xt)==FALSE){Xt=as.matrix(Xt)}}
if(is.null(Zt)==FALSE){if(is.matrix(Zt)==FALSE){Zt=as.matrix(Zt)}}
if(StaPar[1]==0)("Bad input for the static parameter w: value outside the parameter space.")
if (model=="Poisson" || model=="Normal" || model=="Laplace" || model=="GED"|| # Begin TS Models
model=="Gamma" || model=="GGamma" || model=="Weibull"){
n<-length(Yt)
#Likelihood:
l <- array(0,c(n,1))
# psi <- exp((par[1]/2)*(lgamma(3/par[1])-lgamma(1/par[1])) )
if(is.null(Xt)==FALSE){
if(is.null(Zt)==FALSE){
dbeta=dim((Xt))[2] #1
dteta=dim((Zt))[2]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta-dteta+1):(dStaPar-dteta)],dbeta,1)
Teta=matrix(StaPar[(dStaPar-dteta+1):(dStaPar)],dteta,1)
}else{ #2
# print("CERTOOOOO!")
dbeta=dim((Xt))[2]
dteta=0
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
Teta=0
}
}
if(is.null(Xt)==TRUE){
if(is.null(Zt)==FALSE){ #3
dbeta=0
dteta=dim((Zt))[2]
dStaPar=length(StaPar)
Teta=matrix(StaPar[(dStaPar-dteta+1):(dStaPar)],dteta,1)
}else{ Beta=Teta=0 } #4
}
# print(Beta)
# print(Teta)
#cat("\nSPar=",StaPar)
# print(Beta)
mab <- matrix(0,2,n+1)
att <- array(0,c((n),1))
btt <- array(0,c((n),1))
at <- array(0,c((n+1),1))
bt <- array(0,c((n+1),1))
#Pred:
at[1] <- a0
bt[1] <- b0
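#Hedged summary of the recursions implemented in the loops below:
# evolution step: att = StaPar[1]*at[t-1], btt = StaPar[1]*bt[t-1] (with an exp(-Xt%*%Beta) adjustment when covariates are present);
# update step: at = att + (sufficient statistic of Yt), bt = btt + (model-specific term);
# l accumulates the log one-step-ahead predictive density, so the function returns -sum(l).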
#for(t in 2:(n+1)){
if(model=="Poisson"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
# for(t in 2:(n+1)){
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
#Poisson
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((Yt[jt] + att[jt])) -
lgamma(Yt[jt]+1)+
att[jt] * log(btt[jt]) -lgamma(att[jt]) - (Yt[jt] + att[jt])*(log(1 + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(Yt[t-1])
bt[t] <- btt[t-1]+(1)
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
#dbeta=dim((Xt))[2]
for(t in 2:(n+1)){
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
#Poisson
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((Yt[jt] + att[jt])) -
lgamma(Yt[jt]+1)+
att[jt] * log(btt[jt]) -lgamma(att[jt]) - (Yt[jt] + att[jt])*(log(1 + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(Yt[t-1])
bt[t] <-StaPar[1]*bt[t-1]+(1)*exp((Xt[t-1,1:dbeta]%*%Beta))
# cat("\nte=",(Xt[t-1,1:dbeta]%*%Beta))
} #end for t
# cat("at=",at)
# cat("bt=",bt)
#cat("\nlikef=",sum(l))
}
}
if(model=="Normal"){
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
at[t] <- att[t-1]+(1/2)
if(is.null(Zt)){
bt[t] <- btt[t-1]+(Yt[t-1]^2)/2
# Normal
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((0.5 + att[jt])) - 0.5*log(2*3.1428) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (0.5 + att[jt])*(log(0.5*((Yt[jt])^2) + btt[jt]))
#} #end for t
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- btt[t-1]+(((Yt[t-1]-(Zt[t-1,tt]%*%Teta))^2)/2)
# Normal
jt=t-1
dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
#for(t in 1:n){
l[jt] <- lgamma((0.5 + att[jt])) - 0.5*log(2*3.1428) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (0.5 + att[jt])*(log(0.5*((Yt[jt]-(Zt[jt,tt]%*%Teta))^2) + btt[jt]))
#} #end for t
}
}
}else{
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
at[t] <- att[t-1]+(1/2)
if(is.null(Zt)){
bt[t] <- StaPar[1]*bt[t-1]+((Yt[t-1]^2)/2)*exp((Xt[t-1,1:dbeta]%*%Beta))
# Normal
jt=t-1
#for(t in 1:n){
l[jt] <- lgamma((0.5 + att[jt])) - 0.5*log(2*3.1428) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (0.5 + att[jt])*(log(0.5*((Yt[jt])^2) + btt[jt]))
#} #end for t
# cat("\nte=",btt)
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- StaPar[1]*bt[t-1]+(((Yt[t-1]-(Zt[t-1,tt]%*%Teta))^2)/2)*exp((Xt[t-1,1:dbeta]%*%Beta))
# Normal
jt=t-1
dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
#for(t in 1:n){
l[jt] <- lgamma((0.5 + att[jt])) - 0.5*log(2*3.1428) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (0.5 + att[jt])*(log(0.5*((Yt[jt]-(Zt[jt,tt]%*%Teta))^2) + btt[jt]))
#} #end for t
}
} #end for t
}
}
if(model=="Laplace"){
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
at[t] <- att[t-1]+(1)
if(is.null(Zt)){
bt[t] <- btt[t-1]+sqrt(2)*abs(Yt[t-1])
# Laplace
jt=t-1
l[jt] <- lgamma(att[jt]+1) + log(1/sqrt(2)) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (1 + att[jt])*(log(sqrt(2)*abs(Yt[jt]) + btt[jt]))
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- btt[t-1]+sqrt(2)*abs(Yt[t-1]-(Zt[t-1,tt]%*%Teta))
# Laplace
jt=t-1
dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
l[jt] <- lgamma(att[jt]+1) + log(1/sqrt(2)) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (1 + att[jt])*(log(sqrt(2)*abs(Yt[jt]-(Zt[jt,tt]%*%Teta)) + btt[jt]))
}
} #end for t
}else{
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
at[t] <- att[t-1]+(1)
if(is.null(Zt)){
bt[t] <- StaPar[1]*bt[t-1]+(sqrt(2)*abs(Yt[t-1]))*exp((Xt[t-1,1:dbeta]%*%Beta))
# Laplace
jt=t-1
l[jt] <- lgamma(att[jt]+1) + log(1/sqrt(2)) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (1 + att[jt])*(log(sqrt(2)*abs(Yt[jt]) + btt[jt]))
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- StaPar[1]*bt[t-1]+(sqrt(2)*abs(Yt[t-1]-(Zt[t-1,tt]%*%Teta)))*exp((Xt[t-1,1:dbeta]%*%Beta))
# Laplace
jt=t-1
dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
l[jt] <- lgamma(att[jt]+1) + log(1/sqrt(2)) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (1 + att[jt])*(log(sqrt(2)*abs(Yt[jt]-(Zt[jt,tt]%*%Teta)) + btt[jt]))
}
} #end for t
}
}
if(model=="GED"){
if(is.null(Xt)){
# at[1] <- 1/((1-StaPar[1])*StaPar[2])
# bt[1] <- StaPar[1]/(StaPar[1]*StaPar[2]+abs(StaPar[1]-1)*(StaPar[2]^2))
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
psi <- ((gamma(3/StaPar[2]))/gamma(1/StaPar[2]))^(StaPar[2]/2)
at[t] <- att[t-1]+(1/StaPar[2])
if(is.null(Zt)){
bt[t] <- btt[t-1]+((abs(Yt[t-1]))^StaPar[2])*psi
# GED
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((1/StaPar[2] + att[jt])) +
log(StaPar[2]/2) + ((1/2)*lgamma((3/StaPar[2]))-(3/2)*lgamma((1/StaPar[2])) +
att[jt] * log(btt[jt]) -lgamma(att[jt])) - (1/StaPar[2] + att[jt]) * (log(((abs(Yt[jt]))^StaPar[2])*psi + btt[jt]))
# }
}else{
bt[t] <- btt[t-1]+((abs(Yt[t-1]-(0)))^StaPar[2])*psi
# GED
jt=t-1
#dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
# for(t in 1:n){
l[jt] <- lgamma((1/StaPar[2] + att[jt])) +
log(StaPar[2]/2) + ((1/2)*lgamma((3/StaPar[2]))-(3/2)*lgamma((1/StaPar[2])) +
att[jt] * log(btt[jt]) -lgamma(att[jt])) - (1/StaPar[2] + att[jt]) * (log(((abs(Yt[jt]-(Zt[jt,tt]%*%Teta)))^StaPar[2])*psi + btt[jt]))
# }
}
} #end for t
}else{
at[1] <- 1/((1-StaPar[1])*StaPar[2])
bt[1] <- StaPar[1]/(StaPar[1]*StaPar[2]+abs(StaPar[1]-1)*(StaPar[2]^2))
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
psi <- ((gamma(3/StaPar[2]))/gamma(1/StaPar[2]))^(StaPar[2]/2)
at[t] <- att[t-1]+(1/StaPar[2])
if(is.null(Zt)){
bt[t] <- StaPar[1]*bt[t-1]+(((abs(Yt[t-1]))^StaPar[2])*psi)*exp((Xt[t-1,1:dbeta]%*%Beta))
# GED
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((1/StaPar[2] + att[jt])) +
log(StaPar[2]/2) + ((1/2)*lgamma((3/StaPar[2]))-(3/2)*lgamma((1/StaPar[2])) +
att[jt] * log(btt[jt]) -lgamma(att[jt])) - (1/StaPar[2] + att[jt]) * (log(((abs(Yt[jt]))^StaPar[2])*psi + btt[jt]))
# }
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- StaPar[1]*bt[t-1]+(((abs(Yt[t-1]-(Zt[t-1,tt]%*%Teta)))^StaPar[2])*psi)*exp((Xt[t-1,1:dbeta]%*%Beta))
# GED
jt=t-1
#dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
# for(t in 1:n){
l[jt] <- lgamma((1/StaPar[2] + att[jt])) +
log(StaPar[2]/2) + ((1/2)*lgamma((3/StaPar[2]))-(3/2)*lgamma((1/StaPar[2])) +
att[jt] * log(btt[jt]) -lgamma(att[jt])) - (1/StaPar[2] + att[jt]) * (log(((abs(Yt[jt]-(Zt[jt,tt]%*%Teta)))^StaPar[2])*psi + btt[jt]))
# }
}
} #end for t
}
}
if(model=="Gamma"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# Gamma
# jt=t-1
# for(t in 1:n){
#l[jt] <- lgamma(att[jt]+StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+att[jt] * log(btt[jt])
#-lgamma(StaPar[2]) -lgamma(att[jt]) - (StaPar[2] + att[jt])*(log(Yt[jt] + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- btt[t-1]+(Yt[t-1])
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# Gamma
# jt=t-1
# for(t in 1:n){
# l[jt] <- lgamma(att[jt]+StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+att[jt] * log(btt[jt])
#-lgamma(StaPar[2]) -lgamma(att[jt]) - (StaPar[2] + att[jt])*(log(Yt[jt] + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1])*exp((Xt[t-1,1:dbeta]%*%Beta))
} #end for t
}
}
if(model=="GGamma"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# GGamma
# jt=t-1
# l[jt] <- lgamma((StaPar[2] + att[jt])) -lgamma(att[jt])+att[jt]*log(btt[jt]) + log(StaPar[3]) + (StaPar[3]*StaPar[2]-1)*log(Yt[jt]) - lgamma(StaPar[2])+ (-att[jt]-StaPar[2])*log((Yt[jt]^StaPar[3])+btt[jt])
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- btt[t-1]+(Yt[t-1]^StaPar[3])
}
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# GGamma
# jt=t-1
# l[jt] <- lgamma((StaPar[2] + att[jt])) -lgamma(att[jt])+att[jt]*log(btt[jt]) + log(StaPar[3]) + (StaPar[3]*StaPar[2]-1)*log(Yt[jt]) - lgamma(StaPar[2])+ (-att[jt]-StaPar[2])*log((Yt[jt]^StaPar[3])+btt[jt])
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1]^StaPar[3])*exp((Xt[t-1,1:dbeta]%*%Beta))
}
}
}
if(model=="Weibull"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# Weibull
jt=t-1
l[jt] <- lgamma((1 + att[jt])) + log(StaPar[2]) + (StaPar[2]-1)*log(Yt[jt]) - lgamma(att[jt])+ att[jt]*log(btt[jt]) + (-1 - att[jt])*log(Yt[jt]^StaPar[2] + btt[jt])
at[t] <- att[t-1]+(1)
bt[t] <- btt[t-1]+(Yt[t-1]^StaPar[2])
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# Weibull
jt=t-1
l[jt] <- lgamma((1 + att[jt])) + log(StaPar[2]) + (StaPar[2]-1)*log(Yt[jt]) - lgamma(att[jt])+ att[jt]*log(btt[jt]) + (-1 - att[jt])*log(Yt[jt]^StaPar[2] + btt[jt])
at[t] <- att[t-1]+(1)
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1]^StaPar[2])*exp(Xt[t-1,1:dbeta]%*%Beta)
} #end for t
}
}
return(-sum(l))
}#End TS Models
if(model=="SRGamma" || model=="SRWeibull"){ # Begin SR
if(model=="SRGamma"){model1="Gamma"}
if(model=="SRWeibull"){model1="Weibull"}
if (a0 <= 0) stop("Bad input value for a0")
if (b0 <= 0) stop("Bad input value for b0")
if (is.null(Yt))stop("Bad input Yt")
if (is.vector(Yt)==FALSE)stop("Bad input for Yt")
if (is.vector(Xt))stop("Bad input for Xt. Put as a matrix.")
if (is.null(StaPar))stop("Bad input for StaPar")
if (is.data.frame(StaPar))stop("Bad input for StaPar")
if (is.vector(StaPar)==FALSE)stop("Bad input for StaPar")
if (model1!="Gamma"&&model1!="Weibull")stop("Bad input for model")
if (sum(length(which(is.na(Yt))))>0)stop("Bad input Yt")
if(is.null(Xt)==FALSE){if (sum(length(which(is.na(Xt))))>0)stop("Bad input Xt")}
#print(StaPar)
if(StaPar[1]==0)("Bad input for the static parameter w: value outside the parameter space.")
# if(StaPar[1]==0){StaPar[1]=1e-10}
n<-length(Yt)
# psi <- exp((par[1]/2)*(lgamma(3/par[1])-lgamma(1/par[1])) )
if(is.null(Xt)==FALSE){
if(is.null(dim(Xt))){
dbeta=dim(t(Xt))[1]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
}else{
dbeta=dim(Xt)[2]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
}
}
# cat("SPar=",StaPar)
# print(Beta)
mab <- matrix(0,2,n+1)
att <- array(0,c((n),1))
btt <- array(0,c((n),1))
at <- array(0,c((n+1),1))
bt <- array(0,c((n+1),1))
btmu <- array(0,c((n+1),1))
#Pred:
at[1] <- a0
bt[1] <- b0
#Likelihood:
l <- array(0,c(n,1))
if(model1=="Gamma"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# Gamma
jt=t-1
#Gamma
#for(t in 1:n){
l[jt] <- lgamma(att[jt]+StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+att[jt]*log(btt[jt])-lgamma(StaPar[2]) -lgamma(att[jt])-(StaPar[2] + att[jt])*(log(Yt[jt] + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- btt[t-1]+(Yt[t-1])
} #end for t
}else{
# print("Ok!")
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1] #*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# Gamma
jt=t-1
# for(t in 1:n){
# print("Ok!")
# cat("\nte=",l)
# cat("\nte=",btt)
l[jt] <- lgamma(att[jt]+StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+(-StaPar[2]*Xt[jt,1:dbeta]%*%Beta)+att[jt] * log(btt[jt])-lgamma(StaPar[2]) -lgamma(att[jt]) - (StaPar[2] + att[jt])*(log(Yt[jt]*exp(-Xt[jt,1:dbeta]%*%Beta) + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1])*exp((-Xt[t-1,1:dbeta]%*%Beta))
btmu[t] <-StaPar[1]*bt[t-1]+(Yt[t-1])*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# cat("\nte=",btt)
} #end for t
} #end if
}
if(model1=="Weibull"){
#Likelihood:
# l <- array(0,c(n,1))
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
at[t] <- att[t-1]+(1)
bt[t] <- btt[t-1]+(Yt[t-1]^StaPar[2])
# Weibull
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((1 + att[jt])) + log(StaPar[2]) + (StaPar[2]-1)*log(Yt[jt])-lgamma(att[jt])+ att[jt]*log(btt[jt]) + (-1 - att[jt])*log(Yt[jt]^StaPar[2] + btt[jt])
# } #end for t
# cat("\nte=",btt)
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
#print("Ok!")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1] #*exp(-(Xt[t-1,1:dbeta]%*%Beta)*StaPar[2])
at[t] <- att[t-1]+(1)
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1]^StaPar[2])*exp(-StaPar[2]*Xt[t-1,1:dbeta]%*%Beta)
btmu[t] <- StaPar[1]*bt[t-1]+(Yt[t-1]^StaPar[2])*exp(-StaPar[2]*Xt[t-1,1:dbeta]%*%Beta)
#*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# Weibull
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma(1 + att[jt])+log(StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+(-StaPar[2]*Xt[jt,1:dbeta]%*%Beta)-lgamma(att[jt])+att[jt]*log(btt[jt])+(-1 - att[jt])*log(((Yt[jt]^StaPar[2])*exp(-StaPar[2]*Xt[jt,1:dbeta]%*%Beta)) + btt[jt])
# } #end for t
# cat("\nte=",btt)
} #end for t
}
#print(att)
#print(btt)
} # end if
return(-sum(l))
} # End SR Models
if(model=="PEM"){ #Begin PEM/PH Model
if (a0 <= 0) stop("Bad input value for a0")
if (b0 <= 0) stop("Bad input value for b0")
if (is.null(Yt))stop("Bad input Yt")
if (is.vector(Yt)==FALSE)stop("Bad input for Yt")
if (is.vector(Xt))stop("Bad input for Xt. Put as a matrix.")
if (is.null(StaPar))stop("Bad input for StaPar")
if (is.data.frame(StaPar))stop("Bad input for StaPar")
if (is.vector(StaPar)==FALSE)stop("Bad input for StaPar")
if (model!="PEM")stop("Bad input for model")
if (sum(length(which(is.na(Yt))))>0)stop("Bad input Yt")
if(is.null(Xt)==FALSE){if (sum(length(which(is.na(Xt))))>0)stop("Bad input Xt")}
if(StaPar[1]==0)("Bad input for the static parameter w: value outside the parameter space.")
if (is.null(Event))stop("Bad input Event")
if (is.null(Break))stop("Bad input Break")
n<-length(Break)-1
if(is.null(Xt)==FALSE){
if(is.null(dim(Xt))){
dbeta=dim(t(Xt))[1]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
}else{
dbeta=dim(Xt)[2]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
}
}
mab <- matrix(0,2,n+1)
att <- array(0,c((n),1))
btt <- array(0,c((n),1))
at <- array(0,c((n+1),1))
bt <- array(0,c((n+1),1))
btmu <- array(0,c((n+1),1))
#Pred:
at[1] <- a0
bt[1] <- b0
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
#Likelihood:
l <- array(0,c(n,1))
if(is.null(Xt)==TRUE){
numF=NumFail(StaPar,Yt,Event,Break,Xt=NULL)
TT=TTime(StaPar,Yt,Event,Break,Xt=NULL)
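#Hedged reading of the helper calls above (not from the package documentation): NumFail() appears to
#count observed failures in each grid interval and TTime() the total time at risk per interval, so each
#interval contributes a Poisson-gamma style term to l below.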
for(t in 2:(n+1)){ #begin for t
if(amp==TRUE){
d=diff(Break)
tdif<- diff(unique(Yt[Event == 1]))
lamp=tdif[length(tdif)]
d[length(d)]=lamp
m=mean(d)+1
z=d/(m)
att[t-1] <- (StaPar[1]^z[t-1])*at[t-1]
btt[t-1] <- (StaPar[1]^z[t-1])*bt[t-1]
# PEM
jt=t-1
l[jt] <- lgamma(att[jt]+numF[jt])+att[jt] * log(btt[jt])-lgamma(att[jt]) - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
}else{
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# PEM
jt=t-1
l[jt] <- lgamma(att[jt]+numF[jt])+att[jt] * log(btt[jt])-lgamma(att[jt]) - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
}
at[t] <- att[t-1]+(numF[t-1])
bt[t] <- btt[t-1]+(TT[t-1])
btmu[t] <- btt[t-1]+(TT[t-1])
# PEM
jt=t-1
# l[jt] <- lgamma(att[jt]+numF[jt])+att[jt] * log(btt[jt])-lgamma(att[jt])
# - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
#Break=GridP(Yt, Event, nT = NULL)
numF=NumFail(StaPar,Yt,Event,Break,Xt)
TT=TTime(StaPar,Yt,Event,Break,Xt)
XtC=ProdXtChi(StaPar,Yt,Break,Event,Xt)
for(t in 2:(n+1)){ #begin for t
if(amp==TRUE){
d=diff(Break)
tdif<- diff(unique(Yt[Event == 1]))
lamp=tdif[length(tdif)]
d[length(d)]=lamp
m=mean(d)+1
z=d/(m)
att[t-1] <- (StaPar[1]^z[t-1])*at[t-1]
btt[t-1] <- (StaPar[1]^z[t-1])*bt[t-1]
# PH
jt=t-1
l[jt] <- XtC[jt] + lgamma(att[jt] + numF[jt]) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
}else{
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# PH
jt=t-1
l[jt] <- XtC[jt] + lgamma(att[jt] + numF[jt]) + att[jt]*log(btt[jt]) - lgamma(att[jt]) - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
}
at[t] <- att[t-1]+(numF[t-1])
bt[t] <- btt[t-1]+(TT[t-1])
btmu[t] <- btt[t-1]+(TT[t-1])
# PH
# jt=t-1
# l[jt] <- XtC[jt] + lgamma(att[jt] + numF[jt]) + att[jt] * log(btt[jt])
# -lgamma(att[jt]) - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
}
}
#Likelihood:
# l <- array(0,c(n,1))
# if(is.null(Xt)==TRUE){
# #PEM
# for(t in 1:n){
# l[t] <- lgamma(att[t]+numF[t])+att[t] * log(btt[t])-lgamma(att[t]) - (numF[t] + att[t])*(log(TT[t] + btt[t]))
# } #end for t
# }else{
# #PH
# XtC=ProdXtChi(StaPar,Yt,Break,Event,Xt)
# for(t in 1:n){
# l[t] <- XtC[t] + lgamma(att[t] + numF[t]) + att[t] * log(btt[t]) -lgamma(att[t]) - (numF[t] + att[t])*(log(TT[t] + btt[t]))
# }
# }
return(-sum(l))
}#End PEM/PH Model
}
##########################################################
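# Usage sketch (an assumption about typical use, not taken from the package documentation):
# LikeF() returns the negative marginal log-likelihood, so the static parameter w of a
# Poisson NGSSM without covariates could be estimated by direct minimization, e.g.
#   set.seed(1)
#   yt  <- rpois(100, lambda = 5)
#   fit <- optim(par = 0.9,
#                fn  = function(w) LikeF(Yt = yt, Xt = NULL, model = "Poisson", StaPar = w),
#                method = "L-BFGS-B", lower = 1e-4, upper = 1 - 1e-4)
#   fit$par   # estimate of w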
|
/R/LikeF.r
|
no_license
|
cran/NGSSEML
|
R
| false | false | 28,409 |
r
|
################################################################################
##
## Likelihood Function
##
################################################################################
##
##
##
##
#'@noRd
LikeF <- function(Yt,Xt,Zt=NULL,Event=NULL,Break=NULL,na.action="na.omit",
model="Poisson",StaPar=NULL,a0=0.01,b0=0.01,amp=FALSE){
# DataFrame:
#dataf<-data
#dataf<-dataf[all.vars(formula)]
#Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
#attach(dataf)
oldoptions <-options(warn=-1)
on.exit(options(oldoptions))
#if(model=="PEM"){
##Event=get(names(dataf)[2])
#dataf<-data
#dataf<-dataf[c(all.vars(formula)[1],colnames(data)[2],all.vars(formula)[-1])]
##Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
##dataf<-dataf[all.vars(formula)]
##Yt=get(names(dataf)[1])
#Ytdd=dataf[[colnames(dataf)[1]]]
#Eventdd=dataf[[colnames(dataf)[2]]]
#Breakdd=GridP(Ytdd, Eventdd, nT = nBreaks)
#iik=2
#Event<-Eventdd
#Break<-Breakdd
#Xtdd=NULL
#Ztdd=NULL
#if(is.null(pz)){
#if(dim(dataf)[2]>2){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-iik
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+2])
#Xtdd[,i]=dataf[[names(dataf)[i+iik]]]
#}
#}
#}
# if(is.null(pz)!=TRUE){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-2-pz
#if(ppd>=1){
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
#Xt[,i]=get(names(dataf)[i+2])
#Xtdd[,i]=dataf[[names(dataf)[i+2]]]
#}
#}
#if(pz>=1){
#Ztdd=matrix(0,nnnd,pz)
#for(j in 1:pz){
##Zt[,j]=get(names(dataf)[j+ppd+2])
#Ztdd[,j]=dataf[[names(dataf)[j+ppd+2]]]
#}
#}
#}
#}
#if(model!="PEM"){
##dataf<-data
##dataf<-dataf[all.vars(formula)]
#Event<-NULL
#Break<-NULL
#Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
#Ytdd=dataf[[colnames(dataf)[1]]]
#Xtdd=NULL
#Ztdd=NULL
#if(is.null(pz)){
#if(dim(dataf)[2]>1){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-1
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+1])
#Xtdd[,i]=dataf[[names(dataf)[i+1]]]
#}
#}
#}
#if(is.null(pz)!=TRUE){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-1-pz
#if(ppd>=1){
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+1])
#Xtdd[,i]=dataf[[names(dataf)[i+1]]]
#}
#}
#if(pz>=1){
#Ztdd=matrix(0,nnnd,pz)
#for(j in 1:pz){
#Zt[,j]=get(names(dataf)[j+ppd+1])
#Ztdd[,j]=dataf[[names(dataf)[j+ppd+1]]]
#}
#}
#}
#}
#Yt<-Ytdd
#Xt<-Xtdd
#Zt<-Ztdd
##########################################################
#detach(dataf)
#print(Yt)
#print(Xt)
#print(Zt)
if (a0 <= 0) stop("Bad input value for a0")
if (b0 <= 0) stop("Bad input value for b0")
if (is.null(Yt))stop("Bad input Yt")
if (is.vector(Yt)==FALSE)stop("Bad input for Yt")
if (is.vector(Xt))stop("Bad input for Xt. Put as a matrix.")
if (is.null(StaPar))stop("Bad input for StaPar")
if (is.data.frame(StaPar))stop("Bad input for StaPar")
if (is.vector(StaPar)==FALSE)stop("Bad input for StaPar")
# if (model!="Poisson" && model!="Normal"&& model!="Laplace"&&model!="GED"&&
# model!="Gamma"&& model!="GGamma"&& model!="Weibull")stop("Bad input for model")
if (sum(length(which(is.na(Yt))))>0)stop("Bad input Yt")
if(is.null(Xt)==FALSE){if (sum(length(which(is.na(Xt))))>0)stop("Bad input Xt")}
if(is.null(Xt)==FALSE){if(is.matrix(Xt)==FALSE){Xt=as.matrix(Xt)}}
if(is.null(Zt)==FALSE){if(is.matrix(Zt)==FALSE){Zt=as.matrix(Xt)}}
if(StaPar[1]==0)("Bad input for the static parameter w: value outside the parameter space.")
if (model=="Poisson" || model=="Normal" || model=="Laplace" || model=="GED"|| # Begin TS Models
model=="Gamma" || model=="GGamma" || model=="Weibull"){
n<-length(Yt)
#Likelihood:
l <- array(0,c(n,1))
# psi <- exp((par[1]/2)*(lgamma(3/par[1])-lgamma(1/par[1])) )
if(is.null(Xt)==FALSE){
if(is.null(Zt)==FALSE){
dbeta=dim((Xt))[2] #1
dteta=dim((Zt))[2]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta-dteta+1):(dStaPar-dteta)],dbeta,1)
Teta=matrix(StaPar[(dStaPar-dteta+1):(dStaPar)],dteta,1)
}else{ #2
# print("CERTOOOOO!")
dbeta=dim((Xt))[2]
dteta=0
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
Teta=0
}
}
if(is.null(Xt)==TRUE){
if(is.null(Zt)==FALSE){ #3
dbeta=0
dteta=dim((Zt))[2]
dStaPar=length(StaPar)
Teta=matrix(StaPar[(dStaPar-dteta+1):(dStaPar)],dteta,1)
}else{ Beta=Teta=0 } #4
}
# print(Beta)
# print(Teta)
#cat("\nSPar=",StaPar)
# print(Beta)
mab <- matrix(0,2,n+1)
att <- array(0,c((n),1))
btt <- array(0,c((n),1))
at <- array(0,c((n+1),1))
bt <- array(0,c((n+1),1))
#Pred:
at[1] <- a0
bt[1] <- b0
#for(t in 2:(n+1)){
if(model=="Poisson"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
# for(t in 2:(n+1)){
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
#Poisson
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((Yt[jt] + att[jt])) -
lgamma(Yt[jt]+1)+
att[jt] * log(btt[jt]) -lgamma(att[jt]) - (Yt[jt] + att[jt])*(log(1 + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(Yt[t-1])
bt[t] <- btt[t-1]+(1)
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
#dbeta=dim((Xt))[2]
for(t in 2:(n+1)){
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
#Poisson
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((Yt[jt] + att[jt])) -
lgamma(Yt[jt]+1)+
att[jt] * log(btt[jt]) -lgamma(att[jt]) - (Yt[jt] + att[jt])*(log(1 + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(Yt[t-1])
bt[t] <-StaPar[1]*bt[t-1]+(1)*exp((Xt[t-1,1:dbeta]%*%Beta))
# cat("\nte=",(Xt[t-1,1:dbeta]%*%Beta))
} #end for t
# cat("at=",at)
# cat("bt=",bt)
#cat("\nlikef=",sum(l))
}
}
if(model=="Normal"){
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
at[t] <- att[t-1]+(1/2)
if(is.null(Zt)){
bt[t] <- btt[t-1]+(Yt[t-1]^2)/2
# Normal
jt=t-1
# for(t in 1:n){
l[t] <- lgamma((0.5 + att[t])) - 0.5*log(2*3.1428)
+att[t] * log(btt[t])-lgamma(att[t])- (0.5 + att[t])*(log(0.5*((Yt[t])^2) + btt[t]))
#} #end for t
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- btt[t-1]+(((Yt[t-1]-(Zt[t-1,tt]%*%Teta))^2)/2)
# Normal
jt=t-1
dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
#for(t in 1:n){
l[jt] <- lgamma((0.5 + att[jt])) - 0.5*log(2*3.1428) +att[jt] * log(btt[jt])
-lgamma(att[jt])- (0.5 + att[jt])*(log(0.5*((Yt[jt]-(Zt[jt,tt]%*%Teta))^2) + btt[jt]))
#} #end for t
}
}
}else{
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
at[t] <- att[t-1]+(1/2)
if(is.null(Zt)){
bt[t] <- StaPar[1]*bt[t-1]+((Yt[t-1]^2)/2)*exp((Xt[t-1,1:dbeta]%*%Beta))
# Normal
jt=t-1
#for(t in 1:n){
l[jt] <- lgamma((0.5 + att[jt])) - 0.5*log(2*3.1428) +att[jt] * log(btt[jt])
-lgamma(att[jt])- (0.5 + att[jt])*(log(0.5*((Yt[jt])^2) + btt[jt]))
#} #end for t
# cat("\nte=",btt)
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- StaPar[1]*bt[t-1]+(((Yt[t-1]-(Zt[t-1,tt]%*%Teta))^2)/2)*exp((Xt[t-1,1:dbeta]%*%Beta))
# Normal
jt=t-1
dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
#for(t in 1:n){
l[jt] <- lgamma((0.5 + att[jt])) - 0.5*log(2*3.1428) +att[jt] * log(btt[jt])
-lgamma(att[jt])- (0.5 + att[jt])*(log(0.5*((Yt[jt]-(Zt[jt,tt]%*%Teta))^2) + btt[jt]))
#} #end for t
}
} #end for t
}
}
if(model=="Laplace"){
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
at[t] <- att[t-1]+(1)
if(is.null(Zt)){
bt[t] <- btt[t-1]+sqrt(2)*abs(Yt[t-1])
# Laplace
jt=t-1
l[jt] <- lgamma(att[jt]+1) + log(1/sqrt(2)) + att[jt] * log(btt[jt])
-lgamma(att[jt])- (1 + att[jt])*(log(sqrt(2)*abs(Yt[jt]) + btt[jt]))
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- btt[t-1]+sqrt(2)*abs(Yt[t-1]-(Zt[t-1,tt]%*%Teta))
# Laplace
jt=t-1
dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
l[jt] <- lgamma(att[jt]+1) + log(1/sqrt(2)) + att[jt] * log(btt[jt])-lgamma(att[jt])
- (1 + att[jt])*(log(sqrt(2)*abs(Yt[jt]-(Zt[jt,tt]%*%Teta)) + btt[jt]))
}
} #end for t
}else{
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
at[t] <- att[t-1]+(1)
if(is.null(Zt)){
bt[t] <- StaPar[1]*bt[t-1]+(sqrt(2)*abs(Yt[t-1]))*exp((Xt[t-1,1:dbeta]%*%Beta))
# Laplace
jt=t-1
l[jt] <- lgamma(att[jt]+1) + log(1/sqrt(2)) + att[jt] * log(btt[jt])
-lgamma(att[jt])- (1 + att[jt])*(log(sqrt(2)*abs(Yt[jt]) + btt[jt]))
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- StaPar[1]*bt[t-1]+(sqrt(2)*abs(Yt[t-1]-(Zt[t-1,tt]%*%Teta)))*exp((Xt[t-1,1:dbeta]%*%Beta))
# Laplace
jt=t-1
dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
l[jt] <- lgamma(att[jt]+1) + log(1/sqrt(2)) + att[jt] * log(btt[jt])
-lgamma(att[jt])- (1 + att[jt])*(log(sqrt(2)*abs(Yt[jt]-(Zt[jt,tt]%*%Teta)) + btt[jt]))
}
} #end for t
}
}
if(model=="GED"){
if(is.null(Xt)){
# at[1] <- 1/((1-StaPar[1])*StaPar[2])
# bt[1] <- StaPar[1]/(StaPar[1]*StaPar[2]+abs(StaPar[1]-1)*(StaPar[2]^2))
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
psi <- ((gamma(3/StaPar[2]))/gamma(1/StaPar[2]))^(StaPar[2]/2)
at[t] <- att[t-1]+(1/StaPar[2])
if(is.null(Zt)){
bt[t] <- btt[t-1]+((abs(Yt[t-1]))^StaPar[2])*psi
# GED
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((1/StaPar[2] + att[jt])) +
log(StaPar[2]/2) + ((1/2)*lgamma((3/StaPar[2]))-(3/2)*lgamma((1/StaPar[2])) +
att[jt] * log(btt[jt]) -lgamma(att[jt])) - (1/StaPar[2] + att[jt]) * (log(((abs(Yt[jt]))^StaPar[2])*psi + btt[jt]))
# }
}else{
bt[t] <- btt[t-1]+((abs(Yt[t-1]-(0)))^StaPar[2])*psi
# GED
jt=t-1
#dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
# for(t in 1:n){
l[jt] <- lgamma((1/StaPar[2] + att[jt])) +
log(StaPar[2]/2) + ((1/2)*lgamma((3/StaPar[2]))-(3/2)*lgamma((1/StaPar[2])) +
att[jt] * log(btt[jt]) -lgamma(att[jt])) - (1/StaPar[2] + att[jt]) * (log(((abs(Yt[jt]-(Zt[jt,tt]%*%Teta)))^StaPar[2])*psi + btt[jt]))
# }
}
} #end for t
}else{
at[1] <- 1/((1-StaPar[1])*StaPar[2])
bt[1] <- StaPar[1]/(StaPar[1]*StaPar[2]+abs(StaPar[1]-1)*(StaPar[2]^2))
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
psi <- ((gamma(3/StaPar[2]))/gamma(1/StaPar[2]))^(StaPar[2]/2)
at[t] <- att[t-1]+(1/StaPar[2])
if(is.null(Zt)){
bt[t] <- StaPar[1]*bt[t-1]+(((abs(Yt[t-1]))^StaPar[2])*psi)*exp((Xt[t-1,1:dbeta]%*%Beta))
# GED
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((1/StaPar[2] + att[jt])) +
log(StaPar[2]/2) + ((1/2)*lgamma((3/StaPar[2]))-(3/2)*lgamma((1/StaPar[2])) +
att[jt] * log(btt[jt]) -lgamma(att[jt])) - (1/StaPar[2] + att[jt]) * (log(((abs(Yt[jt]))^StaPar[2])*psi + btt[jt]))
# }
}else{
if(dteta==0){tt=1}else{tt=1:dteta}
bt[t] <- StaPar[1]*bt[t-1]+(((abs(Yt[t-1]-(Zt[t-1,tt]%*%Teta)))^StaPar[2])*psi)*exp((Xt[t-1,1:dbeta]%*%Beta))
# GED
jt=t-1
#dteta=dim((Zt))[2]
if(dteta==0){tt=1}else{tt=1:dteta}
# for(t in 1:n){
l[jt] <- lgamma((1/StaPar[2] + att[jt])) +
log(StaPar[2]/2) + ((1/2)*lgamma((3/StaPar[2]))-(3/2)*lgamma((1/StaPar[2])) +
att[jt] * log(btt[jt]) -lgamma(att[jt])) - (1/StaPar[2] + att[jt]) * (log(((abs(Yt[jt]-(Zt[jt,tt]%*%Teta)))^StaPar[2])*psi + btt[jt]))
# }
}
} #end for t
}
}
if(model=="Gamma"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# Gamma
# jt=t-1
# for(t in 1:n){
#l[jt] <- lgamma(att[jt]+StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+att[jt] * log(btt[jt])
#-lgamma(StaPar[2]) -lgamma(att[jt]) - (StaPar[2] + att[jt])*(log(Yt[jt] + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- btt[t-1]+(Yt[t-1])
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# Gamma
# jt=t-1
# for(t in 1:n){
# l[jt] <- lgamma(att[jt]+StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+att[jt] * log(btt[jt])
#-lgamma(StaPar[2]) -lgamma(att[jt]) - (StaPar[2] + att[jt])*(log(Yt[jt] + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1])*exp((Xt[t-1,1:dbeta]%*%Beta))
} #end for t
}
}
if(model=="GGamma"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# GGamma
# jt=t-1
# l[jt] <- lgamma((StaPar[2] + att[jt])) -lgamma(att[jt])+att[jt]*log(btt[jt]) + log(StaPar[3]) + (StaPar[3]*StaPar[2]-1)*log(Yt[jt]) - lgamma(StaPar[2])+ (-att[jt]-StaPar[2])*log((Yt[jt]^StaPar[3])+btt[jt])
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- btt[t-1]+(Yt[t-1]^StaPar[3])
}
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# GGamma
# jt=t-1
# l[jt] <- lgamma((StaPar[2] + att[jt])) -lgamma(att[jt])+att[jt]*log(btt[jt]) + log(StaPar[3]) + (StaPar[3]*StaPar[2]-1)*log(Yt[jt]) - lgamma(StaPar[2])+ (-att[jt]-StaPar[2])*log((Yt[jt]^StaPar[3])+btt[jt])
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1]^StaPar[3])*exp((Xt[t-1,1:dbeta]%*%Beta))
}
}
}
if(model=="Weibull"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# Weibull
jt=t-1
l[jt] <- lgamma((1 + att[jt])) + log(StaPar[2]) + (StaPar[2]-1)*log(Yt[jt]) - lgamma(att[jt])+ att[jt]*log(btt[jt]) + (-1 - att[jt])*log(Yt[jt]^StaPar[2] + btt[jt])
at[t] <- att[t-1]+(1)
bt[t] <- btt[t-1]+(Yt[t-1]^StaPar[2])
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# Weibull
jt=t-1
l[jt] <- lgamma((1 + att[jt])) + log(StaPar[2]) + (StaPar[2]-1)*log(Yt[jt]) - lgamma(att[jt])+ att[jt]*log(btt[jt]) + (-1 - att[jt])*log(Yt[jt]^StaPar[2] + btt[jt])
at[t] <- att[t-1]+(1)
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1]^StaPar[2])*exp(Xt[t-1,1:dbeta]%*%Beta)
} #end for t
}
}
return(-sum(l))
}#End TS Models
if(model=="SRGamma" || model=="SRWeibull"){ # Begin SR
if(model=="SRGamma"){model1="Gamma"}
if(model=="SRWeibull"){model1="Weibull"}
if (a0 <= 0) stop("Bad input value for a0")
if (b0 <= 0) stop("Bad input value for b0")
if (is.null(Yt))stop("Bad input Yt")
if (is.vector(Yt)==FALSE)stop("Bad input for Yt")
if (is.vector(Xt))stop("Bad input for Xt. Put as a matrix.")
if (is.null(StaPar))stop("Bad input for StaPar")
if (is.data.frame(StaPar))stop("Bad input for StaPar")
if (is.vector(StaPar)==FALSE)stop("Bad input for StaPar")
if (model1!="Gamma"&&model1!="Weibull")stop("Bad input for model")
if (sum(length(which(is.na(Yt))))>0)stop("Bad input Yt")
if(is.null(Xt)==FALSE){if (sum(length(which(is.na(Xt))))>0)stop("Bad input Xt")}
#print(StaPar)
if(StaPar[1]==0)stop("Bad input for the static parameter w: value outside the parameter space.")
# if(StaPar[1]==0){StaPar[1]=1e-10}
n<-length(Yt)
# psi <- exp((par[1]/2)*(lgamma(3/par[1])-lgamma(1/par[1])) )
if(is.null(Xt)==FALSE){
if(is.null(dim(Xt))){
dbeta=dim(t(Xt))[1]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
}else{
dbeta=dim(Xt)[2]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
}
}
# cat("SPar=",StaPar)
# print(Beta)
mab <- matrix(0,2,n+1)
att <- array(0,c((n),1))
btt <- array(0,c((n),1))
at <- array(0,c((n+1),1))
bt <- array(0,c((n+1),1))
btmu <- array(0,c((n+1),1))
#Pred:
at[1] <- a0
bt[1] <- b0
#Likelihood:
l <- array(0,c(n,1))
if(model1=="Gamma"){
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# Gamma
jt=t-1
#Gamma
#for(t in 1:n){
l[jt] <- lgamma(att[jt]+StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+att[jt]*log(btt[jt])-lgamma(StaPar[2]) -lgamma(att[jt])-(StaPar[2] + att[jt])*(log(Yt[jt] + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- btt[t-1]+(Yt[t-1])
} #end for t
}else{
# print("Ok!")
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1] #*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# Gamma
jt=t-1
# for(t in 1:n){
# print("Ok!")
# cat("\nte=",l)
# cat("\nte=",btt)
l[jt] <- lgamma(att[jt]+StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+(-StaPar[2]*Xt[jt,1:dbeta]%*%Beta)+att[jt] * log(btt[jt])-lgamma(StaPar[2]) -lgamma(att[jt]) - (StaPar[2] + att[jt])*(log(Yt[jt]*exp(-Xt[jt,1:dbeta]%*%Beta) + btt[jt]))
# } #end for t
at[t] <- att[t-1]+(StaPar[2])
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1])*exp((-Xt[t-1,1:dbeta]%*%Beta))
btmu[t] <-StaPar[1]*bt[t-1]+(Yt[t-1])*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# cat("\nte=",btt)
} #end for t
} #end if
}
if(model1=="Weibull"){
#Likelihood:
# l <- array(0,c(n,1))
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
if(is.null(Xt)){
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
at[t] <- att[t-1]+(1)
bt[t] <- btt[t-1]+(Yt[t-1]^StaPar[2])
# Weibull
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma((1 + att[jt])) + log(StaPar[2]) + (StaPar[2]-1)*log(Yt[jt])-lgamma(att[jt])+ att[jt]*log(btt[jt]) + (-1 - att[jt])*log(Yt[jt]^StaPar[2] + btt[jt])
# } #end for t
# cat("\nte=",btt)
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
#print("Ok!")
for(t in 2:(n+1)){ #begin for t
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1] #*exp(-(Xt[t-1,1:dbeta]%*%Beta)*StaPar[2])
at[t] <- att[t-1]+(1)
bt[t] <- StaPar[1]*bt[t-1]+(Yt[t-1]^StaPar[2])*exp(-StaPar[2]*Xt[t-1,1:dbeta]%*%Beta)
btmu[t] <- StaPar[1]*bt[t-1]+(Yt[t-1]^StaPar[2])*exp(-StaPar[2]*Xt[t-1,1:dbeta]%*%Beta)
#*exp(-(Xt[t-1,1:dbeta]%*%Beta))
# Weibull
jt=t-1
# for(t in 1:n){
l[jt] <- lgamma(1 + att[jt])+log(StaPar[2])+(StaPar[2]-1)*log(Yt[jt])+(-StaPar[2]*Xt[jt,1:dbeta]%*%Beta)-lgamma(att[jt])+att[jt]*log(btt[jt])+(-1 - att[jt])*log(((Yt[jt]^StaPar[2])*exp(-StaPar[2]*Xt[jt,1:dbeta]%*%Beta)) + btt[jt])
# } #end for t
# cat("\nte=",btt)
} #end for t
}
#print(att)
#print(btt)
} # end if
return(-sum(l))
} # End SR Models
if(model=="PEM"){ #Begin PEM/PH Model
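# PEM/PH branch: the likelihood is assembled interval by interval over the grid in
# 'Break', using the failure counts (NumFail) and total time at risk (TTime); when
# covariates Xt are present, the proportional-hazards term from ProdXtChi is added
# to each interval's contribution (see the l[jt] expressions below).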
if (a0 <= 0) stop("Bad input value for a0")
if (b0 <= 0) stop("Bad input value for b0")
if (is.null(Yt))stop("Bad input Yt")
if (is.vector(Yt)==FALSE)stop("Bad input for Yt")
if (is.vector(Xt))stop("Bad input for Xt. Put as a matrix.")
if (is.null(StaPar))stop("Bad input for StaPar")
if (is.data.frame(StaPar))stop("Bad input for StaPar")
if (is.vector(StaPar)==FALSE)stop("Bad input for StaPar")
if (model!="PEM")stop("Bad input for model")
if (sum(length(which(is.na(Yt))))>0)stop("Bad input Yt")
if(is.null(Xt)==FALSE){if (sum(length(which(is.na(Xt))))>0)stop("Bad input Xt")}
if(StaPar[1]==0)stop("Bad input for the static parameter w: value outside the parameter space.")
if (is.null(Event))stop("Bad input Event")
if (is.null(Break))stop("Bad input Break")
n<-length(Break)-1
if(is.null(Xt)==FALSE){
if(is.null(dim(Xt))){
dbeta=dim(t(Xt))[1]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
}else{
dbeta=dim(Xt)[2]
dStaPar=length(StaPar)
Beta=matrix(StaPar[(dStaPar-dbeta+1):(dStaPar)],dbeta,1)
}
}
mab <- matrix(0,2,n+1)
att <- array(0,c((n),1))
btt <- array(0,c((n),1))
at <- array(0,c((n+1),1))
bt <- array(0,c((n+1),1))
btmu <- array(0,c((n+1),1))
#Pred:
at[1] <- a0
bt[1] <- b0
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
#Likelihood:
l <- array(0,c(n,1))
if(is.null(Xt)==TRUE){
numF=NumFail(StaPar,Yt,Event,Break,Xt=NULL)
TT=TTime(StaPar,Yt,Event,Break,Xt=NULL)
for(t in 2:(n+1)){ #begin for t
if(amp==TRUE){
d=diff(Break)
tdif<- diff(unique(Yt[Event == 1]))
lamp=tdif[length(tdif)]
d[length(d)]=lamp
m=mean(d)+1
z=d/(m)
att[t-1] <- (StaPar[1]^z[t-1])*at[t-1]
btt[t-1] <- (StaPar[1]^z[t-1])*bt[t-1]
# PEM
jt=t-1
l[jt] <- lgamma(att[jt]+numF[jt])+att[jt] * log(btt[jt])-lgamma(att[jt]) - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
}else{
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# PEM
jt=t-1
l[jt] <- lgamma(att[jt]+numF[jt])+att[jt] * log(btt[jt])-lgamma(att[jt]) - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
}
at[t] <- att[t-1]+(numF[t-1])
bt[t] <- btt[t-1]+(TT[t-1])
btmu[t] <- btt[t-1]+(TT[t-1])
# PEM
jt=t-1
# l[jt] <- lgamma(att[jt]+numF[jt])+att[jt] * log(btt[jt])-lgamma(att[jt])
# - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
} #end for t
}else{
if (min(Yt)<0)stop("Bad input Yt. Negative values.")
#Break=GridP(Yt, Event, nT = NULL)
numF=NumFail(StaPar,Yt,Event,Break,Xt)
TT=TTime(StaPar,Yt,Event,Break,Xt)
XtC=ProdXtChi(StaPar,Yt,Break,Event,Xt)
for(t in 2:(n+1)){ #begin for t
if(amp==TRUE){
d=diff(Break)
tdif<- diff(unique(Yt[Event == 1]))
lamp=tdif[length(tdif)]
d[length(d)]=lamp
m=mean(d)+1
z=d/(m)
att[t-1] <- (StaPar[1]^z[t-1])*at[t-1]
btt[t-1] <- (StaPar[1]^z[t-1])*bt[t-1]
# PH
jt=t-1
l[jt] <- XtC[jt] + lgamma(att[jt] + numF[jt]) + att[jt]*log(btt[jt]) -
  lgamma(att[jt]) - (numF[jt] + att[jt])*log(TT[jt] + btt[jt])
}else{
att[t-1] <- StaPar[1]*at[t-1]
btt[t-1] <- StaPar[1]*bt[t-1]
# PH
jt=t-1
l[jt] <- XtC[jt] + lgamma(att[jt] + numF[jt]) + att[jt]*log(btt[jt]) -
  lgamma(att[jt]) - (numF[jt] + att[jt])*log(TT[jt] + btt[jt])
}
at[t] <- att[t-1]+(numF[t-1])
bt[t] <- btt[t-1]+(TT[t-1])
btmu[t] <- btt[t-1]+(TT[t-1])
# PH
# jt=t-1
# l[jt] <- XtC[jt] + lgamma(att[jt] + numF[jt]) + att[jt] * log(btt[jt])
# -lgamma(att[jt]) - (numF[jt] + att[jt])*(log(TT[jt] + btt[jt]))
}
}
#Likelihood:
# l <- array(0,c(n,1))
# if(is.null(Xt)==TRUE){
# #PEM
# for(t in 1:n){
# l[t] <- lgamma(att[t]+numF[t])+att[t] * log(btt[t])-lgamma(att[t]) - (numF[t] + att[t])*(log(TT[t] + btt[t]))
# } #end for t
# }else{
# #PH
# XtC=ProdXtChi(StaPar,Yt,Break,Event,Xt)
# for(t in 1:n){
# l[t] <- XtC[t] + lgamma(att[t] + numF[t]) + att[t] * log(btt[t]) -lgamma(att[t]) - (numF[t] + att[t])*(log(TT[t] + btt[t]))
# }
# }
return(-sum(l))
}#End PEM/PH Model
}
##########################################################
library(dae);library(nlme);library(effects);
library(psych);library(interplot);library(plyr);
library(devtools);library(ez);library(Rmisc);
library(wesanderson)
library(lme4);library(lsmeans);library(plotly);
library(ggplot2);library(ggpubr);library(dplyr)
library(ggthemes);library(extrafont)
library(car);library(ggplot2)
library(optimx);library(simr)
library(tidyverse)
library(hrbrthemes)
library(viridis);library(afex)
library(multcomp);library(emmeans);
library(gridExtra);library(ez)
library(rstatix)
rm(list=ls())
pd <- position_dodge(0.1)
alphalev <- 0.6
source("/Users/heshamelshafei/github/own/toolbox/RainCloudPlots/tutorial_R/R_rainclouds.R")
source("/Users/heshamelshafei/github/own/toolbox/RainCloudPlots/tutorial_R/summarySE.R")
dir_file <- "/Users/heshamelshafei/gitHub/own/doc/"
fname <- paste0(dir_file,"eyes.behav.fft.clean.vis.abs.1s.csv")
alldata <- read.table(fname,sep = ',',header=T)
alldata$sub <- as.factor(alldata$sub)
alldata$eyes <- as.factor(alldata$eyes)
alldata$compare <- as.factor(alldata$big)
alldata$behavior <- as.factor(alldata$small)
alldata$sub <- factor(alldata$sub)
alldata$eye <- ordered(alldata$eye, levels = c("open", "closed"))
list_compare <- c("accuracy_e","rt") #
for (ncom in 1:length(list_compare)){
rep_data <- alldata[alldata$compare == list_compare[ncom],]
rep_data$behavior <- factor(rep_data$behavior)
rep_data$var <- rep_data$val
ext_focus <- "absolute visual"
colormap <- c("#8856a7","#43a2ca")
round_val <- 3
model_beh <- lme4::lmer(var ~ (eye+behavior)^2 + (1|sub), data =rep_data)
model_beh_anova <- Anova(model_beh,type=2,test.statistic=c("F"))
print(model_beh_anova)
# res <- emmeans(model_beh, pairwise ~ eye | roi,adjust = "bonferroni")
# print(res)
e_anova = ezANOVA(
data = rep_data
, dv = .(var)
, wid = .(sub)
, within = .(eye,behavior)
)
print(e_anova$ANOVA)
map_name <- c("#8856a7","#43a2ca")
list_eyes <- c("open","closed")
for (neyes in 1:length(list_eyes)){
sub_data <- rep_data[rep_data$eye == list_eyes[neyes],]
sub_data$eyes <- as.factor(sub_data$eyes)
limit_open <- c(0,3e-5) # c(-0.8,2)#
limit_close <- c(0,5e-5) #limit_open#
if (ncom == 1 & neyes == 1){
plot_lim <- limit_open
} else if (ncom == 1 & neyes == 2){
plot_lim <- limit_close
} else if (ncom == 2 & neyes == 1){
plot_lim <- limit_open
} else if (ncom == 2 & neyes == 2){
plot_lim <-limit_close
}
pplot <- ggplot(sub_data, aes(x = behavior, y = var, fill = behavior)) +
geom_line(aes(group=sub),color='gray',size=0.2,alpha=0.6)+
# geom_boxplot(alpha = .5, width = .35, colour = "black")+
geom_boxplot(outlier.shape = NA, alpha = .5, width = .35, colour = "black")+
scale_colour_manual(values= map_name)+
scale_fill_manual(values = map_name)+
ggtitle(list_compare[ncom])+
# scale_y_continuous(name = ext_focus,limits = plot_lim)+#,
# breaks =seq(plot_lim[1], plot_lim[2], by = 0.4))+
scale_x_discrete(name = list_eyes[neyes])+
# ,labels = c("open" , "closed"))+
theme_pubr(base_size = 12,base_family = "Calibri")+
guides(fill=FALSE,color = FALSE, size = FALSE)
if (ncom == 1 & neyes == 1){
p1 = pplot
} else if (ncom == 1 & neyes == 2){
p3 = pplot
} else if (ncom == 2 & neyes == 1){
p2 = pplot
} else if (ncom == 2 & neyes == 2){
p4 = pplot
}
}
}
fullfig <- ggarrange(p1,p2,p3,p4,ncol=4,nrow=1)
fullfig
ggsave(filename="/Users/heshamelshafei/Dropbox/project_me/figures/eyes/eyes_final_visual_abs.svg",
plot=fullfig,width=10,height=3)
/rstudio/eyes_4paper_final_visual_abs.R
plot1 <- function(){
library(graphics)
library(dplyr)
# Read the data file
All_data <- read.csv("./household_power_consumption.txt", sep=";", stringsAsFactors=FALSE )
# Extract the data from the file where date== "1/2/2007" & date=="2/2/2007"
test1 <- subset(All_data, Date == "1/2/2007")
test2 <- subset(All_data, Date == "2/2/2007")
test_data <- rbind(test1,test2)
# Plot 1
# Prepare the file
png("plot1.png",480,480)
# Prepare the data
test_data$Global_active_power <- as.numeric(test_data$Global_active_power)
#Draw the histogram
hist(test_data$Global_active_power, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
#Close the device
dev.off()
}
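# Example call (assumes household_power_consumption.txt sits in the working directory):
# plot1()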
/plot1.R
## Author: The Leopards (Samantha Krawczyk, Georgios Anastasiou)
## 28 January 2016
## Validating the use of NDSI as a proxy for glacier area by comparing it with the glacier extent obtained from the GLIMS dataset
library(rgeos)
library(rgdal)
library(maptools)
## preparing the datasets and extracting the feature corresponding to the Rhone Glacier from the GLIMS dataset
LT2009 <- readOGR("output/NDSI_shp_LT51940282009250.shp", "NDSI_shp_LT51940282009250")
untar("data/glims_download_18271.tar.gz", exdir = "data/")
glims <- readOGR(dsn="data/glims_download_18271/glims_polygons.shp", layer="glims_polygons")
rhone <- subset(glims, glims$anlys_id==166719)
writeOGR(rhone, "output", layer="glimsRhone", driver="ESRI Shapefile", overwrite_layer=T)
rhoneTr <- spTransform(rhone, CRS=proj4string(LT2009))
## plotting the two shapefiles together for comparison
plot(rhoneTr, col="orange", main ="Validating NDSI as proxy for glacier extent")
plot(LT2009, add=T, col="light blue")
legend("bottomright", legend=c("NDSI for Sep 2009", "Glacier outline from GLIMS dataset"), fill=c("orange", "light blue"), bg="white", cex=0.75)
box()
/R/NDSI_validation.R
rm(list = ls())
library(tidyverse)
library(sedgwickenv)
library(sedgwickspecies)
library(sedgwickcover)
species_cover <-
site_cover %>%
left_join(sedgwick_plants, by = c('species' = 'calflora_binomial')) %>%
dplyr::select(USDA_symbol, site, cover) %>%
filter( !is.na(USDA_symbol)) %>%
distinct() %>%
left_join(sedgwickenv, by = 'site') %>%
dplyr::select( site, USDA_symbol, cover, site_name, type, microsite) %>%
spread( USDA_symbol, cover, fill = 0 ) %>%
gather( USDA_symbol, cover, -c(site:microsite))
global_cover <-
species_cover %>%
group_by( USDA_symbol) %>%
summarise( abu = mean(cover))
type_cover <-
species_cover %>%
group_by( USDA_symbol, type) %>%
summarise( abu = mean(cover))
microsite_cover <-
species_cover %>%
group_by( USDA_symbol, type, microsite) %>%
summarise( abu = mean(cover))
save(global_cover, type_cover, microsite_cover, file = 'output/avg_cover.rda')
/code/aggregate_cover.R
#' Ratio of significantly different zones.
#'
#' @usage sigratio(formula, data, ndisc, methoddisc, methodoverlay = "fuzzyAND")
#'
#' @param formula A formula of spatial variables
#' @param data A data frame of dataset
#' @param ndisc A numeric vector of break numbers for respective
#' explanatory variables
#' @param methoddisc A character vector of discretization methods
#' @param methodoverlay A character of spatial overlay methods, including
#' "fuzzyAND" and "intersection"
#'
#' @return A list of ratios of significantly different zones.
#'
#' @importFrom GD gdrisk
#'
#' @examples
#' sr1 <- sigratio(formula = y ~ xa + xb + xc, data = sim,
#' ndisc = c(4,4,5), methoddisc = "quantile",
#' methodoverlay = "fuzzyAND")
#' sr2 <- sigratio(formula = y ~ xa + xb + xc, data = sim,
#' ndisc = c(4,4,5), methoddisc = "quantile",
#' methodoverlay = "intersection")
#' sr1$n.zone; sr2$n.zone
#' sr1$ratio.sigdif; sr2$ratio.sigdif
#'
#' @export
#'
sigratio <- function(formula, data, ndisc, methoddisc, methodoverlay = "fuzzyAND"){
formula <- as.formula(formula)
formula.vars <- all.vars(formula)
response <- subset(data, select = formula.vars[1])
if (formula.vars[2] == "."){
explanatory <- subset(data, select = -match(formula.vars[1], colnames(data)))
} else {
explanatory <- subset(data, select = formula.vars[-1])
}
ncolx <- ncol(explanatory)
xnames <- colnames(explanatory)
# discretize
xh <- explanatory
if (length(ndisc) == 1){
ndisc <- rep(ndisc, ncolx)
}
if (length(methoddisc) == 1){
methoddisc <- rep(methoddisc, ncolx)
}
for (i in 1:ncolx){
xh[, i] <- discretize(explanatory[, xnames[i]], ndisc[i], methoddisc[i])
}
dataxh <- data[, formula.vars]
dataxh[,-1] <- xh
if (methodoverlay == "gdinteraction"){
dataxh$xa_xb <- apply(xh, 1, paste, collapse = "_") #debug
}
if (methodoverlay == "intersection"){
dataxh$xa_xb <- apply(xh, 1, paste, collapse = "_")
}
if (methodoverlay == "fuzzyAND"){
newlayers <- fuzzyoverlay(response[,1], xh, method = "fuzzyAND") #debug
dataxh$xa_xb <- newlayers$fuzzylayer
}
xh.overlayzones <- table(dataxh$xa_xb)
n.zone <- length(xh.overlayzones)
k <- which(xh.overlayzones > 1)
n.zone.xfdz <- length(xh.overlayzones[k])
# remove n.obs == 1
k <- which(xh.overlayzones != 1)
dataxh2 <- dataxh[which(dataxh$xa_xb %in% names(xh.overlayzones)[k]),]
f2 <- as.formula(paste(formula.vars[1], "xa_xb", sep = "~"))
gdrisk.zones <- gdrisk(f2, data = dataxh2) ## sig < 0.05
sigratio.zone <- length(which(gdrisk.zones$xa_xb$sig <= 0.05))/nrow(gdrisk.zones$xa_xb)
result <- list("n.zone" = n.zone, "n.zone.xFDZ" = n.zone.xfdz,
"ratio.sigdif" = sigratio.zone, "gdrisk.zone" = gdrisk.zones,
"zonal.n.obs" = xh.overlayzones)
class(result) <- "list"
return(result)
}
/R/sigratio.R
library(psychomix)
### Name: btmix
### Title: Finite Mixtures of Bradley-Terry Models
### Aliases: btmix FLXMCbtreg
### Keywords: paired comparisons Bradley-Terry model mixture model
### ** Examples
## No test:
## Data
data("GermanParties2009", package = "psychotools")
## omit single observation with education = 1
gp <- subset(GermanParties2009, education != "1")
gp$education <- factor(gp$education)
## Bradley-Terry mixture models
set.seed(1)
## fit models for k = 1, ..., 4 with concomitant variables
cm <- btmix(preference ~ gender + education + age + crisis,
data = gp, k = 1:4, nrep = 3)
## inspect results
plot(cm)
## select model
cm4 <- getModel(cm, which = "4")
## inspect mixture and effects
library("lattice")
xyplot(cm4)
effectsplot(cm4)
effectsplot(cm4, selection = "education")
## visualize effects via the effects package directly
if(require("effects")) {
eff4 <- allEffects(cm4)
plot(eff4)
}
## End(No test)
/data/genthat_extracted_code/psychomix/examples/btmix.Rd.R
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05905915828878e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result)
/dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609867847-test.R
WT=rgb(130/255,130/255,130/255)
APH=rgb(240/255,59/255,32/255)
CDC6=rgb(8/255,69/255,148/255)
cols3=c(WT, APH, CDC6)
names(cols3)=c('WT', 'APH', 'CDC6')
#CONST=rgb(35/255,139/255,69/255)
COMM='darkgreen'
APHR=rgb(254/255,178/255,76/255)
CDC6R=rgb(107/255,174/255,214/255)
APHCDC6R='green'
colsresp=c(COMM, APHR, CDC6R, APHCDC6R)
names(colsresp)=c('COMM', 'APH-R', 'CDC6-R', 'APH+CDC6-R')
colWTef='black'
colstot=c(cols3,colsresp)
sel=c('WT', 'APH', 'CDC6', 'APH-R', 'CDC6-R','APH+CDC6-R' , 'COMM', 'WT-nonCOMM','ALL-ORI')
cols=c(colstot,c( 'grey', 'black'))
names(cols)[8:9]=c( 'WT-nonCOMM', 'ALL-ORI')
cols
/coloursdef.R
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_functions.R
\name{creativeFields.list}
\alias{creativeFields.list}
\title{Retrieves a list of creative fields, possibly filtered. This method supports paging.}
\usage{
creativeFields.list(profileId, advertiserIds = NULL, ids = NULL,
maxResults = NULL, pageToken = NULL, searchString = NULL,
sortField = NULL, sortOrder = NULL)
}
\arguments{
\item{profileId}{User profile ID associated with this request}
\item{advertiserIds}{Select only creative fields that belong to these advertisers}
\item{ids}{Select only creative fields with these IDs}
\item{maxResults}{Maximum number of results to return}
\item{pageToken}{Value of the nextPageToken from the previous result page}
\item{searchString}{Allows searching for creative fields by name or ID}
\item{sortField}{Field by which to sort the list}
\item{sortOrder}{Order of sorted results, default is ASCENDING}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/dfatrafficking
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/dfatrafficking"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/doubleclick-advertisers/}{Google Documentation}
}
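\examples{
\dontrun{
## Illustrative sketch only; the profile ID is a made-up placeholder.
options(googleAuthR.scopes.selected =
          c("https://www.googleapis.com/auth/dfatrafficking"))
googleAuthR::gar_auth()
fields <- creativeFields.list(profileId = 1234567, maxResults = 10)
}
}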
/googledfareportingv26.auto/man/creativeFields.list.Rd
library(shiny)
library(ggvis)
library(reshape2)
# note: i deleted the top four rows and trailing empty columns in csv so i could read it in properly
life <- read.csv('API_SP.DYN.LE00.IN_DS2_en_csv_v2.csv', sep="\t")
fert <- read.csv('API_SP.DYN.TFRT.IN_DS2_en_csv_v2.csv', sep="\t")
cont <- read.csv('Metadata_Country_API_SP.DYN.LE00.IN_DS2_en_csv_v2.csv')
pop <- read.csv('API_SP.POP.TOTL_DS2_en_csv_v2.csv')
# unclassified row
life <- life[-109,-c(2,3,4)]
fert <- fert[-109,c(1,5:59)]
pop <- pop[-109,c(1,5:59)]
colnames(life) <- gsub("^X", "", colnames(life))
colnames(fert) <- gsub("^X", "", colnames(fert))
colnames(pop) <- gsub("^X", "", colnames(pop))
life <- melt(life, id.vars = c("Country.Name"))
names(life) <- c('country', 'year', 'life_expectancy')
fert <- melt(fert, id.vars = c("Country.Name"))
names(fert) <- c('country', 'year', 'fertility_rate')
pop <- melt(pop, id.vars = c("Country.Name"))
names(pop) <- c('country', 'year', 'population')
life$region <- rep(cont$Region, 55)
life$fertility_rate <- fert$fertility_rate
life$population <- pop$population
life <- life[-which(life$region == ""), ]
life <- life[complete.cases(life),]
life$year <- as.numeric(as.character(life$year))
life <- life[order(life$population, decreasing = T),]
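# At this point `life` holds one row per country-year (life expectancy, fertility
# rate, population, region), ordered by population with the largest first,
# presumably so that big bubbles are plotted first and do not hide smaller ones.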
ui <- fluidPage(
headerPanel(title = 'Life Expectancy vs. Fertility Rate Interactive Plot'),
mainPanel(
uiOutput("ggvis_ui"),
ggvisOutput("ggvis"),
selectInput("continent", label = NULL,
choices = list("All Regions" = 1, "East Asia & Pacific" = "East Asia & Pacific",
"Europe & Central Asia" = "Europe & Central Asia",
"Latin America & Caribbean" = "Latin America & Caribbean",
"Middle East & North Africa" = "Middle East & North Africa",
"North America" = "North America", "South Asia" = "South Asia",
"Sub-Saharan Africa" = "Sub-Saharan Africa"),
selected = 1),
sliderInput("size", "Population Scale", 1, 10, 5, ticks = FALSE),
sliderInput("year", "Year", 1960, 2014, 1983, sep = "", ticks = FALSE, animate = animationOptions(interval = 100))
)
)
server <- function(input, output) {
life$id <- 1:nrow(life)
all_values <- function(x) {
if(is.null(x)) return(NULL)
row <- life[life$id == x$id, ]
paste(paste("<b>", row$country, "</b>"),paste("Life Expectancy: ",row$life_expectancy),
paste("Fertility Rate: ",row$fertility_rate),paste("Population: ",row$population),sep="<br />")
}
vis <- reactive({
life$population <- life$population*input$size
life <- life[life$year == input$year,]
if (input$continent != 1) {
keep <- life[life$region == input$continent,]
exclude <- life[life$region != input$continent,]
ggvis() %>%
layer_points(data=keep, ~life_expectancy, ~fertility_rate, size := ~population/1000000, key := ~id,
fill = ~factor(region), fillOpacity := 0.7, fillOpacity.hover := 1, stroke := "black") %>%
layer_points(data=exclude, ~life_expectancy, ~fertility_rate, size := ~population/1000000, key := ~id,
fill = ~factor(region), fillOpacity := 0.1, fillOpacity.hover := 1, stroke := "black") %>%
add_axis("x", title = "Life Expectancy") %>%
add_axis("y", title = "Fertility Rate") %>%
add_tooltip(all_values, "hover") %>%
add_legend("fill", title = "Region") %>%
scale_numeric("x", domain = c(10,90), clamp=TRUE) %>%
scale_numeric("y", domain = c(0.5,9), clamp=TRUE) %>%
scale_ordinal("fill", domain=c("East Asia & Pacific", "Europe & Central Asia", "Latin America & Caribbean", "Middle East & North Africa",
"North America", "South Asia", "Sub-Saharan Africa"), range=c("#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
"#ff7f00", "#ffff33", "#a65628")) %>%
set_options(width = 800, height = 500)
}else {
keep <- life
ggvis() %>%
layer_points(data=keep, ~life_expectancy, ~fertility_rate, size := ~population/1000000, key := ~id,
fill = ~factor(region), fillOpacity := 0.7, fillOpacity.hover := 1, stroke := "black") %>%
add_axis("x", title = "Life Expectancy") %>%
add_axis("y", title = "Fertility Rate") %>%
add_tooltip(all_values, "hover") %>%
add_legend("fill", title = "Region") %>%
scale_numeric("x", domain = c(10,90), clamp=TRUE) %>%
scale_numeric("y", domain = c(0.5,9), clamp=TRUE) %>%
scale_ordinal("fill", range=c("#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#ffff33", "#a65628")) %>%
set_options(width = 800, height = 500)
}
})
vis %>% bind_shiny("ggvis", "ggvis_ui")
}
shinyApp(ui = ui, server = server)
/hw2/app.R
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")
A = as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
if( nrow(A)>1 ){
B = apply(A, 2, cumprod);
} else {
B = A;
}
writeMM(as(B, "CsparseMatrix"), paste(args[2], "B", sep=""));
/src/test/scripts/functions/unary/matrix/Cumprod.R
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/renderers.R
\name{renderers}
\alias{renderers}
\alias{gifski_renderer}
\alias{file_renderer}
\alias{ffmpeg_renderer}
\alias{magick_renderer}
\alias{sprite_renderer}
\title{Renderers provided by gganimate}
\usage{
gifski_renderer(file = tempfile(fileext = ".gif"), loop = TRUE,
width = NULL, height = NULL)
file_renderer(dir = "~", prefix = "gganim_plot", overwrite = FALSE)
ffmpeg_renderer(format = "mp4", ffmpeg = NULL, options = list(pix_fmt
= "yuv420p"))
magick_renderer(loop = TRUE)
sprite_renderer()
}
\arguments{
\item{file}{The animation file}
\item{loop}{Logical. Should the produced gif loop}
\item{width, height}{Dimensions of the animation in pixels. If \code{NULL} will
take the dimensions from the frame, otherwise it will rescale it.}
\item{dir}{The directory to copy the frames to}
\item{prefix}{The filename prefix to use for the image files}
\item{overwrite}{Logical. If TRUE, existing files will be overwritten.}
\item{format}{The video format to encode the animation into}
\item{ffmpeg}{The location of the \code{ffmpeg} executable. If \code{NULL} it will be
assumed to be on the search path}
\item{options}{Either a character vector of command line options for ffmpeg
or a named list of option-value pairs that will be converted to command line
options automatically}
}
\value{
The provided renderers are factory functions that returns a new function
that take \code{frames} and \code{fps} as arguments, the former being a character
vector with file paths to the images holding the separate frames, in the
order they should appear, and the latter being the framerate to use for the
animation in frames-per-second.
The return type of the different returned renderers are:
\itemize{
\item \strong{\code{gifski_renderer}}: Returns a \link{gif_image} object
\item \strong{\code{file_renderer}}: Returns a vector of file paths
\item \strong{\code{ffmpeg_renderer}}: Returns a \link{video_file} object
\item \strong{\code{magick_renderer}}: Returns a \code{magick-image} object
}
}
\description{
The purpose of the renderer function is to take a list of image files and
assemble them into an animation. \code{gganimate} provide a range of renderers
but it is also possible to provide your own, if the supplied ones are lacking
in any way. A renderer is given as argument to \code{\link[=animate]{animate()}}/print() and
receives the paths to the individual frames once they have been created.
}
\details{
It is possible to provide your own renderer function providing that it
matches the required signature (\code{frames} and \code{fps} argument). The return
value of your provided function will be the return value ultimately given by
\code{\link[=animate]{animate()}}
}
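\examples{
\dontrun{
## Illustrative sketch only; `p` stands for some gganim object built elsewhere.
## A custom renderer is a function of `frames` and `fps`; this minimal one simply
## returns the frame paths unchanged, much like a no-op file_renderer.
passthrough_renderer <- function() {
  function(frames, fps) {
    frames
  }
}
animate(p, renderer = passthrough_renderer())
## The bundled renderers are passed the same way:
animate(p, renderer = gifski_renderer(loop = FALSE))
}
}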
/man/renderers.Rd
#setwd("/Users/TerryLai/Dropbox/TerryLai/simulation/oubmbm/")
setwd("/Users/terrylai/Dropbox/TerryLai/simulation/oubmbm")
rm(list=ls())
#source("/Users/TerryLai/Dropbox/TerryLai/R_code/abc_V2/oubmbmabc.r")
source("/Users/terrylai/Dropbox/TerryLai/R_code/abc_infor_prior/oubmbmabc.r")
root<-0
#true params
true.alpha.y<-0.15
true.sigma.x<-1
true.tau<- 0.35
true.b0 <- 0
true.b1 <- 0.5
true.b2 <- 0.5
#hyper parameters
alpha.y.rate <- 1/0.15
sigma.x.rate <-1
tau.rate <- 1
b0.min=-1
b0.max=1
b1.min=0
b1.max=1
b2.min=0
b2.max=1
prior.model.params=c(alpha.y.rate, sigma.x.rate, tau.rate)
names(prior.model.params)<-c("alpha.y.rate", "sigma.x.rate", "tau.rate")
prior.reg.params=c(b0.min, b0.max, b1.min, b1.max, b2.min, b2.max)
numbsim<-1;lambda<-2.0;mu<-0.5;frac<-0.6;age<-2
taxa.size.array<-c(10)#,20,50,100)
sims<-50000
model.params.array<-array(0,c(3,sims))
rownames(model.params.array)<-c("alpha.y","sigma.x","tau")
reg.params.array<-array(0,c(3,sims))
row.names(reg.params.array)<-c("b0", "b1", "b2")
y.sum.stat.array<-array(0,c(2,sims))
rownames(y.sum.stat.array)<-c("y.mean","y.sd")
x1.sum.stat.array<-array(0,c(2,sims))
rownames(x1.sum.stat.array)<-c("x1.mean","x1.sd")
x2.sum.stat.array<-array(0,c(2,sims))
rownames(x2.sum.stat.array)<-c("x2.mean","x2.sd")
sum.stat.distance.array<-array(0,c(sims))
for(taxa.size.Index in 1:length(taxa.size.array)){
n<-taxa.size.array[taxa.size.Index]
print(paste("taxa",n,sep=""))
sim.oubmbm.trait<-array(0,c(n,3,sims))
tree<-sim.bd.taxa.age(n=n,numbsim=1,lambda=lambda,mu=mu,frac=frac,age=age,mrca=TRUE)[[1]]
tree<-reorder(tree,"postorder")
# tree$edge
# plot(tree)
# nodelabels()
# tiplabels()
true.trait<-oubmbmmodel(model.params=c(true.alpha.y,true.sigma.x,true.tau),reg.params=c(true.b0,true.b1,true.b2),root=root,tree=tree)
assign(paste("true.trait.taxa",taxa.size.array[taxa.size.Index],sep=""),true.trait)
y.raw.sum.stat<-sum.stat(trait=true.trait$y,tree=tree)
x1.raw.sum.stat<-sum.stat(trait=true.trait$x1,tree=tree)
x2.raw.sum.stat<-sum.stat(trait=true.trait$x2,tree=tree)
raw.sum.stat <- cbind(t(y.raw.sum.stat),t(x1.raw.sum.stat),t(x2.raw.sum.stat))
assign(paste("raw.sum.stat.taxa",taxa.size.array[taxa.size.Index],sep=""),raw.sum.stat)
for(simIndex in 1:sims){
if(simIndex %%1000==0){print(simIndex)}
prior.params <- oubmbmprior(prior.model.params=prior.model.params,prior.reg.params=prior.reg.params)
model.params.array[,simIndex]<-prior.params$model.params#for record only
reg.params.array[,simIndex]<-prior.params$reg.params#for record only
sim.trait <-oubmbmmodel(model.params=prior.params$model.params,reg.params=prior.params$reg.params,root=root,tree=tree)
sim.oubmbm.trait[,1,simIndex]<-sim.trait$y
sim.oubmbm.trait[,2,simIndex]<-sim.trait$x1
sim.oubmbm.trait[,3,simIndex]<-sim.trait$x2
y.sum.stat.array[,simIndex]<- sum.stat(trait=sim.trait$y,tree=tree)
x1.sum.stat.array[,simIndex]<- sum.stat(trait=sim.trait$x1,tree=tree)
x2.sum.stat.array[,simIndex]<- sum.stat(trait=sim.trait$x2,tree=tree)
}#end of loop
### Use abc package
sim.sum.stat <- cbind(t(y.sum.stat.array),t(x1.sum.stat.array),t(x2.sum.stat.array))
oubmbm.par.sim <- cbind(t(model.params.array),t(reg.params.array))
assign(paste("sim.oubmbm.trait.taxa",taxa.size.array[taxa.size.Index],sep=""),sim.oubmbm.trait)
assign(paste("sim.sum.stat.taxa",taxa.size.array[taxa.size.Index],sep=""),sim.sum.stat)
assign(paste("oubmbm.par.sim.taxa",taxa.size.array[taxa.size.Index],sep=""),oubmbm.par.sim)
### The rejection algorithm
assign(paste("rej.taxa",taxa.size.array[taxa.size.Index],sep=""),abc(target=c(y.raw.sum.stat,x1.raw.sum.stat,x2.raw.sum.stat), param=oubmbm.par.sim, sumstat=sim.sum.stat, tol=0.05, method="rejection"))
setwd("/Users/terrylai/Documents/simulate_data/")
save.image(paste("oubmbmsimstaxa",taxa.size.array[taxa.size.Index], ".RData", sep=""))
}#end of taxasize
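### Hedged post-processing sketch (not part of the original script): with the
### abc package's rejection method, the accepted draws are stored in
### $unadj.values, so posterior summaries for the last taxa size could be
### pulled out roughly like this.
# rej <- get(paste("rej.taxa", taxa.size.array[taxa.size.Index], sep=""))
# apply(rej$unadj.values, 2, median) # posterior medians per parameter
# apply(rej$unadj.values, 2, quantile, probs=c(0.025, 0.975)) # 95% credible intervals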
|
/maincode/simulation/informative/oubmbmsimstaxa10.r
|
no_license
|
LaiYenShuo/Master-thesis
|
R
| false | false | 3,959 |
r
|
#setwd("/Users/TerryLai/Dropbox/TerryLai/simulation/oubmbm/")
setwd("/Users/terrylai/Dropbox/TerryLai/simulation/oubmbm")
rm(list=ls())
#source("/Users/TerryLai/Dropbox/TerryLai/R_code/abc_V2/oubmbmabc.r")
source("/Users/terrylai/Dropbox/TerryLai/R_code/abc_infor_prior/oubmbmabc.r")
root<-0
#true params
true.alpha.y<-0.15
true.sigma.x<-1
true.tau<- 0.35
true.b0 <- 0
true.b1 <- 0.5
true.b2 <- 0.5
#hyper parameters
alpha.y.rate <- 1/0.15
sigma.x.rate <-1
tau.rate <- 1
b0.min=-1
b0.max=1
b1.min=0
b1.max=1
b2.min=0
b2.max=1
prior.model.params=c(alpha.y.rate, sigma.x.rate, tau.rate)
names(prior.model.params)<-c("alpha.y.rate", "sigma.x.rate", "tau.rate")
prior.reg.params=c(b0.min, b0.max, b1.min, b1.max, b2.min, b2.max)
numbsim<-1;lambda<-2.0;mu<-0.5;frac<-0.6;age<-2
taxa.size.array<-c(10)#,20,50,100)
sims<-50000
model.params.array<-array(0,c(3,sims))
rownames(model.params.array)<-c("alpha.y","sigma.x","tau")
reg.params.array<-array(0,c(3,sims))
row.names(reg.params.array)<-c("b0", "b1", "b2")
y.sum.stat.array<-array(0,c(2,sims))
rownames(y.sum.stat.array)<-c("y.mean","y.sd")
x1.sum.stat.array<-array(0,c(2,sims))
rownames(x1.sum.stat.array)<-c("x1.mean","x1.sd")
x2.sum.stat.array<-array(0,c(2,sims))
rownames(x2.sum.stat.array)<-c("x2.mean","x2.sd")
sum.stat.distance.array<-array(0,c(sims))
for(taxa.size.Index in 1:length(taxa.size.array)){
n<-taxa.size.array[taxa.size.Index]
print(paste("taxa",n,sep=""))
sim.oubmbm.trait<-array(0,c(n,3,sims))
tree<-sim.bd.taxa.age(n=n,numbsim=1,lambda=lambda,mu=mu,frac=frac,age=age,mrca=TRUE)[[1]]
tree<-reorder(tree,"postorder")
# tree$edge
# plot(tree)
# nodelabels()
# tiplabels()
true.trait<-oubmbmmodel(model.params=c(true.alpha.y,true.sigma.x,true.tau),reg.params=c(true.b0,true.b1,true.b2),root=root,tree=tree)
assign(paste("true.trait.taxa",taxa.size.array[taxa.size.Index],sep=""),true.trait)
y.raw.sum.stat<-sum.stat(trait=true.trait$y,tree=tree)
x1.raw.sum.stat<-sum.stat(trait=true.trait$x1,tree=tree)
x2.raw.sum.stat<-sum.stat(trait=true.trait$x2,tree=tree)
raw.sum.stat <- cbind(t(y.raw.sum.stat),t(x1.raw.sum.stat),t(x2.raw.sum.stat))
assign(paste("raw.sum.stat.taxa",taxa.size.array[taxa.size.Index],sep=""),raw.sum.stat)
for(simIndex in 1:sims){
if(simIndex %%1000==0){print(simIndex)}
prior.params <- oubmbmprior(prior.model.params=prior.model.params,prior.reg.params=prior.reg.params)
model.params.array[,simIndex]<-prior.params$model.params#for record only
reg.params.array[,simIndex]<-prior.params$reg.params#for record only
sim.trait <-oubmbmmodel(model.params=prior.params$model.params,reg.params=prior.params$reg.params,root=root,tree=tree)
sim.oubmbm.trait[,1,simIndex]<-sim.trait$y
sim.oubmbm.trait[,2,simIndex]<-sim.trait$x1
sim.oubmbm.trait[,3,simIndex]<-sim.trait$x2
y.sum.stat.array[,simIndex]<- sum.stat(trait=sim.trait$y,tree=tree)
x1.sum.stat.array[,simIndex]<- sum.stat(trait=sim.trait$x1,tree=tree)
x2.sum.stat.array[,simIndex]<- sum.stat(trait=sim.trait$x2,tree=tree)
}#end of loop
### Use abc package
sim.sum.stat <- cbind(t(y.sum.stat.array),t(x1.sum.stat.array),t(x2.sum.stat.array))
oubmbm.par.sim <- cbind(t(model.params.array),t(reg.params.array))
assign(paste("sim.oubmbm.trait.taxa",taxa.size.array[taxa.size.Index],sep=""),sim.oubmbm.trait)
assign(paste("sim.sum.stat.taxa",taxa.size.array[taxa.size.Index],sep=""),sim.sum.stat)
assign(paste("oubmbm.par.sim.taxa",taxa.size.array[taxa.size.Index],sep=""),oubmbm.par.sim)
### The rejection alogoritm
assign(paste("rej.taxa",taxa.size.array[taxa.size.Index],sep=""),abc(target=c(y.raw.sum.stat,x1.raw.sum.stat,x2.raw.sum.stat), param=oubmbm.par.sim, sumstat=sim.sum.stat, tol=0.05, method="rejection"))
setwd("/Users/terrylai/Documents/simulate_data/")
save.image(paste("oubmbmsimstaxa",taxa.size.array[taxa.size.Index], ".RData", sep=""))
}#end of taxasize
|
source("LoadHouseholdPowerConsumptionData.R")
##The file can be downloaded from here: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
HPC <- LoadHouseholdPowerConsumptionData("exdata-data-household_power_consumption/household_power_consumption.txt")
HPC$FullDate <- strptime(paste(HPC$Date, HPC$Time), format = "%Y-%m-%d %H:%M:%S")
HPC <- subset(HPC, Date >= "2007-02-01" & Date <= "2007-02-02")
par(mfcol = c(2, 2))
plot(HPC$FullDate, HPC$Global_active_power, type = "l", ylab = "Global Active Power (Kilowatts)", xlab = "")
plot(HPC$FullDate, HPC$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
points(HPC$FullDate, HPC$Sub_metering_2, type = "l", col = "red")
points(HPC$FullDate, HPC$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black", "red", "blue"), lwd=c(3,3,3), xjust = 1, cex = 0.75)
plot(HPC$FullDate, HPC$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
plot(HPC$FullDate, HPC$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.copy(png, "plot4.png", width = 480, height = 480, units = 'px')
dev.off()
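## Hedged sketch of the sourced loader (the real LoadHouseholdPowerConsumptionData.R
## is not shown here): the UCI file is semicolon-separated with "?" as the missing
## value marker, and the Date column is assumed to be reformatted to "%Y-%m-%d"
## so the string comparison in the subset() call above works.
# LoadHouseholdPowerConsumptionData <- function(path) {
#   HPC <- read.table(path, header = TRUE, sep = ";", na.strings = "?",
#                     stringsAsFactors = FALSE)
#   HPC$Date <- format(as.Date(HPC$Date, format = "%d/%m/%Y"), "%Y-%m-%d")
#   HPC
# }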
|
/4.ExploratoryDataAnalysis_Week1/plot4.R
|
no_license
|
williamrelf/DataScienceCoursera
|
R
| false | false | 1,223 |
r
|
source("LoadHouseholdPowerConsumptionData.R")
##The file can be downloaded from here: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
HPC <- LoadHouseholdPowerConsumptionData("exdata-data-household_power_consumption/household_power_consumption.txt")
HPC$FullDate <- strptime(paste(HPC$Date, HPC$Time), format = "%Y-%m-%d %H:%M:%S")
HPC <- subset(HPC, Date >= "2007-02-01" & Date <= "2007-02-02")
par(mfcol = c(2, 2))
plot(HPC$FullDate, HPC$Global_active_power, type = "l", ylab = "Global Active Power (Kilowatts)", xlab = "")
plot(HPC$FullDate, HPC$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
points(HPC$FullDate, HPC$Sub_metering_2, type = "l", col = "red")
points(HPC$FullDate, HPC$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black", "red", "blue"), lwd=c(3,3,3), xjust = 1, cex = 0.75)
plot(HPC$FullDate, HPC$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
plot(HPC$FullDate, HPC$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.copy(png, "plot4.png", width = 480, height = 480, units = 'px')
dev.off()
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
buildcov_deriv <- function(beta, dist, l, covmodel, nugget) {
.Call(`_ARCokrig_buildcov_deriv`, beta, dist, l, covmodel, nugget)
}
log_objective_prior <- function(beta, dist, RInv, X, covmodel, nugget, prior) {
.Call(`_ARCokrig_log_objective_prior`, beta, dist, RInv, X, covmodel, nugget, prior)
}
buildcov <- function(phi, dist, covmodel, nugget) {
.Call(`_ARCokrig_buildcov`, phi, dist, covmodel, nugget)
}
compute_distance <- function(input1, input2) {
.Call(`_ARCokrig_compute_distance`, input1, input2)
}
sample_mvt <- function(mu, L, sigma, df, nsample) {
.Call(`_ARCokrig_sample_mvt`, mu, L, sigma, df, nsample)
}
compute_S <- function(output, Q) {
.Call(`_ARCokrig_compute_S`, output, Q)
}
compute_Svec <- function(output, Q) {
.Call(`_ARCokrig_compute_Svec`, output, Q)
}
compute_S_sum <- function(y_t, H_t, y_t1, RInv, K) {
.Call(`_ARCokrig_compute_S_sum`, y_t, H_t, y_t1, RInv, K)
}
compute_prediction <- function(y_t, Ht, y_t1, yhat_t1, vhat_t1, RInv, Hnew, Wnew_t1, Rmo, R_sk) {
.Call(`_ARCokrig_compute_prediction`, y_t, Ht, y_t1, yhat_t1, vhat_t1, RInv, Hnew, Wnew_t1, Rmo, R_sk)
}
conditional_simulation <- function(y_t, Ht, y_t1, RInv, Hnew, Wnew_t1, Rmo, R_sk) {
.Call(`_ARCokrig_conditional_simulation`, y_t, Ht, y_t1, RInv, Hnew, Wnew_t1, Rmo, R_sk)
}
compute_param <- function(y_t, Ht, y_t1, RInv) {
.Call(`_ARCokrig_compute_param`, y_t, Ht, y_t1, RInv)
}
|
/fuzzedpackages/ARCokrig/R/RcppExports.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 1,559 |
r
|
# 04c_hc_compare.R
# E Flynn
# 08/27/2020
#
# TODO:
# - how did I get this data??
# - where is the RNA-seq?
# - THIS SHOULD BE A SUBSET!! of 4d (TODO - reorder files)
# - what is the fraction that agrees between each of the methods?
require('tidyverse')
# TODO - how did I get these data?
human_compare <- read_csv("data/human_sl_compare.csv") # 17119
mouse_compare <- read_csv("data/mouse_sl_compare.csv") # 8836
compare_dat <- human_compare %>% bind_rows(mouse_compare)
comb_metadata <- read_csv("data/01_metadata/combined_human_mouse_meta.csv", col_types="cccccccdcd")
microarray <- comb_metadata %>% filter(data_type=="microarray") %>% select(-present, -num_reads)
source_type <- read_csv("data/sample_source_type.csv")
compare_df <- compare_dat %>%
rename(sample_acc=gsm) %>%
inner_join(microarray %>% select(sample_acc, organism, expr_sex, p_male)) %>% # .. hmmm, we get 15426/25955
inner_join(source_type %>% select(acc, source_type, cl_line), by=c("sample_acc"="acc")) %>% # not lossy
select(sample_acc, organism, source_type, cl_line, everything())
# TODO - how are these missing?!
missing_gsms <- setdiff(compare_dat$gsm, microarray$sample_acc)
compare_df2 <- compare_df %>%
#filter(!source_type %in% c("named_cl", "unnamed_cl")) %>%
filter(!is.na(text_sex)) %>%
filter(!(is.na(toker_sex) & is.na(massir_sex))) %>%
mutate(compare_col=case_when(
is.na(massir_sex) & text_sex==toker_sex ~ 1,
is.na(massir_sex) & text_sex!=toker_sex ~ -1,
is.na(toker_sex) & massir_sex==text_sex ~ 1,
is.na(toker_sex) & massir_sex!=text_sex ~ -1,
toker_sex==text_sex & massir_sex==text_sex ~ 2,
toker_sex!=text_sex & massir_sex!=text_sex ~ -2,
toker_sex == text_sex & massir_sex != text_sex ~ 0,
toker_sex != text_sex & massir_sex == text_sex ~ 0
))
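# Note on the compare_col coding above: 2 = both methods agree with the text
# label, 1 = only one method is available and it agrees, 0 = the two methods
# disagree with each other, -1 = the single available method disagrees,
# -2 = both methods disagree with the text label.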
compare_df2 %>%
left_join(comb_metadata %>% dplyr::select(sample_acc, study_acc)) %>%
distinct(study_acc) %>%
  separate_rows(study_acc, sep=";") %>%
distinct(study_acc) %>%
nrow()
matched_comp <- compare_df2 %>%
filter(compare_col %in% c(1,2))
matched_comp %>%
left_join(comb_metadata %>% dplyr::select(sample_acc, study_acc)) %>%
distinct(study_acc) %>%
separate_rows(study_acc, sep=";") %>%
distinct(study_acc) %>%
nrow()
compare_df2 %>%
mutate(expr_sex=ifelse(p_male < 0.7 & p_male > 0.3, "unknown", expr_sex))
# how many of the matching ones do we call correctly? at each threshold?
matching <- compare_df2 %>% filter(compare_col %in% c(1,2))
summarizeAcc <- function(df, x){
df %>%
mutate(threshold=x) %>%
dplyr::select(sample_acc, organism, source_type, text_sex, expr_sex, p_male, compare_col, threshold) %>%
mutate(expr_sex=ifelse(p_male < threshold & p_male > 1-threshold, "unknown", expr_sex)) %>%
mutate(match=case_when(
expr_sex=="unknown" ~ 0,
expr_sex==text_sex ~ 1,
expr_sex!=text_sex ~ -1)) %>%
group_by(organism, source_type, threshold,match) %>%
count() %>%
pivot_wider(names_from=match, names_prefix="match_type", values_from=n, values_fill=0) %>%
mutate(nsamples=sum(`match_type-1`+match_type0+match_type1)) %>%
mutate(across(`match_type-1`:match_type1, ~./nsamples))
}
df <- do.call(rbind, lapply(seq(0.6,0.9, 0.1), function(x)
summarizeAcc(matching, x))) %>%
mutate(frac_unlab=match_type0,
frac_correct=abs(match_type1-`match_type-1`)/(match_type1+`match_type-1`))
df %>% filter(threshold==0.7) # 90.9% (5.15% unlab) for mouse, 99.5% (3.17% unlab) for human at threshold 0.7
# how many mismatch across all? at each threshold?
mismatch <- compare_df2 %>% filter(compare_col %in% c(-1,-2))
df2 <- do.call(rbind, lapply(seq(0.6,0.9, 0.1), function(x)
summarizeAcc(mismatch, x)))
df3 <- do.call(rbind, lapply(seq(0.6,0.9, 0.1), function(x)
summarizeAcc(compare_df2, x))) %>%
mutate(num_unlab=match_type0*nsamples) %>%
group_by(organism, threshold) %>%
summarize(num_unlab=sum(num_unlab), n=sum(nsamples)) %>%
mutate(frac_unlab=num_unlab/n)
df3
# //TODO - add unlabeled counts
mismatch_df2 <- df2 %>%
mutate(num_mismatch=`match_type-1`*nsamples) %>%
#mutate(frac_unlab=match_type0) %>%
select(-contains("type"), -contains("frac")) %>%
rename(n_mismatch_other=nsamples) %>%
left_join(compare_df %>% group_by(organism, source_type) %>% count()) %>%
mutate(frac_mismatch=num_mismatch/n)
mismatch_df2 %>%
ungroup() %>%
group_by(organism, threshold) %>%
summarize(num_mismatch=sum(num_mismatch), n=sum(n)) %>%
mutate(frac_mismatch=num_mismatch/n)
# -- make a supplementary table with these counts -- #
summary_hc_samples <- mismatch_df2 %>%
ungroup() %>%
mutate(cell_line=(source_type %in% c("unnamed_cl", "named_cl"))) %>%
group_by(organism, threshold, cell_line) %>%
summarize(num_mismatch=sum(num_mismatch), n=sum(n)) %>%
mutate(frac_mismatch=num_mismatch/n) %>%
arrange(cell_line,threshold, organism) %>%
rename(num_samples=n)
study_cts_hc <- do.call(rbind, lapply(c(0.6, 0.7, 0.8, 0.9), function(threshold) {
compare_df2 %>%
left_join(comb_metadata %>% dplyr::select(sample_acc, study_acc)) %>%
separate_rows(study_acc, sep=";") %>%
mutate(cell_line=(source_type %in% c("unnamed_cl", "named_cl"))) %>%
mutate(expr_sex=ifelse(p_male < threshold & p_male > (1-threshold), "unknown", expr_sex)) %>%
mutate(match=case_when(
expr_sex=="unknown" ~ 0,
expr_sex==text_sex ~ 1,
expr_sex!=text_sex ~ -1)) %>%
mutate(compare2=case_when(
match==-1 & compare_col %in% c(-1, -2) ~ -1,
match==1 & compare_col %in% c(1, 2) ~ 1,
TRUE ~ 0,
)) %>%
group_by(organism, cell_line, study_acc) %>%
summarize(unk=sum(compare2==0),
match=sum(compare2==1),
mismatch=sum(compare2==-1),
tot=n()) %>%
summarize(mismatch=sum(mismatch>0), num_studies=n()) %>%
mutate(mismatch=mismatch/num_studies) %>%
filter(!cell_line) %>%
dplyr::select(-cell_line) %>%
mutate(threshold=threshold)
}))
summary_hc <- summary_hc_samples %>%
filter(!cell_line) %>%
dplyr::select(-cell_line, -num_mismatch) %>%
rename(frac_samples_mismatch=frac_mismatch) %>%
left_join(study_cts_hc %>%
rename(frac_studies_mismatch=mismatch),
by=c("organism", "threshold")) %>%
dplyr::select(organism, threshold, num_samples, num_studies, frac_samples_mismatch, frac_studies_mismatch)
summary_hc %>%
arrange(organism) %>%
mutate(across(contains("frac"), ~signif(.,3))) %>%
write_csv("tables/supp_misannot_hc.csv")
|
/code/07_figures/04c_hc_compare.R
|
no_license
|
erflynn/sl_label
|
R
| false | false | 6,484 |
r
|
##Run model
library(hydroGOF)
library(zoo)
library(plyr)
library(nsga2R)
require(hydroTSM)
##set working directory######
setwd("E:\\USU_Research_work\\TOPNET PROJECT\\MODEL COMPARISON\\A1_watershed_final\\A1_TOPNETRUN_CC") #for A1
##RUN MODEL FOR EACH RCP AND SAVE IT IN A DIFFERENT DIRECTORY
##RCP26_65, RCP26_99 etc
system(paste("topnet_modified"))
####Do analysis only for the streamflow and runoff ########
##Baseline period analysis
bn=29
ff=scan("FlowAtStreamNodes_cms.txt", what="")
#l=basin_number+2
l=bn+2
ff1=ff[seq(l,length(ff),1)] ## need to change these things later
simu_flow=matrix(as.numeric(ff1[seq(2,length(ff1),l-1)])) ##need to change this later
sf=scan("A1_calibratedflow.txt", what="")
sf1=sf[seq(l,length(sf),1)] ## need to change these things later
obs=matrix(as.numeric(sf1[seq(2,length(sf1),l-1)])) ##need to change this later
date_base=seq(as.Date("2003/1/1"), as.Date("2012/12/31"), "day")
date_proj=seq(as.Date("2056/1/1"), as.Date("2065/12/31"), "day")
date_proj=seq(as.Date("2090/1/1"), as.Date("2099/12/31"), "day")
sf_base<- data.frame(monthlyfunction(obs, FUN=mean, na.rm=TRUE,dates=date_base))
sf_base1<- data.frame(monthlyfunction(simu_flow, FUN=mean, na.rm=TRUE,dates=date_proj))
par(mfrow=c(2,2))
plot(date_base,obs)
plot(date_proj,simu_flow,col='red',ylim=c(0,50))
plot(seq(1,12,1),sf_base,ylim=c(0,12))
lines(seq(1,12,1),sf_base1)
plot(seq(1,12,1),(mf_base-mf_base1)*100/mf_base)
###projected runoff change
dir=paste("E:\\USU_Research_work\\TOPNET PROJECT\\MODEL COMPARISON\\A1_watershed_final\\A1_results\\Climate change analysis\\",folder[i],sep="")
setwd("E:\\USU_Research_work\\TOPNET PROJECT\\MODEL COMPARISON\\A1_watershed_final\\A1_CC_data\\CanESM2")
stream_file=list.files(path ="E:\\USU_Research_work\\TOPNET PROJECT\\MODEL COMPARISON\\A1_watershed_final\\A1_CC_data\\MRI-CGCM3",pattern ="*.5.txt")
mf_proj=data.frame(matrix(NA,nrow=1,ncol=12))
for ( i in 1:length(stream_file)){
ff=scan(stream_file[i], what="")
#l=basin_number+2
l=bn+2
ff1=ff[seq(l,length(ff),1)] ## need to change these things later
simu_flow=matrix(as.numeric(ff1[seq(2,length(ff1),l-1)])) ##need to change this later
d1=strtoi(unlist(strsplit(stream_file[i],NULL))[23])
if(d1==5)date_proj=seq(as.Date("2056/1/1"), as.Date("2065/12/31"), "day")else
date_proj=seq(as.Date("2090/1/1"), as.Date("2099/12/31"), "day")
mf_proj[i,]<- data.frame(monthlyfunction(simu_flow, FUN=mean, na.rm=TRUE,dates=date_proj))
}
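### Hedged sketch (not in the original script): once mf_proj holds the projected
### monthly means, the percent change relative to the baseline monthly means in
### sf_base could be computed along these lines.
# base_m <- unlist(sf_base) # 12 baseline monthly means
# diff_m <- sweep(as.matrix(mf_proj), 2, base_m, "-")
# pct_change <- sweep(diff_m, 2, base_m, "/")*100
# matplot(t(pct_change), type="l", xlab="month", ylab="% change in mean monthly flow")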
###########change in stream flow regime variables############
par(mfrow=c(2,1))
a=seq(1,12,1)
plot(a,sf_base,ylim=c(0,8),main="year 56-65")
lines(a,mf_proj[1,],col='red')
lines(a,mf_proj[2,],col='blue')
legend("topright", c('base','rcp4.5','rcp8.5'), cex=0.8, col=c("black","red","blue"), pch=21:22, lty=1:2)
plot(a,sf_base,ylim=c(0,12),main='year 90-99')
lines(a,mf_proj[3,],col='grey')
lines(a,mf_proj[4,],col='green')
legend("topright", c('base','rcp4.5','rcp8.5'), cex=0.8, col=c("black","grey","green"), pch=21:22, lty=1:2)
plot(a,mf_base)
lines(a,mf_proj[2,],col='red')
lines(a,mf_proj[4,],col='blue')
lines(a,mf_proj[6,],col='grey')
lines(a,mf_proj[8,],col='green')
legend("topleft", c('base','rcp2.6','rcp4.5','rcp6.0','rcp8.5'), cex=0.8, col=c("black","red",'blue','grey','green'), pch=21:22, lty=1:2)
pr=scan("Precipitation_mm.txt", what="")
#l=basin_number+2
l=131+2#for A2
pr1=pr[seq(l,length(pr),1)] ## need to change these things later
simu_rain=matrix(as.numeric(pr1[seq(2,length(pr1),l-1)])) ##need to change this later
plot(cumsum(simu_rain))
ev=scan("Evaporation_mm.txt", what="")
#l=basin_number+2
l=113+2#for A2
ev1=ev[seq(l,length(ev),1)] ## need to change these things later
simu_eva=matrix(as.numeric(ev1[seq(2,length(ev1),l-1)])) ##need to change this later
plot(cumsum(simu_eva))
lines(cumsum(simu_rain))
time_fore= seq(as.Date("2056/1/1"), as.Date("2065/12/31"), "day") # create time series based on start and end date
plot(time_fore,simu_flow, type="o", col="blue",xlab=" Time(days)",ylab="stream flow (m3/s)",cex.lab=1.5,cex.axis=1.5,cex.main=1.5,cex.sub=1.5)
lines(time_c,cv_flow$simu, type="o", pch=22, lty=2, col="red")
legend('topright', max(calibrated_flow$observ), c("observed flow","simulated flow"), bty="n",cex=1.2, col=c("blue","red"), pch=21:22, lty=1:2,pt.cex=1.2);
CE_c=NSE(cv_flow$simu[1:1096],cv_flow$observ[1:1096])
Bias_c=sum(cv_flow$simu[1:1096])/sum(cv_flow$observ[1:1096]) ##PBIAS=100*[sum(sim-obs)/sum(obs)]
Mean_error_c=rmse(cv_flow$simu[1:1096],cv_flow$observ[1:1096], na.rm=TRUE)
text(14250, 5, paste("Bias=",format(Bias_c,digit=2)),cex = 1.25)
text(14250, 4.6, paste("NSE=",format(CE_c,digit=2)),cex = 1.25)
text(14250, 4.2, paste("MSE=",format(Mean_error_c,digit=2)),cex =1.25)
##compare runoff with baseline period 2003-2012############
###cumulative rainfall and run off ####
pr=scan("Precipitation_mm.txt", what="")
#l=basin_number+2
l=29+2#for A2
pr1=pr[seq(l,length(pr),1)] ## need to change these things later
simu_rain=matrix(as.numeric(pr1[seq(2,length(pr1),l-1)])) ##need to change this later
plot(time_c,cumsum(0.2*cv_flow$observ[1:1096]),col='blue',ylab="cumulative flow (mm)")
lines(time_c,cumsum(0.2*cv_flow$simu[1:1096]),col='red')
plot(time_c,cumsum(simu_rain[732:1827]),ylab="cumulative rainfall/flow (mm)")
lines(time_c,cumsum(0.2*cv_flow$observ[1:1096]),col='blue',ylab="cumulative flow (mm)")
lines(time_c,cumsum(0.2*cv_flow$simu[1:1096]),col='red')
legend(14650, 800, c("rainfall","observed flow","simulated flow"), cex=0.8, col=c("black","blue","red"), pch=21, lty=1);
##validation plot###
plot(time_cv,cumsum(0.2*cv_flow$observ),col='blue',ylab="cumulative flow (mm)")
lines(time_cv,cumsum(0.2*cv_flow$simu),col='red')
pr=scan("Precipitation_mm.txt", what="")
#l=basin_number+2
l=29+2#for A2
pr1=pr[seq(l,length(pr),1)] ## need to change these things later
simu_rain=matrix(as.numeric(pr1[seq(2,length(pr1),l-1)])) ##need to change this later
plot(time_cv,cumsum(simu_rain),ylab="cumulative rainfall/flow (mm)")
lines(time_cv,cumsum(0.2*cv_flow$observ),col='blue',ylab="cumulative flow (mm)")
lines(time_cv,cumsum(0.2*cv_flow$simu),col='red')
legend(14650, 800, c("rainfall","observed flow","simulated flow"), cex=0.8, col=c("black","blue","red"), pch=21, lty=1);
#lines(simu_rain, type="o", col="red",ylim=rev(range(simu_rain)))
#plot(calibrated_flow$simu, type="o", pch=22, lty=2, col="red",ylim=(range(calibrated_flow$simu)))
par(mar=c(2, 4, 0, 6)+0.25)
plot(time, calibrated_flow$simu, axes=T, ylim=c(0,50), xlab="", ylab="",type="l",lty=1,col="red", main="")
points(time, calibrated_flow$simu,pch=20,col="red")
axis(2, ylim=c(0,30),col="red",lwd=2)
mtext(2,text="Stream flow(m3/s)",line=2)
par(new=T)
plot(time, calibrated_flow$observ, axes=T, ylim=c(0,50), xlab="", ylab="",type="l",lty=2,col="blue", main="")
points(time, calibrated_flow$observ,pch=20,col="blue")
axis(2, ylim=c(0,600),col="blue",lwd=2)
#mtext(2,text="NSE",line=2)
par(new=T)
plot(time, simu_rain[732:1827,], axes=F, ylim=rev(c(0,100)), col="darkgrey",xlab="", ylab="", type="l",lty=3, main="",lwd=2)
axis(4, ylim=rev(range(simu_rain[732:1827,])),col="darkgrey",lwd=1,line=1.5)
#points(time, simu_rain[732:1827,],pch=20)
mtext(4,text="Rainfall (mm/day)",line=-.1)
#axis(1,pretty(range(time),4))
mtext(" tr factor ",side=1,col="black",line=2) ## change title
legend(15270,40,legend=c("Simulate flow","Observed flow","Precipitation "),lty=c(1,2,3),col=c("red","blue","darkgrey")) ## change legend location
###overall water balance######
sim_cum=cumsum(calibrated_flow$simu)
obs_cum=cumsum(calibrated_flow$observ)
sim_precp=cumsum(simu_rain[732:1827,])
plot(time,sim_precp, type="o", col="red",ylim=range(sim_precp),lty=1,xlab="time(days)",ylab="cumulative precipitation(mm/day)")
plot(time,obs_cum, type="o", col="blue",ylim=range(obs_cum),lty=1,xlab="time(days)",ylab="cumulative Streamflow(m3/s)")
# Add the simulated flow as a red dashed line with square points
lines(time,sim_cum, type="o", pch=22, lty=2, col="red")
legend(15270,400,legend=c("Observed flow","Simulated flow"),lty=c(1,2),col=c("blue","red"))
########checking where the water is going###########
setwd("E:\\USU_Research_work\\TOPNET PROJECT\\MODEL COMPARISON\\A1_watershed_final_DAYMET\\matlab_cali")
system(paste("topnet_modified"))
ff=scan("FlowAtStreamNodes_cms.txt", what="")
#l=basin_number+2
l=29+2
ff1=ff[seq(l,length(ff),1)] ## need to change these things later
simu_flow=matrix(as.numeric(ff1[seq(2,length(ff1),l-1)])) ##need to change this later
sf=scan("streamflow_calibration.dat", what="")
sf1=sf[seq(21,length(sf),1)] ## need to change these things later
obs_flow=(as.numeric(sf1[seq(1,length(sf1),3)])) ##need to change this later
time_all= seq(as.Date("1980/1/1"), as.Date("2012/12/31"), "day") #create time series
time_cali= seq(as.Date("2008/1/1"), as.Date("2012/12/31"), "day") # create time series based on start and end date
date_overlap=match(time_cali,time_all) # get overlap time interval
observd_flow=matrix(obs_flow[date_overlap])
observd_flow[observd_flow<0] <- NA
calibrated_flow=data.frame(simu=simu_flow[732:1827,],observ=observd_flow[732:1827,]) ##take only from 2010/01/01---2012/12/31
time= seq(as.Date("2010/1/1"), as.Date("2012/12/31"), "day") # create time series based on start and end date
plot(time,calibrated_flow$observ, type="o", col="blue",xlab=" Time days",ylab="stream flow (m3/s)")
lines(time,calibrated_flow$simu, type="o", pch=22, lty=2, col="red")
legend(14650, max(calibrated_flow$observ), c("observed flow","simulated flow"), cex=0.8, col=c("blue","red"), pch=21:22, lty=1:2);
CE=NSE(calibrated_flow$simu,calibrated_flow$observ)
Bias=sum(calibrated_flow$simu)/sum(calibrated_flow$observ) ##PBIAS=100*[sum(sim-obs)/sum(obs)]
Mean_error=rmse(calibrated_flow$simu,calibrated_flow$observ, na.rm=TRUE)
pr=scan("Precipitation_mm.txt", what="")
#l=basin_number+2
l=29+2#for A2
pr1=pr[seq(l,length(pr),1)] ## need to change these things later
simu_rain=matrix(as.numeric(pr1[seq(2,length(pr1),l-1)])) ##need to change this later
#lines(simu_rain, type="o", col="red",ylim=rev(range(simu_rain)))
#plot(calibrated_flow$simu, type="o", pch=22, lty=2, col="red",ylim=(range(calibrated_flow$simu)))
cs=scan("Canopy_storage_mm.txt", what="") ##canopy storgae
#l=basin_number+2
l=29+2#for A2
cs1=cs[seq(l,length(cs),1)] ## need to change these things later
simu_canopy=matrix(as.numeric(cs1[seq(2,length(cs1),l-1)])) ##need to change this later
dw=scan("Depth_to_Water_mm.txt", what="") ##depth to water
#l=basin_number+2
l=29+2#for A2
dw1=dw[seq(l,length(dw),1)] ## need to change these things later
simu_dw=matrix(as.numeric(dw1[seq(2,length(dw1),l-1)])) ##need to change this later
ss=scan("Soil_storage_mm.txt", what="") ##depth to water
#l=basin_number+2
l=29+2#for A2
ss1=ss[seq(l,length(ss),1)] ## need to change these things later
simu_ss=matrix(as.numeric(ss1[seq(2,length(ss1),l-1)])) ##need to change this later
ep=scan("Evaporation_mm.txt", what="") ##depth to water
#l=basin_number+2
l=29+2#for A2
ep1=ep[seq(l,length(ep),1)] ## need to change these things later
simu_ep=matrix(as.numeric(ep1[seq(2,length(ep1),l-1)])) ##need to change this later
pe=scan("Potential_evapotranspiration_mm.txt", what="") ##depth to water
#l=basin_number+2
l=29+2#for A2
pe1=pe[seq(l,length(pe),1)] ## need to change these things later
simu_pe=matrix(as.numeric(pe1[seq(2,length(pe1),l-1)])) ##need to change this later
tav=scan("TemperatureAve_C.txt", what="") ##depth to water
#l=basin_number+2
l=29+2#for A2
tav1=tav[seq(l,length(tav),1)] ## need to change these things later
simu_tav=matrix(as.numeric(tav1[seq(2,length(tav1),l-1)])) ##need to change this later
df=matrix(simu_tav[732:1091],nrow=30,ncol=12)
plot(df)
par(mfrow=c(2,2))
plot(time[366:731],cumsum(simu_rain[1097:1462]))
lines(time[366:731],cumsum(simu_ep[1097:1462]),col='red')
wff=cumsum(simu_ep[732:1097])
plot(time[1:366],cumsum(simu_rain[732:1097]))
lines(time[1:366],cumsum(0.2*simu_flow[732:1097]),col='red')
lines(time[1:366],cumsum(0.2*observd_flow[732:1097]),col='blue')
plot(time[366:731],cumsum(simu_rain[1097:1462]))
lines(time[366:731],cumsum(0.2*simu_flow[1097:1462]),col='red')
lines(time[366:731],cumsum(0.2*observd_flow[1097:1462]),col='blue')
plot(simu_flow[732:1097]-observd_flow[732:1097])
plot(time[1:366],cumsum(simu_pe[732:1097]))
lines(time[1:366],cumsum(simu_rain[732:1097]),col='green')
plot(time[1:366],simu_tav[732:1097])
lines(time[1:366],cumsum(simu_rain[732:1097]),col='green')
df=simu_flow[732:1097]-observd_flow[732:1097]
df[df<0]=0
plot(time[1:366],cumsum(0.2*observd_flow[732:1097]),col='blue')
lines(time[1:366],cumsum(0.2*simu_flow[732:1097]),col='black')
lines(simu_flow[732:1097])
plot(observd_flow[732:1097])
plot(time[1:366],df,col='red')
lines(cumsum(his_rain_sim))
lines(cumsum(qmr1),col='red')
lines(cumsum(his_corr),col='green')
par(mar=c(2, 4, 0, 6)+0.25)
plot(time, calibrated_flow$simu, axes=T, ylim=c(0,50), xlab="", ylab="",type="l",lty=1,col="red", main="")
points(time, calibrated_flow$simu,pch=20,col="red")
axis(2, ylim=c(0,30),col="red",lwd=2)
mtext(2,text="Stream flow(m3/s)",line=2)
par(new=T)
plot(time, calibrated_flow$observ, axes=T, ylim=c(0,50), xlab="", ylab="",type="l",lty=2,col="blue", main="")
points(time, calibrated_flow$observ,pch=20,col="blue")
axis(2, ylim=c(0,600),col="blue",lwd=2)
#mtext(2,text="NSE",line=2)
par(new=T)
plot(time, simu_rain[732:1827,], axes=F, ylim=rev(c(0,100)), col="darkgrey",xlab="", ylab="", type="l",lty=3, main="",lwd=2)
axis(4, ylim=rev(range(simu_rain[732:1827,])),col="darkgrey",lwd=1,line=1.5)
#points(time, simu_rain[732:1827,],pch=20)
mtext(4,text="Rainfall (mm/day)",line=-.1)
#axis(1,pretty(range(time),4))
mtext(" tr factor ",side=1,col="black",line=2) ## change title
legend(15270,40,legend=c("Simulate flow","Observed flow","Precipitation "),lty=c(1,2,3),col=c("red","blue","darkgrey")) ## change legend location
###overall water balance######
sim_cum=cumsum(calibrated_flow$simu)
obs_cum=cumsum(calibrated_flow$observ)
sim_precp=cumsum(simu_rain[732:1827,])
plot(time,sim_precp, type="o", col="red",ylim=range(sim_precp),lty=1,xlab="time(days)",ylab="cumulative precipitation(mm/day)")
plot(time,obs_cum, type="o", col="blue",ylim=range(obs_cum),lty=1,xlab="time(days)",ylab="cumulative Streamflow(m3/s)")
# Add the simulated flow as a red dashed line with square points
lines(time,sim_cum, type="o", pch=22, lty=2, col="red")
legend(15270,400,legend=c("Observed flow","Simulated flow"),lty=c(1,2),col=c("blue","red"))
|
/TOPNET_PROCESS_CODE/TOPNET_AUTOMATION/TOPNET_RUN_R_CODE/Climate_change_RUN.R
|
no_license
|
nazmussazib/TOPNET_PROJECT
|
R
| false | false | 14,539 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPIBLASTER.R
\name{getcor}
\alias{getcor}
\title{Get correlation matrix}
\usage{
getcor(A = NULL, B = NULL, method = "pearson", ...)
}
\arguments{
\item{A}{is a matrix or data.frame.}
\item{B}{is a matrix or data.frame.}
\item{method}{a character string indicating which correlation coefficient is to be computed.
Current version only supports "pearson" correlation.}
\item{...}{not used.}
}
\value{
correlation matrix
}
\description{
Fast calculation of correlation matrix on CPU
(the idea is from \pkg{WGCNA} fast function for pearson correlations)
}
\examples{
set.seed(123)
A <- matrix(rnorm(100, mean = 5, sd = 10), ncol = 10)
B <- matrix(rnorm(200, mean = 10, sd = 100), ncol = 20)
C <- getcor(A, B)
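# Optional check (assuming getcor() matches base R's Pearson correlation):
# all.equal(C, cor(A, B))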
}
\author{
Beibei Jiang \email{beibei_jiang@psych.mpg.de}
}
|
/man/getcor.Rd
|
no_license
|
cran/episcan
|
R
| false | true | 847 |
rd
|
#Load libraries
library(colorspace)
library (ggplot2)
library (dplyr)
library (tidyr)
library (reshape2)
library(readxl)
library(kableExtra)
library(lubridate)
library(plotly)
library(hms)
###Area Chart of Report Use for All Sessions over the Year
#Read .xlsx
data <- read_excel("/Users/file.xlsx", sheet = "sheet1")
dim (data)
##Calculating Total Duration by School Week and Report Type
###This version uses manual changes to the excel file to include values for weeks and report type combinations that are null)
###To Do: Incorporate complete() function to fill in missing week/report type combinations
###filter out actions where reportType = blank or Cognos Bug,
dataSess <- data %>%
filter(reportType != "", reportType != "Cognos Bug") %>%
select(schoolWeek, reportType, actionHrs) %>%
group_by(schoolWeek, reportType) %>%
summarize(totReportWeek = sum(actionHrs))
dim (dataSess)
#Convert week from factor to num
dataSess$schoolWeek <- as.numeric(as.character(dataSess$schoolWeek))
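##Hedged sketch for the To Do above (column names assumed from this script):
##fill missing week/report type combinations with zero hours via tidyr::complete()
##instead of editing the Excel file by hand.
#dataSess <- dataSess %>%
#  ungroup() %>%
#  complete(schoolWeek = 1:52, reportType, fill = list(totReportWeek = 0))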
#Create area plot
##Colorblind palette values (for some kinds of colorblind)
cbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")
p <- ggplot(dataSess, aes(x=schoolWeek, y=totReportWeek, fill=reportType)) +
geom_area(position = "stack", stat = "identity") +
scale_fill_manual(values=cbPalette) +
theme_classic() +
theme(legend.position="top", legend.title = element_text(size=14),
legend.text = element_text(size = 12),
axis.text.x = element_text(size = 12 , angle = 60, hjust = 1))
p <- p + labs(title = "Weekly Usage by Report Type",
subtitle = "N = TK Sessions",
x = "Week of the School Year",
y = "Hours of Use",
caption = "TBA",
fill = "Report Category") +
#Add manual scale, breaks and month names
scale_x_continuous(limits=c(1,52), breaks =
c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52),
labels = c("8/1/2016", "8/8/2016","8/15/2016","8/22/2016","8/29/2016",
"9/5/2016","9/12/2016","9/19/2016","9/26/2016",
"10/3/2016","10/10/2016","10/17/2016","10/24/2016","10/31/2016",
"11/7/2016","11/14/2016","11/21/2016","11/28/2016",
"12/5/2016","12/12/2016","12/19/2016","12/26/2016",
"1/2/2017","1/9/2017","1/16/2017","1/23/2017","1/30/2017",
"2/6/2017","2/13/2017","2/20/2017","2/27/2017",
"3/6/2017","3/13/2017","3/20/2017","3/27/2017",
"4/3/2017","4/10/2017","4/17/2017","4/24/2017",
"5/1/2017","5/8/2017","5/15/2017","5/22/2017","5/29/2017",
"6/5/2017","6/12/2017","6/19/2017","6/26/2017",
"7/3/2017","7/10/2017","7/17/2017","7/24/2017")) +
  ##Add vertical lines and labels for important calendar events
geom_vline(xintercept=1, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=1, label="State Test Results Released (Middle School)", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size= 7)) +
geom_vline(xintercept=3, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=3, label="High School State Testing", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size= 7)) +
geom_vline(xintercept=5, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=5, label="State Test Results Released (High School)", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=6, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=6, label="First Day of School", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=16, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=16, label="Re-Rostered IDW Reports Released", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=22, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=22, label="Holiday", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=26, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=26, label="High School State Testing", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=29, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=29, label="Training on College Reports", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=30, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=30, label="Holiday", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=35, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=35, label="ELA State Test", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=37, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=37, label="Holiday", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
  geom_vline(xintercept=40, linetype="dotted", color="red", size=1.5) +
  geom_text(aes(x=40, label="Math State Test", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=44, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=44, label="Preliminary State Test Results Released (ELA 3rd-8th)", y=110), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=46, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=46, label="High School State Testing / State Test Preliminary Results Released (Math 3rd-8th)", y=96), colour="black", angle=90, vjust = 1.2, text=element_text(size=7)) +
geom_vline(xintercept=47, linetype="dotted", color="red", size=1.5) +
geom_text(aes(x=47, label="Last Day of School", y=115), colour="black", angle=90, vjust = 1.2, text=element_text(size=7))
p
##Links/sources
##FOR CREATING FILE WITH MISSING DATES TO ZERO
##https://blog.exploratory.io/populating-missing-dates-with-complete-and-fill-functions-in-r-and-#exploratory-79f2a321e6b5
|
/areaMapTimelineCode.R
|
no_license
|
hawna/Visualizations
|
R
| false | false | 6,913 |
r
|
library(party)
n=100
nullVIM40_5f<-matrix(0,nrow=n,ncol=100)
nullVIMAUC40_5f<-matrix(0,nrow=n,ncol=100)
nullVIM40_5r<-matrix(0,nrow=n,ncol=100)
nullVIMAUC40_5r<-matrix(0,nrow=n,ncol=100)
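# Comments added for readability (assumed intent): for each of the n simulated null
# datasets, fit conditional inference forests (party::cforest) to the full (y2) and
# reduced (y3) designs, then store the standard permutation importance (varimp) and
# the AUC-based importance (varimpAUC) for the 100 candidate predictors.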
for(i in 1:n){
dataf<-read.table(paste("nullDATA40_5intstrongfullnew.",i, sep=""), header=TRUE)
datar<-read.table(paste("nullDATA40_5intstrongrednew.",i, sep=""), header=TRUE)
control<-cforest_control(mtry=39,ntree=1000,replace=FALSE,fraction=0.632)
RFf<-cforest(y2~.,data=dataf,controls=control)
RFr<-cforest(y3~.,data=datar,controls=control)
nullVIM40_5f[i,]<-varimp(RFf)
nullVIMAUC40_5f[i,]<-varimpAUC(RFf)
nullVIM40_5r[i,]<-varimp(RFr)
nullVIMAUC40_5r[i,]<-varimpAUC(RFr)
}
col <- character(100)
for(j in 1:100){
col[j]=paste("V",j+1, sep="")
}
colnames(nullVIM40_5f)<-col
colnames(nullVIMAUC40_5f)<-col
colnames(nullVIM40_5r)<-col
colnames(nullVIMAUC40_5r)<-col
write.table(nullVIM40_5f, "nullVIM40_5intstrongfullnew", row.names=FALSE, col.names=TRUE,quote=FALSE)
write.table(nullVIMAUC40_5f, "nullVIMAUC40_5intstrongfullnew", row.names=FALSE, col.names=TRUE,quote=FALSE)
write.table(nullVIM40_5r, "nullVIM40_5intstrongrednew", row.names=FALSE, col.names=TRUE,quote=FALSE)
write.table(nullVIMAUC40_5r, "nullVIMAUC40_5intstrongrednew", row.names=FALSE, col.names=TRUE,quote=FALSE)
|
/Laras/VIM40_5strongnull.R
|
no_license
|
iqbalrosiadi/intern_igmm
|
R
| false | false | 1,280 |
r
|
source(here::here("code/packages.R"))
source(here::here("code/file_paths.R"))
dir.create(file.path(here("out")), showWarnings = FALSE)
dir.create(file.path(here("out", "predictions")), showWarnings = FALSE)
YY <- c("depression", "anxiety")
XX <- c("psoriasis", "eczema")
exposure <- XX[2]
outcome <- YY[2]
# 0 - little function to load data and summarise as static df -----------------
load_data_fn <- function(X, Y, fupmax = Inf){
ABBRVexp <- substr(X, 1, 3)
# load data ---------------------------------------------------------------
df_model <- readRDS(paste0(datapath, "out/df_model", ABBRVexp, "_", Y,".rds"))
# restrict to skin disease pop --------------------------------------------
df_exp <- df_model %>%
filter(exposed == str_to_title(X))
## can't have time-updated covariates so collapse
df_exp_select <- df_exp %>%
dplyr::select(setid, patid, exposed, indexdate, enddate, dob, gender, comorbid, alc, smokstatus,
severity, sleep, sleep_all, gc90days, death, eth_edited, bmi, bmi_cat, country, ruc,
carstairs, cci, age, cal_period, out,
tstart, tstop, t)
## recode variables with `max` value during follow up (bmi, comorbidity, alc, sleep)
df_exp_tuc <- df_exp_select %>%
ungroup() %>%
group_by(patid) %>%
mutate(rownum = 1:n(),
sumfup = cumsum(t)) %>%
ungroup() %>%
dplyr::select(rownum,sumfup, patid, comorbid, cci, severity, alc, sleep, gc90days, out) %>%
filter(rownum == 1 | sumfup <= fupmax) %>% # filter to events only up to fupmax (argument to function)
mutate_if(is.factor, ~as.integer(ordered(.))) %>%
group_by(patid) %>%
summarise(across(everything(), max))
df_exp_tuc$out[df_exp_tuc$rownum == 1 & df_exp_tuc$sumfup < fupmax] <- 0 # suppress out variable = 0 if t > fupmax
## special for smoking because of weird categories
df_exp_smok <- df_exp_select %>%
group_by(patid) %>%
summarise(smoker = ifelse(any(smokstatus %in% c("Current Smoker", "Ex-Smoker", "Current Or Ex-Smoker")), 1, 0))
## variables we just want the value at indexdate
df_exp_index <- df_exp_select %>%
group_by(patid) %>%
dplyr::select(indexdate, enddate, exposed, gender, dob, age, bmi, bmi_cat, eth_edited, country, ruc, carstairs, cal_period) %>%
slice(1)
## need to add duration of disease (in 1-year increments)
df_exp_fup <- df_exp %>%
dplyr::select(setid, patid, tstart, tstop, t) %>%
group_by(setid, patid) %>%
mutate(t = tstop[n()] - tstart[1]) %>%
mutate(years = t/365.25) %>%
slice(1) %>%
ungroup()
df_exp_static <- df_exp_index %>%
left_join(df_exp_tuc, by = c("patid")) %>%
left_join(df_exp_smok, by = c("patid")) %>%
left_join(df_exp_fup, by = c("patid"))
df_exp_static$gender <- factor(df_exp_static$gender, levels = c(NA, "Male", "Female", "Indeterminate", NA))
df_exp_static$age <- (df_exp_static$tstart)/365.25
mean_age <- mean(df_exp_static$age, na.rm = T)
df_exp_static$age <- df_exp_static$age - mean_age
df_exp_static$smoker <- factor(df_exp_static$smoker)
### remove ordering of factor variables (this was used to select the max(var) per patid but will mess up the regression presentation)
df_exp_static$cci <- factor(df_exp_static$cci, levels = 1:3, labels = c("Low", "Moderate", "Severe"))
df_exp_static$comorbid <- factor(df_exp_static$comorbid, levels = 1:2, labels = c("No", "Yes"))
df_exp_static$alc <- factor(df_exp_static$alc, levels = 1:2, labels = c("No", "Yes"))
df_exp_static$sleep <- factor(df_exp_static$sleep, levels = 1:2, labels = c("No", "Yes"))
df_exp_static$gc90days <- factor(df_exp_static$gc90days, levels = 1:2, labels = c("No", "Yes"))
if(X == "eczema"){
df_exp_static$severity <- factor(df_exp_static$severity, levels = 1:3, labels = c("Mild", "Moderate", "Severe"))
}else{
df_exp_static$severity <- factor(df_exp_static$severity, levels = 1:2, labels = c("Mild", "Moderate/severe"))
}
df_exp_static
}
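# Illustrative call only (argument values are examples, not taken from the analysis below):
# df_ecz_anx_1yr <- load_data_fn(X = "eczema", Y = "anxiety", fupmax = 365.25)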
# 1a - Logistic regression method -----------------------------------------------
for(exposure in XX) {
ABBRVexp <- substr(exposure, 1, 3)
for(outcome in YY) {
df_exp_static <- load_data_fn(X = exposure, Y = outcome)
# sample 80% train --------------------------------------------------------
length_data <- dim(df_exp_static)[1]
set.seed(12)
patid_sample <- sample(df_exp_static$patid, size = round(length_data*0.8))
df_exp_train <- df_exp_static %>%
ungroup() %>%
filter(patid %in% patid_sample)
df_exp_test <- df_exp_static %>%
ungroup() %>%
filter(!patid %in% patid_sample)
# univariable logistic regression with covariates -------------------------
covars <- c("age", "gender", "carstairs",
"cci", "bmi_cat", "smoker")
univ_roc <- function(covariate){
uni_1 <- glm(out ~ get(covariate), data = df_exp_train, family = "binomial")
pred_vals <- predict(uni_1, type = "link", data = df_exp_train)
df_predictions <- df_exp_train %>%
filter(!is.na(get(covariate))) %>%
mutate(lp = pred_vals) %>%
dplyr::select(patid, all_of(covariate), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
# report AUC -------------------------------------------------------------
roc_calc <- pROC::roc(df_predictions$out, df_predictions$risk)
roc_calc$auc
plot(roc_calc, main = covariate, xlim = c(1,0), ylim = c(0,1))
text(0.8, 0.8, round(roc_calc$auc,2), font = 2, pos = 4)
df_calibration <- df_predictions %>%
ungroup() %>%
mutate(risk_dec = ntile(risk, 10)) %>%
group_by(risk_dec) %>%
summarise(n = n(), observed = mean(out), predicted = mean(risk))
smoothfit <- loess(df_calibration$observed ~ df_calibration$predicted, degree = 2)
scatter.smooth(df_calibration$predicted, df_calibration$observed,
col = 7, type = "p", xlim = c(0,0.2), ylim = c(0,0.2),
xlab = "Predicted probability", ylab = "Observed probability",
lpars = list(col = 4, lty = 2))
abline(coef = c(0,1))
}
pdf(paste0(here("out/predictions"), "/01_univ_auc", ABBRVexp, "_", substr(outcome, 1, 3), ".pdf"), 8, 8)
par(mfrow = c(3,4))
sapply(covars, FUN = univ_roc)
dev.off()
}
}
# 1b - build multivariable logistic regression models --------------------------
pdf(paste0(here("out/predictions"), "/02_multimodel_logisticpredict.pdf"), 10, 10)
par(mfrow = c(4,4), mgp=c(3,1,0))
ii <- 0
for(exposure in XX) {
ABBRVexp <- substr(exposure, 1, 3)
for(outcome in YY) {
ii <- ii+1
df_exp_static <- load_data_fn(X = exposure, Y = outcome)
# sample 80% train
length_data <- dim(df_exp_static)[1]
set.seed(12)
patid_sample <- sample(df_exp_static$patid, size = round(length_data*0.8))
df_exp_train <- df_exp_static %>%
ungroup() %>%
filter(patid %in% patid_sample)
df_exp_test <- df_exp_static %>%
ungroup() %>%
filter(!patid %in% patid_sample)
if(ABBRVexp == "pso"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept","age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use")
)
}
if(ABBRVexp == "ecz"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc + sleep + gc90days, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept", "age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc","sleep", "gc90days")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use", "Sleep problems", "Oral GC use (90 day risk window)")
)
}
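    # Model-summary table (comment added for clarity): Wald 95% CIs from confint.default(),
    # ORs as exp(log-odds), rendered with gt.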
predict_cis <- confint.default(multi_1) %>%
as.data.frame(row.names = F) %>%
janitor::clean_names()
predict_gt <- broom::tidy(multi_1, conf.int = F) %>%
bind_cols(predict_cis) %>%
dplyr::select(variable = term, logOR = estimate, conf.low = x2_5_percent, conf.high = x97_5_percent, p.value) %>%
drop_na() %>%
mutate(conf_int = paste0(signif(conf.low, 2), " , ", signif(conf.high, 2)),
p = ifelse(p.value < 0.0001, "*", paste0(signif(p.value, 1)))) %>%
dplyr::select(-conf.low, -conf.high, -p.value) %>%
separate(variable, into = c("delete", "level"), paste(model_covars, collapse = "|"), remove = FALSE) %>%
mutate(level=str_remove(level, "\\)")) %>%
mutate(temp = str_extract(variable, paste(model_covars, collapse = "|"))) %>%
left_join(pretty_model_covars, by = c("temp" = "model_covars")) %>%
mutate(OR = exp(logOR)) %>%
dplyr::select(var = pretty, level, OR, logOR, conf_int, p) %>%
gt() %>%
cols_align(columns = 3:6, align = "right") %>%
fmt_number(n_sigfig = 3, columns = where(is.numeric)) %>%
cols_label(
var = "Variable",
level = "Level",
logOR = "log(OR)",
conf_int = "95% CI",
p = md("*p*")
) %>%
tab_footnote("* p < 0.0001", locations = cells_column_labels("p"))
predict_gt
gt::gtsave(
predict_gt,
filename = paste0("tab1_", ABBRVexp, "_", substr(outcome, 1, 3), "_predictmodel_logistic.html"),
path = here::here("out//predictions//")
)
pred_vals_train <- predict(multi_1, type = "link", newdata = df_exp_train)
pred_vals_test <- predict(multi_1, type = "link", newdata = df_exp_test)
df_predictions_train <- df_exp_train %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_train) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
df_predictions_test <- df_exp_test %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_test) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
# PLOT PLOTS PLOTS
# report AUC
plot_roc <- function(test_train) {
roc_df <- get(paste0("df_predictions_", test_train))
roc_calc <- pROC::roc(roc_df$out, roc_df$risk)
roc_calc$auc
par(new = T)
ii <- ifelse(test_train=="train", 0, 0.2)
col_plot <- ifelse(test_train == "train", 4, 2)
lines(roc_calc$specificities, roc_calc$sensitivities, col = col_plot)
text(0.9, 0.9-ii, round(roc_calc$auc,2), col = col_plot, font = 2, pos = 4)
}
plot(1:0, 0:1, xlim = c(1,0), ylim = c(0,1), col = 0,
ylab = "Sensitivity", xlab = "Specificity",
main = paste0(exposure, " ~ ", outcome))
abline(coef = c(1,-1))
plot_roc("train")
plot_roc("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 1, bty = "n")
mtext(paste0(ii,"A"), side=3, adj=0, font=2)
## plot_boxplot of risk scores
risk_nonoutcome_train <- df_predictions_train[df_predictions_train$out == 0, "risk"] %>% pull()
risk_withoutcome_train <- df_predictions_train[df_predictions_train$out == 1, "risk"] %>% pull()
risk_nonoutcome_test <- df_predictions_test[df_predictions_test$out == 0, "risk"] %>% pull()
risk_withoutcome_test <- df_predictions_test[df_predictions_test$out == 1, "risk"] %>% pull()
    # Make a list of these 4 vectors
risk_list <- list(
risk_nonoutcome_train,
risk_withoutcome_train,
risk_nonoutcome_test,
risk_withoutcome_test
)
# Change the names of the elements of the list :
names(risk_list) <- c(paste("Train data \n Control \n n=", length(risk_nonoutcome_train), sep = ""),
paste("Train data \n Case \n n=", length(risk_withoutcome_train), sep = ""),
paste("Test data \n Control \n n=", length(risk_nonoutcome_test), sep = ""),
paste("Test data \n Case \n n=", length(risk_withoutcome_test), sep = "")
)
# Change the mgp argument: avoid text overlaps axis
# Final Boxplot
mu1a <- signif(mean(risk_nonoutcome_train, na.rm = T), digits = 3)
mu1b <- signif(mean(risk_nonoutcome_test, na.rm = T), digits = 3)
text1_train <- bquote(mu ~ "=" ~ .(mu1a))
text1_test <- bquote(mu ~ "=" ~ .(mu1b))
mu2a <- signif(mean(risk_withoutcome_train, na.rm = T), digits = 2)
mu2b <- signif(mean(risk_withoutcome_test, na.rm = T), digits = 2)
text2_train <- bquote(mu ~ "=" ~ .(mu2a))
    text2_test <- bquote(mu ~ "=" ~ .(mu2b))
col1 <- 1
par(mgp = c(3,2,0), tck = NA, tcl = -0.25)
    boxplot(risk_list,
            col = ggplot2::alpha(c(2,4,2,4), 0.2),
            ylab = "Predicted risk", outline = FALSE, ylim = c(0,0.5),
            pars = list(mgp = c(4,2,.5)))
    text(0.75, 0.4, text1_train, pos = 4, cex = 0.7, col = 2)
    text(1.75, 0.4, text2_train, pos = 4, cex = 0.7, col = 4)
    text(2.75, 0.4, text1_test, pos = 4, cex = 0.7, col = 2)
    text(3.75, 0.4, text2_test, pos = 4, cex = 0.7, col = 4)
mtext(paste0(ii,"B"), side=3, adj=0, font=2)
par(mgp = c(3,1,0), tck = NA, tcl = -0.5)
#### GAM plot
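    # Calibration check (comment added for clarity): a binomial GAM of the observed outcome
    # on predicted risk gives a smooth calibration curve; pointwise 95% CIs are formed on
    # the link scale and back-transformed with the inverse link.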
plot_calibration <- function(test_train) {
df_calibration <- get(paste0("df_predictions_", test_train))
gam1 <- gam(out ~ s(risk, k=4) , data = df_calibration, family = "binomial")
sample_plot <- sample(1:dim(df_calibration)[1], size = 0.1*dim(df_calibration)[1])
col_plot <- ifelse(test_train =="train", 4, 2)
plot_adjust <- ifelse(test_train =="train", 0.01, -0.01)
axismax <- max(df_predictions_train$risk, na.rm = T)
df_calibration$out_plot <- ifelse(df_calibration$out==1, axismax, 0)
points(df_calibration$risk[sample_plot], df_calibration$out_plot[sample_plot]+plot_adjust, col = ggplot2::alpha(col_plot,0.025), cex = 0.2)
tt <- seq(range(df_calibration$risk, na.rm = T)[1],range(df_calibration$risk, na.rm = T)[2],0.001)
preds <- predict(gam1, newdata = list(risk=tt), type = "link", se.fit = TRUE)
critval <- 1.96;
upperCI <- preds$fit + (critval * preds$se.fit);
lowerCI <- preds$fit - (critval * preds$se.fit)
fit <- preds$fit
fitPlotF <- gam1$family$linkinv(fit);
CI1plotF <- gam1$family$linkinv(upperCI);
CI2plotF <- gam1$family$linkinv(lowerCI)
## Plot GAM fits
polygon(c(tt,rev(tt)),c(CI1plotF,rev(CI2plotF)),col=ggplot2::alpha(col_plot,0.2),lty=0)
lines(tt, fitPlotF ,col=col_plot,lwd=1)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(c(0,axismax), c(0,axismax),
ylim = c(0-0.02, axismax+0.02), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed outcome",
col = 0)
abline(coef = c(0,1), col = ggplot2::alpha(1,0.2))
plot_calibration("train")
plot_calibration("test")
legend("left", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
axis(side = 4, at = c(0-0.02, axismax+0.02), labels = c("Control","Case"), tick = FALSE, padj = -1)
mtext(paste0(ii,"C"), side=3, adj=0, font=2)
plot_validation <- function(test_train) {
cal_df <- get(paste0("df_predictions_", test_train))
df_calibration <- cal_df %>%
ungroup() %>%
mutate(risk_dec = ntile(risk, 10)) %>%
group_by(risk_dec) %>%
summarise(n = n(), observed = mean(out), predicted = mean(risk))
col_plot <- ifelse(test_train == "train", 4, 2)
xy <- xy.coords(df_calibration$predicted, df_calibration$observed, "Predicted probability", "Observed probability")
x <- xy$x
y <- xy$y
pred <- loess.smooth(x, y, span = 2/3, degree = 2)
if(test_train == "test"){
par(new = T)
}
points(x, y, col = col_plot, cex = 1.2)
lines(pred$x, pred$y, lty = 2, col = col_plot)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(0:axismax, 0:axismax,
ylim = c(0, axismax), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed probability",
col = 0)
abline(coef = c(0,1))
plot_validation("train")
plot_validation("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
mtext(paste0(ii,"D"), side=3, adj=0, font=2)
}
}
dev.off()
# 2a - up to 1 year -------------------------------------------------------
pdf(paste0(here("out/predictions"), "/03_multimodel_logisticpredict_1year.pdf"), 10, 10)
par(mfrow = c(4,4), mgp=c(3,1,0))
ii <- 0
for(exposure in XX) {
ABBRVexp <- substr(exposure, 1, 3)
for(outcome in YY) {
ii <- ii+1
df_exp_static <- load_data_fn(X = exposure, Y = outcome, fupmax = 365.25)
# restrict to 1 year follow up
# sample 80% train
length_data <- dim(df_exp_static)[1]
set.seed(12)
patid_sample <- sample(df_exp_static$patid, size = round(length_data*0.8))
df_exp_train <- df_exp_static %>%
ungroup() %>%
filter(patid %in% patid_sample)
df_exp_test <- df_exp_static %>%
ungroup() %>%
filter(!patid %in% patid_sample)
df_exp_train$out %>% table()
if(ABBRVexp == "pso"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept","age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use")
)
}
if(ABBRVexp == "ecz"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc + sleep + gc90days, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept", "age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc","sleep", "gc90days")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use", "Sleep problems", "Oral GC use (90 day risk window)")
)
}
predict_cis <- confint.default(multi_1) %>%
as.data.frame(row.names = F) %>%
janitor::clean_names()
predict_gt <- broom::tidy(multi_1, conf.int = F) %>%
bind_cols(predict_cis) %>%
dplyr::select(variable = term, logOR = estimate, conf.low = x2_5_percent, conf.high = x97_5_percent, p.value) %>%
drop_na() %>%
mutate(conf_int = paste0(signif(conf.low, 2), " , ", signif(conf.high, 2)),
p = ifelse(p.value < 0.0001, "*", paste0(signif(p.value, 1)))) %>%
dplyr::select(-conf.low, -conf.high, -p.value) %>%
separate(variable, into = c("delete", "level"), paste(model_covars, collapse = "|"), remove = FALSE) %>%
mutate(level=str_remove(level, "\\)")) %>%
mutate(temp = str_extract(variable, paste(model_covars, collapse = "|"))) %>%
left_join(pretty_model_covars, by = c("temp" = "model_covars")) %>%
mutate(OR = exp(logOR)) %>%
dplyr::select(var = pretty, level, OR, logOR, conf_int, p) %>%
gt() %>%
cols_align(columns = 3:6, align = "right") %>%
fmt_number(n_sigfig = 3, columns = where(is.numeric)) %>%
cols_label(
var = "Variable",
level = "Level",
logOR = "log(OR)",
conf_int = "95% CI",
p = md("*p*")
) %>%
tab_footnote("* p < 0.0001", locations = cells_column_labels("p"))
predict_gt
gt::gtsave(
predict_gt,
filename = paste0("tab1_", ABBRVexp, "_", substr(outcome, 1, 3), "_predictmodel_logistic_1yr.rtf"),
path = here::here("out//predictions//")
)
gt::gtsave(
predict_gt,
filename = paste0("tab1_", ABBRVexp, "_", substr(outcome, 1, 3), "_predictmodel_logistic_1yr.html"),
path = here::here("out//predictions//")
)
pred_vals_train <- predict(multi_1, type = "link", newdata = df_exp_train)
pred_vals_test <- predict(multi_1, type = "link", newdata = df_exp_test)
df_predictions_train <- df_exp_train %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_train) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
df_predictions_test <- df_exp_test %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_test) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
# PLOT PLOTS PLOTS
# report AUC
plot_roc <- function(test_train) {
roc_df <- get(paste0("df_predictions_", test_train))
roc_calc <- pROC::roc(roc_df$out, roc_df$risk)
roc_calc$auc
par(new = T)
ii <- ifelse(test_train=="train", 0, 0.2)
col_plot <- ifelse(test_train == "train", 4, 2)
lines(roc_calc$specificities, roc_calc$sensitivities, col = col_plot)
text(0.9, 0.9-ii, round(roc_calc$auc,2), col = col_plot, font = 2, pos = 4)
}
plot(1:0, 0:1, xlim = c(1,0), ylim = c(0,1), col = 0,
ylab = "Sensitivity", xlab = "Specificity",
main = paste0(exposure, " ~ ", outcome))
abline(coef = c(1,-1))
plot_roc("train")
plot_roc("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 1, bty = "n")
mtext(paste0(ii,"A"), side=3, adj=0, font=2)
## plot_boxplot of risk scores
risk_nonoutcome_train <- df_predictions_train[df_predictions_train$out == 0, "risk"] %>% pull()
risk_withoutcome_train <- df_predictions_train[df_predictions_train$out == 1, "risk"] %>% pull()
risk_nonoutcome_test <- df_predictions_test[df_predictions_test$out == 0, "risk"] %>% pull()
risk_withoutcome_test <- df_predictions_test[df_predictions_test$out == 1, "risk"] %>% pull()
    # Make a list of these 4 vectors
risk_list <- list(
risk_nonoutcome_train,
risk_withoutcome_train,
risk_nonoutcome_test,
risk_withoutcome_test
)
# Change the names of the elements of the list :
names(risk_list) <- c(paste("Train data \n Control \n n=", length(risk_nonoutcome_train), sep = ""),
paste("Train data \n Case \n n=", length(risk_withoutcome_train), sep = ""),
paste("Test data \n Control \n n=", length(risk_nonoutcome_test), sep = ""),
paste("Test data \n Case \n n=", length(risk_withoutcome_test), sep = "")
)
# Change the mgp argument: avoid text overlaps axis
# Final Boxplot
mu1a <- signif(mean(risk_nonoutcome_train, na.rm = T), digits = 3)
mu1b <- signif(mean(risk_nonoutcome_test, na.rm = T), digits = 3)
text1_train <- bquote(mu ~ "=" ~ .(mu1a))
text1_test <- bquote(mu ~ "=" ~ .(mu1b))
mu2a <- signif(mean(risk_withoutcome_train, na.rm = T), digits = 2)
mu2b <- signif(mean(risk_withoutcome_test, na.rm = T), digits = 2)
text2_train <- bquote(mu ~ "=" ~ .(mu2a))
    text2_test <- bquote(mu ~ "=" ~ .(mu2b))
col1 <- 1
par(mgp = c(3,2,0), tck = NA, tcl = -0.25)
    boxplot(risk_list,
            col = ggplot2::alpha(c(2,4,2,4), 0.2),
            ylab = "Predicted risk", outline = FALSE, ylim = c(0,0.5),
            pars = list(mgp = c(4,2,.5)))
    text(0.75, 0.4, text1_train, pos = 4, cex = 0.7, col = 2)
    text(1.75, 0.4, text2_train, pos = 4, cex = 0.7, col = 4)
    text(2.75, 0.4, text1_test, pos = 4, cex = 0.7, col = 2)
    text(3.75, 0.4, text2_test, pos = 4, cex = 0.7, col = 4)
mtext(paste0(ii,"B"), side=3, adj=0, font=2)
par(mgp = c(3,1,0), tck = NA, tcl = -0.5)
#### GAM plot
plot_calibration <- function(test_train) {
df_calibration <- get(paste0("df_predictions_", test_train))
gam1 <- gam(out ~ s(risk, k=4) , data = df_calibration, family = "binomial")
sample_plot <- sample(1:dim(df_calibration)[1], size = 0.1*dim(df_calibration)[1])
col_plot <- ifelse(test_train =="train", 4, 2)
plot_adjust <- ifelse(test_train =="train", 0.01, -0.01)
axismax <- max(df_predictions_train$risk, na.rm = T)
df_calibration$out_plot <- ifelse(df_calibration$out==1, axismax, 0)
points(df_calibration$risk[sample_plot], df_calibration$out_plot[sample_plot]+plot_adjust, col = ggplot2::alpha(col_plot,0.025), cex = 0.2)
tt <- seq(range(df_calibration$risk, na.rm = T)[1],range(df_calibration$risk, na.rm = T)[2],0.001)
preds <- predict(gam1, newdata = list(risk=tt), type = "link", se.fit = TRUE)
critval <- 1.96;
upperCI <- preds$fit + (critval * preds$se.fit);
lowerCI <- preds$fit - (critval * preds$se.fit)
fit <- preds$fit
fitPlotF <- gam1$family$linkinv(fit);
CI1plotF <- gam1$family$linkinv(upperCI);
CI2plotF <- gam1$family$linkinv(lowerCI)
## Plot GAM fits
polygon(c(tt,rev(tt)),c(CI1plotF,rev(CI2plotF)),col=ggplot2::alpha(col_plot,0.2),lty=0)
lines(tt, fitPlotF ,col=col_plot,lwd=1)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(c(0,axismax), c(0,axismax),
ylim = c(0-0.02, axismax+0.02), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed outcome",
col = 0)
abline(coef = c(0,1), col = ggplot2::alpha(1,0.2))
plot_calibration("train")
plot_calibration("test")
legend("left", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
axis(side = 4, at = c(0-0.02, axismax+0.02), labels = c("Control","Case"), tick = FALSE, padj = -1)
mtext(paste0(ii,"C"), side=3, adj=0, font=2)
plot_validation <- function(test_train) {
cal_df <- get(paste0("df_predictions_", test_train))
df_calibration <- cal_df %>%
ungroup() %>%
mutate(risk_dec = ntile(risk, 10)) %>%
group_by(risk_dec) %>%
summarise(n = n(), observed = mean(out), predicted = mean(risk))
col_plot <- ifelse(test_train == "train", 4, 2)
xy <- xy.coords(df_calibration$predicted, df_calibration$observed, "Predicted probability", "Observed probability")
x <- xy$x
y <- xy$y
pred <- loess.smooth(x, y, span = 2/3, degree = 2)
if(test_train == "test"){
par(new = T)
}
points(x, y, col = col_plot, cex = 1.2)
lines(pred$x, pred$y, lty = 2, col = col_plot)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(0:axismax, 0:axismax,
ylim = c(0, axismax), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed probability",
col = 0)
abline(coef = c(0,1))
plot_validation("train")
plot_validation("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
mtext(paste0(ii,"D"), side=3, adj=0, font=2)
}
}
dev.off()
# 2b - up to 3 years -------------------------------------------------------
pdf(paste0(here("out/predictions"), "/03_multimodel_logisticpredict_3year.pdf"), 10, 10)
par(mfrow = c(4,4), mgp=c(3,1,0))
ii <- 0
for(exposure in XX) {
ABBRVexp <- substr(exposure, 1, 3)
for(outcome in YY) {
ii <- ii+1
df_exp_static <- load_data_fn(X = exposure, Y = outcome, fupmax = 365.25*3)
    # restrict to 3 years of follow up
# sample 80% train
length_data <- dim(df_exp_static)[1]
set.seed(12)
patid_sample <- sample(df_exp_static$patid, size = round(length_data*0.8))
df_exp_train <- df_exp_static %>%
ungroup() %>%
filter(patid %in% patid_sample)
df_exp_test <- df_exp_static %>%
ungroup() %>%
filter(!patid %in% patid_sample)
if(ABBRVexp == "pso"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept","age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use")
)
}
if(ABBRVexp == "ecz"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc + sleep + gc90days, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept", "age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc","sleep", "gc90days")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use", "Sleep problems", "Oral GC use (90 day risk window)")
)
}
predict_cis <- confint.default(multi_1) %>%
as.data.frame(row.names = F) %>%
janitor::clean_names()
predict_gt <- broom::tidy(multi_1, conf.int = F) %>%
bind_cols(predict_cis) %>%
dplyr::select(variable = term, logOR = estimate, conf.low = x2_5_percent, conf.high = x97_5_percent, p.value) %>%
drop_na() %>%
mutate(conf_int = paste0(signif(conf.low, 2), " , ", signif(conf.high, 2)),
p = ifelse(p.value < 0.0001, "*", paste0(signif(p.value, 1)))) %>%
dplyr::select(-conf.low, -conf.high, -p.value) %>%
separate(variable, into = c("delete", "level"), paste(model_covars, collapse = "|"), remove = FALSE) %>%
mutate(level=str_remove(level, "\\)")) %>%
mutate(temp = str_extract(variable, paste(model_covars, collapse = "|"))) %>%
left_join(pretty_model_covars, by = c("temp" = "model_covars")) %>%
mutate(OR = exp(logOR)) %>%
dplyr::select(var = pretty, level, OR, logOR, conf_int, p) %>%
gt() %>%
cols_align(columns = 3:6, align = "right") %>%
fmt_number(n_sigfig = 3, columns = where(is.numeric)) %>%
cols_label(
var = "Variable",
level = "Level",
logOR = "log(OR)",
conf_int = "95% CI",
p = md("*p*")
) %>%
tab_footnote("* p < 0.0001", locations = cells_column_labels("p"))
predict_gt
gt::gtsave(
predict_gt,
filename = paste0("tab1_", ABBRVexp, "_", substr(outcome, 1, 3), "_predictmodel_logistic_1yr.html"),
path = here::here("out//predictions//")
)
pred_vals_train <- predict(multi_1, type = "link", newdata = df_exp_train)
pred_vals_test <- predict(multi_1, type = "link", newdata = df_exp_test)
df_predictions_train <- df_exp_train %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_train) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
df_predictions_test <- df_exp_test %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_test) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
# PLOT PLOTS PLOTS
# report AUC
plot_roc <- function(test_train) {
roc_df <- get(paste0("df_predictions_", test_train))
roc_calc <- pROC::roc(roc_df$out, roc_df$risk)
roc_calc$auc
par(new = T)
ii <- ifelse(test_train=="train", 0, 0.2)
col_plot <- ifelse(test_train == "train", 4, 2)
lines(roc_calc$specificities, roc_calc$sensitivities, col = col_plot)
text(0.9, 0.9-ii, round(roc_calc$auc,2), col = col_plot, font = 2, pos = 4)
}
plot(1:0, 0:1, xlim = c(1,0), ylim = c(0,1), col = 0,
ylab = "Sensitivity", xlab = "Specificity",
main = paste0(exposure, " ~ ", outcome))
abline(coef = c(1,-1))
plot_roc("train")
plot_roc("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 1, bty = "n")
mtext(paste0(ii,"A"), side=3, adj=0, font=2)
## plot_boxplot of risk scores
risk_nonoutcome_train <- df_predictions_train[df_predictions_train$out == 0, "risk"] %>% pull()
risk_withoutcome_train <- df_predictions_train[df_predictions_train$out == 1, "risk"] %>% pull()
risk_nonoutcome_test <- df_predictions_test[df_predictions_test$out == 0, "risk"] %>% pull()
risk_withoutcome_test <- df_predictions_test[df_predictions_test$out == 1, "risk"] %>% pull()
    # Make a list of these 4 vectors
risk_list <- list(
risk_nonoutcome_train,
risk_withoutcome_train,
risk_nonoutcome_test,
risk_withoutcome_test
)
# Change the names of the elements of the list :
names(risk_list) <- c(paste("Train data \n Control \n n=", length(risk_nonoutcome_train), sep = ""),
paste("Train data \n Case \n n=", length(risk_withoutcome_train), sep = ""),
paste("Test data \n Control \n n=", length(risk_nonoutcome_test), sep = ""),
paste("Test data \n Case \n n=", length(risk_withoutcome_test), sep = "")
)
# Change the mgp argument: avoid text overlaps axis
# Final Boxplot
mu1a <- signif(mean(risk_nonoutcome_train, na.rm = T), digits = 3)
mu1b <- signif(mean(risk_nonoutcome_test, na.rm = T), digits = 3)
text1_train <- bquote(mu ~ "=" ~ .(mu1a))
text1_test <- bquote(mu ~ "=" ~ .(mu1b))
mu2a <- signif(mean(risk_withoutcome_train, na.rm = T), digits = 2)
mu2b <- signif(mean(risk_withoutcome_test, na.rm = T), digits = 2)
text2_train <- bquote(mu ~ "=" ~ .(mu2a))
    text2_test <- bquote(mu ~ "=" ~ .(mu2b))
col1 <- 1
par(mgp = c(3,2,0), tck = NA, tcl = -0.25)
    boxplot(risk_list,
            col = ggplot2::alpha(c(2,4,2,4), 0.2),
            ylab = "Predicted risk", outline = FALSE, ylim = c(0,0.5),
            pars = list(mgp = c(4,2,.5)))
    text(0.75, 0.4, text1_train, pos = 4, cex = 0.7, col = 2)
    text(1.75, 0.4, text2_train, pos = 4, cex = 0.7, col = 4)
    text(2.75, 0.4, text1_test, pos = 4, cex = 0.7, col = 2)
    text(3.75, 0.4, text2_test, pos = 4, cex = 0.7, col = 4)
mtext(paste0(ii,"B"), side=3, adj=0, font=2)
par(mgp = c(3,1,0), tck = NA, tcl = -0.5)
#### GAM plot
plot_calibration <- function(test_train) {
df_calibration <- get(paste0("df_predictions_", test_train))
gam1 <- gam(out ~ s(risk, k=4) , data = df_calibration, family = "binomial")
sample_plot <- sample(1:dim(df_calibration)[1], size = 0.1*dim(df_calibration)[1])
col_plot <- ifelse(test_train =="train", 4, 2)
plot_adjust <- ifelse(test_train =="train", 0.01, -0.01)
axismax <- max(df_predictions_train$risk, na.rm = T)
df_calibration$out_plot <- ifelse(df_calibration$out==1, axismax, 0)
points(df_calibration$risk[sample_plot], df_calibration$out_plot[sample_plot]+plot_adjust, col = ggplot2::alpha(col_plot,0.025), cex = 0.2)
tt <- seq(range(df_calibration$risk, na.rm = T)[1],range(df_calibration$risk, na.rm = T)[2],0.001)
preds <- predict(gam1, newdata = list(risk=tt), type = "link", se.fit = TRUE)
critval <- 1.96;
upperCI <- preds$fit + (critval * preds$se.fit);
lowerCI <- preds$fit - (critval * preds$se.fit)
fit <- preds$fit
fitPlotF <- gam1$family$linkinv(fit);
CI1plotF <- gam1$family$linkinv(upperCI);
CI2plotF <- gam1$family$linkinv(lowerCI)
## Plot GAM fits
polygon(c(tt,rev(tt)),c(CI1plotF,rev(CI2plotF)),col=ggplot2::alpha(col_plot,0.2),lty=0)
lines(tt, fitPlotF ,col=col_plot,lwd=1)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(c(0,axismax), c(0,axismax),
ylim = c(0-0.02, axismax+0.02), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed outcome",
col = 0)
abline(coef = c(0,1), col = ggplot2::alpha(1,0.2))
plot_calibration("train")
plot_calibration("test")
legend("left", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
axis(side = 4, at = c(0-0.02, axismax+0.02), labels = c("Control","Case"), tick = FALSE, padj = -1)
mtext(paste0(ii,"C"), side=3, adj=0, font=2)
plot_validation <- function(test_train) {
cal_df <- get(paste0("df_predictions_", test_train))
df_calibration <- cal_df %>%
ungroup() %>%
mutate(risk_dec = ntile(risk, 10)) %>%
group_by(risk_dec) %>%
summarise(n = n(), observed = mean(out), predicted = mean(risk))
col_plot <- ifelse(test_train == "train", 4, 2)
xy <- xy.coords(df_calibration$predicted, df_calibration$observed, "Predicted probability", "Observed probability")
x <- xy$x
y <- xy$y
pred <- loess.smooth(x, y, span = 2/3, degree = 2)
if(test_train == "test"){
par(new = T)
}
points(x, y, col = col_plot, cex = 1.2)
lines(pred$x, pred$y, lty = 2, col = col_plot)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(0:axismax, 0:axismax,
ylim = c(0, axismax), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed probability",
col = 0)
abline(coef = c(0,1))
plot_validation("train")
plot_validation("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
mtext(paste0(ii,"D"), side=3, adj=0, font=2)
}
}
dev.off()
|
/code/analysis/11_predictive_models.R
|
no_license
|
hendersonad/2021_skinCMDs
|
R
| false | false | 38,257 |
r
|
source(here::here("code/packages.R"))
source(here::here("code/file_paths.R"))
dir.create(file.path(here("out")), showWarnings = FALSE)
dir.create(file.path(here("out", "predictions")), showWarnings = FALSE)
YY <- c("depression", "anxiety")
XX <- c("psoriasis", "eczema")
exposure <- XX[2]
outcome <- YY[2]
# 0 - little function to load data and summarise as static df -----------------
load_data_fn <- function(X, Y, fupmax = Inf){
ABBRVexp <- substr(X, 1, 3)
# load data ---------------------------------------------------------------
df_model <- readRDS(paste0(datapath, "out/df_model", ABBRVexp, "_", Y,".rds"))
# restrict to skin disease pop --------------------------------------------
df_exp <- df_model %>%
filter(exposed == str_to_title(X))
## can't have time-updated covariates so collapse
df_exp_select <- df_exp %>%
dplyr::select(setid, patid, exposed, indexdate, enddate, dob, gender, comorbid, alc, smokstatus,
severity, sleep, sleep_all, gc90days, death, eth_edited, bmi, bmi_cat, country, ruc,
carstairs, cci, age, cal_period, out,
tstart, tstop, t)
## recode variables with `max` value during follow up (bmi, comorbidity, alc, sleep)
df_exp_tuc <- df_exp_select %>%
ungroup() %>%
group_by(patid) %>%
mutate(rownum = 1:n(),
sumfup = cumsum(t)) %>%
ungroup() %>%
dplyr::select(rownum,sumfup, patid, comorbid, cci, severity, alc, sleep, gc90days, out) %>%
filter(rownum == 1 | sumfup <= fupmax) %>% # filter to events only up to fupmax (argument to function)
mutate_if(is.factor, ~as.integer(ordered(.))) %>%
group_by(patid) %>%
summarise(across(everything(), max))
df_exp_tuc$out[df_exp_tuc$rownum == 1 & df_exp_tuc$sumfup < fupmax] <- 0 # suppress out variable = 0 if t > fupmax
## special for smoking because of weird categories
df_exp_smok <- df_exp_select %>%
group_by(patid) %>%
summarise(smoker = ifelse(any(smokstatus %in% c("Current Smoker", "Ex-Smoker", "Current Or Ex-Smoker")), 1, 0))
## variables we just want the value at indexdate
df_exp_index <- df_exp_select %>%
group_by(patid) %>%
dplyr::select(indexdate, enddate, exposed, gender, dob, age, bmi, bmi_cat, eth_edited, country, ruc, carstairs, cal_period) %>%
slice(1)
## need to add duration of disease (in 1-year increments)
df_exp_fup <- df_exp %>%
dplyr::select(setid, patid, tstart, tstop, t) %>%
group_by(setid, patid) %>%
mutate(t = tstop[n()] - tstart[1]) %>%
mutate(years = t/365.25) %>%
slice(1) %>%
ungroup()
df_exp_static <- df_exp_index %>%
left_join(df_exp_tuc, by = c("patid")) %>%
left_join(df_exp_smok, by = c("patid")) %>%
left_join(df_exp_fup, by = c("patid"))
df_exp_static$gender <- factor(df_exp_static$gender, levels = c(NA, "Male", "Female", "Indeterminate", NA))
df_exp_static$age <- (df_exp_static$tstart)/365.25
mean_age <- mean(df_exp_static$age, na.rm = T)
df_exp_static$age <- df_exp_static$age - mean_age
df_exp_static$smoker <- factor(df_exp_static$smoker)
### remove ordering of factor variables (this was used to select the max(var) per patid but will mess up the regression presentation)
df_exp_static$cci <- factor(df_exp_static$cci, levels = 1:3, labels = c("Low", "Moderate", "Severe"))
df_exp_static$comorbid <- factor(df_exp_static$comorbid, levels = 1:2, labels = c("No", "Yes"))
df_exp_static$alc <- factor(df_exp_static$alc, levels = 1:2, labels = c("No", "Yes"))
df_exp_static$sleep <- factor(df_exp_static$sleep, levels = 1:2, labels = c("No", "Yes"))
df_exp_static$gc90days <- factor(df_exp_static$gc90days, levels = 1:2, labels = c("No", "Yes"))
if(X == "eczema"){
df_exp_static$severity <- factor(df_exp_static$severity, levels = 1:3, labels = c("Mild", "Moderate", "Severe"))
}else{
df_exp_static$severity <- factor(df_exp_static$severity, levels = 1:2, labels = c("Mild", "Moderate/severe"))
}
df_exp_static
}
# 1a - Logistic regression method -----------------------------------------------
for(exposure in XX) {
ABBRVexp <- substr(exposure, 1, 3)
for(outcome in YY) {
df_exp_static <- load_data_fn(X = exposure, Y = outcome)
# sample 80% train --------------------------------------------------------
length_data <- dim(df_exp_static)[1]
set.seed(12)
patid_sample <- sample(df_exp_static$patid, size = round(length_data*0.8))
df_exp_train <- df_exp_static %>%
ungroup() %>%
filter(patid %in% patid_sample)
df_exp_test <- df_exp_static %>%
ungroup() %>%
filter(!patid %in% patid_sample)
# univariable logistic regression with covariates -------------------------
covars <- c("age", "gender", "carstairs",
"cci", "bmi_cat", "smoker")
univ_roc <- function(covariate){
uni_1 <- glm(out ~ get(covariate), data = df_exp_train, family = "binomial")
pred_vals <- predict(uni_1, type = "link", data = df_exp_train)
df_predictions <- df_exp_train %>%
filter(!is.na(get(covariate))) %>%
mutate(lp = pred_vals) %>%
dplyr::select(patid, all_of(covariate), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
# report AUC -------------------------------------------------------------
roc_calc <- pROC::roc(df_predictions$out, df_predictions$risk)
roc_calc$auc
plot(roc_calc, main = covariate, xlim = c(1,0), ylim = c(0,1))
text(0.8, 0.8, round(roc_calc$auc,2), font = 2, pos = 4)
df_calibration <- df_predictions %>%
ungroup() %>%
mutate(risk_dec = ntile(risk, 10)) %>%
group_by(risk_dec) %>%
summarise(n = n(), observed = mean(out), predicted = mean(risk))
smoothfit <- loess(df_calibration$observed ~ df_calibration$predicted, degree = 2)
scatter.smooth(df_calibration$predicted, df_calibration$observed,
col = 7, type = "p", xlim = c(0,0.2), ylim = c(0,0.2),
xlab = "Predicted probability", ylab = "Observed probability",
lpars = list(col = 4, lty = 2))
abline(coef = c(0,1))
}
pdf(paste0(here("out/predictions"), "/01_univ_auc", ABBRVexp, "_", substr(outcome, 1, 3), ".pdf"), 8, 8)
par(mfrow = c(3,4))
sapply(covars, FUN = univ_roc)
dev.off()
}
}
# 1b - build multivariable logistic regression models --------------------------
pdf(paste0(here("out/predictions"), "/02_multimodel_logisticpredict.pdf"), 10, 10)
par(mfrow = c(4,4), mgp=c(3,1,0))
ii <- 0
for(exposure in XX) {
ABBRVexp <- substr(exposure, 1, 3)
for(outcome in YY) {
ii <- ii+1
df_exp_static <- load_data_fn(X = exposure, Y = outcome)
# sample 80% train
length_data <- dim(df_exp_static)[1]
set.seed(12)
patid_sample <- sample(df_exp_static$patid, size = round(length_data*0.8))
df_exp_train <- df_exp_static %>%
ungroup() %>%
filter(patid %in% patid_sample)
df_exp_test <- df_exp_static %>%
ungroup() %>%
filter(!patid %in% patid_sample)
if(ABBRVexp == "pso"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept","age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use")
)
}
if(ABBRVexp == "ecz"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc + sleep + gc90days, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept", "age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc","sleep", "gc90days")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use", "Sleep problems", "Oral GC use (90 day risk window)")
)
}
predict_cis <- confint.default(multi_1) %>%
as.data.frame(row.names = F) %>%
janitor::clean_names()
predict_gt <- broom::tidy(multi_1, conf.int = F) %>%
bind_cols(predict_cis) %>%
dplyr::select(variable = term, logOR = estimate, conf.low = x2_5_percent, conf.high = x97_5_percent, p.value) %>%
drop_na() %>%
mutate(conf_int = paste0(signif(conf.low, 2), " , ", signif(conf.high, 2)),
p = ifelse(p.value < 0.0001, "*", paste0(signif(p.value, 1)))) %>%
dplyr::select(-conf.low, -conf.high, -p.value) %>%
separate(variable, into = c("delete", "level"), paste(model_covars, collapse = "|"), remove = FALSE) %>%
mutate(level=str_remove(level, "\\)")) %>%
mutate(temp = str_extract(variable, paste(model_covars, collapse = "|"))) %>%
left_join(pretty_model_covars, by = c("temp" = "model_covars")) %>%
mutate(OR = exp(logOR)) %>%
dplyr::select(var = pretty, level, OR, logOR, conf_int, p) %>%
gt() %>%
cols_align(columns = 3:6, align = "right") %>%
fmt_number(n_sigfig = 3, columns = where(is.numeric)) %>%
cols_label(
var = "Variable",
level = "Level",
logOR = "log(OR)",
conf_int = "95% CI",
p = md("*p*")
) %>%
tab_footnote("* p < 0.0001", locations = cells_column_labels("p"))
predict_gt
gt::gtsave(
predict_gt,
filename = paste0("tab1_", ABBRVexp, "_", substr(outcome, 1, 3), "_predictmodel_logistic.html"),
path = here::here("out//predictions//")
)
pred_vals_train <- predict(multi_1, type = "link", newdata = df_exp_train)
pred_vals_test <- predict(multi_1, type = "link", newdata = df_exp_test)
df_predictions_train <- df_exp_train %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_train) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
df_predictions_test <- df_exp_test %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_test) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
# PLOT PLOTS PLOTS
# report AUC
plot_roc <- function(test_train) {
roc_df <- get(paste0("df_predictions_", test_train))
roc_calc <- pROC::roc(roc_df$out, roc_df$risk)
roc_calc$auc
par(new = T)
ii <- ifelse(test_train=="train", 0, 0.2)
col_plot <- ifelse(test_train == "train", 4, 2)
lines(roc_calc$specificities, roc_calc$sensitivities, col = col_plot)
text(0.9, 0.9-ii, round(roc_calc$auc,2), col = col_plot, font = 2, pos = 4)
}
plot(1:0, 0:1, xlim = c(1,0), ylim = c(0,1), col = 0,
ylab = "Sensitivity", xlab = "Specificity",
main = paste0(exposure, " ~ ", outcome))
abline(coef = c(1,-1))
plot_roc("train")
plot_roc("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 1, bty = "n")
mtext(paste0(ii,"A"), side=3, adj=0, font=2)
## plot_boxplot of risk scores
risk_nonoutcome_train <- df_predictions_train[df_predictions_train$out == 0, "risk"] %>% pull()
risk_withoutcome_train <- df_predictions_train[df_predictions_train$out == 1, "risk"] %>% pull()
risk_nonoutcome_test <- df_predictions_test[df_predictions_test$out == 0, "risk"] %>% pull()
risk_withoutcome_test <- df_predictions_test[df_predictions_test$out == 1, "risk"] %>% pull()
    # Make a list of these 4 vectors
risk_list <- list(
risk_nonoutcome_train,
risk_withoutcome_train,
risk_nonoutcome_test,
risk_withoutcome_test
)
# Change the names of the elements of the list :
names(risk_list) <- c(paste("Train data \n Control \n n=", length(risk_nonoutcome_train), sep = ""),
paste("Train data \n Case \n n=", length(risk_withoutcome_train), sep = ""),
paste("Test data \n Control \n n=", length(risk_nonoutcome_test), sep = ""),
paste("Test data \n Case \n n=", length(risk_withoutcome_test), sep = "")
)
# Change the mgp argument: avoid text overlaps axis
# Final Boxplot
mu1a <- signif(mean(risk_nonoutcome_train, na.rm = T), digits = 3)
mu1b <- signif(mean(risk_nonoutcome_test, na.rm = T), digits = 3)
text1_train <- bquote(mu ~ "=" ~ .(mu1a))
text1_test <- bquote(mu ~ "=" ~ .(mu1b))
mu2a <- signif(mean(risk_withoutcome_train, na.rm = T), digits = 2)
mu2b <- signif(mean(risk_withoutcome_test, na.rm = T), digits = 2)
text2_train <- bquote(mu ~ "=" ~ .(mu2a))
    text2_test <- bquote(mu ~ "=" ~ .(mu2b))
col1 <- 1
par(mgp = c(3,2,0), tck = NA, tcl = -0.25)
boxplot(risk_list ,
col= ggplot2::alpha(c(2,4,2,4), 0.2),
ylab="Survival risk", outline = FALSE, ylim = c(0,0.5),
pars=list(mgp=c(4,2,.5)))
text(0.75, 0.4, text1_train, pos = 4, cex = 0.7, col =2)
text(1.75, 0.4, text2_train, pos = 4, cex = 0.7, col = 4)
text(2.75, 0.4, text1_test, pos = 4, cex = 0.7, col = 2)
text(3.75, 0.4, text2_test, pos = 4, cex = 0.7, col = 4)
mtext(paste0(ii,"B"), side=3, adj=0, font=2)
par(mgp = c(3,1,0), tck = NA, tcl = -0.5)
#### GAM plot
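# plot_calibration fits a binomial GAM of the observed outcome on predicted risk and
# overlays the smoothed calibration curve with an approximate 95% confidence band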
plot_calibration <- function(test_train) {
df_calibration <- get(paste0("df_predictions_", test_train))
gam1 <- gam(out ~ s(risk, k=4) , data = df_calibration, family = "binomial")
sample_plot <- sample(1:dim(df_calibration)[1], size = 0.1*dim(df_calibration)[1])
col_plot <- ifelse(test_train =="train", 4, 2)
plot_adjust <- ifelse(test_train =="train", 0.01, -0.01)
axismax <- max(df_predictions_train$risk, na.rm = T)
df_calibration$out_plot <- ifelse(df_calibration$out==1, axismax, 0)
points(df_calibration$risk[sample_plot], df_calibration$out_plot[sample_plot]+plot_adjust, col = ggplot2::alpha(col_plot,0.025), cex = 0.2)
tt <- seq(range(df_calibration$risk, na.rm = T)[1],range(df_calibration$risk, na.rm = T)[2],0.001)
preds <- predict(gam1, newdata = list(risk=tt), type = "link", se.fit = TRUE)
critval <- 1.96;
upperCI <- preds$fit + (critval * preds$se.fit);
lowerCI <- preds$fit - (critval * preds$se.fit)
fit <- preds$fit
fitPlotF <- gam1$family$linkinv(fit);
CI1plotF <- gam1$family$linkinv(upperCI);
CI2plotF <- gam1$family$linkinv(lowerCI)
## Plot GAM fits
polygon(c(tt,rev(tt)),c(CI1plotF,rev(CI2plotF)),col=ggplot2::alpha(col_plot,0.2),lty=0)
lines(tt, fitPlotF ,col=col_plot,lwd=1)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(c(0,axismax), c(0,axismax),
ylim = c(0-0.02, axismax+0.02), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed outcome",
col = 0)
abline(coef = c(0,1), col = ggplot2::alpha(1,0.2))
plot_calibration("train")
plot_calibration("test")
legend("left", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
axis(side = 4, at = c(0-0.02, axismax+0.02), labels = c("Control","Case"), tick = FALSE, padj = -1)
mtext(paste0(ii,"C"), side=3, adj=0, font=2)
plot_validation <- function(test_train) {
cal_df <- get(paste0("df_predictions_", test_train))
df_calibration <- cal_df %>%
ungroup() %>%
mutate(risk_dec = ntile(risk, 10)) %>%
group_by(risk_dec) %>%
summarise(n = n(), observed = mean(out), predicted = mean(risk))
col_plot <- ifelse(test_train == "train", 4, 2)
xy <- xy.coords(df_calibration$predicted, df_calibration$observed, "Predicted probability", "Observed probability")
x <- xy$x
y <- xy$y
pred <- loess.smooth(x, y, span = 2/3, degree = 2)
if(test_train == "test"){
par(new = T)
}
points(x, y, col = col_plot, cex = 1.2)
lines(pred$x, pred$y, lty = 2, col = col_plot)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(0:axismax, 0:axismax,
ylim = c(0, axismax), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed probability",
col = 0)
abline(coef = c(0,1))
plot_validation("train")
plot_validation("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
mtext(paste0(ii,"D"), side=3, adj=0, font=2)
}
}
dev.off()
# 2a - up to 1 year -------------------------------------------------------
pdf(paste0(here("out/predictions"), "/03_multimodel_logisticpredict_1year.pdf"), 10, 10)
par(mfrow = c(4,4), mgp=c(3,1,0))
ii <- 0
for(exposure in XX) {
ABBRVexp <- substr(exposure, 1, 3)
for(outcome in YY) {
ii <- ii+1
df_exp_static <- load_data_fn(X = exposure, Y = outcome, fupmax = 365.25)
# restrict to 1 year follow up
# sample 80% train
length_data <- dim(df_exp_static)[1]
set.seed(12)
patid_sample <- sample(df_exp_static$patid, size = round(length_data*0.8))
df_exp_train <- df_exp_static %>%
ungroup() %>%
filter(patid %in% patid_sample)
df_exp_test <- df_exp_static %>%
ungroup() %>%
filter(!patid %in% patid_sample)
df_exp_train$out %>% table()
if(ABBRVexp == "pso"){
glm(out ~ age + gender + carstairs + cci, data = df_exp_train, family = "binomial")
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept","age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use")
)
}
if(ABBRVexp == "ecz"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc + sleep + gc90days, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept", "age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc","sleep", "gc90days")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use", "Sleep problems", "Oral GC use (90 day risk window)")
)
}
predict_cis <- confint.default(multi_1) %>%
as.data.frame(row.names = F) %>%
janitor::clean_names()
predict_gt <- broom::tidy(multi_1, conf.int = F) %>%
bind_cols(predict_cis) %>%
dplyr::select(variable = term, logOR = estimate, conf.low = x2_5_percent, conf.high = x97_5_percent, p.value) %>%
drop_na() %>%
mutate(conf_int = paste0(signif(conf.low, 2), " , ", signif(conf.high, 2)),
p = ifelse(p.value < 0.0001, "*", paste0(signif(p.value, 1)))) %>%
dplyr::select(-conf.low, -conf.high, -p.value) %>%
separate(variable, into = c("delete", "level"), paste(model_covars, collapse = "|"), remove = FALSE) %>%
mutate(level=str_remove(level, "\\)")) %>%
mutate(temp = str_extract(variable, paste(model_covars, collapse = "|"))) %>%
left_join(pretty_model_covars, by = c("temp" = "model_covars")) %>%
mutate(OR = exp(logOR)) %>%
dplyr::select(var = pretty, level, OR, logOR, conf_int, p) %>%
gt() %>%
cols_align(columns = 3:6, align = "right") %>%
fmt_number(n_sigfig = 3, columns = where(is.numeric)) %>%
cols_label(
var = "Variable",
level = "Level",
logOR = "log(OR)",
conf_int = "95% CI",
p = md("*p*")
) %>%
tab_footnote("* p < 0.0001", locations = cells_column_labels("p"))
predict_gt
gt::gtsave(
predict_gt,
filename = paste0("tab1_", ABBRVexp, "_", substr(outcome, 1, 3), "_predictmodel_logistic_1yr.rtf"),
path = here::here("out//predictions//")
)
gt::gtsave(
predict_gt,
filename = paste0("tab1_", ABBRVexp, "_", substr(outcome, 1, 3), "_predictmodel_logistic_1yr.html"),
path = here::here("out//predictions//")
)
pred_vals_train <- predict(multi_1, type = "link", newdata = df_exp_train)
pred_vals_test <- predict(multi_1, type = "link", newdata = df_exp_test)
df_predictions_train <- df_exp_train %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_train) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
df_predictions_test <- df_exp_test %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_test) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
# PLOT PLOTS PLOTS
# report AUC
plot_roc <- function(test_train) {
roc_df <- get(paste0("df_predictions_", test_train))
roc_calc <- pROC::roc(roc_df$out, roc_df$risk)
roc_calc$auc
par(new = T)
ii <- ifelse(test_train=="train", 0, 0.2)
col_plot <- ifelse(test_train == "train", 4, 2)
lines(roc_calc$specificities, roc_calc$sensitivities, col = col_plot)
text(0.9, 0.9-ii, round(roc_calc$auc,2), col = col_plot, font = 2, pos = 4)
}
plot(1:0, 0:1, xlim = c(1,0), ylim = c(0,1), col = 0,
ylab = "Sensitivity", xlab = "Specificity",
main = paste0(exposure, " ~ ", outcome))
abline(coef = c(1,-1))
plot_roc("train")
plot_roc("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 1, bty = "n")
mtext(paste0(ii,"A"), side=3, adj=0, font=2)
## plot_boxplot of risk scores
risk_nonoutcome_train <- df_predictions_train[df_predictions_train$out == 0, "risk"] %>% pull()
risk_withoutcome_train <- df_predictions_train[df_predictions_train$out == 1, "risk"] %>% pull()
risk_nonoutcome_test <- df_predictions_test[df_predictions_test$out == 0, "risk"] %>% pull()
risk_withoutcome_test <- df_predictions_test[df_predictions_test$out == 1, "risk"] %>% pull()
# Make a list of these 4 vectors
risk_list <- list(
risk_nonoutcome_train,
risk_withoutcome_train,
risk_nonoutcome_test,
risk_withoutcome_test
)
# Change the names of the elements of the list :
names(risk_list) <- c(paste("Train data \n Control \n n=", length(risk_nonoutcome_train), sep = ""),
paste("Train data \n Case \n n=", length(risk_withoutcome_train), sep = ""),
paste("Test data \n Control \n n=", length(risk_nonoutcome_test), sep = ""),
paste("Test data \n Case \n n=", length(risk_withoutcome_test), sep = "")
)
# Change the mgp argument: avoid text overlaps axis
# Final Boxplot
mu1a <- signif(mean(risk_nonoutcome_train, na.rm = T), digits = 3)
mu1b <- signif(mean(risk_nonoutcome_test, na.rm = T), digits = 3)
text1_train <- bquote(mu ~ "=" ~ .(mu1a))
text1_test <- bquote(mu ~ "=" ~ .(mu1b))
mu2a <- signif(mean(risk_withoutcome_train, na.rm = T), digits = 2)
mu2b <- signif(mean(risk_withoutcome_test, na.rm = T), digits = 2)
text2_train <- bquote(mu ~ "=" ~ .(mu2a))
text2_test <- bquote(mu ~ "=" ~ .(mu2b))
col1 <- 1
par(mgp = c(3,2,0), tck = NA, tcl = -0.25)
boxplot(risk_list ,
col= ggplot2::alpha(c(2,4,2,4), 0.2),
ylab="Survival risk", outline = FALSE, ylim = c(0,0.5),
pars=list(mgp=c(4,2,.5)))
text(0.75, 0.4, text1_train, pos = 4, cex = 0.7, col =2)
text(1.75, 0.4, text2_train, pos = 4, cex = 0.7, col = 4)
text(2.75, 0.4, text1_test, pos = 4, cex = 0.7, col = 2)
text(3.75, 0.4, text2_test, pos = 4, cex = 0.7, col = 4)
mtext(paste0(ii,"B"), side=3, adj=0, font=2)
par(mgp = c(3,1,0), tck = NA, tcl = -0.5)
#### GAM plot
plot_calibration <- function(test_train) {
df_calibration <- get(paste0("df_predictions_", test_train))
gam1 <- gam(out ~ s(risk, k=4) , data = df_calibration, family = "binomial")
sample_plot <- sample(1:dim(df_calibration)[1], size = 0.1*dim(df_calibration)[1])
col_plot <- ifelse(test_train =="train", 4, 2)
plot_adjust <- ifelse(test_train =="train", 0.01, -0.01)
axismax <- max(df_predictions_train$risk, na.rm = T)
df_calibration$out_plot <- ifelse(df_calibration$out==1, axismax, 0)
points(df_calibration$risk[sample_plot], df_calibration$out_plot[sample_plot]+plot_adjust, col = ggplot2::alpha(col_plot,0.025), cex = 0.2)
tt <- seq(range(df_calibration$risk, na.rm = T)[1],range(df_calibration$risk, na.rm = T)[2],0.001)
preds <- predict(gam1, newdata = list(risk=tt), type = "link", se.fit = TRUE)
critval <- 1.96;
upperCI <- preds$fit + (critval * preds$se.fit);
lowerCI <- preds$fit - (critval * preds$se.fit)
fit <- preds$fit
fitPlotF <- gam1$family$linkinv(fit);
CI1plotF <- gam1$family$linkinv(upperCI);
CI2plotF <- gam1$family$linkinv(lowerCI)
## Plot GAM fits
polygon(c(tt,rev(tt)),c(CI1plotF,rev(CI2plotF)),col=ggplot2::alpha(col_plot,0.2),lty=0)
lines(tt, fitPlotF ,col=col_plot,lwd=1)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(c(0,axismax), c(0,axismax),
ylim = c(0-0.02, axismax+0.02), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed outcome",
col = 0)
abline(coef = c(0,1), col = ggplot2::alpha(1,0.2))
plot_calibration("train")
plot_calibration("test")
legend("left", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
axis(side = 4, at = c(0-0.02, axismax+0.02), labels = c("Control","Case"), tick = FALSE, padj = -1)
mtext(paste0(ii,"C"), side=3, adj=0, font=2)
plot_validation <- function(test_train) {
cal_df <- get(paste0("df_predictions_", test_train))
df_calibration <- cal_df %>%
ungroup() %>%
mutate(risk_dec = ntile(risk, 10)) %>%
group_by(risk_dec) %>%
summarise(n = n(), observed = mean(out), predicted = mean(risk))
col_plot <- ifelse(test_train == "train", 4, 2)
xy <- xy.coords(df_calibration$predicted, df_calibration$observed, "Predicted probability", "Observed probability")
x <- xy$x
y <- xy$y
pred <- loess.smooth(x, y, span = 2/3, degree = 2)
if(test_train == "test"){
par(new = T)
}
points(x, y, col = col_plot, cex = 1.2)
lines(pred$x, pred$y, lty = 2, col = col_plot)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(0:axismax, 0:axismax,
ylim = c(0, axismax), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed probability",
col = 0)
abline(coef = c(0,1))
plot_validation("train")
plot_validation("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
mtext(paste0(ii,"D"), side=3, adj=0, font=2)
}
}
dev.off()
# 2b - up to 3 years -------------------------------------------------------
pdf(paste0(here("out/predictions"), "/03_multimodel_logisticpredict_3year.pdf"), 10, 10)
par(mfrow = c(4,4), mgp=c(3,1,0))
ii <- 0
for(exposure in XX) {
ABBRVexp <- substr(exposure, 1, 3)
for(outcome in YY) {
ii <- ii+1
df_exp_static <- load_data_fn(X = exposure, Y = outcome, fupmax = 365.25*3)
# restrict to 3 years follow up
# sample 80% train
length_data <- dim(df_exp_static)[1]
set.seed(12)
patid_sample <- sample(df_exp_static$patid, size = round(length_data*0.8))
df_exp_train <- df_exp_static %>%
ungroup() %>%
filter(patid %in% patid_sample)
df_exp_test <- df_exp_static %>%
ungroup() %>%
filter(!patid %in% patid_sample)
if(ABBRVexp == "pso"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept","age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use")
)
}
if(ABBRVexp == "ecz"){
multi_1 <- glm(out ~ age + gender + carstairs + cci + bmi_cat + smoker + alc + sleep + gc90days, data = df_exp_train, family = "binomial")
model_covars <- c("Intercept", "age", "gender", "carstairs", "cci", "bmi_cat", "smoker", "alc","sleep", "gc90days")
pretty_model_covars <- cbind.data.frame(
model_covars,
pretty = c("Intercept", "Age (centred)", "Gender", "Carstairs index of deprivation", "CCI", "BMI (centred)", "Smoker", "Harmful alcohol use", "Sleep problems", "Oral GC use (90 day risk window)")
)
}
predict_cis <- confint.default(multi_1) %>%
as.data.frame(row.names = F) %>%
janitor::clean_names()
predict_gt <- broom::tidy(multi_1, conf.int = F) %>%
bind_cols(predict_cis) %>%
dplyr::select(variable = term, logOR = estimate, conf.low = x2_5_percent, conf.high = x97_5_percent, p.value) %>%
drop_na() %>%
mutate(conf_int = paste0(signif(conf.low, 2), " , ", signif(conf.high, 2)),
p = ifelse(p.value < 0.0001, "*", paste0(signif(p.value, 1)))) %>%
dplyr::select(-conf.low, -conf.high, -p.value) %>%
separate(variable, into = c("delete", "level"), paste(model_covars, collapse = "|"), remove = FALSE) %>%
mutate(level=str_remove(level, "\\)")) %>%
mutate(temp = str_extract(variable, paste(model_covars, collapse = "|"))) %>%
left_join(pretty_model_covars, by = c("temp" = "model_covars")) %>%
mutate(OR = exp(logOR)) %>%
dplyr::select(var = pretty, level, OR, logOR, conf_int, p) %>%
gt() %>%
cols_align(columns = 3:6, align = "right") %>%
fmt_number(n_sigfig = 3, columns = where(is.numeric)) %>%
cols_label(
var = "Variable",
level = "Level",
logOR = "log(OR)",
conf_int = "95% CI",
p = md("*p*")
) %>%
tab_footnote("* p < 0.0001", locations = cells_column_labels("p"))
predict_gt
gt::gtsave(
predict_gt,
filename = paste0("tab1_", ABBRVexp, "_", substr(outcome, 1, 3), "_predictmodel_logistic_1yr.html"),
path = here::here("out//predictions//")
)
pred_vals_train <- predict(multi_1, type = "link", newdata = df_exp_train)
pred_vals_test <- predict(multi_1, type = "link", newdata = df_exp_test)
df_predictions_train <- df_exp_train %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_train) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
df_predictions_test <- df_exp_test %>%
#filter_at(all_of(model_covars[-1]), all_vars(!is.na(.))) %>%
mutate(lp = pred_vals_test) %>%
dplyr::select(patid, all_of(model_covars[-1]), out, lp) %>%
mutate(risk = 1/(1 + exp(-lp)))
# PLOT PLOTS PLOTS
# report AUC
plot_roc <- function(test_train) {
roc_df <- get(paste0("df_predictions_", test_train))
roc_calc <- pROC::roc(roc_df$out, roc_df$risk)
roc_calc$auc
par(new = T)
ii <- ifelse(test_train=="train", 0, 0.2)
col_plot <- ifelse(test_train == "train", 4, 2)
lines(roc_calc$specificities, roc_calc$sensitivities, col = col_plot)
text(0.9, 0.9-ii, round(roc_calc$auc,2), col = col_plot, font = 2, pos = 4)
}
plot(1:0, 0:1, xlim = c(1,0), ylim = c(0,1), col = 0,
ylab = "Sensitivity", xlab = "Specificity",
main = paste0(exposure, " ~ ", outcome))
abline(coef = c(1,-1))
plot_roc("train")
plot_roc("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 1, bty = "n")
mtext(paste0(ii,"A"), side=3, adj=0, font=2)
## plot_boxplot of risk scores
risk_nonoutcome_train <- df_predictions_train[df_predictions_train$out == 0, "risk"] %>% pull()
risk_withoutcome_train <- df_predictions_train[df_predictions_train$out == 1, "risk"] %>% pull()
risk_nonoutcome_test <- df_predictions_test[df_predictions_test$out == 0, "risk"] %>% pull()
risk_withoutcome_test <- df_predictions_test[df_predictions_test$out == 1, "risk"] %>% pull()
# Make a list of these 4 vectors
risk_list <- list(
risk_nonoutcome_train,
risk_withoutcome_train,
risk_nonoutcome_test,
risk_withoutcome_test
)
# Change the names of the elements of the list :
names(risk_list) <- c(paste("Train data \n Control \n n=", length(risk_nonoutcome_train), sep = ""),
paste("Train data \n Case \n n=", length(risk_withoutcome_train), sep = ""),
paste("Test data \n Control \n n=", length(risk_nonoutcome_test), sep = ""),
paste("Test data \n Case \n n=", length(risk_withoutcome_test), sep = "")
)
# Change the mgp argument: avoid text overlaps axis
# Final Boxplot
mu1a <- signif(mean(risk_nonoutcome_train, na.rm = T), digits = 3)
mu1b <- signif(mean(risk_nonoutcome_test, na.rm = T), digits = 3)
text1_train <- bquote(mu ~ "=" ~ .(mu1a))
text1_test <- bquote(mu ~ "=" ~ .(mu1b))
mu2a <- signif(mean(risk_withoutcome_train, na.rm = T), digits = 2)
mu2b <- signif(mean(risk_withoutcome_test, na.rm = T), digits = 2)
text2_train <- bquote(mu ~ "=" ~ .(mu2a))
text2_test <- bquote(mu ~ "=" ~ .(mu2b))
col1 <- 1
par(mgp = c(3,2,0), tck = NA, tcl = -0.25)
boxplot(risk_list ,
col= ggplot2::alpha(c(2,4,2,4), 0.2),
ylab="Survival risk", outline = FALSE, ylim = c(0,0.5),
pars=list(mgp=c(4,2,.5)))
text(0.75, 0.4, text1_train, pos = 4, cex = 0.7, col =2)
text(1.75, 0.4, text2_train, pos = 4, cex = 0.7, col = 4)
text(2.75, 0.4, text1_test, pos = 4, cex = 0.7, col = 2)
text(3.75, 0.4, text2_test, pos = 4, cex = 0.7, col = 4)
mtext(paste0(ii,"B"), side=3, adj=0, font=2)
par(mgp = c(3,1,0), tck = NA, tcl = -0.5)
#### GAM plot
plot_calibration <- function(test_train) {
df_calibration <- get(paste0("df_predictions_", test_train))
gam1 <- gam(out ~ s(risk, k=4) , data = df_calibration, family = "binomial")
sample_plot <- sample(1:dim(df_calibration)[1], size = 0.1*dim(df_calibration)[1])
col_plot <- ifelse(test_train =="train", 4, 2)
plot_adjust <- ifelse(test_train =="train", 0.01, -0.01)
axismax <- max(df_predictions_train$risk, na.rm = T)
df_calibration$out_plot <- ifelse(df_calibration$out==1, axismax, 0)
points(df_calibration$risk[sample_plot], df_calibration$out_plot[sample_plot]+plot_adjust, col = ggplot2::alpha(col_plot,0.025), cex = 0.2)
tt <- seq(range(df_calibration$risk, na.rm = T)[1],range(df_calibration$risk, na.rm = T)[2],0.001)
preds <- predict(gam1, newdata = list(risk=tt), type = "link", se.fit = TRUE)
critval <- 1.96;
upperCI <- preds$fit + (critval * preds$se.fit);
lowerCI <- preds$fit - (critval * preds$se.fit)
fit <- preds$fit
fitPlotF <- gam1$family$linkinv(fit);
CI1plotF <- gam1$family$linkinv(upperCI);
CI2plotF <- gam1$family$linkinv(lowerCI)
## Plot GAM fits
polygon(c(tt,rev(tt)),c(CI1plotF,rev(CI2plotF)),col=ggplot2::alpha(col_plot,0.2),lty=0)
lines(tt, fitPlotF ,col=col_plot,lwd=1)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(c(0,axismax), c(0,axismax),
ylim = c(0-0.02, axismax+0.02), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed outcome",
col = 0)
abline(coef = c(0,1), col = ggplot2::alpha(1,0.2))
plot_calibration("train")
plot_calibration("test")
legend("left", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
axis(side = 4, at = c(0-0.02, axismax+0.02), labels = c("Control","Case"), tick = FALSE, padj = -1)
mtext(paste0(ii,"C"), side=3, adj=0, font=2)
plot_validation <- function(test_train) {
cal_df <- get(paste0("df_predictions_", test_train))
df_calibration <- cal_df %>%
ungroup() %>%
mutate(risk_dec = ntile(risk, 10)) %>%
group_by(risk_dec) %>%
summarise(n = n(), observed = mean(out), predicted = mean(risk))
col_plot <- ifelse(test_train == "train", 4, 2)
xy <- xy.coords(df_calibration$predicted, df_calibration$observed, "Predicted probability", "Observed probability")
x <- xy$x
y <- xy$y
pred <- loess.smooth(x, y, span = 2/3, degree = 2)
if(test_train == "test"){
par(new = T)
}
points(x, y, col = col_plot, cex = 1.2)
lines(pred$x, pred$y, lty = 2, col = col_plot)
}
axismax <- max(df_predictions_train$risk, na.rm = T)
plot(0:axismax, 0:axismax,
ylim = c(0, axismax), xlim = c(0, axismax),
xlab = "Predicted probability", ylab = "Observed probability",
col = 0)
abline(coef = c(0,1))
plot_validation("train")
plot_validation("test")
legend("bottomright", legend = c("Train", "Test"), col = c(4,2), lty = 2, bty = "n")
mtext(paste0(ii,"D"), side=3, adj=0, font=2)
}
}
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helperfunctions_simulation.R
\name{create_coverage_array}
\alias{create_coverage_array}
\title{Create the Coverage Array of the Simulation}
\usage{
create_coverage_array(sim_curves, gen_curves, effect_index, uni = NULL,
m_fac = 1.96)
}
\arguments{
\item{sim_curves}{The large list of simulation results. Use object$mul.}
\item{gen_curves}{The original data generating curve as part of the output of
multifamm:::extract_components(), so use output$cov_preds.}
\item{effect_index}{The index position of the effect to be evaluated in the
gen_curves and sim_curves effect lists. If the intercept is to be
evaluated, this can be specified as 1 or 2 (both scalar and functional
intercept are summed up).}
\item{uni}{Vector giving the associated order of the data generating effects
when evaluating univariate models (object$uni). Is NULL for evaluation of
multivariate models.}
\item{m_fac}{Multiplication factor used to create the upper and lower
credibility bounds. Defaults to 1.96 (ca. 95\%).}
}
\description{
This function takes the index of the covariate
to be evaluated and then checks whether the estimated covariate effect of the
simulation run covers the true data generating effect function. The output is
a logical array where the first dimension gives the dimension of the data,
the second dimension gives the time point to be evaluated and the third
dimension gives the simulation run.
}
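% A minimal usage sketch is added below with hypothetical object names
% (sim_results, components); it assumes simulation output shaped as described
% in the arguments above.
\examples{
\dontrun{
cov_arr <- create_coverage_array(sim_curves = sim_results$mul,
                                 gen_curves = components$cov_preds,
                                 effect_index = 2)
# pointwise coverage per dimension and time point, averaged over simulation runs
apply(cov_arr, c(1, 2), mean)
}
}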
|
/man/create_coverage_array.Rd
|
no_license
|
alexvolkmann/multifammPaper
|
R
| false | true | 1,479 |
rd
|
#' Find convex hull that outlines an individual tree
#'
#' \code{convex_hull} finds the outer hull of a set of points.
#' @param x A two column matrix with column names "X" and "Y"
#' @param plot Whether to plot the results for visualization
#' @return A \code{\link[sp]{SpatialPolygons}} object containing a convex hull based on input points.
#'
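#' @examples
#' \dontrun{
#' # illustrative sketch only: random points standing in for one tree's returns
#' pts <- data.frame(X = runif(25), Y = runif(25))
#' hull <- convex_hull(pts, plot = TRUE)
#' }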
#' @export
convex_hull<-function(x,plot=FALSE){
ch<-grDevices::chull(x$X,x$Y)
poly_coords<-x[c(ch,ch[1]),c("X","Y")]
sp_poly <- sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(poly_coords)), ID=1)))
  if(plot){
    plot(sp_poly)
    points(cbind(x$X,x$Y))
  }
  return(sp_poly)
}
|
/R/convex_hull.R
|
no_license
|
weecology/TreeSegmentation
|
R
| false | false | 640 |
r
|
library(googleAnalyticsR)
library(future.apply)
library(tidyverse)
library(bigrquery)
## setup multisession R for your parallel data fetches -------------------------------------
plan(multisession)
# login as new_user = TRUE if switching accounts. Otherwise do not set new_user = true
ga_auth()
# ga_auth(new_user = TRUE)
# get list of custom dimensions -------------------------------------
customdimensions_list <- as.data.frame(ga_custom_vars_list(17015991, "UA-17015991-1",
type = c("customDimensions")))
Sys.setenv(GA_AUTH_FILE = "C:/Users/User/Documents/.httr-oauth")
# need alternative for mac
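# e.g. on macOS/Linux the cached token usually sits in the home directory
# (untested sketch, adjust to your token location):
# Sys.setenv(GA_AUTH_FILE = "~/.httr-oauth")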
# get account list -------------------------------------
account_list <- ga_account_list()
## the ViewIds to fetch all at once -------------------------------------
gaids <- c(account_list[2122,'viewId'], account_list[2125,'viewId'], account_list[2128,'viewId'])
# selecting segments -------------------------------------
my_segments <- ga_segment_list()
segs <- my_segments$items
segment_for_allusers <- "gaid::-1"
seg_allUsers <- segment_ga4("All Users", segment_id = segment_for_allusers)
my_fetch <- function(x) {
google_analytics(x,
date_range = c("2018-01-01","yesterday"),
metrics = c("sessions", "transactions", "transactionRevenue"),
dimensions = c("yearMonth", "deviceCategory", "userType"),
segments = c(seg_allUsers),
anti_sample = TRUE,
max = -1)
}
## makes 3 API calls at once -------------------------------------
all_data <- future_lapply(gaids, my_fetch)
df1 <- data.frame(all_data[1])
df1 <- df1 %>% mutate(viewID = account_list[2122,'viewName'])
df2 <- data.frame(all_data[2])
df2 <- df2 %>% mutate(viewID = account_list[2125,'viewName'])
df3 <- data.frame(all_data[3])
df3 <- df3 %>% mutate(viewID = account_list[2128,'viewName'])
df_all <- rbind(df1,df2,df3)
# query multiple segments -------------------------------------
segment_for_newusers <- "gaid::-2"
seg_newusers <- segment_ga4("new Users", segment_id = segment_for_newusers)
segment_for_returnusers <- "gaid::-3"
seg_returnusers <- segment_ga4("return Users", segment_id = segment_for_returnusers)
segment_for_paidusers <- "gaid::-4"
seg_paidusers <- segment_ga4("paid Users", segment_id = segment_for_paidusers)
segment_for_organicusers <- "gaid::-5"
seg_organicusers <- segment_ga4("organic Users", segment_id = segment_for_organicusers)
segment_for_searchusers <- "gaid::-6"
seg_searchusers <- segment_ga4("search Users", segment_id = segment_for_searchusers)
segment_for_directusers <- "gaid::-7"
seg_directusers <- segment_ga4("direct Users", segment_id = segment_for_directusers)
segment_for_referralusers <- "gaid::-8"
seg_referralusers <- segment_ga4("referral Users", segment_id = segment_for_referralusers)
segment_for_convusers <- "gaid::-9"
seg_convusers <- segment_ga4("conv Users", segment_id = segment_for_convusers)
segment_for_transactionusers <- "gaid::-10"
seg_transactionusers <- segment_ga4("transaction Users", segment_id = segment_for_transactionusers)
segment_for_mobiletabletusers <- "gaid::-11"
seg_mobiletabletusers <- segment_ga4("mobiletablet Users", segment_id = segment_for_mobiletabletusers)
segmentlist <- c(seg_allUsers,
seg_newusers,
seg_returnusers,
seg_paidusers,
seg_organicusers,
seg_searchusers,
seg_directusers,
seg_referralusers,
seg_convusers)
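# the v4 reporting API caps how many segments one request can carry,
# so split the segment list into batches of 4 and fetch each batch in the loop below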
segmentlisting <- split(segmentlist, (seq_along(segmentlist) - 1L) %/% 4L)
ga_data_final_segment <- data.frame()
for (i in segmentlisting) {
ga_data_segment_eg <-
    google_analytics(view_id, # NOTE: view_id, startDate2 and endDate must be defined before running this loop
date_range = c(startDate2, endDate),
metrics = c("sessions", "transactions", "transactionRevenue"),
dimensions = c("yearMonth", "deviceCategory", "userType"),
segments = i,
anti_sample = TRUE,
max = -1)
ga_data_final_segment <- rbind(ga_data_final_segment, ga_data_segment_eg)
}
## pick a profile with data to query
ga_id <- account_list[1123,'viewId']
## get a list of what metrics and dimensions you can use
ga_auth()
meta <- google_analytics_meta()
googleAnalyticsR:::gadget_GASegment()
## make two segment elements
se <- segment_element("sessions",
operator = "GREATER_THAN",
type = "METRIC",
comparisonValue = 3,
scope = "USER")
se3 <- segment_element("medium",
operator = "REGEXP",
type = "DIMENSION",
expressions = "^(email|referral)$",
scope = "SESSION")
# combine the two segment elements defined above (se, se3) into one segment definition
sv_simple <- segment_vector_simple(list(list(se)))
sv_simple2 <- segment_vector_simple(list(list(se3)))
seg_defined <- segment_define(list(sv_simple, sv_simple2))
segment4 <- segment_ga4("simple", user_segment = seg_defined)
# segments: semicolon is "AND", a comma is "OR"
segment_def_medium <- "sessions::condition::ga:medium=~^(email|referral)$"
seg_obj_medium <- segment_ga4("test", segment_id = segment_def_medium)
segment_def_google30sec <- "sessions::condition::ga:source=~^(google)$;ga:timeOnPage>30"
seg_obj_google30sec <- segment_ga4("test", segment_id = segment_def_google30sec)
segment_def_morethan3sessions <- "sessions::condition::ga:sessions>3"
seg_obj_morethan3sessions <- segment_ga4("test", segment_id = segment_def_morethan3sessions)
segment_def_orgtraffic_w_conversions <- "sessions::condition::ga:medium=~^(organic)$;ga:goal11Completions>0"
seg_obj_orgtraffic_w_conversions <- segment_ga4("test", segment_id = segment_def_orgtraffic_w_conversions)
segment_seq_example <- google_analytics_4(ga_id,
date_range = c("2017-01-01","2017-03-01"),
dimensions = c('source','country'),
segments = seg_obj_orgtraffic_w_conversions,
metrics = c('sessions','bounceRate', 'timeOnPage', 'goal11Completions')
)
segment_seq_example
segment_def_mktids <- "sessions::condition::ga:dimension2=@mktid"
seg_obj_mktids <- segment_ga4("test", segment_id = segment_def_mktids)
segment_seq_mktids <- google_analytics_4(ga_id,
date_range = c("2017-01-01","2017-03-01"),
dimensions = c('source','dimension2'),
segments = seg_obj_mktids,
metrics = c('sessions','bounceRate', 'timeOnPage', 'goal11Completions')
)
segment_seq_mktids
google_analytics_4(ga_id, #=This is a (dynamic) ViewID parameter
date_range = c("2018-01-01","2018-01-30"),
metrics = c("sessions", "users"),
dimensions = c("deviceCategory", "sourceMedium", "date"),
#anti_sample = TRUE,
max = -1,
useResourceQuotas = TRUE)
# get data directly from bigquery --------------------------------------------------
project <- "api-project-929144044809"
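# legacy-SQL open-funnel query: per session, find the first hit on each booking step page
# (Homepage -> CIB_ChooseFlight -> ... -> CIB_BookingConfirmation) via FULL OUTER JOINs,
# then aggregate entrances, completions, drop-offs and indirect entrances by date and dimensions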
get_data_query <- paste0(
"SELECT
date,
device_category,
cabin_class,
country_ga,
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS CIB_ChooseFlight,
SUM(third_ent) AS CIB_PassengerDetails,
SUM(fourth_ent) AS CIB_PaymentDetails,
SUM(fifth_ent) AS CIB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS CIB_ChooseFlight_Complete,
SUM(third_cplt) AS CIB_PassengerDetails_Complete,
SUM(fourth_cplt) AS CIB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS CIB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS CIB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS CIB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS CIB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS CIB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS CIB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS CIB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
  TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
channel,
source,
medium,
campaign,
source_medium"
)
df_beforepos <- bq_table_download(bq_project_query(project,
get_data_query,
use_legacy_sql = TRUE))
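# second query: identical funnel, but also pulls the point-of-sale custom dimension (index 22)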
get_data_query2 <- paste0(
"SELECT
date,
device_category,
cabin_class,
country_ga,
# point of sale --------------------------------------------
point_of_sale,
# ----------------------------------------------------------
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS CIB_ChooseFlight,
SUM(third_ent) AS CIB_PassengerDetails,
SUM(fourth_ent) AS CIB_PaymentDetails,
SUM(fifth_ent) AS CIB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS CIB_ChooseFlight_Complete,
SUM(third_cplt) AS CIB_PassengerDetails_Complete,
SUM(fourth_cplt) AS CIB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS CIB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS CIB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS CIB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS CIB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS CIB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS CIB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS CIB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
# point of sale -------------------------------------------------------
b.point_of_sale AS point_of_sale,
# ---------------------------------------------------------------------
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
  TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, '/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class,
# new CD --------------------------------------------------------------------
MAX(IF(customDimensions.index = 22, customDimensions.value, NULL)) WITHIN RECORD AS point_of_sale
# ---------------------------------------------------------------------------
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
point_of_sale,
channel,
source,
medium,
campaign,
source_medium"
)
df_afterpos <- bq_table_download(bq_project_query(project,
get_data_query2,
use_legacy_sql = TRUE))
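# repeat the funnel for the ORB booking flow (ORB_* step pages instead of CIB_*)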
get_data_query_ORB <- paste0(
"SELECT
date,
device_category,
cabin_class,
country_ga,
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS ORB_ChooseFlight,
SUM(third_ent) AS ORB_PassengerDetails,
SUM(fourth_ent) AS ORB_PaymentDetails,
SUM(fifth_ent) AS ORB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS ORB_ChooseFlight_Complete,
SUM(third_cplt) AS ORB_PassengerDetails_Complete,
SUM(fourth_cplt) AS ORB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS ORB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS ORB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS ORB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS ORB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS ORB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS ORB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS ORB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
channel,
source,
medium,
campaign,
source_medium"
)
df_ORB_beforepos <- bq_table_download(bq_project_query(project,
get_data_query_ORB,
use_legacy_sql = TRUE))
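# Hedged sanity check on the download above (assumes the columns keep the
# aliases from the query: Homepage, ORB_ChooseFlight, ..., device_category).
# The funnel should narrow monotonically from Homepage to BookingConfirmation.
df_ORB_beforepos %>%
  group_by(device_category) %>%
  summarise(Homepage = sum(Homepage),
            ChooseFlight = sum(ORB_ChooseFlight),
            PassengerDetails = sum(ORB_PassengerDetails),
            PaymentDetails = sum(ORB_PaymentDetails),
            BookingConfirmation = sum(ORB_BookingConfirmation))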
get_data_query_ORB2 <- paste0(
"SELECT
date,
device_category,
cabin_class,
country_ga,
# point of sale --------------------------------------------
point_of_sale,
# ----------------------------------------------------------
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS ORB_ChooseFlight,
SUM(third_ent) AS ORB_PassengerDetails,
SUM(fourth_ent) AS ORB_PaymentDetails,
SUM(fifth_ent) AS ORB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS ORB_ChooseFlight_Complete,
SUM(third_cplt) AS ORB_PassengerDetails_Complete,
SUM(fourth_cplt) AS ORB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS ORB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS ORB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS ORB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS ORB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS ORB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS ORB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS ORB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
# point of sale -------------------------------------------------------
b.point_of_sale AS point_of_sale,
# ---------------------------------------------------------------------
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class,
# new CD --------------------------------------------------------------------
MAX(IF(customDimensions.index = 22, customDimensions.value, NULL)) WITHIN RECORD AS point_of_sale
# ---------------------------------------------------------------------------
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
point_of_sale,
channel,
source,
medium,
campaign,
source_medium"
)
df_ORB_afterpos <- bq_table_download(bq_project_query(project,
get_data_query_ORB2,
use_legacy_sql = TRUE))
get_data_query_ORB3 <- paste0(
"SELECT *,
CASE
WHEN country_ga IS NOT NULL THEN country_ga
WHEN country_ga IS NULL AND point_of_sale IS NULL THEN 'NULL'
WHEN point_of_sale IS NOT NULL AND country_ga IS NULL THEN point_of_sale
END AS POS_matched
from(
SELECT
date,
device_category,
cabin_class,
country_ga,
# point of sale --------------------------------------------
point_of_sale,
# ----------------------------------------------------------
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS ORB_ChooseFlight,
SUM(third_ent) AS ORB_PassengerDetails,
SUM(fourth_ent) AS ORB_PaymentDetails,
SUM(fifth_ent) AS ORB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS ORB_ChooseFlight_Complete,
SUM(third_cplt) AS ORB_PassengerDetails_Complete,
SUM(fourth_cplt) AS ORB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS ORB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS ORB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS ORB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS ORB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS ORB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS ORB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS ORB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
# point of sale -------------------------------------------------------
b.point_of_sale AS point_of_sale,
# ---------------------------------------------------------------------
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class,
# new CD --------------------------------------------------------------------
MAX(IF(customDimensions.index = 22, customDimensions.value, NULL)) WITHIN RECORD AS point_of_sale
# ---------------------------------------------------------------------------
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
point_of_sale,
channel,
source,
medium,
campaign,
source_medium) as fulltable
LEFT JOIN (
SELECT
*
FROM
[api-project-929144044809:46948678.CountryByteCode]) CountryByteCode
ON
fulltable.point_of_sale = CountryByteCode.Country_Code"
)
df_ORB_joinpos <- bq_table_download(bq_project_query(project,
get_data_query_ORB3,
use_legacy_sql = TRUE))
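# Hedged check of the POS_matched fallback built in the query above (assumes the
# "fulltable_" prefixes that legacy SQL adds when selecting * from an aliased
# subquery, the same naming used for df_CIB_joinpos further down).
df_ORB_joinpos %>%
  mutate(pos_source = case_when(
    !is.na(fulltable_country_ga) ~ "country_ga",
    !is.na(fulltable_point_of_sale) ~ "point_of_sale (CD22)",
    TRUE ~ "unmatched")) %>%
  count(pos_source)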
get_data_query_CIB4 <- paste0(
"SELECT *,
CASE
WHEN country_ga IS NOT NULL THEN country_ga
WHEN country_ga IS NULL AND point_of_sale IS NULL THEN 'NULL'
WHEN point_of_sale IS NOT NULL AND country_ga IS NULL THEN point_of_sale
END AS POS_matched
from(
SELECT
date,
device_category,
cabin_class,
country_ga,
# point of sale --------------------------------------------
point_of_sale,
# ----------------------------------------------------------
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS CIB_ChooseFlight,
SUM(third_ent) AS CIB_PassengerDetails,
SUM(fourth_ent) AS CIB_PaymentDetails,
SUM(fifth_ent) AS CIB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS CIB_ChooseFlight_Complete,
SUM(third_cplt) AS CIB_PassengerDetails_Complete,
SUM(fourth_cplt) AS CIB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS CIB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS CIB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS CIB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS CIB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS CIB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS CIB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS CIB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
# point of sale -------------------------------------------------------
b.point_of_sale AS point_of_sale,
# ---------------------------------------------------------------------
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class,
# new CD --------------------------------------------------------------------
MAX(IF(customDimensions.index = 22, customDimensions.value, NULL)) WITHIN RECORD AS point_of_sale
# ---------------------------------------------------------------------------
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
point_of_sale,
channel,
source,
medium,
campaign,
source_medium) as fulltable
LEFT JOIN (
SELECT
*
FROM
[api-project-929144044809:46948678.CountryByteCode]) CountryByteCode
ON
fulltable.point_of_sale = CountryByteCode.Country_Code"
)
df_CIB_joinpos <- bq_table_download(bq_project_query(project,
get_data_query_CIB4,
use_legacy_sql = TRUE))
cib <- df_CIB_joinpos %>%
group_by(POS_matched) %>%
summarise(Homepage = sum(fulltable_Homepage)) %>%
mutate(percent = round(Homepage / sum(Homepage),2))
orb <- df_ORB_joinpos %>%
group_by(POS_matched, fulltable_device_category) %>%
summarise(Homepage = sum(fulltable_Homepage)) %>%
mutate(percent = round(Homepage / sum(Homepage),2))
old_orb <- df_ORB_afterpos %>%
mutate(POS = case_when(!is.na(country_ga) ~ country_ga,
is.na(country_ga) & is.na(point_of_sale) ~ 'null',
!is.na(point_of_sale) & is.na(country_ga) ~ point_of_sale)) %>%
#filter(POS == 'Australia') %>%
group_by(POS, device_category) %>%
summarise(Homepage = sum(Homepage))
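# Hedged side-by-side of CIB vs ORB homepage entries by point of sale, reusing
# the cib summary above (column names are assumptions based on the frames built
# in this script).
orb_by_pos <- df_ORB_joinpos %>%
  group_by(POS_matched) %>%
  summarise(Homepage_ORB = sum(fulltable_Homepage))
cib %>%
  rename(Homepage_CIB = Homepage) %>%
  full_join(orb_by_pos, by = "POS_matched") %>%
  arrange(desc(Homepage_CIB))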
# Management API -------------------------------------
accountlist_rownumber <- 1723
mgmt_viewid <- account_list[accountlist_rownumber,'viewId']
# Management API (adwords) -------------------------------------
adwords_listing <- ga_adwords_list(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'])
adwords_listing <- as.data.frame(adwords_listing$items)
# ga_adwords() also expects a webPropertyAdWordsLinkId as its third argument;
# using the first id from adwords_listing above is an assumption for illustration
ga_adwords(account_list[accountlist_rownumber,'accountId'],
           account_list[accountlist_rownumber,'webPropertyId'],
           adwords_listing$id[1])
# Management API (custom data source) -------------------------------------
custom_datasources <- ga_custom_datasource(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'])
# Management API (Experiments) -------------------------------------
experiments <- ga_experiment_list(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'],
account_list[accountlist_rownumber,'viewId'])
experiments <- as.data.frame(experiments$items)
# Management API (filters) -------------------------------------
filter_list <- ga_filter_view_list(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'],
account_list[accountlist_rownumber,'viewId'])
filter_list <- as.data.frame(filter_list$items)
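# Hedged sketch: collect view filters for a few account_list rows into one frame
# (the row numbers here are placeholders, not from the original script).
example_rows <- c(1723, 1724)
all_filters <- map_dfr(example_rows, function(i) {
  fl <- ga_filter_view_list(account_list[i, 'accountId'],
                            account_list[i, 'webPropertyId'],
                            account_list[i, 'viewId'])
  as.data.frame(fl$items) %>%
    mutate(viewId = account_list$viewId[i])
})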
# Chi square test ----------------------------------------
chi_sq_testfunc <- function(chisq_viewid) {
google_analytics(chisq_viewid,
date_range = c("2019-01-01","2019-02-01"),
metrics = c("sessions"),
dimensions = c("channelGrouping", "deviceCategory"),
segments = c(seg_allUsers),
useResourceQuotas = TRUE,
# anti_sample = TRUE,
max = -1)
}
chi_sq_df <- chi_sq_testfunc(54454548)
chi_sq_df_clean <- chi_sq_df %>%
select(-segment) %>%
spread(deviceCategory, sessions) %>%
  mutate_at(vars(-channelGrouping), ~ replace(., is.na(.), 0)) %>%
mutate_at(vars(-channelGrouping), as.numeric) %>%
filter(desktop > 10 & mobile > 10 & tablet > 10)
# The [,-1] just gets rid of the row names -- the 1, 2, 3 column
# in the above.
chisq_test_results <- chisq.test(chi_sq_df_clean[,-1])
result_interpretation <- ifelse(chisq_test_results$p.value < 0.05, "The p-value is smaller than 0.05
so we can reject the hypothesis that there is no relationship
between column 1 and column 2",
"The p-value is larger than 0.05
so we cannot reject the hypothesis that there is no relationship
between column 1 and column 2")
chisq.test(chi_sq_df_clean[,-1])
result_interpretation
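# Hedged follow-up on the test above: standardized residuals point to the
# channel/device cells driving the association, and the expected counts confirm
# the chi-square approximation is reasonable (ideally all > 5).
round(chisq_test_results$stdres, 1)
round(chisq_test_results$expected, 0)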
|
/GoogleAnalytics_R.R
|
no_license
|
santiagovama/R
|
R
| false | false | 69,977 |
r
|
library(googleAnalyticsR)
library(future.apply)
library(tidyverse)
library(bigrquery)
## setup multisession R for your parallel data fetches -------------------------------------
plan(multisession)
# login as new_user = TRUE if switching accounts. Otherwise do not set new_user = true
ga_auth()
# ga_auth(new_user = TRUE)
# get list of custom dimensions -------------------------------------
customdimensions_list <- as.data.frame(ga_custom_vars_list(17015991, "UA-17015991-1",
type = c("customDimensions")))
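# Hedged lookup of the custom dimension slots the BigQuery funnels below rely on
# (indexes 9, 22 and 31; the field names index/name/scope/active are assumed from
# the Management API customDimensions resource).
customdimensions_list %>%
  filter(index %in% c(9, 22, 31)) %>%
  select(index, name, scope, active)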
Sys.setenv(GA_AUTH_FILE = "C:/Users/User/Documents/.httr-oauth")
# need alternative for mac
# get account list -------------------------------------
account_list <- ga_account_list()
## the ViewIds to fetch all at once -------------------------------------
gaids <- c(account_list[2122,'viewId'], account_list[2125,'viewId'], account_list[2128,'viewId'])
# selecting segments -------------------------------------
my_segments <- ga_segment_list()
segs <- my_segments$items
segment_for_allusers <- "gaid::-1"
seg_allUsers <- segment_ga4("All Users", segment_id = segment_for_allusers)
my_fetch <- function(x) {
google_analytics(x,
date_range = c("2018-01-01","yesterday"),
metrics = c("sessions", "transactions", "transactionRevenue"),
dimensions = c("yearMonth", "deviceCategory", "userType"),
segments = c(seg_allUsers),
anti_sample = TRUE,
max = -1)
}
## makes 3 API calls at once -------------------------------------
all_data <- future_lapply(gaids, my_fetch)
df1 <- data.frame(all_data[1])
df1 <- df1 %>% mutate(viewID = account_list[2122,'viewName'])
df2 <- data.frame(all_data[2])
df2 <- df2 %>% mutate(viewID = account_list[2125,'viewName'])
df3 <- data.frame(all_data[3])
df3 <- df3 %>% mutate(viewID = account_list[2128,'viewName'])
df_all <- rbind(df1,df2,df3)
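# Hedged alternative to the manual df1/df2/df3 binding above: pair each result
# with its view name in one pass (Map keeps results aligned with the gaids order).
view_names <- account_list$viewName[c(2122, 2125, 2128)]
df_all2 <- bind_rows(Map(function(d, v) mutate(d, viewID = v), all_data, view_names))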
# query multiple segments -------------------------------------
segment_for_newusers <- "gaid::-2"
seg_newusers <- segment_ga4("new Users", segment_id = segment_for_newusers)
segment_for_returnusers <- "gaid::-3"
seg_returnusers <- segment_ga4("return Users", segment_id = segment_for_returnusers)
segment_for_paidusers <- "gaid::-4"
seg_paidusers <- segment_ga4("paid Users", segment_id = segment_for_paidusers)
segment_for_organicusers <- "gaid::-5"
seg_organicusers <- segment_ga4("organic Users", segment_id = segment_for_organicusers)
segment_for_searchusers <- "gaid::-6"
seg_searchusers <- segment_ga4("search Users", segment_id = segment_for_searchusers)
segment_for_directusers <- "gaid::-7"
seg_directusers <- segment_ga4("direct Users", segment_id = segment_for_directusers)
segment_for_referralusers <- "gaid::-8"
seg_referralusers <- segment_ga4("referral Users", segment_id = segment_for_referralusers)
segment_for_convusers <- "gaid::-9"
seg_convusers <- segment_ga4("conv Users", segment_id = segment_for_convusers)
segment_for_transactionusers <- "gaid::-10"
seg_transactionusers <- segment_ga4("transaction Users", segment_id = segment_for_transactionusers)
segment_for_mobiletabletusers <- "gaid::-11"
seg_mobiletabletusers <- segment_ga4("mobiletablet Users", segment_id = segment_for_mobiletabletusers)
segmentlist <- c(seg_allUsers,
seg_newusers,
seg_returnusers,
seg_paidusers,
seg_organicusers,
seg_searchusers,
seg_directusers,
seg_referralusers,
seg_convusers)
segmentlisting <- split(segmentlist, (seq_along(segmentlist) - 1L) %/% 4L)
ga_data_final_segment <- data.frame()
for (i in segmentlisting) {
ga_data_segment_eg <-
google_analytics(view_id, #=This is a (dynamic) ViewID parameter
date_range = c(startDate2, endDate),
metrics = c("sessions", "transactions", "transactionRevenue"),
dimensions = c("yearMonth", "deviceCategory", "userType"),
segments = i,
anti_sample = TRUE,
max = -1)
ga_data_final_segment <- rbind(ga_data_final_segment, ga_data_segment_eg)
}
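# The chunks of 4 above reflect the Reporting API v4 limit of 4 segments per
# request. A hedged purrr equivalent of the loop (same assumptions about
# view_id, startDate2 and endDate being defined elsewhere in this script):
ga_data_final_segment2 <- map_dfr(segmentlisting, function(seg_chunk) {
  google_analytics(view_id,
                   date_range = c(startDate2, endDate),
                   metrics = c("sessions", "transactions", "transactionRevenue"),
                   dimensions = c("yearMonth", "deviceCategory", "userType"),
                   segments = seg_chunk,
                   anti_sample = TRUE,
                   max = -1)
})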
## pick a profile with data to query
ga_id <- account_list[1123,'viewId']
## get a list of what metrics and dimensions you can use
ga_auth()
meta <- google_analytics_meta()
googleAnalyticsR:::gadget_GASegment()
## make two segment elements
se <- segment_element("sessions",
operator = "GREATER_THAN",
type = "METRIC",
comparisonValue = 3,
scope = "USER")
se3 <- segment_element("medium",
operator = "REGEXP",
type = "DIMENSION",
expressions = "^(email|referral)$",
scope = "SESSION")
# combine the segment elements defined above into a simple segment vector
sv_simple <- segment_vector_simple(list(list(se, se3)))
seg_defined <- segment_define(list(sv_simple))
segment4 <- segment_ga4("simple", user_segment = seg_defined)
# segments: semicolon is "AND", a comma is "OR"
segment_def_medium <- "sessions::condition::ga:medium=~^(email|referral)$"
seg_obj_medium <- segment_ga4("test", segment_id = segment_def_medium)
segment_def_google30sec <- "sessions::condition::ga:source=~^(google)$;ga:timeOnPage>30"
seg_obj_google30sec <- segment_ga4("test", segment_id = segment_def_google30sec)
segment_def_morethan3sessions <- "sessions::condition::ga:sessions>3"
seg_obj_morethan3sessions <- segment_ga4("test", segment_id = segment_def_morethan3sessions)
segment_def_orgtraffic_w_conversions <- "sessions::condition::ga:medium=~^(organic)$;ga:goal11Completions>0"
seg_obj_orgtraffic_w_conversions <- segment_ga4("test", segment_id = segment_def_orgtraffic_w_conversions)
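# Hedged example of the OR (comma) syntax from the note above: sessions whose
# medium is organic OR referral (this definition is illustrative, not from the
# original script).
segment_def_org_or_ref <- "sessions::condition::ga:medium==organic,ga:medium==referral"
seg_obj_org_or_ref <- segment_ga4("organic or referral", segment_id = segment_def_org_or_ref)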
segment_seq_example <- google_analytics_4(ga_id,
date_range = c("2017-01-01","2017-03-01"),
dimensions = c('source','country'),
segments = seg_obj_orgtraffic_w_conversions,
metrics = c('sessions','bounceRate', 'timeOnPage', 'goal11Completions')
)
segment_seq_example
segment_def_mktids <- "sessions::condition::ga:dimension2=@mktid"
seg_obj_mktids <- segment_ga4("test", segment_id = segment_def_mktids)
segment_seq_mktids <- google_analytics_4(ga_id,
date_range = c("2017-01-01","2017-03-01"),
dimensions = c('source','dimension2'),
segments = seg_obj_mktids,
metrics = c('sessions','bounceRate', 'timeOnPage', 'goal11Completions')
)
segment_seq_mktids
google_analytics_4(ga_id, #=This is a (dynamic) ViewID parameter
date_range = c("2018-01-01","2018-01-30"),
metrics = c("sessions", "users"),
dimensions = c("deviceCategory", "sourceMedium", "date"),
#anti_sample = TRUE,
max = -1,
useResourceQuotas = TRUE)
# get data directly from bigquery --------------------------------------------------
project <- "api-project-929144044809"
get_data_query <- paste0(
"SELECT
date,
device_category,
cabin_class,
country_ga,
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS CIB_ChooseFlight,
SUM(third_ent) AS CIB_PassengerDetails,
SUM(fourth_ent) AS CIB_PaymentDetails,
SUM(fifth_ent) AS CIB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS CIB_ChooseFlight_Complete,
SUM(third_cplt) AS CIB_PassengerDetails_Complete,
SUM(fourth_cplt) AS CIB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS CIB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS CIB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS CIB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS CIB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS CIB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS CIB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS CIB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
channel,
source,
medium,
campaign,
source_medium"
)
df_beforepos <- bq_table_download(bq_project_query(project,
get_data_query,
use_legacy_sql = TRUE))
get_data_query2 <- paste0(
"SELECT
date,
device_category,
cabin_class,
country_ga,
# point of sale --------------------------------------------
point_of_sale,
# ----------------------------------------------------------
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS CIB_ChooseFlight,
SUM(third_ent) AS CIB_PassengerDetails,
SUM(fourth_ent) AS CIB_PaymentDetails,
SUM(fifth_ent) AS CIB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS CIB_ChooseFlight_Complete,
SUM(third_cplt) AS CIB_PassengerDetails_Complete,
SUM(fourth_cplt) AS CIB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS CIB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS CIB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS CIB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS CIB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS CIB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS CIB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS CIB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
# point of sale -------------------------------------------------------
b.point_of_sale AS point_of_sale,
# ---------------------------------------------------------------------
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class,
# new CD --------------------------------------------------------------------
MAX(IF(customDimensions.index = 22, customDimensions.value, NULL)) WITHIN RECORD AS point_of_sale
# ---------------------------------------------------------------------------
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
point_of_sale,
channel,
source,
medium,
campaign,
source_medium"
)
df_afterpos <- bq_table_download(bq_project_query(project,
get_data_query2,
use_legacy_sql = TRUE))
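# Hedged consistency check between the two CIB pulls above: adding the
# point_of_sale dimension should split rows more finely without changing the
# overall funnel totals.
sum(df_beforepos$Homepage)
sum(df_afterpos$Homepage)
nrow(df_beforepos)
nrow(df_afterpos)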
get_data_query_ORB <- paste0(
"SELECT
date,
device_category,
cabin_class,
country_ga,
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS ORB_ChooseFlight,
SUM(third_ent) AS ORB_PassengerDetails,
SUM(fourth_ent) AS ORB_PaymentDetails,
SUM(fifth_ent) AS ORB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS ORB_ChooseFlight_Complete,
SUM(third_cplt) AS ORB_PassengerDetails_Complete,
SUM(fourth_cplt) AS ORB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS ORB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS ORB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS ORB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS ORB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS ORB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS ORB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS ORB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
channel,
source,
medium,
campaign,
source_medium"
)
df_ORB_beforepos <- bq_table_download(bq_project_query(project,
get_data_query_ORB,
use_legacy_sql = TRUE))
get_data_query_ORB2 <- paste0(
"SELECT
date,
device_category,
cabin_class,
country_ga,
# point of sale --------------------------------------------
point_of_sale,
# ----------------------------------------------------------
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS ORB_ChooseFlight,
SUM(third_ent) AS ORB_PassengerDetails,
SUM(fourth_ent) AS ORB_PaymentDetails,
SUM(fifth_ent) AS ORB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS ORB_ChooseFlight_Complete,
SUM(third_cplt) AS ORB_PassengerDetails_Complete,
SUM(fourth_cplt) AS ORB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS ORB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS ORB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS ORB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS ORB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS ORB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS ORB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS ORB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
# point of sale -------------------------------------------------------
b.point_of_sale AS point_of_sale,
# ---------------------------------------------------------------------
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class,
# new CD --------------------------------------------------------------------
MAX(IF(customDimensions.index = 22, customDimensions.value, NULL)) WITHIN RECORD AS point_of_sale
# ---------------------------------------------------------------------------
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
point_of_sale,
channel,
source,
medium,
campaign,
source_medium"
)
df_ORB_afterpos <- bq_table_download(bq_project_query(project,
get_data_query_ORB2,
use_legacy_sql = TRUE))
get_data_query_ORB3 <- paste0(
"SELECT *,
CASE
WHEN country_ga IS NOT NULL THEN country_ga
WHEN country_ga IS NULL AND point_of_sale IS NULL THEN 'NULL'
WHEN point_of_sale IS NOT NULL AND country_ga IS NULL THEN point_of_sale
END AS POS_matched
from(
SELECT
date,
device_category,
cabin_class,
country_ga,
# point of sale --------------------------------------------
point_of_sale,
# ----------------------------------------------------------
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS ORB_ChooseFlight,
SUM(third_ent) AS ORB_PassengerDetails,
SUM(fourth_ent) AS ORB_PaymentDetails,
SUM(fifth_ent) AS ORB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS ORB_ChooseFlight_Complete,
SUM(third_cplt) AS ORB_PassengerDetails_Complete,
SUM(fourth_cplt) AS ORB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS ORB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS ORB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS ORB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS ORB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS ORB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS ORB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS ORB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
# point of sale -------------------------------------------------------
b.point_of_sale AS point_of_sale,
# ---------------------------------------------------------------------
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/ORB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class,
# new CD --------------------------------------------------------------------
MAX(IF(customDimensions.index = 22, customDimensions.value, NULL)) WITHIN RECORD AS point_of_sale
# ---------------------------------------------------------------------------
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
point_of_sale,
channel,
source,
medium,
campaign,
source_medium) as fulltable
LEFT JOIN (
SELECT
*
FROM
[api-project-929144044809:46948678.CountryByteCode]) CountryByteCode
ON
fulltable.point_of_sale = CountryByteCode.Country_Code"
)
df_ORB_joinpos <- bq_table_download(bq_project_query(project,
get_data_query_ORB3,
use_legacy_sql = TRUE))
get_data_query_CIB4 <- paste0(
"SELECT *,
CASE
WHEN country_ga IS NOT NULL THEN country_ga
WHEN country_ga IS NULL AND point_of_sale IS NULL THEN 'NULL'
WHEN point_of_sale IS NOT NULL AND country_ga IS NULL THEN point_of_sale
END AS POS_matched
from(
SELECT
date,
device_category,
cabin_class,
country_ga,
# point of sale --------------------------------------------
point_of_sale,
# ----------------------------------------------------------
country_selection,
# get total entry per page
SUM(first_ent) AS Homepage,
SUM(second_ent) AS CIB_ChooseFlight,
SUM(third_ent) AS CIB_PassengerDetails,
SUM(fourth_ent) AS CIB_PaymentDetails,
SUM(fifth_ent) AS CIB_BookingConfirmation,
# get total completion at each step
SUM(first_cplt) AS Homepage_Complete,
SUM(second_cplt) AS CIB_ChooseFlight_Complete,
SUM(third_cplt) AS CIB_PassengerDetails_Complete,
SUM(fourth_cplt) AS CIB_PaymentDetails_Complete,
# get total drop-off at each step
SUM(first_ent)-SUM(first_cplt) AS Homepage_Drop,
SUM(second_ent)-SUM(second_cplt) AS CIB_ChooseFlight_Drop,
SUM(third_ent)-SUM(third_cplt) AS CIB_PassengerDetails_Drop,
SUM(fourth_ent)-SUM(fourth_cplt) AS CIB_PaymentDetails_Drop,
# get direct entrance not from previous step
SUM(second_ent)-SUM(first_cplt) AS CIB_ChooseFlight_Indirect,
SUM(third_ent)-SUM(second_cplt) AS CIB_PassengerDetails_Indirect,
SUM(fourth_ent)-SUM(third_cplt) AS CIB_PaymentDetails_Indirect,
SUM(fifth_ent)-SUM(fourth_cplt) AS CIB_BookingConfirmation_Indirect,
# add in new requested dimension
channel,
source,
medium,
campaign,
source_medium
FROM (
#open funnel where a step requires ONLY the previous step
SELECT
a.date AS date,
a.vid AS vid,
a.sid AS sid,
a.device_category AS device_category,
# regroup cabin class value
(CASE
WHEN REGEXP_MATCH(b.cabin_class, r'.*ECONOMY') THEN 'ECONOMY/PREMIUM ECONOMY'
WHEN REGEXP_MATCH(b.cabin_class, r'.*BUSINESS') THEN 'BUSINESS'
WHEN REGEXP_MATCH(b.cabin_class, r'.*(FIRST|SUITE)') THEN 'FIRST'
ELSE 'NA' END) AS cabin_class,
b.country_ga AS country_ga,
# point of sale -------------------------------------------------------
b.point_of_sale AS point_of_sale,
# ---------------------------------------------------------------------
b.country_selection AS country_selection,
b.channel AS channel,
b.source AS source,
b.medium AS medium,
b.campaign AS campaign,
b.source_medium AS source_medium,
a.firstPage AS firstPage,
a.secondPage AS secondPage,
a.thirdPage AS thirdPage,
a.fourthPage AS fourthPage,
a.fifthPage AS fifthPage,
# get entrance to each step
IF(a.firstPage >0, 1,0) AS first_ent,
IF(a.secondPage >0, 1,0) AS second_ent,
IF(a.thirdPage >0, 1,0) AS third_ent,
IF(a.fourthPage >0, 1,0) AS fourth_ent,
IF(a.fifthPage >0, 1,0) AS fifth_ent,
# get completion of each step to the next
IF(a.firstPage > 0
AND a.firstPage < a.secondPage,1,0) AS first_cplt,
IF(a.secondPage > 0
AND a.secondPage < a.thirdPage,1,0) AS second_cplt,
IF(a.thirdPage > 0
AND a.thirdPage < a.fourthPage,1,0) AS third_cplt,
IF(a.fourthPage > 0
AND a.fourthPage < a.fifthPage,1,0) AS fourth_cplt
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s4.date
WHEN s4.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s4.vid
WHEN s4.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s4.sid
WHEN s4.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s4.device_category
WHEN s4.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
IF(s0.firstPage IS NULL,0,s0.firstPage) AS firstPage,
IF(s0.secondPage IS NULL,0,s0.secondPage) AS secondPage,
IF(s0.thirdPage IS NULL,0,s0.thirdPage) AS thirdPage,
IF(s0.fourthPage IS NULL,0,s0.fourthPage) AS fourthPage,
IF(s4.firstHit IS NULL,0,s4.firstHit) AS fifthPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s3.date
WHEN s3.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s3.vid
WHEN s3.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s3.sid
WHEN s3.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s3.device_category
WHEN s3.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s0.thirdPage AS thirdPage,
s3.firstHit AS fourthPage
FROM (
SELECT
(CASE
WHEN s0.date IS NULL THEN s2.date
WHEN s2.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s2.vid
WHEN s2.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s2.sid
WHEN s2.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s2.device_category
WHEN s2.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstPage AS firstPage,
s0.secondPage AS secondPage,
s2.firstHit AS thirdPage from(
SELECT
(CASE
WHEN s0.date IS NULL THEN s1.date
WHEN s1.date IS NULL THEN s0.date
ELSE s0.date END) AS date,
(CASE
WHEN s0.vid IS NULL THEN s1.vid
WHEN s1.vid IS NULL THEN s0.vid
ELSE s0.vid END) AS vid,
(CASE
WHEN s0.sid IS NULL THEN s1.sid
WHEN s1.sid IS NULL THEN s0.sid
ELSE s0.sid END) AS sid,
(CASE
WHEN s0.device_category IS NULL THEN s1.device_category
WHEN s1.device_category IS NULL THEN s0.device_category
ELSE s0.device_category END) AS device_category,
s0.firstHit AS firstPage,
s1.firstHit AS secondPage
FROM (
# Begin Subquery #1 aka s0
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
                       TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/Homepage')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s0
# End Subquery #1 aka s0
FULL OUTER JOIN EACH (
# Begin Subquery #2 aka s1
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_ChooseFlight')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s1
# End Subquery #2 aka s1
ON
s0.vid = s1.vid
AND s0.sid = s1.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #3 aka s2
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PassengerDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s2
# End Subquery #3 aka s2
ON
s0.vid = s2.vid
AND s0.sid= s2.sid) AS s0
FULL OUTER JOIN EACH (
# Begin Subquery #4 aka s3
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_PaymentDetails')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s3
# End Subquery #4 aka s3
ON
s0.vid = s3.vid
AND s0.sid= s3.sid) s0
FULL OUTER JOIN EACH (
# Begin Subquery #5 aka s4
SELECT
fullVisitorId AS vid,
visitId AS sid,
date,
device.deviceCategory AS device_category,
MIN(hits.hitNumber) AS firstHit
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))
WHERE
REGEXP_MATCH(hits.page.pagePath, r'/CIB_BookingConfirmation')
AND totals.visits = 1
GROUP BY
vid,
sid,
date,
device_category) s4
ON
s0.vid = s4.vid
AND s0.sid= s4.sid) a
LEFT JOIN (
SELECT
date,
fullVisitorId AS vid,
visitId AS sid,
geoNetwork.country AS country_ga,
channelGrouping AS channel,
trafficSource.source AS source,
trafficSource.medium AS medium,
trafficSource.campaign AS campaign,
CONCAT(trafficSource.source, ' / ', trafficSource.medium) AS source_medium,
MAX(IF(customDimensions.index = 9, customDimensions.value, NULL)) WITHIN RECORD AS country_selection,
MAX(IF(customDimensions.index = 31, customDimensions.value, NULL)) WITHIN RECORD AS cabin_class,
# new CD --------------------------------------------------------------------
MAX(IF(customDimensions.index = 22, customDimensions.value, NULL)) WITHIN RECORD AS point_of_sale
# ---------------------------------------------------------------------------
FROM
TABLE_DATE_RANGE([api-project-929144044809:46948678.ga_sessions_],
TIMESTAMP('2018-07-22'), TIMESTAMP('2018-07-24'))) b
ON
a.vid = b.vid
AND a.sid = b.sid
AND a.date = b.date)
GROUP BY
date,
device_category,
cabin_class,
country_ga,
country_selection,
point_of_sale,
channel,
source,
medium,
campaign,
source_medium) as fulltable
LEFT JOIN (
SELECT
*
FROM
[api-project-929144044809:46948678.CountryByteCode]) CountryByteCode
ON
fulltable.point_of_sale = CountryByteCode.Country_Code"
)
df_CIB_joinpos <- bq_table_download(bq_project_query(project,
get_data_query_CIB4,
use_legacy_sql = TRUE))
cib <- df_CIB_joinpos %>%
group_by(POS_matched) %>%
summarise(Homepage = sum(fulltable_Homepage)) %>%
mutate(percent = round(Homepage / sum(Homepage),2))
orb <- df_ORB_joinpos %>%
group_by(POS_matched, fulltable_device_category) %>%
summarise(Homepage = sum(fulltable_Homepage)) %>%
mutate(percent = round(Homepage / sum(Homepage),2))
old_orb <- df_ORB_afterpos %>%
mutate(POS = case_when(!is.na(country_ga) ~ country_ga,
is.na(country_ga) & is.na(point_of_sale) ~ 'null',
!is.na(point_of_sale) & is.na(country_ga) ~ point_of_sale)) %>%
#filter(POS == 'Australia') %>%
group_by(POS, device_category) %>%
summarise(Homepage = sum(Homepage))
# Management API -------------------------------------
accountlist_rownumber <- 1723
mgmt_viewid <- account_list[accountlist_rownumber,'viewId']
# Management API (adwords) -------------------------------------
adwords_listing <- ga_adwords_list(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'])
adwords_listing <- as.data.frame(adwords_listing$items)
ga_adwords(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'])
# Management API (custom data source) -------------------------------------
custom_datasources <- ga_custom_datasource(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'])
# Management API (Experiments) -------------------------------------
experiments <- ga_experiment_list(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'],
account_list[accountlist_rownumber,'viewId'])
experiments <- as.data.frame(experiments$items)
# Management API (filters) -------------------------------------
filter_list <- ga_filter_view_list(account_list[accountlist_rownumber,'accountId'],
account_list[accountlist_rownumber,'webPropertyId'],
account_list[accountlist_rownumber,'viewId'])
filter_list <- as.data.frame(filter_list$items)
# Chi square test ----------------------------------------
chi_sq_testfunc <- function(chisq_viewid) {
google_analytics(chisq_viewid,
date_range = c("2019-01-01","2019-02-01"),
metrics = c("sessions"),
dimensions = c("channelGrouping", "deviceCategory"),
segments = c(seg_allUsers),
useResourceQuotas = TRUE,
# anti_sample = TRUE,
max = -1)
}
chi_sq_df <- chi_sq_testfunc(54454548)
chi_sq_df_clean <- chi_sq_df %>%
select(-segment) %>%
spread(deviceCategory, sessions) %>%
mutate_at(vars(-channelGrouping), funs(replace(., is.na(.), 0))) %>%
mutate_at(vars(-channelGrouping), as.numeric) %>%
filter(desktop > 10 & mobile > 10 & tablet > 10)
# The [,-1] just gets rid of the row names -- the 1, 2, 3 column
# in the above.
chisq_test_results <- chisq.test(chi_sq_df_clean[,-1])
result_interpretation <- ifelse(chisq_test_results$p.value < 0.05, "The p-value is smaller than 0.05
                                so we can reject the hypothesis that there is no relationship
                                between channel grouping and device category",
                                "The p-value is larger than 0.05
                                so we cannot reject the hypothesis that there is no relationship
                                between channel grouping and device category")
chisq.test(chi_sq_df_clean[,-1])
result_interpretation
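# Toy illustration (added sketch; the counts below are invented and not taken from
# the Google Analytics export) of the same independence test: a 2x3 contingency
# table of sessions by channel (rows) and device category (columns). chisq.test()
# on a matrix treats it as a contingency table and tests whether the two
# dimensions are independent.
toy_table <- matrix(c(120, 80, 40,
                      60, 90, 70), nrow = 2, byrow = TRUE,
                    dimnames = list(c("Organic Search", "Paid Search"),
                                    c("desktop", "mobile", "tablet")))
toy_result <- chisq.test(toy_table)
toy_result$p.value < 0.05   # TRUE here, so independence of channel and device is rejected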
|
## Author: illuminatist
## This R script illustrates the concept of caching and how lexical scoping can be used to prevent
## unauthorised access to data. Many languages with dynamic scoping cannot prevent unauthorised access to data
## through scoping alone; instead they need to introduce private and protected members (as in Java and C++).
## makeCacheMatrix() creates a user-defined data structure which stores a matrix and, once it has been
## computed, the inverse of that matrix. makeCacheMatrix() returns a list containing the functions:
## 1) set
## 2) get
## 3) setinv
## 4) getinv
## NOTE: makeCacheMatrix() stores the matrix (x) and its inverse (inv) within its lexical scope. This means that x and inv cannot
## be directly modified from outside the function.
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL                                 ## stores the inverse of the matrix once it has been computed
set <- function(y) { ## set() function is used for altering the value of x from outside the scope of
x <<- y ## makeCacheMatrix() using "<<-" operator
                inv <<- NULL                ## IMP: when set() is called and x is modified, we must reset inv back to NULL
}
get <- function() x ## returns the value of x
setinv <- function(inverse) inv <<- inverse ## set the value of inv equal to inverse
getinv <- function() inv ## returns value of inv
list(set = set, get = get, ## returns the list containing the above four functions
setinv = setinv,
getinv = getinv)
}
## cacheSolve() takes the return list of makeCacheMatrix() as input and returns the inverse of the matrix stored in x, either
## by computing it or by returning the cached value of the inverse.
cacheSolve <- function(x, ...) {
inv <- x$getinv() ## calls getinv() of x to store the value of inverse in variable inv.
if(!is.null(inv)) { ## if inv is NOT NULL implies the inverse has been calculated before therefore return the
message("getting cached data") ## cached value of the inverse
return(inv)
}
data <- x$get() ## else store the value of the matrix in data
inv <- solve(data, ...) ## compute the inverse of matrix stored in data and return inv
x$setinv(inv)
inv
}
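## Usage sketch (added for illustration; the 2x2 matrix below is arbitrary):
## the first cacheSolve() call computes the inverse with solve(), the second
## returns the cached value and prints "getting cached data".
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cm)   # computes the inverse and caches it via setinv()
cacheSolve(cm)   # returns the cached inverse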
|
/cachematrix.R
|
no_license
|
illuminatist/ProgrammingAssignment2
|
R
| false | false | 2,434 |
r
|
library(xpose4)
### Name: cwres.vs.pred.bw
### Title: Box-and-whisker plot of conditional weighted residuals vs
### population predictions for Xpose 4
### Aliases: cwres.vs.pred.bw
### Keywords: methods
### ** Examples
## Here we load the example xpose database
xpdb <- simpraz.xpdb
cwres.vs.pred.bw(xpdb)
|
/data/genthat_extracted_code/xpose4/examples/cwres.vs.pred.bw.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 318 |
r
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{mediators}
\alias{mediators}
\title{Identify mediator nodes in the network}
\usage{
mediators(adjacency)
}
\arguments{
\item{adjacency}{an adjacency matrix}
}
\value{
a vector of node names
}
\description{
Identify mediator nodes in the network
}
|
/man/mediators.Rd
|
no_license
|
sritchie73/networkTools
|
R
| false | false | 307 |
rd
|
group_1D <- function (dataObj, gene, ranges){
userGroups <- group_1D_worker ( dataObj$PCR, gene, ranges)
if ( max(userGroups$groupID) == 0 ){
userGroups <- group_1D_worker ( dataObj$FACS, gene, ranges)
}
userGroups <- checkGrouping ( userGroups, dataObj )
userGroups
}
group_1D_worker <- function (ma, gene, ranges ) {
position <- which ( colnames(ma) == gene )
userGroups <- data.frame( cellName = rownames(ma), userInput = rep.int(0, nrow(ma)), groupID = rep.int(0, nrow(ma)) )
if ( length(position) > 0 ){
min <- min(ma[,position])
max <- max(ma[,position])+1
ranges = ranges[order(ranges)]
minor = 0
now <- as.vector( which( ma[,position] >= min & ma[,position] < ranges[1] ))
userGroups$userInput[now] = paste ('min <= x <',ranges[1] )
userGroups$groupID[now] = 1
for ( i in 2:length(ranges) ) {
now <- as.vector( which( ma[,position] >= ranges[i-1] & ma[,position] < ranges[i] ))
userGroups$userInput[now] = paste(ranges[i-1],'<= x <',ranges[i])
if ( length(now) > 0 ){
userGroups$groupID[now] = i
}
else {
minor = minor + 1
}
}
now <- as.vector( which( ma[,position] >= ranges[length(ranges)] & ma[,position] < max ))
userGroups$userInput[now] = paste(ranges[length(ranges)],'<= x < max')
userGroups$groupID[now] = length(ranges) +1
userGroups <- checkGrouping ( userGroups )
}
userGroups
}
checkGrouping <- function ( userGroups, data=NULL ){
if ( !is.null(data) ){
if ( length(rownames(data$PCR)) != nrow(userGroups) ) {
### CRAP - rebuilt the grouping information - the data files have been re-created!
rn <- rownames(data$PCR)
for ( i in 1:length(rn) ){
rownames(userGroups) <- userGroups[,1]
userGroups2 <- as.matrix(userGroups[ rownames(data$PCR), ])
missing <- which(is.na(userGroups2[,1]))
userGroups2[missing,1] <- rn[missing]
				userGroups2[missing,2] <- 'previously dropped'
userGroups2[missing,3] <- 0
userGroups2[, 3] <- as.numeric(as.vector(userGroups2[, 3])) +1
userGroups2 <- as.data.frame(userGroups2)
userGroups2[,3] <- as.numeric(userGroups2[,3])
userGroups <- userGroups2
}
}
}else {
userGroups$groupID <- as.vector( as.numeric( userGroups$groupID ))
if ( length(which(userGroups$groupID == 0)) > 0 ){
userGroups$groupID = userGroups$groupID + 1
}
ta <-table(userGroups$groupID)
exp <- 1:max(as.numeric(userGroups$groupID))
miss <- exp[(exp %in% names(ta)) == F]
for ( i in 1:length(miss) ){
miss[i] = miss[i] -(i -1)
userGroups$groupID[which(userGroups$groupID > miss[i] )] = userGroups$groupID[which(userGroups$groupID > miss[i] )] -1
}
}
userGroups
}
regroup <- function ( dataObj, group2sample = list ( '1' = c( 'Sample1', 'Sample2' ) ) ) {
userGroups <- data.frame( cellName = rownames(dataObj$PCR), userInput = rep.int(0, nrow(dataObj$PCR)), groupID = rep.int(0, nrow(dataObj$PCR)) )
n <- names(group2sample)
n <- n[order( n )]
minor = 0
for ( i in 1:length(n) ){
if ( sum(is.na(match(group2sample[[i]], userGroups$cellName))==F) == 0 ){
minor = minor +1
}
else {
userGroups[ match(group2sample[[i]], userGroups$cellName),3] = i - minor
}
}
if ( length(which(userGroups[,3] == 0)) > 0 ){
userGroups[,3]
	 system (paste('echo "', length(which(userGroups[,3] == 0)), 'cells were not grouped using the updated grouping" > Grouping_R_Error.txt', collapse=" ") )
}
checkGrouping ( userGroups, dataObj )
}
group_on_strings <- function (dataObj, strings = c() ) {
userGroups <- data.frame( cellName = rownames(dataObj$PCR), userInput = rep.int(0, nrow(dataObj$PCR)), groupID = rep.int(0, nrow(dataObj$PCR)) )
minor = 0
for ( i in 1:length(strings) ) {
g <- grep(strings[i], userGroups$cellName)
if ( length(g) == 0 ){
			system (paste ('echo "The group name',strings[i] ,'did not match to any sample" > Grouping_R_Error.txt', collapse=" ") )
minor = minor +1
}
else {
userGroups[g ,3] = i - minor
userGroups[g ,2] = strings[i]
}
}
checkGrouping ( userGroups, dataObj )
}
createGroups_randomForest <- function (dataObj, fname='RandomForest_groupings.txt' ) {
## load('RandomForestdistRFobject.RData') <- this has to be done before calling this function!!
persistingCells <- rownames( dataObj$PCR )
if ( exists('distRF') ) {
expected_groupings <- unique(scan ( fname ))
for ( i in 1:length(expected_groupings) ) {
res = pamNew(distRF$cl1, expected_groupings[i] )
N <- names( res )
## probably some cells have been kicked in the meantime - I need to kick them too
N <- intersect( persistingCells, N )
userGroups <- matrix(ncol=3, nrow=0)
for ( a in 1:length(N) ){
userGroups <- rbind (userGroups, c( N[a], 'no info', as.numeric(res[[N[a]]]) ) )
}
colnames(userGroups) <- c('cellName', 'userInput', 'groupID' )
## write this information into a file that can be used as group
userGroups = data.frame( userGroups)
save ( userGroups , file= paste("forest_group_n", expected_groupings[i],'.RData', sep=''))
fileConn<-file(paste("Grouping.randomForest.n",expected_groupings[i],".txt", sep="") )
writeLines(c(paste("load('forest_group_n",expected_groupings[i],".RData')",sep=""),
"userGroups <- checkGrouping ( userGroups[is.na(match(userGroups$cellName, rownames(data.filtered$PCR) ))==F, ], data.filtered )"
), fileConn)
close(fileConn)
}
}
}
createGeneGroups_randomForest <- function (dataObj, expected_grouping=10 ) {
## load('RandomForestdistRFobject_genes.RData') <- this has to be done before calling this function!!
persistingGenes <- colnames( dataObj$PCR )
if ( round(length(persistingGenes)/4) < expected_grouping ){
expected_grouping <- round(length(persistingGenes)/4)
}
if (expected_grouping < 2 ){
expected_grouping <- 2
}
if ( exists('distRF') ) {
res = pamNew(distRF$cl1, expected_grouping )
N <- names( res )
## probably some cells have been kicked in the meantime - I need to kick them too
N <- intersect( persistingGenes , N )
geneGroups <- matrix(ncol=3, nrow=0)
for ( a in 1:length(N) ){
geneGroups <- rbind (geneGroups, c( N[a], 'no info', as.numeric(res[[N[a]]]) ) )
}
colnames(geneGroups) <- c('geneName', 'userInput', 'groupID' )
## write this information into a file that can be used as group
geneGroups = data.frame( geneGroups)
save ( geneGroups , file= paste("forest_gene_group_n", expected_grouping,'.RData', sep=''))
fileConn<-file(paste("Gene_grouping.randomForest.txt", sep="") )
writeLines(c(paste("load('forest_gene_group_n",expected_grouping,".RData')",sep=""),
"geneGroups <- checkGrouping ( geneGroups[is.na(match(geneGroups$geneName, colnames(data.filtered$PCR) ))==F, ] )",
"write.table( geneGroups[order(geneGroups[,3]),], file='GeneClusters.xls' , row.names=F, sep='\t',quote=F )"
), fileConn)
close(fileConn)
}
}
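## Usage sketch (added for illustration and commented out so that sourcing this
## file is unchanged). 'demo.data' is a hypothetical stand-in for the SCExV data
## object, which must carry a $PCR matrix with cell names as rownames.
## group_on_strings() assigns cells to groups by substring matches on the cell
## names; regroup() assigns explicitly listed cells to numbered group IDs.
# demo.data <- list(PCR = matrix(0, nrow = 4, ncol = 2,
#                                dimnames = list(c("ctrl_1", "ctrl_2", "treat_1", "treat_2"),
#                                                c("GeneA", "GeneB"))))
# group_on_strings(demo.data, strings = c("ctrl", "treat"))
# regroup(demo.data, group2sample = list('1' = c("ctrl_1", "treat_1"),
#                                        '2' = c("ctrl_2", "treat_2")))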
|
/SCExV/root/R_lib/Tool_grouping.R
|
no_license
|
StemSysBio/SCExV
|
R
| false | false | 6,858 |
r
|
## makeCacheMatrix creates a list of functions that store a matrix
## The calling function is then able to set the matrix and get its values
## calling "set" nullifies the "inverse calculated" flag
## call this function to initialize the code and the data
## example: b <- makeCacheMatrix(matrix(c(1,2,3,5),2,2))
##
## c.bahr code based on the class example
makeCacheMatrix <- function(x = matrix()) {
x_inverse <- NULL
set <- function(y) {
                ## assign to the "global" environment
                x <<- y
x_inverse <<- NULL
}
## return set value
get <- function() x
setinverse <- function(x_inv) x_inverse <<- x_inv
getinverse <- function() x_inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve calculates the matrix inverse if it discovers
## that the matrix has been changed (and on the first call)
## call this code to get the answer
## example: cacheSolve(b)
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
xinverse <- x$getinverse()
if(!is.null(xinverse)) {
message("getting cached inverse")
return (xinverse)
}
datamatrix <- x$get()
xinverse <- solve(datamatrix)
x$setinverse(xinverse)
xinverse
}
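## Demonstration sketch (added; the matrices are arbitrary): the second
## cacheSolve() call prints "getting cached inverse", and calling set()
## clears the cache so the inverse is recomputed for the new matrix.
b <- makeCacheMatrix(matrix(c(1, 2, 3, 5), 2, 2))
cacheSolve(b)                        # computes and caches the inverse
cacheSolve(b)                        # served from the cache
b$set(matrix(c(2, 0, 0, 2), 2, 2))   # replaces the matrix and nullifies the cache
cacheSolve(b)                        # recomputed for the new matrix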
|
/cachematrix.R
|
no_license
|
charlesbahr/ProgrammingAssignment2
|
R
| false | false | 1,271 |
r
|
height <- function(A0, A, Hd0) {
height.log <- (1.33 + 5.84 / A0
- 10.61 / A
+ 0.64 * log(Hd0))
height <- exp(height.log)
return (height)
}
numberOfTrees <- function(A0, A, Hd0, N0) {
numberOfTrees.log <- (0.28 - 0.19 / A0
+ 0.45 / A
- 0.02 * log(Hd0)
+ 0.96 * log(N0))
numberOfTrees <- exp(numberOfTrees.log)
numberOfTrees <- ceiling(numberOfTrees)
return (numberOfTrees)
}
basalArea <- function(A0, A, Hd0, B0, N0) {
basalArea.log <- (0.20 + 9.23 / A0
- 12.62 / A
+ 0.46 * log(Hd0)
+ 0.37 * log(B0)
+ 0.15 * log(N0))
basalArea <- exp(basalArea.log)
return (basalArea)
}
volume <- function(A0, A, Hd0, B0, N0) {
volume.log <- (0.87 + 16.43 / A0
- 21.91 / A
+ 1.09 * log(Hd0)
+ 0.46 * log(B0)
+ 0.05 * log(N0))
volume <- exp(volume.log)
return (volume)
}
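## Illustrative calls (hypothetical stand values chosen only to show the argument
## order; the units and plausible ranges are assumptions, not from the original file):
##   A0 = 20 (initial age), A = 30 (projection age), Hd0 = 15 (dominant height),
##   N0 = 1200 (stem count), B0 = 25 (basal area)
# height(A0 = 20, A = 30, Hd0 = 15)
# numberOfTrees(A0 = 20, A = 30, Hd0 = 15, N0 = 1200)
# basalArea(A0 = 20, A = 30, Hd0 = 15, B0 = 25, N0 = 1200)
# volume(A0 = 20, A = 30, Hd0 = 15, B0 = 25, N0 = 1200)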
|
/hw5/functions.r
|
no_license
|
litaxc/forest
|
R
| false | false | 1,091 |
r
|
library(RMySQL)
library(DBI)
library(shiny)
library(shinythemes)
library(shinyWidgets)
library(dqshiny)
library(DT)
library(shinyjs)
library(plotly)
library(dplyr)
library(jsonlite)
library(qdapRegex)
library(emojifont)
library(shinyBS)
library(stringr)
output$pageStub <- renderUI(fluidPage(theme = "slate.min.css",
tags$style(HTML("
.dataTables_wrapper .dataTables_length, .dataTables_wrapper .dataTables_filter {
color: #a9a8ae;
}
#SwissModel a {
color: #5b33ff;
}
.has-feedback .form-control {
padding-right: 0px;
}
")
),
navbarPage("Graph and Data",
tabPanel(title = "Graph",
fluidRow(column(2), plotOutput("legend", width="1200px", height="100px")),
fluidRow(plotlyOutput("plot", width="1200px", height="800px"))
),
tabPanel(title="Graph Data Table",
fluidRow(column(12,
div(DT::dataTableOutput("data_table"),
style = "font-size:95%; width:1200px")
)
)
),
tabPanel(title="Substrates",
fluidRow(column(11,
div(DT::dataTableOutput("substrates_table"),
style = "font-size:95%; width:1200px")
),
column(1, downloadButton("downloadSubstrateData", "Download"))
)
),
tabPanel(title="PDB Structures",
mainPanel(
fluidRow(column(7, DT::dataTableOutput("PDB_structures")),
## For 3D model by U of Pitt
shinydashboard::box(title="3D Structure", width = 5, status="primary", solidHeader =TRUE,
uiOutput("structure_3d")),
column(2, textOutput("text2"))),
fluidRow(column(8, htmlOutput("SwissModel"))),
)
),
tabPanel(title="PDB Binding Sites and Drugs",
mainPanel(
fluidRow(column(8, DT::dataTableOutput("binding_drug"),
style = "width:1200px"))
)
)
)
)
)
observe({
## set name for graph title
req(input$uniProtID)
name <- dat[dat$uniProtID==input$uniProtID, "geneNamePreferred"]
#' \link[app.R]{getGraph}
G <- getGraph(input$uniProtID, input$direction,
as.numeric(input$length), limit=as.numeric(input$limit))
## main graph app
if ("neo" %in% class(G)) {
      ## Needs at least 1 protein to have a proteinName attribute; needs fixing.
# G$nodes$proteinName <- apply(G$nodes, 1, function(x){
# ifelse(is.na(x[['proteinName']]),
# strsplit(x[['altProtNames']], "|", fixed=T)[[1]],
# x[['proteinName']])
# })
if ("proteinName" %in% colnames(G$nodes)) {
G$nodes$proteinName <- apply(G$nodes, 1, function(x) {
ifelse(is.na(x[['proteinName']]),
strsplit(x[['altProtNames']], "|", fixed=T)[[1]][1],
x[['proteinName']])
})
## might need to check if alt names is in there too but should really
## always be there with the Merge in Neo4j by Python class.
} else {
G$nodes$proteinName <- apply(G$nodes, 1, function(x) {
strsplit(x[['altProtNames']], "|", fixed=T)[[1]][1]
})
}
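      ## Added note: the pipeline below collapses parallel relationship rows --
      ## one row per edge id, with all of that edge's `entries` JSON strings
      ## gathered into a single list-column -- before building the igraph object.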
final <- G$relationships %>%
group_by(id, startNode, endNode) %>%
summarise(
startNode=startNode,
endNode=endNode,
type=type,
id=id,
entries=list(unique(entries)),
name=name) %>%
relocate(id, .after = type) %>%
distinct()
graph_object <- igraph::graph_from_data_frame(
d = final,
directed = TRUE,
vertices = G$nodes
)
index.protein.searched <- which(G$nodes$uniprotID == input$uniProtID)
L.g <- layout.circle(graph_object)
vs.g<- V(graph_object)
es.g <- get.edgelist(graph_object)
Nv.g <- length(vs.g) #number of nodes
Ne.g <- length(es.g[,1]) #number of edges
L.g <- layout.fruchterman.reingold(graph_object)
Xn.g <- L.g[,1]
Yn.g <- L.g[,2]
v.colors <- c("dodgerblue", "#0afb02", "#fcf51c")
# for different interactions types
e.colors <- c("orchid", "orange", "#4444fb", "#5cf61d", "#6b6bae",
"#0cf3fa", "#f20e42", "#cdafb6", "#8bd0f8", "#b40fb9", "#fdfbfd")
v.attrs <- vertex_attr(graph_object)
edge_attr(graph_object, "color", index = E(graph_object)) <-
e.colors[as.factor(edge_attr(graph_object)$name)]
e.attrs <- edge_attr(graph_object)
output$plot <- renderPlotly({
## set color of your protein to red, all others color of molecule type
## this factoring becomes issue with list vs. vectors with more than 1 factor
#colors <- v.colors[as.factor(v.attrs$label)]
## change to list instead and Protein factor comes before Molecule using forcats::fct_rev
colors <- v.colors[forcats::fct_rev(as.factor(data.frame(v.attrs$label)))]
colors[index.protein.searched] <- "red"
sizes <- rep(20, Nv.g)
sizes[index.protein.searched] <- 30
# Creates the nodes (plots the points)
network.g <- plot_ly(x = ~Xn.g, y = ~Yn.g, #Node points
mode = "text+markers",
text = vs.g$name,
hoverinfo = "text",
hovertext = paste0("Gene Name: ", v.attrs$name, "\n",
"Protein Name: ", v.attrs$proteinName, "\n",
"UniProt ID: ", v.attrs$uniprotID, "\n",
"Organism: ", v.attrs$organism, "\n",
"TaxID: ", v.attrs$taxid),
marker = list(
color = colors,
size = sizes),
textfont = list(color = '#efeff5', size = 16, layer="above"),
)
#Create edges
edge_shapes.g <- list()
names(Xn.g) <- names(vs.g)
names(Yn.g) <- names(vs.g)
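      ## Added note: normalize() is not defined in this file; it is assumed to be a
      ## helper from app.R that rescales a 2-D vector to unit length, e.g. roughly
      ##   normalize <- function(v) v / sqrt(sum(v^2))
      ## It is used below to pull arrow endpoints slightly off the node centres.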
for(i in 1:Ne.g) {
v0.g <- as.character(es.g[i,1])
v1.g <- as.character(es.g[i,2])
dir <- c(Xn.g[v1.g], Yn.g[v1.g]) - c(Xn.g[v0.g], Yn.g[v0.g])
## if self make small arrow
if (all(dir == 0)) {
new.p1 <- c(Xn.g[v1.g], Yn.g[v1.g])*(.9999)
new.p2 <- c(Xn.g[v1.g], Yn.g[v1.g])*(1.0001)
} else {
new.p1 <- c(Xn.g[v0.g], Yn.g[v0.g]) + .2*normalize(dir)
new.p2 <- c(Xn.g[v1.g], Yn.g[v1.g]) + -.1*normalize(dir)
}
edge_shape.g = list(
type = "line",
line = list(color = e.attrs$color[i], width = 2, layer="below"),
opacity = 0.7,
x0 = new.p1[1],
y0 = new.p1[2],
x1 = new.p2[1],
y1 = new.p2[2]
)
edge_shapes.g[[i]] <- edge_shape.g
}
axis.g <- list(title = "", showgrid = FALSE,
showticklabels = FALSE, zeroline = FALSE)
title <- ifelse(input$length==1 & input$direction=="down",
sprintf("<b>%s Substrates", name),
sprintf("<b>%s Paths", name))
p.g <- plotly::layout(
network.g,
title = list(text=title,
font=list(size=30, style="italic", color="#c7c7df")
),
shapes = edge_shapes.g,
xaxis = axis.g,
yaxis = axis.g,
showlegend=FALSE,
margin = list(l=50, r=50, b=100, t=100, pad=4),
plot_bgcolor = "#19191f",
paper_bgcolor = "#19191f"
)
arrow.x.start <- lapply(edge_shapes.g, function(x) x$x0)
arrow.x.end <- lapply(edge_shapes.g, function(x) x$x1)
arrow.y.start <- lapply(edge_shapes.g, function(x) x$y0)
arrow.y.end <- lapply(edge_shapes.g, function(x) x$y1)
## edge properties
ent <- lapply(e.attrs$entries, function(x) {
string = ""
t <- list()
for (i in 1:length(x)) {
string <- paste0(string, i, ". ")
t[[i]] = jsonlite::fromJSON(x[i][[1]])
for (j in paste(names(t[[i]]), ":", t[[i]], "\n")) {
string = paste0(string, j)
}
}
return(string)
})
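      ## Added note (illustrative only -- the field names are hypothetical): an
      ## entries element such as '{"interactionID":"EBI-123456","site":"S15"}'
      ## would render in the hover text as "1. interactionID : EBI-123456 \n site : S15 \n".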
p.g %>% add_trace(type = 'scatter') %>%
add_annotations( x = ~arrow.x.end,
y = ~arrow.y.end,
xref = "x", yref = "y",
axref = "x", ayref = "y",
text = "",
hoverinfo = c(~arrow.x.end, ~arrow.y.end),
hovertext = paste(ent),
opacity = 0.7,
ax = ~arrow.x.end,
ay = ~arrow.y.end,
layer="below") %>%
add_annotations( x = ~arrow.x.end,
y = ~arrow.y.end,
xref = "x", yref = "y",
axref = "x", ayref = "y",
text = "",
showarrow = T,
arrowcolor = ~e.attrs$color,
opacity = 0.7,
ax = ~arrow.x.start,
ay = ~arrow.y.start,
layer="below")
})
output$legend <- renderPlot({
par(mar=c(1,1,1.8,1))
plot(NULL, xaxt='n',yaxt='n',bty='n',ylab='',xlab='', xlim=0:1, ylim=0:1)
legend("topleft", legend = levels(as.factor(e.attrs$name)), lty = 1, lwd = 3,
col = c(unique(e.colors)), box.lty = 0, ncol = 5, cex=1.2, text.col = "#c7c7df")
mtext("Reaction type", at=0.2, cex=2, col = "#c7c7df")
}, bg = "#19191f")
## get table from graph
relation <- data.frame(G$relationships)
relation <- relation %>% rename(reaction=name)
df1 <- left_join(relation, G$nodes, by = c("startNode"="id"))
df2 <- left_join(df1, G$nodes, by = c("endNode"="id"))
df3 <- df2[c("name.x", "uniprotID.x", "proteinName.x", "taxid.x", "organism.x",
"name.y", "uniprotID.y", "proteinName.y", "taxid.y", "organism.y",
"reaction", "entries")]
## list the entries
df3$entries <- apply(df3, 1, function(x) {
l = jsonlite::fromJSON(x['entries'][[1]])
string = ""
if (!is.null(l$interactionID)) {
if (grepl("EBI-[0-9]+", l$interactionID)) {
l$interactionID = sprintf("<a href='https://www.ebi.ac.uk/intact/interaction/%s' target='_blank'>%s</a>",
l$interactionID, l$interactionID)
} else if (grepl("CLE[0-9]+", l$interactionID)) {
if (!is.null(l$`publicationID(s)`)) {
link.id <- ex_between(l$`publicationID(s)`, "[", "]")[[1]]
publication <- ex_between(l$`publicationID(s)`, "<%", "[")[[1]]
publication <- gsub("%", "", publication)
l$`publicationID(s)` <- sprintf("<a href='https://www.ebi.ac.uk/merops/cgi-bin/refs?id=%s' target='_blank'>%s</a>",
link.id, publication)
l$interactionID <- sprintf("<a href='https://www.ebi.ac.uk/merops/cgi-bin/show_substrate?SpAcc=%s' target='_blank'>%s</a>",
x['uniprotID.y'], l$interactionID)
} else {
l$interactionID <- sprintf("<a href='https://www.ebi.ac.uk/merops/cgi-bin/show_substrate?SpAcc=%s' target='_blank'>%s</a>",
x['uniprotID.y'], l$interactionID)
}
}
}
for (j in paste(names(l), ":", l, "<br>"))
string = paste(string, j)
return(trimws(string, which="both"))
})
## According to PSP download agreement must make link to their site
## if displaying modification site information derived by PSP
##
links <- apply(df3, 1, function(row) {
if (grepl('PhosphoSitePlus', row[["entries"]])) {
row[["uniprotID.x"]] <- sprintf("<a href='https://www.phosphosite.org/uniprotAccAction?id=%s' target='_blank'>%s</a>",
row[["uniprotID.x"]], row[["uniprotID.x"]])
row[["uniprotID.y"]] <- sprintf("<a href='https://www.phosphosite.org/uniprotAccAction?id=%s' target='_blank'>%s</a>",
row[["uniprotID.y"]], row[["uniprotID.y"]])
} ## CHEBI
else if (grepl('CHEBI', row[["uniprotID.y"]])) {
row[["uniprotID.x"]] <- sprintf("<a href='https://www.uniprot.org/uniprot/%s' target='_blank'>%s</a>",
row[["uniprotID.x"]], row[["uniprotID.x"]])
row[["uniprotID.y"]] <- sprintf("<a href='https://www.ebi.ac.uk/chebi/searchId.do;?chebiId=%s' target='_blank'>%s</a>",
row[["uniprotID.y"]], row[["uniprotID.y"]])
} else {
row[["uniprotID.x"]] <- sprintf("<a href='https://www.uniprot.org/uniprot/%s' target='_blank'>%s</a>",
row[["uniprotID.x"]], row[["uniprotID.x"]])
row[["uniprotID.y"]] <- sprintf("<a href='https://www.uniprot.org/uniprot/%s' target='_blank'>%s</a>",
row[["uniprotID.y"]], row[["uniprotID.y"]])
}
c(row[["uniprotID.x"]], row[["uniprotID.y"]])
})
df3[,c("uniprotID.x", "uniprotID.y")] <- t(links)
colnames(df3) <- c("Prot Gene Name", "Prot UniProt ID", "Prot Protein Name", "Prot taxid", "Prot organism",
"Sub Gene Name", "Sub UniProt ID", "Sub Protein Name", "Sub taxid", "Sub organism",
"Reaction type", "Reaction info")
output$data_table <- DT::renderDataTable(
datatable(df3, style = "bootstrap", class = "compact",
filter = "top",
options = list(
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'color': '#fff'});",
"}"),
# https://github.com/rstudio/DT/issues/171
autoWidth = T,
width = "100%",
scrollX=T,
bSortClasses = TRUE,
targets = 12,
render = JS(
"function(data, type, row, meta) {",
"return type === 'display' && data.length > 60 ?",
"'<span title=\"' + data + '\">' + data.substr(0, 60) + '...</span>' : data;",
"}"),
LengthMenu = c(5, 30, 50),
columnDefs = list(
list(className = 'dt-body-left', targets=1:12),
list(width='325px', targets=12)),
scrollY = '500px',
pageLength = 50
),
escape = F
)
)
## no neo4j graph
} else {
output$plot <- renderPlotly({ empty_plot("No interaction data in neo4j for your protein!
There may be structure data on other tabs.
Check 'PDB Structures' or 'PDB Binding Sites and Drugs' tabs
or select a different UniProt ID.")
})
}
################################################################################################
## NEW!!! strictly a substrates table even if user selects some sort of pathway
  ## Neo4j query returns "row" type instead of "graph" type
#' \link[app.R]{getSubstrates}
Rows <- getSubstrates(input$uniProtID)
if ("neo" %in% class(Rows) & length(Rows) > 0) {
rowDF <- dplyr::bind_cols(Rows)
colnames(rowDF) <- names(Rows)
rowDF$Prot1protNameAlt <- ifelse(rowDF$Prot1protNameAlt=="null", NA, rowDF$Prot1protNameAlt)
rowDF$Prot2protNameAlt <- ifelse(rowDF$Prot2protNameAlt=="null", NA, rowDF$Prot2protNameAlt)
## if Protein name is blank get first alternative
rowDF$Prot1protName <- apply(rowDF, 1, function(x) {
if(is.na(x['Prot1protName'])) {
if (!is.na(x['Prot1protNameAlt'])) {
return(jsonlite::fromJSON(x['Prot1protNameAlt'][[1]])[1])
} else {
return(NA)
}
} else {
return(x['Prot1protName'])
}
}
)
## if Sub name is blank get first alternative
rowDF$Prot2protName <- apply(rowDF, 1, function(x) {
if(is.na(x['Prot2protName'])) {
if (!is.na(x['Prot2protNameAlt'])) {
return(jsonlite::fromJSON(x['Prot2protNameAlt'][[1]])[1])
} else {
return(NA)
}
} else {
return(x['Prot2protName'])
}
})
## for download without hyperlinks
downloadSubs <- rowDF[,c(1:3, 5:9, 11:14)]
colnames(downloadSubs) <- c("Prot Gene Name", "Prot UniProt ID", "Prot Protein Name", "Prot taxid", "Prot organism",
"Sub Gene Name", "Sub UniProt ID", "Sub Protein Name", "Sub taxid", "Sub organism",
"Reaction type", "Reaction info")
## Add links to Relationships
rowDF$`Relationship details` <- apply(rowDF, 1, function(x) {
string = ""
l <- jsonlite::fromJSON(x["Relationship details"][[1]])
for (i in 1:length(l)) {
string <- paste0(string, '<b>', i, ". ", "</b>")
l.i <- jsonlite::fromJSON(l[[i]])
if (!is.null(l.i$interactionID)) {
## intact
if (grepl("EBI-[0-9]+", l.i$interactionID)) {
l.i$interactionID = sprintf("<a href='https://www.ebi.ac.uk/intact/interaction/%s' target='_blank'>%s</a>",
l.i$interactionID, l.i$interactionID)
## merops
} else if (grepl("CLE[0-9]+", l.i$interactionID)) {
if (!is.null(l.i$`publicationID(s)`)) {
link.id <- ex_between(l.i$`publicationID(s)`, "[", "]")[[1]]
publication <- ex_between(l.i$`publicationID(s)`, "<%", "[")[[1]]
publication <- gsub("%", "", publication)
l.i$`publicationID(s)` <- sprintf("<a href='https://www.ebi.ac.uk/merops/cgi-bin/refs?id=%s' target='_blank'>%s</a>",
link.id, publication)
l.i$interactionID <- sprintf("<a href='https://www.ebi.ac.uk/merops/cgi-bin/show_substrate?SpAcc=%s' target='_blank'>%s</a>",
x['Prot2UPID'], l.i$interactionID)
} else {
l.i$interactionID <- sprintf("<a href='https://www.ebi.ac.uk/merops/cgi-bin/show_substrate?SpAcc=%s' target='_blank'>%s</a>",
x['Prot2UPID'], l.i$interactionID)
}
}
}
for (j in paste(names(l.i), ":", l.i, "<br>")) {
string = paste0(string, j)
}
}
return(string)
})
# Links to PSP, UniProt, and CHEBI
linksRow <- apply(rowDF, 1, function(row) {
if (grepl('PhosphoSitePlus', row[["Relationship details"]])) {
row[["Prot1UPID"]] <- sprintf("<a href='https://www.phosphosite.org/uniprotAccAction?id=%s' target='_blank'>%s</a>",
row[["Prot1UPID"]], row[["Prot1UPID"]])
row[["Prot2UPID"]] <- sprintf("<a href='https://www.phosphosite.org/uniprotAccAction?id=%s' target='_blank'>%s</a>",
row[["Prot2UPID"]], row[["Prot2UPID"]])
} ## CHEBI
else if (grepl('CHEBI', row[["Prot2UPID"]])) {
row[["Prot1UPID"]] <- sprintf("<a href='https://www.uniprot.org/uniprot/%s' target='_blank'>%s</a>",
row[["Prot1UPID"]], row[["Prot1UPID"]])
row[["Prot2UPID"]] <- sprintf("<a href='https://www.ebi.ac.uk/chebi/searchId.do;?chebiId=%s' target='_blank'>%s</a>",
row[["Prot2UPID"]], row[["Prot2UPID"]])
} else {
row[["Prot1UPID"]] <- sprintf("<a href='https://www.uniprot.org/uniprot/%s' target='_blank'>%s</a>",
row[["Prot1UPID"]], row[["Prot1UPID"]])
row[["Prot2UPID"]] <- sprintf("<a href='https://www.uniprot.org/uniprot/%s' target='_blank'>%s</a>",
row[["Prot2UPID"]], row[["Prot2UPID"]])
}
c(row[["Prot1UPID"]], row[["Prot2UPID"]])
})
rowDF[,c("Prot1UPID", "Prot2UPID")] <- t(linksRow)
#rowDF2 <- rowDF[,c(1:8, 10:13)]
rowDF2 <- rowDF[,c(1:3, 5:9, 11:14)]
colnames(rowDF2) <- c("Prot Gene Name", "Prot UniProt ID", "Prot Protein Name", "Prot taxid", "Prot organism",
"Sub Gene Name", "Sub UniProt ID", "Sub Protein Name", "Sub taxid", "Sub organism",
"Reaction type", "Reaction info")
output$substrates_table <- DT::renderDataTable(
datatable(rowDF2, style = "bootstrap", class = "compact",
filter = "top",
options = list(
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'color': '#fff'});",
"}"),
# https://github.com/rstudio/DT/issues/171
autoWidth = T,
width = "100%",
scrollX=T,
bSortClasses = TRUE,
targets = 12,
render = JS(
"function(data, type, row, meta) {",
"return type === 'display' && data.length > 60 ?",
"'<span title=\"' + data + '\">' + data.substr(0, 60) + '...</span>' : data;",
"}"),
LengthMenu = c(5, 30, 50),
columnDefs = list(
# targets = 1:12,
list(className = 'dt-body-left', targets=1:12),
list(width='325px', targets=12)),
scrollY = '500px',
pageLength = 50
),
escape = F
)
)
output$downloadSubstrateData <- downloadHandler(
filename = function() {
paste0(file.prefix(), "_", gsub(" ", "_", date()), "_", input$uniProtID, "_SUBSTRATES.csv")
},
content = function(file) {
write.csv(downloadSubs, file, row.names = FALSE)
}
)
}
################################################################################################
## pdb structures tab
pdb.data <- loadData(structures.query(input$uniProtID))
pdb.data$pdbID <- sprintf("<a href='https://www.rcsb.org/structure/%s' target='_blank'>%s</a>",
pdb.data$pdbID, pdb.data$pdbID)
scrolly = "500px"
if (nrow(pdb.data) == 0) {
url <- a(input$uniProtID, href=sprintf("https://swissmodel.expasy.org/repository/uniprot/%s",
input$uniProtID), target='_blank')
scrolly = "0px"
output$SwissModel <- renderUI({
HTML(paste0("There are no structures for your UniProt protein.","<br>",
"Click link for Swiss-Model model of ", url))
})
}
output$PDB_structures <- DT::renderDataTable(
datatable(pdb.data, style = "bootstrap", class = "compact",
filter = "top",
selection=list(mode = "single", target = "cell"),
options = list(
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'color': '#fff'});",
"}"),
scrollY = scrolly,
pageLength = 25),
escape = F
)
)
################################################################################################
## binding sites and drugs
#O00311
pdb.drug.bind.data <- loadData(drugBankBinding.query(input$uniProtID))
## link to DrugBank
pdb.drug.bind.data$drugBankID <- apply(pdb.drug.bind.data, 1, function(x) {
if (is.na(x['drugBankID']) & !(is.na(x['ligandShort']))) {
sprintf("<a href='https://go.drugbank.com/unearth/q?utf8=%%E2%%9C%%93&searcher=drugs&query=%s' target='_blank'>DB Search<a/>",
x['ligandShort'])
} else if (is.na(x['drugBankID']) & (is.na(x['ligandShort']))) {
sprintf("<a href='https://go.drugbank.com/unearth/q?utf8=%%E2%%9C%%93&searcher=drugs&query=' target='_blank'>DB Search<a/>", "")
} else {
sprintf("<a href='https://go.drugbank.com/drugs/%s' target='_blank'>%s<a/>", x['drugBankID'], x['drugBankID'])
}
})
## link to RCSB ligands
pdb.drug.bind.data$ligandShort <- ifelse(is.na(pdb.drug.bind.data$ligandShort), NA,
sprintf("<a href='https://www.rcsb.org/ligand/%s' target='_blank'>%s<a/>",
pdb.drug.bind.data$ligandShort,
pdb.drug.bind.data$ligandShort))
#pdb.drug.bind.data$ligandShort <- factor(pdb.drug.bind.data$ligandShort)
pdb.drug.bind.data$pdbID <- sprintf("<a href='https://www.rcsb.org/structure/%s' target='_blank'>%s</a>",
pdb.drug.bind.data$pdbID, pdb.drug.bind.data$pdbID)
output$binding_drug <- DT::renderDataTable(
datatable(pdb.drug.bind.data, style = "bootstrap", class = "compact",
filter = "top",
options = list(
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'color': '#fff'});",
"}"),
#https://rstudio.github.io/DT/options.html
autoWidth = T,
width = "100%",
scrollX=T,
targets = 10,
render = JS(
"function(data, type, row, meta) {",
"return type === 'display' && data.length > 10 ?",
"'<span title=\"' + data + '\">' + data.substr(0, 10) + '...</span>' : data;",
"}")
,
scrollY = '500px',
pageLength = 50),
colnames = c("UniProt Protein Chain", "PDB ID", "PDB Site ID", "Structure Residue #",
"UniProt Residue #", "Residue", "Residue Chain", "Ligand Residue #",
"Ligand Short", "Ligand Long", "Ligand Chain", "DrugBank ID"),
escape = F
)
)
################################################################################################
## 3D images from PITT javascript script, see works cited
observe ({
req(input$PDB_structures_cells_selected)
if (length(input$PDB_structures_cells_selected)>0) {
pdb <- ex_between(pdb.data[input$PDB_structures_cells_selected],">","</a")[[1]]
} else {
pdb=""
}
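    ## Added note: qdapRegex::ex_between() extracts the text between the two markers,
    ## e.g. ex_between("<a href='...'>1ABC</a>", ">", "</a")[[1]] returns "1ABC"
    ## (illustrative PDB id), i.e. the id is recovered from the hyperlink built above.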
output$structure_3d <- renderUI({
tabPanel("3D Structure",
tags$head(tags$script(src="http://3Dmol.csb.pitt.edu/build/3Dmol-min.js")),
tags$div(
style="height: 400px; width: 700px; position: relative;",
class='viewer_3Dmoljs',
'data-pdb'=pdb,
'data-backgroundcolor'='0xffffff',
'data-style'='cartoon'))
})
})
})
|
/applicationFinal/Graph.R
|
no_license
|
BJWiley233/SubDBplus
|
R
| false | false | 30,672 |
r
|
# Unexported, low-level function for fitting negative binomial GLMs
#
# Users typically call \code{\link{nbinomWaldTest}} or \code{\link{nbinomLRT}}
# which calls this function to perform fitting. These functions return
# a \code{\link{DESeqDataSet}} object with the appropriate columns
# added. This function returns results as a list.
#
# object a DESeqDataSet
# modelMatrix the design matrix
# modelFormula a formula specifying how to construct the design matrix
# alpha_hat the dispersion parameter estimates
# lambda the 'ridge' term added for the penalized GLM on the log2 scale
# renameCols whether to give columns variable_B_vs_A style names
# betaTol control parameter: stop when the following is satisfied:
# abs(dev - dev_old)/(abs(dev) + 0.1) < betaTol
# maxit control parameter: maximum number of iterations to allow for
# convergence
# useOptim whether to use optim on rows which have not converged:
# Fisher scoring is not ideal with multiple groups and sparse
# count distributions
# useQR whether to use the QR decomposition on the design matrix X
# forceOptim whether to use optim on all rows
# warnNonposVar whether to warn about non positive variances,
# for advanced users only running LRT without beta prior,
# this might be desirable to be ignored.
#
# return a list of results, with coefficients and standard
# errors on the log2 scale
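#
# Illustrative direct call (a sketch only -- this function is unexported and is
# normally reached via nbinomWaldTest()/nbinomLRT(); 'dds' is a hypothetical
# DESeqDataSet that already has size factors and dispersions):
#   mm  <- stats::model.matrix.default(design(dds), data = as.data.frame(colData(dds)))
#   fit <- fitNbinomGLMs(dds, alpha_hat = dispersions(dds),
#                        lambda = rep(1e-6, ncol(mm)))
#   head(fit$betaMatrix)   # log2-scale coefficients, one column per model term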
fitNbinomGLMs <- function(object, modelMatrix=NULL, modelFormula, alpha_hat, lambda,
renameCols=TRUE, betaTol=1e-8, maxit=100, useOptim=TRUE,
useQR=TRUE, forceOptim=FALSE, warnNonposVar=TRUE, minmu=0.5,
type = c("DESeq2", "glmGamPoi")) {
type <- match.arg(type, c("DESeq2", "glmGamPoi"))
if (missing(modelFormula)) {
modelFormula <- design(object)
}
if (is.null(modelMatrix)) {
modelAsFormula <- TRUE
modelMatrix <- stats::model.matrix.default(modelFormula, data=as.data.frame(colData(object)))
} else {
modelAsFormula <- FALSE
}
stopifnot(all(colSums(abs(modelMatrix)) > 0))
# rename columns, for use as columns in DataFrame
# and to emphasize the reference level comparison
modelMatrixNames <- colnames(modelMatrix)
modelMatrixNames[modelMatrixNames == "(Intercept)"] <- "Intercept"
modelMatrixNames <- make.names(modelMatrixNames)
if (renameCols) {
convertNames <- renameModelMatrixColumns(colData(object),
modelFormula)
convertNames <- convertNames[convertNames$from %in% modelMatrixNames,,drop=FALSE]
modelMatrixNames[match(convertNames$from, modelMatrixNames)] <- convertNames$to
}
colnames(modelMatrix) <- modelMatrixNames
normalizationFactors <- getSizeOrNormFactors(object)
if (missing(alpha_hat)) {
alpha_hat <- dispersions(object)
}
if (length(alpha_hat) != nrow(object)) {
stop("alpha_hat needs to be the same length as nrows(object)")
}
# set a wide prior for all coefficients
if (missing(lambda)) {
lambda <- rep(1e-6, ncol(modelMatrix))
}
# use weights if they are present in assays(object)
wlist <- getAndCheckWeights(object, modelMatrix)
weights <- wlist$weights
useWeights <- wlist$useWeights
if(type == "glmGamPoi"){
stopifnot("type = 'glmGamPoi' cannot handle weights" = ! useWeights,
"type = 'glmGamPoi' does not support NA's in alpha_hat" = all(! is.na(alpha_hat)))
gp_res <- glmGamPoi::glm_gp(counts(object), design = modelMatrix,
size_factors = FALSE, offset = log(normalizationFactors),
overdispersion = alpha_hat, verbose = FALSE)
logLikeMat <- dnbinom(counts(object), mu=gp_res$Mu, size=1/alpha_hat, log=TRUE)
logLike <- rowSums(logLikeMat)
res <- list(logLike = logLike, betaConv = rep(TRUE, nrow(object)), betaMatrix = gp_res$Beta / log(2),
betaSE = NULL, mu = gp_res$Mu, betaIter = rep(NA,nrow(object)),
modelMatrix=modelMatrix,
nterms=ncol(modelMatrix), hat_diagonals = NULL)
return(res)
}
# bypass the beta fitting if the model formula is only intercept and
# the prior variance is large (1e6)
# i.e., LRT with reduced ~ 1 and no beta prior
justIntercept <- if (modelAsFormula) {
modelFormula == formula(~ 1)
} else {
ncol(modelMatrix) == 1 & all(modelMatrix == 1)
}
if (justIntercept & all(lambda <= 1e-6)) {
alpha <- alpha_hat
betaConv <- rep(TRUE, nrow(object))
betaIter <- rep(1,nrow(object))
betaMatrix <- if (useWeights) {
matrix(log2(rowSums(weights*counts(object, normalized=TRUE))
/rowSums(weights)),ncol=1)
} else {
matrix(log2(rowMeans(counts(object, normalized=TRUE))),ncol=1)
}
mu <- normalizationFactors * as.numeric(2^betaMatrix)
logLikeMat <- dnbinom(counts(object), mu=mu, size=1/alpha, log=TRUE)
logLike <- if (useWeights) {
rowSums(weights*logLikeMat)
} else {
rowSums(logLikeMat)
}
modelMatrix <- stats::model.matrix.default(~ 1, data=as.data.frame(colData(object)))
colnames(modelMatrix) <- modelMatrixNames <- "Intercept"
w <- if (useWeights) {
weights * (mu^-1 + alpha)^-1
} else {
(mu^-1 + alpha)^-1
}
xtwx <- rowSums(w)
sigma <- xtwx^-1
betaSE <- matrix(log2(exp(1)) * sqrt(sigma),ncol=1)
hat_diagonals <- w * xtwx^-1;
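    # Added note: with only an intercept, X'WX collapses to sum(w) per gene, so the
    # natural-log-scale variance is 1/sum(w); betaSE above converts it to the log2
    # scale via log2(e), and the hat values are simply w_j / sum(w).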
res <- list(logLike = logLike, betaConv = betaConv, betaMatrix = betaMatrix,
betaSE = betaSE, mu = mu, betaIter = betaIter,
modelMatrix=modelMatrix,
nterms=1, hat_diagonals=hat_diagonals)
return(res)
}
qrx <- qr(modelMatrix)
# if full rank, estimate initial betas for IRLS below
if (qrx$rank == ncol(modelMatrix)) {
Q <- qr.Q(qrx)
R <- qr.R(qrx)
y <- t(log(counts(object,normalized=TRUE) + .1))
beta_mat <- t(solve(R, t(Q) %*% y))
} else {
if ("Intercept" %in% modelMatrixNames) {
beta_mat <- matrix(0, ncol=ncol(modelMatrix), nrow=nrow(object))
# use the natural log as fitBeta occurs in the natural log scale
logBaseMean <- log(rowMeans(counts(object,normalized=TRUE)))
beta_mat[,which(modelMatrixNames == "Intercept")] <- logBaseMean
} else {
beta_mat <- matrix(1, ncol=ncol(modelMatrix), nrow=nrow(object))
}
}
# here we convert from the log2 scale of the betas
# and the beta prior variance to the log scale
# used in fitBeta.
# so we divide by the square of the
# conversion factor, log(2)
lambdaNatLogScale <- lambda / log(2)^2
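  # Added note (worked conversion): if beta_log2 = beta_ln / log(2), then
  # Var(beta_ln) = log(2)^2 * Var(beta_log2), so the precision lambda = 1/Var on
  # the natural-log scale is the log2-scale lambda divided by log(2)^2, as above.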
betaRes <- fitBetaWrapper(ySEXP = counts(object), xSEXP = modelMatrix,
nfSEXP = normalizationFactors,
alpha_hatSEXP = alpha_hat,
beta_matSEXP = beta_mat,
lambdaSEXP = lambdaNatLogScale,
weightsSEXP = weights,
useWeightsSEXP = useWeights,
tolSEXP = betaTol, maxitSEXP = maxit,
useQRSEXP=useQR, minmuSEXP=minmu)
# Note on deviance: the 'deviance' calculated in fitBeta() (C++)
# is not returned in mcols(object)$deviance. instead, we calculate
# the log likelihood below and use -2 * logLike.
# (reason is that we have other ways of estimating beta:
# above intercept code, and below optim code)
mu <- normalizationFactors * t(exp(modelMatrix %*% t(betaRes$beta_mat)))
dispersionVector <- rep(dispersions(object), times=ncol(object))
logLike <- nbinomLogLike(counts(object), mu, dispersions(object), weights, useWeights)
# test for stability
rowStable <- apply(betaRes$beta_mat,1,function(row) sum(is.na(row))) == 0
# test for positive variances
rowVarPositive <- apply(betaRes$beta_var_mat,1,function(row) sum(row <= 0)) == 0
# test for convergence, stability and positive variances
betaConv <- betaRes$iter < maxit
# here we transform the betaMatrix and betaSE to a log2 scale
betaMatrix <- log2(exp(1))*betaRes$beta_mat
colnames(betaMatrix) <- modelMatrixNames
colnames(modelMatrix) <- modelMatrixNames
# warn below regarding these rows with negative variance
betaSE <- log2(exp(1))*sqrt(pmax(betaRes$beta_var_mat,0))
colnames(betaSE) <- paste0("SE_",modelMatrixNames)
# switch based on whether we should also use optim
# on rows which did not converge
rowsForOptim <- if (useOptim) {
which(!betaConv | !rowStable | !rowVarPositive)
} else {
which(!rowStable | !rowVarPositive)
}
if (forceOptim) {
rowsForOptim <- seq_along(betaConv)
}
if (length(rowsForOptim) > 0) {
# we use optim if didn't reach convergence with the IRLS code
resOptim <- fitNbinomGLMsOptim(object,modelMatrix,lambda,
rowsForOptim,rowStable,
normalizationFactors,alpha_hat,
weights,useWeights,
betaMatrix,betaSE,betaConv,
beta_mat,
mu,logLike,minmu=minmu)
betaMatrix <- resOptim$betaMatrix
betaSE <- resOptim$betaSE
betaConv <- resOptim$betaConv
mu <- resOptim$mu
logLike <- resOptim$logLike
}
stopifnot(!any(is.na(betaSE)))
nNonposVar <- sum(rowSums(betaSE == 0) > 0)
  if (warnNonposVar & nNonposVar > 0) warning(nNonposVar, " rows had non-positive estimates of variance for coefficients")
list(logLike = logLike, betaConv = betaConv, betaMatrix = betaMatrix,
betaSE = betaSE, mu = mu, betaIter = betaRes$iter, modelMatrix=modelMatrix,
nterms=ncol(modelMatrix), hat_diagonals=betaRes$hat_diagonals)
}
# this function calls fitNbinomGLMs() twice:
# 1 - without the beta prior, in order to calculate the
# beta prior variance and hat matrix
# 2 - again but with the prior in order to get beta matrix and standard errors
fitGLMsWithPrior <- function(object, betaTol, maxit, useOptim, useQR, betaPriorVar, modelMatrix=NULL, minmu=0.5) {
objectNZ <- object[!mcols(object)$allZero,,drop=FALSE]
modelMatrixType <- attr(object, "modelMatrixType")
if (missing(betaPriorVar) | !(all(c("mu","H") %in% assayNames(objectNZ)))) {
# stop unless modelMatrix was NOT supplied, the code below all works
# by building model matrices using the formula, doesn't work with incoming model matrices
stopifnot(is.null(modelMatrix))
# fit the negative binomial GLM without a prior,
# used to construct the prior variances
# and for the hat matrix diagonals for calculating Cook's distance
fit <- fitNbinomGLMs(objectNZ,
betaTol=betaTol, maxit=maxit,
useOptim=useOptim, useQR=useQR,
renameCols = (modelMatrixType == "standard"),
minmu=minmu)
modelMatrix <- fit$modelMatrix
modelMatrixNames <- colnames(modelMatrix)
    H <- fit$hat_diagonals
betaMatrix <- fit$betaMatrix
mu <- fit$mu
modelMatrixNames[modelMatrixNames == "(Intercept)"] <- "Intercept"
modelMatrixNames <- make.names(modelMatrixNames)
colnames(betaMatrix) <- modelMatrixNames
# save the MLE log fold changes for addMLE argument of results
convertNames <- renameModelMatrixColumns(colData(object),
design(objectNZ))
convertNames <- convertNames[convertNames$from %in% modelMatrixNames,,drop=FALSE]
modelMatrixNames[match(convertNames$from, modelMatrixNames)] <- convertNames$to
mleBetaMatrix <- fit$betaMatrix
colnames(mleBetaMatrix) <- paste0("MLE_",modelMatrixNames)
# store for use in estimateBetaPriorVar below
mcols(objectNZ) <- cbind(mcols(objectNZ), DataFrame(mleBetaMatrix))
} else {
# we can skip the first MLE fit because the
# beta prior variance and hat matrix diagonals were provided
if (is.null(modelMatrix)) {
modelMatrix <- getModelMatrix(object)
}
H <- assays(objectNZ)[["H"]]
mu <- assays(objectNZ)[["mu"]]
mleBetaMatrix <- as.matrix(mcols(objectNZ)[,grep("MLE_",names(mcols(objectNZ))),drop=FALSE])
}
if (missing(betaPriorVar)) {
betaPriorVar <- estimateBetaPriorVar(objectNZ, modelMatrix=modelMatrix)
} else {
# else we are provided the prior variance:
# check if the lambda is the correct length
# given the design formula
if (modelMatrixType == "expanded") {
modelMatrix <- makeExpandedModelMatrix(objectNZ)
}
p <- ncol(modelMatrix)
if (length(betaPriorVar) != p) {
stop(paste("betaPriorVar should have length",p,"to match:",paste(colnames(modelMatrix),collapse=", ")))
}
}
# refit the negative binomial GLM with a prior on betas
if (any(betaPriorVar == 0)) {
stop("beta prior variances are equal to zero for some variables")
}
lambda <- 1/betaPriorVar
if (modelMatrixType == "standard") {
fit <- fitNbinomGLMs(objectNZ, lambda=lambda,
betaTol=betaTol, maxit=maxit,
useOptim=useOptim, useQR=useQR,
minmu=minmu)
modelMatrix <- fit$modelMatrix
} else if (modelMatrixType == "expanded") {
modelMatrix <- makeExpandedModelMatrix(objectNZ)
fit <- fitNbinomGLMs(objectNZ, lambda=lambda,
betaTol=betaTol, maxit=maxit,
useOptim=useOptim, useQR=useQR,
modelMatrix=modelMatrix, renameCols=FALSE,
minmu=minmu)
} else if (modelMatrixType == "user-supplied") {
fit <- fitNbinomGLMs(objectNZ, lambda=lambda,
betaTol=betaTol, maxit=maxit,
useOptim=useOptim, useQR=useQR,
modelMatrix=modelMatrix, renameCols=FALSE,
minmu=minmu)
}
res <- list(fit=fit, H=H, betaPriorVar=betaPriorVar, mu=mu,
modelMatrix=modelMatrix, mleBetaMatrix=mleBetaMatrix)
res
}
# breaking out the optim backup code from fitNbinomGLMs
fitNbinomGLMsOptim <- function(object,modelMatrix,lambda,
rowsForOptim,rowStable,
normalizationFactors,alpha_hat,
weights,useWeights,
betaMatrix,betaSE,betaConv,
beta_mat,
mu,logLike,minmu=0.5) {
x <- modelMatrix
lambdaNatLogScale <- lambda / log(2)^2
large <- 30
for (row in rowsForOptim) {
betaRow <- if (rowStable[row] & all(abs(betaMatrix[row,]) < large)) {
betaMatrix[row,]
} else {
beta_mat[row,]
}
nf <- normalizationFactors[row,]
k <- counts(object)[row,]
alpha <- alpha_hat[row]
objectiveFn <- function(p) {
mu_row <- as.numeric(nf * 2^(x %*% p))
logLikeVector <- dnbinom(k,mu=mu_row,size=1/alpha,log=TRUE)
logLike <- if (useWeights) {
sum(weights[row,] * logLikeVector)
} else {
sum(logLikeVector)
}
logPrior <- sum(dnorm(p,0,sqrt(1/lambda),log=TRUE))
negLogPost <- -1 * (logLike + logPrior)
if (is.finite(negLogPost)) negLogPost else 10^300
}
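    # Added note: optim() below minimises this negative log-posterior, i.e. it
    # maximises the NB log-likelihood plus a Normal(0, sqrt(1/lambda)) log-prior on
    # the log2-scale coefficients -- the same ridge prior used in the IRLS fit
    # (there expressed on the natural-log scale).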
o <- optim(betaRow, objectiveFn, method="L-BFGS-B",lower=-large, upper=large)
ridge <- if (length(lambdaNatLogScale) > 1) {
diag(lambdaNatLogScale)
} else {
as.matrix(lambdaNatLogScale,ncol=1)
}
# if we converged, change betaConv to TRUE
if (o$convergence == 0) {
betaConv[row] <- TRUE
}
# with or without convergence, store the estimate from optim
betaMatrix[row,] <- o$par
# calculate the standard errors
mu_row <- as.numeric(nf * 2^(x %*% o$par))
# store the new mu vector
mu[row,] <- mu_row
mu_row[mu_row < minmu] <- minmu
    w <- if (useWeights) {
      diag(weights[row,] * (mu_row^-1 + alpha)^-1)
    } else {
      diag((mu_row^-1 + alpha)^-1)
    }
xtwx <- t(x) %*% w %*% x
xtwxRidgeInv <- solve(xtwx + ridge)
sigma <- xtwxRidgeInv %*% xtwx %*% xtwxRidgeInv
# warn below regarding these rows with negative variance
betaSE[row,] <- log2(exp(1)) * sqrt(pmax(diag(sigma),0))
logLikeVector <- dnbinom(k,mu=mu_row,size=1/alpha,log=TRUE)
logLike[row] <- if (useWeights) {
sum(weights[row,] * logLikeVector)
} else {
sum(logLikeVector)
}
}
return(list(betaMatrix=betaMatrix,betaSE=betaSE,
betaConv=betaConv,mu=mu,logLike=logLike))
}
|
/R/fitNbinomGLMs.R
|
no_license
|
wesleybarriosufv/DESeq2
|
R
| false | false | 16,846 |
r
|
|
source('interventionAnalyzer.R')
#Group by type. IMPORTANT: reset all intervention costs to their default values in interventionConfig.R before running
for(intervention in redEnLTBI_Interventions_specialMag) {
intConfig <- interventionConfig(intervention)
costs <- intConfig$costs
params <- intConfig$params
interData <- hill(costs,params[["sigmaL"]],params[["f"]],params[["trans"]],
params[["incLTBI"]])
write.csv(interData, paste(c(intFilePrefix,intervention,intFileSuffix),
collapse=""))
}
for(intervention in incLTBItrmt_Interventions) {
intConfig <- interventionConfig(intervention)
costs <- intConfig$costs
params <- intConfig$params
interData <- hill(costs,params[["sigmaL"]],params[["f"]],params[["trans"]],
params[["incLTBI"]])
write.csv(interData, paste(c(intFilePrefix,intervention,intFileSuffix),
collapse=""))
}
#Cost Vary
for(x in 0:9){
for(intervention in redEnLTBI_Interventions_specialMag) {
intConfig <- interventionConfig(intervention,x)
costs <- intConfig$costs
params <- intConfig$params
interData <- hill(costs,params[["sigmaL"]],params[["f"]],params[["trans"]],
params[["incLTBI"]])
write.csv(interData, paste(c(intFilePrefix,intervention,x,intFileSuffix),
collapse=""))
}
# for(intervention in incLTBItrmt_Interventions) {
# intConfig <- interventionConfig(intervention,x)
# costs <- intConfig$costs
# params <- intConfig$params
# interData <- hill(costs,params[["sigmaL"]],params[["f"]],params[["trans"]],
# params[["incLTBI"]])
# write.csv(interData, paste(c(intFilePrefix,intervention,x,intFileSuffix),
# collapse=""))
# }
}
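# Refactoring sketch: the loops above share one body, so a small helper could collapse
# them. `run_intervention` and its `costIndex` argument are new names introduced here
# for illustration; everything else comes from the sourced configuration files.
run_intervention <- function(intervention, costIndex = NULL) {
  intConfig <- if (is.null(costIndex)) {
    interventionConfig(intervention)
  } else {
    interventionConfig(intervention, costIndex)
  }
  costs <- intConfig$costs
  params <- intConfig$params
  interData <- hill(costs, params[["sigmaL"]], params[["f"]], params[["trans"]],
                    params[["incLTBI"]])
  # c() drops a NULL costIndex, so default-cost and cost-vary filenames match those above
  write.csv(interData, paste(c(intFilePrefix, intervention, costIndex, intFileSuffix),
                             collapse = ""))
}
# e.g. for(intervention in redEnLTBI_Interventions_specialMag) run_intervention(intervention)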
|
/in_progress/models/costBenefitAnalysis/interventionGroups.R
|
no_license
|
mmcdermott/disease-modeling
|
R
| false | false | 1,839 |
r
|
|
###can use any coef_gen file###
library(ggplot2)
library(MASS)
source("src/main_1_func.R")
source("src/settings.R")
source("src/coef_gen3.R")
source("src/coef_gen4.R")
source("src/curve_plot_func.R")
source("src/coef_gen5.R")
set1=setting()
randID=runif(1,1,10)
setin=1
delta=set1$delta[setin]
error=set1$error[setin]
m=set1$m[setin]
shapes=set1[setin,1:m]
int1=set1$int1[setin]
sd=set1$sd[setin]
cdist=set1$cdist[setin]
coefgen=set1$coefgen[setin]
adap=set1$adap[setin]
ni=set1[setin,(m+1):(2*m)]
error=0.2
ni=as.matrix(ni,ncol=1)
n=sum(ni)
pos1=seq(0,2,by=int1)[-1]
y1=matrix(0,nrow=n,ncol=(length(pos1)+1))
y1_noerror=matrix(0,nrow=m,ncol=length(pos1))
ind=0
for(gp in 1:m)
{
y1tmp=ygen4(shapes[gp],ni[gp],error,int1)
y1_noerror[gp,]=ygen4_noerr_pl(1,shapes[gp],int1)
#y1tmp_noerror[gp,]=ygen4_noerr_pl(1,shapes[gp],int1)
y1[(ind+1):(ind+ni[gp]),]=cbind(y1tmp,rep(gp,ni[gp]))
# y1_noerror=rbind(y1_noerror,y1tmp_noerror)
plot(pos1,apply(y1tmp,2,mean),xlab=gp,type="l")
ind=ind+ni[gp]
}
data=NULL
id=fun=grp=pos=NULL
for( i in 1: nrow(y1))
{
id=c(id,rep(i,length(pos1)))
pos=c(pos,pos1)
fun=c(fun,y1[i,-ncol(y1)])
grp=c(grp,rep(y1[i,ncol(y1)],length(pos1)))
}
data=cbind(id,pos,fun,grp)
data=data.frame(id=id,pos=pos,fun=fun,grp=grp)
head(data)
###Group 1
clustr=1
shapei=y1_noerror[clustr,]
data_sub=data[which(data$grp==clustr),]
data_sub=data_sub[,-4]
data_sub=data.frame(data_sub,smo=0)
tmp1=data.frame(id=rep(0,length(pos1)),pos=pos1,fun=shapei,smo=1)
data_sub=rbind(data_sub,tmp1)
data_sub$id=as.character(data_sub$id)
data_sub$smo=as.character(data_sub$smo)
head(data_sub)
p1 <- ggplot(data_sub, aes(x=pos, y=fun,group=id)) +
ylab(" ")+
geom_line(aes(linetype=id))+
scale_linetype_manual(values=c( "solid","longdash","dotted", "twodash", "dotdash"))+
ggtitle("Cluster 1")+
theme(plot.title = element_text(hjust = 0.5))+
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),legend.position="none")
plot(p1)
###Group 2
clustr=2
shapei=y1_noerror[clustr,]
data_sub=data[which(data$grp==clustr),]
data_sub=data_sub[,-4]
data_sub=data.frame(data_sub,smo=0)
tmp1=data.frame(id=rep(0,length(pos1)),pos=pos1,fun=shapei,smo=1)
data_sub=rbind(data_sub,tmp1)
data_sub$id=as.character(data_sub$id)
data_sub$smo=as.character(data_sub$smo)
p2 <- ggplot(data_sub, aes(x=pos, y=fun,group=id)) +
ylab(" ")+
geom_line(aes(linetype=id))+
scale_linetype_manual(values=c( "solid","longdash","dotted", "twodash", "dotdash"))+
ggtitle("Cluster 2")+
theme(plot.title = element_text(hjust = 0.5))+
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),legend.position="none")
plot(p2)
###Group 3
clustr=3
shapei=y1_noerror[clustr,]
data_sub=data[which(data$grp==clustr),]
data_sub=data_sub[,-4]
data_sub=data.frame(data_sub,smo=0)
tmp1=data.frame(id=rep(0,length(pos1)),pos=pos1,fun=shapei,smo=1)
data_sub=rbind(data_sub,tmp1)
data_sub$id=as.character(data_sub$id)
data_sub$smo=as.character(data_sub$smo)
p3 <- ggplot(data_sub, aes(x=pos, y=fun,group=id)) +
ylab(" ")+
geom_line(aes(linetype=id))+
scale_linetype_manual(values=c( "solid","longdash","dotted", "twodash", "dotdash"))+
ggtitle("Cluster 3")+
theme(plot.title = element_text(hjust = 0.5))+
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),legend.position="none")
plot(p3)
###Group 4
clustr=4
shapei=y1_noerror[clustr,]
data_sub=data[which(data$grp==clustr),]
data_sub=data_sub[,-4]
data_sub=data.frame(data_sub,smo=0)
tmp1=data.frame(id=rep(0,length(pos1)),pos=pos1,fun=shapei,smo=1)
data_sub=rbind(data_sub,tmp1)
data_sub$id=as.character(data_sub$id)
data_sub$smo=as.character(data_sub$smo)
p4 <- ggplot(data_sub, aes(x=pos, y=fun,group=id)) +
ylab(" ")+
geom_line(aes(linetype=id))+
scale_linetype_manual(values=c( "solid","longdash","dotted", "twodash", "dotdash"))+
ggtitle("Cluster 4")+
theme(plot.title = element_text(hjust = 0.5))+
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),legend.position="none")
plot(p4)
###Group 5
clustr=5
shapei=y1_noerror[clustr,]
data_sub=data[which(data$grp==clustr),]
data_sub=data_sub[,-4]
data_sub=data.frame(data_sub,smo=0)
tmp1=data.frame(id=rep(0,length(pos1)),pos=pos1,fun=shapei,smo=1)
data_sub=rbind(data_sub,tmp1)
data_sub$id=as.character(data_sub$id)
data_sub$smo=as.character(data_sub$smo)
p5 <- ggplot(data_sub, aes(x=pos, y=fun,group=id)) +
ylab(" ")+
geom_line(aes(linetype=id))+
scale_linetype_manual(values=c( "solid","longdash","dotted", "twodash", "dotdash"))+
ggtitle("Cluster 5")+
theme(plot.title = element_text(hjust = 0.5))+
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),legend.position="none")
plot(p5)
## Group 6
clustr=6
shapei=y1_noerror[clustr,]
data_sub=data[which(data$grp==clustr),]
data_sub=data_sub[,-4]
data_sub=data.frame(data_sub,smo=0)
tmp1=data.frame(id=rep(0,length(pos1)),pos=pos1,fun=shapei,smo=1)
data_sub=rbind(data_sub,tmp1)
data_sub$id=as.character(data_sub$id)
data_sub$smo=as.character(data_sub$smo)
p6 <- ggplot(data_sub, aes(x=pos, y=fun,group=id)) +
ylab(" ")+
geom_line(aes(linetype=id))+
scale_linetype_manual(values=c( "solid","longdash","dotted", "twodash", "dotdash"))+
ggtitle("Cluster 6")+
theme(plot.title = element_text(hjust = 0.5))+
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),legend.position="none")
plot(p6)
## Group 7
clustr=7
shapei=y1_noerror[clustr,]
data_sub=data[which(data$grp==clustr),]
data_sub=data_sub[,-4]
data_sub=data.frame(data_sub,smo=0)
tmp1=data.frame(id=rep(0,length(pos1)),pos=pos1,fun=shapei,smo=1)
data_sub=rbind(data_sub,tmp1)
data_sub$id=as.character(data_sub$id)
data_sub$smo=as.character(data_sub$smo)
p7 <- ggplot(data_sub, aes(x=pos, y=fun,group=id)) +
ylab(" ")+
geom_line(aes(linetype=id))+
scale_linetype_manual(values=c( "solid","longdash","dotted", "twodash", "dotdash"))+
ggtitle("Cluster 7")+
theme(plot.title = element_text(hjust = 0.5))+
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),legend.position="none")
plot(p7)
## Group 8
clustr=8
shapei=y1_noerror[clustr,]
data_sub=data[which(data$grp==clustr),]
data_sub=data_sub[,-4]
data_sub=data.frame(data_sub,smo=0)
tmp1=data.frame(id=rep(0,length(pos1)),pos=pos1,fun=shapei,smo=1)
data_sub=rbind(data_sub,tmp1)
data_sub$id=as.character(data_sub$id)
data_sub$smo=as.character(data_sub$smo)
p8 <- ggplot(data_sub, aes(x=pos, y=fun,group=id)) +
ylab(" ")+
geom_line(aes(linetype=id))+
scale_linetype_manual(values=c( "solid","longdash","dotted", "twodash", "dotdash"))+
ggtitle("Cluster 8")+
theme(plot.title = element_text(hjust = 0.5))+
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),legend.position="none")
plot(p8)
multiplot(p1, p2, p3, p4, p5,p6,p7,p8, cols=4)
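# Refactoring sketch: the eight per-cluster blocks above differ only in `clustr`, so a
# helper like this (plot_cluster is a new name introduced here) would produce the same
# panels; it assumes y1_noerror, data and pos1 exist as built above.
plot_cluster <- function(clustr) {
  shapei <- y1_noerror[clustr, ]
  data_sub <- data[which(data$grp == clustr), ]
  data_sub <- data.frame(data_sub[, -4], smo = 0)
  tmp1 <- data.frame(id = rep(0, length(pos1)), pos = pos1, fun = shapei, smo = 1)
  data_sub <- rbind(data_sub, tmp1)
  data_sub$id <- as.character(data_sub$id)
  ggplot(data_sub, aes(x = pos, y = fun, group = id)) +
    ylab(" ") +
    geom_line(aes(linetype = id)) +
    scale_linetype_manual(values = c("solid", "longdash", "dotted", "twodash", "dotdash")) +
    ggtitle(paste("Cluster", clustr)) +
    theme_bw() +
    theme(plot.title = element_text(hjust = 0.5),
          panel.border = element_blank(), panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
          legend.position = "none")
}
# e.g. p1 <- plot_cluster(1); the multiplot() call above is unchanged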
|
/plots.R
|
no_license
|
shuanggema/CPrT
|
R
| false | false | 8,174 |
r
|
|
#' @param bw the smoothing bandwidth to be used, see
#' \code{\link{density}} for details
#' @param adjust adjustment of the bandwidth, see
#' \code{\link{density}} for details
#' @param kernel kernel used for density estimation, see
#' \code{\link{density}} for details
#' @param trim This parameter only matters if you are displaying multiple
#' densities in one plot. If \code{FALSE}, the default, each density is
#' computed on the full range of the data. If \code{TRUE}, each density
#' is computed over the range of that group: this typically means the
#' estimated x values will not line-up, and hence you won't be able to
#' stack density values.
#' @section Computed variables:
#' \describe{
#' \item{density}{density estimate}
#' \item{count}{density * number of points - useful for stacked density
#' plots}
#' \item{scaled}{density estimate, scaled to maximum of 1}
#' }
#' @export
#' @rdname geom_density
stat_density <- function(mapping = NULL, data = NULL, geom = "area",
position = "stack", bw = "nrd0", adjust = 1, kernel = "gaussian",
trim = FALSE, na.rm = FALSE,
show.legend = NA, inherit.aes = TRUE, ...) {
layer(
data = data,
mapping = mapping,
stat = StatDensity,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
bw = bw,
adjust = adjust,
kernel = kernel,
trim = trim,
na.rm = na.rm,
...
)
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatDensity <- ggproto("StatDensity", Stat,
required_aes = "x",
default_aes = aes(y = ..density.., fill = NA),
compute_group = function(data, scales, bw = "nrd0", adjust = 1, kernel = "gaussian",
trim = FALSE, na.rm = FALSE) {
if (trim) {
range <- range(data$x, na.rm = TRUE)
} else {
range <- scales$x$dimension()
}
compute_density(data$x, data$weight, from = range[1], to = range[2],
bw = bw, adjust = adjust, kernel = kernel)
}
)
compute_density <- function(x, w, from, to, bw = "nrd0", adjust = 1,
kernel = "gaussian") {
n <- length(x)
if (is.null(w)) {
w <- rep(1 / n, n)
}
# if less than 3 points, spread density evenly over points
if (n < 3) {
return(data.frame(
x = x,
density = w / sum(w),
scaled = w / max(w),
count = 1,
n = n
))
}
dens <- stats::density(x, weights = w, bw = bw, adjust = adjust,
kernel = kernel, from = from, to = to)
data.frame(
x = dens$x,
density = dens$y,
scaled = dens$y / max(dens$y, na.rm = TRUE),
count = dens$y * n,
n = n
)
}
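# Illustrative usage (not part of the package source; kept as comments so nothing runs
# when the file is sourced). Both the layer and the helper can be exercised directly,
# e.g. with the built-in `faithful` data:
#   ggplot(faithful, aes(eruptions)) + stat_density(geom = "line", adjust = 1/2)
#   compute_density(faithful$eruptions, w = NULL, from = 1.5, to = 5.5)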
|
/R/stat-density.r
|
no_license
|
jiho/ggplot2
|
R
| false | false | 2,774 |
r
|
|
###Data Science 301-3 Final Project
#Load Packages ---------------------------------------------------------------------------------
library(tidyverse)
library(skimr)
library(janitor)
library(rsample)
library(GGally)
library(glmnet)
library(modelr)
library(ranger)
library(vip)
library(pdp)
library(xgboost)
library(MASS)
library(tidyselect)
#Set the seed ----------------------------------------------------------------------------------
set.seed(3739)
#Load Data -------------------------------------------------------------------------------------
shot_logs_2015 <- read_csv("data/unprocessed/shot_logs.csv") %>%
clean_names()
players_dat <- read_csv("data/unprocessed/players.csv") %>%
clean_names()
defense_dat <- read_csv("data/unprocessed/NBA Season Data.csv") %>%
clean_names()
#Data Wrangling -------------------------------------------------------------------------------
shot_logs_2015_updated <- shot_logs_2015 %>%
dplyr::select(c(final_margin, shot_number, period, game_clock, shot_clock,
dribbles, touch_time, shot_dist, pts_type, shot_result,
closest_defender, close_def_dist, fgm, pts, player_name))
shot_logs_2015_updated <- shot_logs_2015_updated %>%
mutate(closest_defender = sub("(\\w+),\\s(\\w+)","\\2 \\1", shot_logs_2015_updated$closest_defender))
players_dat <- players_dat %>%
filter(active_to >= 2015) %>%
dplyr::select(c(height, name, position, weight, nba_3ptpct,
nba_efgpct, nba_fg_percent, nba_ppg)) %>%
rename(c("player_name" = "name")) %>%
mutate(player_name = tolower(player_name))
defense_dat <- defense_dat %>%
filter(year == 2015) %>%
dplyr::select(c(player, per, stl_percent, blk_percent, dws, dws_48, dbpm, defense)) %>%
rename(c("closest_defender" = "player"))
defense_dat <- defense_dat %>%
group_by(closest_defender) %>%
transmute(
per = mean(per),
stl_percent = mean(stl_percent),
blk_percent = mean(blk_percent),
dws = sum(dws),
dws_48 = mean(dws_48),
dbpm = mean(dbpm),
defense = mean(defense)
) %>%
distinct(closest_defender,.keep_all = TRUE)
#Data Set Merging ------------------------------------------------------------------------------
nba_2015_total_dat <- merge(shot_logs_2015_updated, players_dat, by = "player_name")
nba_2015_total_dat <- merge(nba_2015_total_dat, defense_dat, by = "closest_defender")
nba_2015_total_dat <- nba_2015_total_dat %>%
  na.omit()
# sum(is.na(nba_2015_total_dat))
#
# write_csv(nba_2015_total_dat, path = "data/processed")
nba_model_dat <- nba_2015_total_dat %>%
seplyr::deselect(c("player_name", "shot_result", "closest_defender", "pts", "position", "final_margin", "id"))
nba_model_dat$height <- as_factor(nba_model_dat$height)
nba_model_dat$game_clock <- as.numeric(nba_model_dat$game_clock)
nba_model_dat %>%
skim_without_charts()
#Data Splitting --------------------------------------------------------------------------------
nba_model_dat$id <- 1:nrow(nba_model_dat)
train <- nba_model_dat %>% sample_frac(.75)
test <- anti_join(nba_model_dat, train, by = 'id')
nba_dat_split <- tibble(
train = train %>% list(),
test = test %>% list()
)
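#Alternative split using rsample (loaded above but otherwise unused) -- illustrative
#sketch; the names nba_split, train_rs and test_rs are new here and not used below.
nba_split <- initial_split(nba_model_dat, prop = 0.75)
train_rs <- training(nba_split)
test_rs <- testing(nba_split)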
#Modeling --------------------------------------------------------------------------------------
#Simple linear modeling
lm_fit_1 <- nba_model_dat %>% lm(formula = fgm ~ dribbles + close_def_dist)
lm_fit_1 %>%
broom::glance()
modelr::mse(lm_fit_1, nba_model_dat)
#Simple logistic model
glm_fits <- nba_dat_split %>%
mutate(mod_01 = map(train, glm,
formula = fgm ~ close_def_dist + shot_dist + touch_time + dribbles + shot_clock,
family = binomial))
glm_fits %>%
pluck("mod_01", 1) %>%
  broom::tidy()
glm_fits %>%
pluck("mod_01", 1) %>%
predict(type = "response") %>%
skim_without_charts()
demo_tib <- glm_fits %>%
mutate(train_prob = map(mod_01, predict, type = "response"),
train_direction = map(train_prob, ~ if_else(.x > 0.5, 1, 0)))
demo_tib %>%
unnest(cols = c(train, train_direction)) %>%
count(train_direction) %>%
mutate(prop = n / sum(n))
demo_tib %>%
unnest(cols = c(train, train_direction)) %>%
count(fgm, train_direction) %>%
mutate(prop = n / sum(n)) %>%
arrange(desc(fgm))
demo_tib %>%
unnest(cols = c(train, train_direction)) %>%
mutate(correct = if_else(train_direction == fgm, 1, 0)) %>%
summarise(train_accuracy = mean(correct),
train_error = 1 - train_accuracy)
demo_tib <- demo_tib %>%
mutate(test_prob = map2(mod_01, test, predict, type = "response"),
test_direction = map(test_prob, ~ if_else(.x > 0.5, 1, 0)))
demo_tib %>%
unnest(cols = c(test, test_direction)) %>%
mutate(correct = if_else(test_direction == fgm, 1, 0)) %>%
summarise(test_accuracy = mean(correct),
test_error = 1 - test_accuracy)
glm_fits_2 <- nba_dat_split %>%
mutate(mod_01 = map(train, glm,
formula = fgm ~ close_def_dist + shot_dist + touch_time + shot_clock + dbpm + nba_efgpct,
family = binomial))
glm_fits_2 %>%
pluck("mod_01", 1) %>%
  broom::tidy()
glm_fits_2 %>%
pluck("mod_01", 1) %>%
predict(type = "response") %>%
skim_without_charts()
demo_tib_2 <- glm_fits_2 %>%
mutate(train_prob = map(mod_01, predict, type = "response"),
train_direction = map(train_prob, ~ if_else(.x > 0.5, 1, 0)))
demo_tib_2 %>%
unnest(cols = c(train, train_direction)) %>%
count(train_direction) %>%
mutate(prop = n / sum(n))
demo_tib_2 %>%
unnest(cols = c(train, train_direction)) %>%
count(fgm, train_direction) %>%
mutate(prop = n / sum(n)) %>%
arrange(desc(fgm))
demo_tib_2 %>%
unnest(cols = c(train, train_direction)) %>%
mutate(correct = if_else(train_direction == fgm, 1, 0)) %>%
summarise(train_accuracy = mean(correct),
train_error = 1 - train_accuracy)
demo_tib_2 <- demo_tib_2 %>%
mutate(test_prob = map2(mod_01, test, predict, type = "response"),
test_direction = map(test_prob, ~ if_else(.x > 0.5, 1, 0)))
demo_tib_2 %>%
unnest(cols = c(test, test_direction)) %>%
mutate(correct = if_else(test_direction == fgm, 1, 0)) %>%
summarise(test_accuracy = mean(correct),
test_error = 1 - test_accuracy)
#Random Forest
#Helper functions ----------------------------------------------------------------------------
misclass_ranger <- function(model, test, outcome){
if(!is_tibble(test)){
test <- test %>% as_tibble()
}
preds <- predict(model, test)$predictions
misclass <- mean(test[[outcome]] != preds)
return(misclass)
}
nba_rf_class <- nba_dat_split %>%
crossing(mtry = 1:(ncol(train) - 1)) %>%
mutate(model = map2(.x = train, .y = mtry,
.f = function(x, y) ranger(fgm ~ .-id,
mtry = y,
data = x,
splitrule = "gini",
importance = "impurity",
classification = TRUE)),
train_misclass = map2(model, train, misclass_ranger, outcome = "fgm"),
test_misclass = map2(model, test, misclass_ranger, outcome = "fgm"),
oob_misclass = map(.x = model,
.f = function(x) x[["prediction.error"]])
)
nba_rf_class %>%
pluck("test_misclass")
ggplot(nba_rf_class) +
geom_line(aes(mtry, unlist(oob_misclass), color = "OOB Error")) +
geom_line(aes(mtry, unlist(train_misclass), color = "Training Error")) +
geom_line(aes(mtry, unlist(test_misclass), color = "Test Error")) +
labs(x = "mtry", y = "Misclassification Rate") +
scale_color_manual("", values = c("purple", "blue", "red")) +
theme_bw()
nba_class_mtry5 = ranger(fgm ~ .-id,
data = nba_model_dat,
mtry = 5,
importance = "impurity",
splitrule = "gini",
probability = TRUE)
vip(nba_class_mtry5)
pred_probs_rf <- predict(nba_class_mtry5, test, type = "response")
summary(pred_probs_rf)
pred_probs_rf$predictions[,2]
out_rf <- tibble(Id = test$id,
Category = as.character(as.integer(pred_probs_rf$predictions[,2] > .5)))
out_rf
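#Rough in-sample check of the forest above (illustrative; note nba_class_mtry5 was fit
#on all of nba_model_dat, including the rows in `test`, so this is optimistic rather
#than a true hold-out accuracy):
mean(out_rf$Category == as.character(test$fgm))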
###Boosted model
#Helper functions for building xgb.DMatrix objects and computing error ------------------------
xgb_matrix <- function(dat, outcome, exclude_vars){
if(!is_tibble(dat)){
dat <- as_tibble(dat)
}
dat_types <- dat %>% map_chr(class)
outcome_type <- class(dat[[outcome]])
if("character" %in% dat_types){
print("You must encode characters as factors.")
return(NULL)
} else {
if(outcome_type == "factor" & nlevels(dat[[outcome]]) == 2){
      tmp <- dat %>% dplyr::select(all_of(outcome)) %>% onehot::onehot() %>% predict(dat)
lab <- tmp[,1]
} else {
lab <- dat[[outcome]]
}
mat <- dat %>% dplyr::select(-outcome, -all_of(exclude_vars)) %>%
onehot::onehot() %>%
predict(dat)
return(xgb.DMatrix(data = mat,
label = lab))
}}
xg_error <- function(model, test_mat, metric = "mse"){
preds = predict(model, test_mat)
vals = getinfo(test_mat, "label")
if(metric == "mse"){
err <- mean((preds - vals)^2)
} else if(metric == "misclass") {
err <- mean(preds != vals)
}
return(err)
}
#Boosted model class 1
nba_xg_class <- nba_dat_split %>%
crossing(learn_rate = 10^seq(-10, -.1, length.out = 20)) %>%
mutate(
    train_mat = map(train, xgb_matrix, outcome = "fgm", exclude_vars = "height"),
    test_mat = map(test, xgb_matrix, outcome = "fgm", exclude_vars = "height"),
xg_model = map2(.x = train_mat, .y = learn_rate,
.f = function(x, y) xgb.train(params = list(eta = y,
depth = 5,
objective = "multi:softmax",
num_class = 2),
data = x,
nrounds = 100,
silent = TRUE)),
xg_train_misclass = map2(xg_model, train_mat, xg_error, metric = "misclass"),
xg_test_misclass = map2(xg_model, test_mat, xg_error, metric = "misclass")
)
nba_xg_class %>%
pluck("xg_test_misclass")
ggplot(nba_xg_class) +
geom_line(aes(learn_rate, unlist(xg_test_misclass)))
xg_class_mod <- nba_xg_class %>%
arrange(unlist(xg_test_misclass)) %>%
pluck("xg_model", 1)
vip(xg_class_mod)
#Boosted model class update
nba_xg_class_6 <- nba_dat_split %>%
crossing(learn_rate = 10^seq(-10, -.1, length.out = 20)) %>%
mutate(
    train_mat = map(train, xgb_matrix, outcome = "fgm", exclude_vars = "height"),
    test_mat = map(test, xgb_matrix, outcome = "fgm", exclude_vars = "height"),
xg_model = map2(.x = train_mat, .y = learn_rate,
.f = function(x, y) xgb.train(params = list(eta = y,
depth = 5,
objective = "multi:softmax",
num_class = 2),
data = x,
nrounds = 50,
silent = TRUE)),
xg_train_misclass = map2(xg_model, train_mat, xg_error, metric = "misclass"),
xg_test_misclass = map2(xg_model, test_mat, xg_error, metric = "misclass")
)
nba_xg_class_6 %>%
pluck("xg_test_misclass")
ggplot(nba_xg_class_6) +
geom_line(aes(learn_rate, unlist(xg_test_misclass)))
xg_class_mod <- nba_xg_class_6 %>%
arrange(unlist(xg_test_misclass)) %>%
pluck("xg_model", 1)
xg_class_mod
vip(xg_class_mod)
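#To pull out the learning rate with the lowest test misclassification from the
#50-round fits above (illustrative):
nba_xg_class_6 %>%
  arrange(unlist(xg_test_misclass)) %>%
  slice(1) %>%
  pull(learn_rate)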
|
/final-project-andrewfenichel/Fenichel_Andrew_FinalProject.R
|
no_license
|
andrewfenichel/final-project-afenichel-1
|
R
| false | false | 12,122 |
r
|
|
###General R script for processing and visualizing mash outputs
##10/25/20
#load libraries
library(readr)
library(tidyverse)
library(vegan)
library(adegenet)
library("maps")
setwd("~/Downloads")
#read in mash output
capsellatrim <- read_delim("tbl1_capsellatrim_tailcrop.tab",
"\t", escape_double = FALSE, col_names = FALSE,
trim_ws = TRUE)
#function to clean dists
clean_dist<-function(mashdist){
#rename cols
colnames(mashdist)<-c("f1", "f2","dist","num", "frac")
#rm extra cols
mashdist$num<-NULL
mashdist$frac<-NULL
#spread to matrix
in_wide<-spread(mashdist, key=f2, value = dist)
#set rownames for mat
  row.names(in_wide)<-in_wide$f1
#rm col
in_wide$f1<-NULL
#make into numeric matrix
in_mat<-data.matrix(in_wide)
#make dist object
in_dist<-as.dist(in_mat)
in_dist
}
dists<-clean_dist(capsellatrim)
#read in metadata from SRA
phenolist <- read_delim("~/Downloads/SraRunTable (29).txt", ",", escape_double = FALSE, trim_ws = TRUE)
#perform principal coordinate analysis
cailliez<-cailliez(dists)
pcoa<-dudi.pco(cailliez, scannf = T, full=T)
#pull pcs
PCs<-data.frame(PC1=pcoa$tab[,1],
PC2=pcoa$tab[,2],
PC3=pcoa$tab[,3],
PC4=pcoa$tab[,4],
PC5=pcoa$tab[,5],
PC6=pcoa$tab[,6],
                file=substr(rownames(pcoa$tab), 1, 10), stringsAsFactors = F)
colnames(PCs)[7]<-"Run"
#add in metadata
PCall<-inner_join(PCs, phenolist, by="Run")
#save(PCall, file = "PCall_mash_oct25.rda")
###Visualization####
#clean pop names
pops <- strsplit(PCall$Isolation_Source, "_")
pops2 <- NA
for (i in 1:length(pops)){
pops2[i] <- pops[[i]][1]
}
PCall$pops<-pops2
#read in metadata from paper supplement
pop_loc_capsella <- read_delim("~/Downloads/pop_loc_capsella.txt",
"\t", escape_double = FALSE, col_names = FALSE,
trim_ws = TRUE)
unique(pop_loc_capsella$X2)
unique(PCall$pops)
#replace erroneous pop names
PCall$pops[PCall$pops %in% c("6")]<-"FR6"
PCall$pops[PCall$pops %in% c("53")]<-"SP53"
PCall$pops[PCall$pops %in% c("39")]<-"IT39"
PCall$pops[PCall$pops %in% c("22")]<-"IT22"
PCall$pops[PCall$pops %in% c("IRRU2","IRRU3")]<-"IRRU"
PCall$pops[PCall$pops %in% c("JO56","JO59")]<-"JO"
PCall$pops[PCall$pops %in% c("OBL")]<-"OBL-RU5"
PCall$pops[PCall$pops %in% c("SABO10", "SABO4", "SABO5", "SABO6", "SABO7", "SABO9")]<-"SABO"
PCall$pops[PCall$pops %in% c("SE42","SE43")]<-"SE4x"
PCall$pops[PCall$pops %in% c("SY61", "SY64", "SY67" , "SY68" , "SY69")]<-"SY6x"
PCall$pops[PCall$pops %in% c("SY70")]<-"SY7x"
PCall$pops[PCall$pops %in% c( "TR71" , "TR73" , "TR75" , "TR79" , "TR83" )]<-"TR"
PCall$pops[PCall$pops %in% c( "VORU0", "VORU1" , "VORU2" , "VORU3")]<-"VORU"
PCall$pops[PCall$pops %in% c("CHS")]<-"CSH"
pop_loc_capsella$pops<-pop_loc_capsella$X2
named_pops <- left_join(PCall, pop_loc_capsella, by = "pops") %>% filter(!is.na(X1))
#assign countries to regions for visualization
countrykey <- data.frame(country = unique(named_pops$geo_loc_name),
region = c("Europe", "Europe","Europe","Africa","Russia",rep("Europe", 2), "ME", "ME", rep("Europe", 3),"ME", "ME",
"USA", "China", "Taiwan"))
named_pops$region <- countrykey$region[match(named_pops$geo_loc_name, countrykey$country)]
#PCoA visualization
ggplot(named_pops)+
geom_jitter(aes(x=PC1, y=PC2, col=region), size=2, width = 0.005)+
geom_hline(aes(yintercept=-0.001))+
geom_vline(aes(xintercept=0))+
theme_classic()
#assign clusters from PC1&2
named_pops$group <- "Europe1"
named_pops$group[named_pops$PC1<0&abs(named_pops$PC2)<0.0025]<-"Asia1"
named_pops$group[named_pops$PC1<0&abs(named_pops$PC2)>0.007]<-"Asia2"
named_pops$group[named_pops$PC1>0&named_pops$PC2>(-0.001)]<-"Europe2"
named_pops$group[named_pops$PC1>0&named_pops$PC2<(-0.001)]<-"ME"
#download worldmap
worlddata <- map_data("world")
#plot worldmap with assigned pops
ggplot(worlddata)+
geom_polygon(aes(x=long, y=lat, group=group), fill="grey90", col="black")+
geom_jitter(data=named_pops, aes(x=X5, y=X4, col=group), size=4)+
scale_color_manual(values=c("chartreuse3","forestgreen", "red", "firebrick", "royalblue4"))+
coord_cartesian(xlim=c(-15,165), ylim=c(20,70))+
theme_classic()
ggsave("capsella_map_main.png", height = 3, width=5.5)
#plot USA portion
ggplot(worlddata)+
geom_polygon(aes(x=long, y=lat, group=group), fill="grey90", col="black")+
geom_jitter(data=named_pops, aes(x=X5, y=X4, col=group), size=8)+
scale_color_manual(values=c("chartreuse3","forestgreen", "red", "firebrick", "royalblue4"), guide="none")+
coord_cartesian(xlim=c(-125,-70), ylim=c(25,50))+
theme_classic()
ggsave("capsella_map_US.png", height = 3, width=5.5)
|
/mash_clean_viz.R
|
no_license
|
avanwallendael/mash_sim
|
R
| false | false | 4,898 |
r
|
# extract BLS data and keep the following time series
# (all series are number in thousands, 16 years and over)
# Not Seasonally Adjusted
# LNU00000000 (Unadj) Population Level
# LNU01000000 (Unadj) Civilian Labor Force Level
# LNU02000000 (Unadj) Employment Level
# LNU03000000 (Unadj) Unemployment Level
# LNU05000000 (Unadj) Not in Labor Force = LNU00000000-LNU01000000
# LNU01300000 (Unadj) Labor Force Participation Rate = LNU01000000/LNU00000000*100
# LNU02300000 (Unadj) Employment-Population Ratio = LNU02000000/LNU00000000*100
# LNU04000000 (Unadj) Unemployment Rate = LNU03000000/LNU01000000*100
# LNU07000000 (Unadj) Labor Force Flows Employed to Employed
# LNU07100000 (Unadj) Labor Force Flows Unemployed to Employed
# LNU07200000 (Unadj) Labor Force Flows Not in Labor Force to Employed
# LNU07400000 (Unadj) Labor Force Flows Employed to Unemployed
# LNU07500000 (Unadj) Labor Force Flows Unemployed to Unemployed
# LNU07600000 (Unadj) Labor Force Flows Not in Labor Force to Unemployed
# LNU07800000 (Unadj) Labor Force Flows Employed to Not in Labor Force
# LNU07900000 (Unadj) Labor Force Flows Unemployed to Not in Labor Force
# LNU08000000 (Unadj) Labor Force Flows Not in Labor Force to Not in Labor Force
# Seasonally Adjusted
# LNS10000000 (Seas) Population Level
# LNS11000000 (Seas) Civilian Labor Force Level
# LNS12000000 (Seas) Employment Level
# LNS13000000 (Seas) Unemployment Level
# LNS15000000 (Seas) Not in Labor Force
# LNS11300000 (Seas) Labor Force Participation Rate = LNU01000000/LNU00000000*100
# LNS12300000 (Seas) Employment-Population Ratio = LNU02000000/LNU00000000*100
# LNS14000000 (Seas) Unemployment Rate = LNU03000000/LNU01000000*100
# LNS17000000 (Seas) Labor Force Flows Employed to Employed
# LNS17100000 (Seas) Labor Force Flows Unemployed to Employed
# LNS17200000 (Seas) Labor Force Flows Not in Labor Force to Employed
# LNS17400000 (Seas) Labor Force Flows Employed to Unemployed
# LNS17500000 (Seas) Labor Force Flows Unemployed to Unemployed
# LNS17600000 (Seas) Labor Force Flows Not in Labor Force to Unemployed
# LNS17800000 (Seas) Labor Force Flows Employed to Not in Labor Force
# LNS17900000 (Seas) Labor Force Flows Unemployed to Not in Labor Force
# LNS18000000 (Seas) Labor Force Flows Not in Labor Force to Not in Labor Force
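# NOTE: this script assumes the tidyverse (readr, dplyr, tidyr, stringr, ggplot2)
# and zoo are already loaded by the main script, that the scales package is
# installed (scales::percent is called below), and that ddir_bls / odir_bls hold
# the data and output directory paths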
datafile <- "ln.data.1.AllData"
choice <- menu(choices = c("Use existing BLS dataset", "Download new dataset from BLS server"),
title = "Download data from BLS?")
if (choice == 2) {
download.file(url = str_c("https://download.bls.gov/pub/time.series/ln/", datafile),
dest = str_c(ddir_bls, datafile))
}
message("Extracting BLS data")
df_blsdata_raw <- read_table2(file = str_c(ddir_bls, datafile), col_types = c("cicdi"))
# blsdata_raw <-
# datafile %T>%
# {download.file(url = str_c("https://download.bls.gov/pub/time.series/ln/", .),
# dest = str_c(ddir_bls, .))} %>%
# {read_table2(file = str_c(ddir_bls, .), col_types = c("cicdi"))}
rm(datafile, choice)
df_blsdata <-
df_blsdata_raw %>%
filter(!is.na(series_id)) %>%
select(-footnote_codes) %>%
filter(series_id %in% c(# NSA stocks: POP, LF, E, U, I
"LNU00000000", "LNU01000000", "LNU02000000", "LNU03000000", "LNU05000000",
# NSA rates: LFPR, EPR, UR
"LNU01300000", "LNU02300000", "LNU04000000",
# NSA flows: EE, UE, IE
"LNU07000000", "LNU07100000", "LNU07200000",
# NSA flows: EU, UU, IU
"LNU07400000", "LNU07500000", "LNU07600000",
# NSA flows: EI, UI, II
"LNU07800000", "LNU07900000", "LNU08000000",
# SA stocks: POP, LF, E, U, I
"LNS10000000", "LNS11000000", "LNS12000000", "LNS13000000", "LNS15000000",
# SA rates: LFPR, EPR, UR
"LNS11300000", "LNS12300000", "LNS14000000",
# SA flows: EE, UE, IE
"LNS17000000", "LNS17100000", "LNS17200000",
# SA flows: EU, UU, IU
"LNS17400000", "LNS17500000", "LNS17600000",
# SA flows: EI, UI, II
"LNS17800000", "LNS17900000", "LNS18000000"
)) %>%
# keep only monthly data, drop quarterly and annual data
filter(!(str_detect(period, pattern = "Q") | str_detect(period, "M13"))) %>%
mutate(period = str_c(year, str_sub(period, 2, 3)) %>% as.numeric()) %>%
filter(period >= 197501) %>%
select(-year)
rm(df_blsdata_raw)
# stocks
df_stocks_bls <-
df_blsdata %>%
filter(series_id %in% c("LNU02000000", "LNU03000000", "LNU05000000",
"LNS12000000", "LNS13000000", "LNS15000000")) %>%
# spread(series_id, value) %>%
# filter(complete.cases(.)) %>%
# gather(series_id, value, -period) %>%
rename(s = value) %>%
mutate(seas = case_when(str_sub(series_id, 3, 3) == "S" ~ "SA",
str_sub(series_id, 3, 3) == "U" ~ "NSA"),
lfs = case_when(series_id %in% c("LNU02000000", "LNS12000000") ~ "E",
series_id %in% c("LNU03000000", "LNS13000000") ~ "U",
series_id %in% c("LNU05000000", "LNS15000000") ~ "I",
TRUE ~ NA_character_)) %>%
select(series_id, period, lfs, seas, s)
# flows
df_flows_bls <-
df_blsdata %>%
filter(series_id %in% c("LNU07000000", "LNU07100000", "LNU07200000",
"LNU07400000", "LNU07500000", "LNU07600000",
"LNU07800000", "LNU07900000", "LNU08000000",
"LNS17000000", "LNS17100000", "LNS17200000",
"LNS17400000", "LNS17500000", "LNS17600000",
"LNS17800000", "LNS17900000", "LNS18000000")) %>%
rename(f = value,
period_2 = period) %>%
mutate(seas = case_when(str_sub(series_id, 3, 3) == "S" ~ "SA",
str_sub(series_id, 3, 3) == "U" ~ "NSA"),
lfs_1 = case_when(series_id %in% c("LNU07000000", "LNU07400000", "LNU07800000", "LNS17000000", "LNS17400000", "LNS17800000") ~ "E",
series_id %in% c("LNU07100000", "LNU07500000", "LNU07900000", "LNS17100000", "LNS17500000", "LNS17900000") ~ "U",
series_id %in% c("LNU07200000", "LNU07600000", "LNU08000000", "LNS17200000", "LNS17600000", "LNS18000000") ~ "I",
TRUE ~ NA_character_),
lfs_2 = case_when(series_id %in% c("LNU07000000", "LNU07100000", "LNU07200000", "LNS17000000", "LNS17100000", "LNS17200000") ~ "E",
series_id %in% c("LNU07400000", "LNU07500000", "LNU07600000", "LNS17400000", "LNS17500000", "LNS17600000") ~ "U",
series_id %in% c("LNU07800000", "LNU07900000", "LNU08000000", "LNS17800000", "LNS17900000", "LNS18000000") ~ "I",
TRUE ~ NA_character_)) %>%
select(series_id, period_2, lfs_1, lfs_2, seas, f)
# construct stock by adding up flows grouped by first LF status (lfs_1 is status in previous month)
df_flows_bls_sum_1 <-
df_flows_bls %>%
group_by(period_2, lfs_1, seas) %>%
summarise(s = sum(f)) %>%
group_by(lfs_1, seas) %>%
mutate(period = lag(period_2, default = 199001)) %>%
ungroup() %>%
rename(lfs = lfs_1) %>%
select(period, lfs, seas, s)
# construct stock by adding flows grouped by second LF status (lfs_2 is status in current month)
df_flows_bls_sum_2 <-
df_flows_bls %>%
group_by(period_2, lfs_2, seas) %>%
summarise(s = sum(f)) %>%
rename(period = period_2) %>%
ungroup() %>%
rename(lfs = lfs_2) %>%
select(period, lfs, seas, s)
# compare the difference between stocks constructed by summing BLS flows with BLS stocks
df_blsdata %>%
filter(series_id %in% c("LNU02000000", "LNU03000000", "LNU05000000",
"LNS12000000", "LNS13000000", "LNS15000000")) %>%
mutate(series_id = recode(series_id, "LNU02000000" = "E_NSA",
"LNU03000000" = "U_NSA",
"LNU05000000" = "I_NSA",
"LNS12000000" = "E_SA",
"LNS13000000" = "U_SA",
"LNS15000000" = "I_SA")) %>%
separate(series_id, into = c("lfs", "seas")) %>%
rename(s = value) %>%
bind_rows(stocks = .,
sumflows1 = df_flows_bls_sum_1,
sumflows2 = df_flows_bls_sum_2,
.id = "source") %>%
spread(source, s) %>%
mutate(err_sum_of_flows1 = (sumflows1 - stocks) / stocks,
err_sum_of_flows2 = (sumflows2 - stocks) / stocks) %>%
unite(measure, lfs, seas, sep = ".") %>%
gather(source, value, -c(period, measure)) %>%
mutate(yearm = period %>% as.character() %>% as.yearmon(format = "%Y%m")) %>%
filter(str_sub(source, 1, 3) == "err") %>%
filter(!is.na(value)) %>%
separate(measure, into = c("lfs", "seas")) %>%
ggplot(aes(x = yearm, y = value, col = source)) +
geom_line() +
geom_hline(yintercept = 0, linetype = "dotted") +
scale_x_yearmon() +
scale_y_continuous(labels = scales::percent) +
scale_color_discrete(labels = c("sum in month 1", "sum in month 2")) +
labs(x = "", y = "", title = "Difference between stocks constructed by summing BLS flows and actual BLS stocks",
color = "") +
facet_grid(lfs ~ seas, scales = "free_y")
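# the panels show the relative gap between stocks implied by summing the
# published flows and the published stocks themselves, by status and adjustment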
# construct population shares by LFS
df_stocksandshares_bls <-
df_stocks_bls %>%
group_by(period, seas) %>%
mutate(shr_lfs2pop = s / sum(s)) %>%
ungroup()
# plot population shares by LFS
df_stocksandshares_bls %>%
mutate(yearm = period %>% as.character() %>% as.yearmon(format = "%Y%m")) %>%
ggplot(aes(x = yearm, y = shr_lfs2pop)) +
geom_line() +
scale_x_yearmon() +
facet_grid(lfs ~ seas, scales = "free")
# construct transition rates
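# rate = share of people with status lfs_1 last month who have status lfs_2
# this month (row-normalized flows)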
df_flowsandrates_bls <-
df_flows_bls %>%
group_by(series_id) %>%
mutate(period_1 = lag(period_2, default = 199001)) %>%
group_by(period_2, lfs_1, seas) %>%
mutate(rate = f / sum(f)) %>%
ungroup() %>%
select(series_id, period_1, period_2, lfs_1, lfs_2, seas, f, rate)
save(df_blsdata, df_stocksandshares_bls, df_flowsandrates_bls, file = str_c(odir_bls, "BLS_lf.Rdata"))
# load(file = str_c(odir_bls, "BLS_lf.Rdata"))
rm(df_flows_bls_sum_1, df_flows_bls_sum_2,
df_blsdata, df_stocks_bls, df_flows_bls, df_flowsandrates_bls, df_stocksandshares_bls)
|
/code/c01_extract_bls_data.R
|
no_license
|
jduras/cps-flows
|
R
| false | false | 11,477 |
r
|
####
# sample workflow for data analysis from manuscript with the BRMS library
# this code fits the baseline, ambient(no treatment effect), and all the treatment effect models to a sample dataset
####
# libraries not loaded with other script
require('tidyverse')
# load the sample data frame
load('example.Rdata')
# read in the model-fitting function
source('brmsModelFunctions.R')
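# brmsModelFunctions.R is assumed to load brms and define model.fit();
# add_criterion(), loo_compare(), and model_weights() below come from brms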
######## fit using poisson distribution #####################
# data for poisson models is called CA_low_stan
# fit null or baseline model
null.model <- model.fit(CA_low_stan, "Null")
# Save model outputs if needed
# saveRDS(null.model, "nullmodel_CA_Low.rds")
# fit ambient or no treatment effect model
ambient.model <- model.fit(CA_low_stan, "Ambient")
# fit single treatment effects model
warming.model <- model.fit(CA_low_stan, "Warm")
removal.model <- model.fit(CA_low_stan, "Removal")
# fit both treatments
removalpluswarming.model <- model.fit(CA_low_stan, "Removal_plus_warming")
# fit full model with both treatments and interaction
removaltimeswarming.model <- model.fit(CA_low_stan, "Removal_times_warming")
# model comparison with WAIC
null.model <- add_criterion(null.model, "waic")
ambient.model <- add_criterion(ambient.model, "waic")
removal.model <- add_criterion(removal.model, "waic")
warming.model <- add_criterion(warming.model, "waic")
removalpluswarming.model <- add_criterion(removalpluswarming.model, "waic")
removaltimeswarming.model <- add_criterion(removaltimeswarming.model, "waic")
CA_low_waic <- loo_compare(null.model, ambient.model, removal.model, warming.model,
removalpluswarming.model, removaltimeswarming.model, criterion = "waic")
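# CA_low_waic holds the pairwise WAIC comparison (elpd differences and their
# standard errors) for the six candidate models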
# model comparison with WAIC weights
model_weights(null.model, ambient.model, removal.model, warming.model,
removalpluswarming.model, removaltimeswarming.model,
weights = "waic") %>%
as_tibble() %>%
rename(weight = value) %>%
mutate(model = c("Null", "Ambient", "Removal", "Warm", "Removal_plus_warming", "Removal_times_warming"),
weight = weight %>% round(digits = 2)) %>%
select(model, weight) %>%
arrange(desc(weight)) %>%
knitr::kable()
|
/brmsworkflow.R
|
permissive
|
kostask84/MS_VariableResponsesAlpinePlants
|
R
| false | false | 2,197 |
r
|
library(class)
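# NOTE: the object names below (forestfires, FFMC, DMC, ...) appear to be
# leftovers from a forest-fires template; the file actually read in is a
# fertility/diagnosis dataset with columns season, age, ..., output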
## Descriptive Analysis
##forestfires <- read.csv("kl.csv",header = TRUE, fill = TRUE)
forestfires<-read.csv("new_diag.txt",sep = ",")
#print(forestfires$Age)
#forestfires<-forestfires[which(forestfires$area != 0),]
n<-nrow(forestfires);
test <- sample(1:n, round(n)/10)
forestfires.train <- forestfires[-test, ]
forestfires.test <- forestfires[test, ]
print(n)
for (i in 1:n) {
  if (forestfires[i,10]=="N")
    forestfires[i,10]=1.0
  if (forestfires[i,10]=="O")
    forestfires[i,10]=0
}
print(forestfires)
x=table(forestfires$season,forestfires$output)
print(chisq.test(x));
print(chisq.test(forestfires$age,forestfires$output));
print(chisq.test(forestfires$diseases,forestfires$output));
print(chisq.test(forestfires$accident,forestfires$output));
print(chisq.test(forestfires$surgical,forestfires$output));
print(chisq.test(forestfires$fever,forestfires$output));
print(chisq.test(forestfires$freq,forestfires$output));
print(chisq.test(forestfires$smoke,forestfires$output));
print(chisq.test(forestfires$hours,forestfires$output));
##regression lines of various attributes with output
plot(forestfires$season~forestfires$output)
abline(lm(forestfires$season~forestfires$output))
plot(forestfires$age~forestfires$output)
abline(lm(forestfires$age~forestfires$output))
plot(forestfires$diseases~forestfires$output)
abline(lm(forestfires$diseases~forestfires$output))
plot(forestfires$accident~forestfires$output)
abline(lm(forestfires$accident~forestfires$output))
plot(forestfires$surgical~forestfires$output)
abline(lm(forestfires$surgical~forestfires$output))
plot(forestfires$fever~forestfires$output)
abline(lm(forestfires$fever~forestfires$output))
plot(forestfires$freq~forestfires$output)
abline(lm(forestfires$freq~forestfires$output))
plot(forestfires$smoke~forestfires$output)
abline(lm(forestfires$smoke~forestfires$output))
plot(forestfires$hours~forestfires$output)
abline(lm(forestfires$hours~forestfires$output))
##correlation of output with various attributes
cor(forestfires$season,forestfires$output)
cor(forestfires$age,forestfires$output)
cor(forestfires$diseases,forestfires$output)
cor(forestfires$accident,forestfires$output)
cor(forestfires$surgical,forestfires$output)
cor(forestfires$fever,forestfires$output)
cor(forestfires$freq,forestfires$output)
cor(forestfires$smoke,forestfires$output)
cor(forestfires$hours,forestfires$output)
### root mean square error
rmserror <-function(error)
{
sqrt(mean(error^2))
}
linear1=lm(forestfires$season~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
linear1=lm(forestfires$age~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
linear1=lm(forestfires$diseases~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
linear1=lm(forestfires$accident~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
linear1=lm(forestfires$surgical~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
linear1=lm(forestfires$fever~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
linear1=lm(forestfires$freq~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
linear1=lm(forestfires$smoke~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
linear1=lm(forestfires$hours~forestfires$output)
x<-rmserror(linear1$residuals)
print(x)
null <- lm((output + 1) ~ 1, forestfires[,c(-3, -4)])
full <- lm((output + 1)~., forestfires[,c(-3, -4)])
summary(full)
par(mfrow=c(2,2))
plot(full, which=c(1,2,4,5))
print(forestfires)
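# the polynomial-term block below assumes the original forestfires dataset
# (columns FFMC, DMC, DC, ISI, temp, RH, wind, rain) and a response vector y
# defined elsewhere; it will not run on the diagnosis data loaded above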
FFMC2 <- (forestfires.train$FFMC)^2
FFMC3 <- (forestfires.train$FFMC)^3
DMC2 <- (forestfires.train$DMC)^2
DMC3 <- (forestfires.train$DMC)^3
DC2 <- (forestfires.train$DC)^2
DC3 <- (forestfires.train$DC)^3
ISI2 <- (forestfires.train$ISI)^2
ISI3 <- (forestfires.train$ISI)^3
temp2 <- (forestfires.train$temp)^2
temp3 <- (forestfires.train$temp)^3
RH2 <- (forestfires.train$RH)^2
RH3 <- (forestfires.train$RH)^3
wind2 <- (forestfires.train$wind)^2
wind3 <- (forestfires.train$wind)^3
rain2 <- (forestfires.train$rain)^2
rain3 <- (forestfires.train$rain)^3
linearmodel <- lm( y ~ forestfires.train$FFMC + I(FFMC2) + I(FFMC3) +
                     forestfires.train$DMC + I(DMC2) + I(DMC3) +
                     forestfires.train$DC + I(DC2) + I(DC3) +
                     forestfires.train$ISI + I(ISI2) + I(ISI3) +
forestfires.train$temp + I(temp2) + I(temp3) +
forestfires.train$RH + I(RH2) + I(RH3) +
forestfires.train$wind + I(wind2) + I(wind3) +
forestfires.train$rain + I(rain2) + I(rain3) )
#set.seed(100)
#x<-read.csv("abc.txt")
#print(x)
#dim(x)
#ind<-sample(2,nrow(x),replace=TRUE,prob=c(0.7,0.3))
#train<-x[ind==1,]
#test<-x[ind==2,]
#print(train)
# knn
#library(class)
#train_input<-as.matrix(train[,-7])
#train_output<-as.vector(train[,7])
#test_input<-as.matrix(test[,-7])
#prediction<-knn(train_input[-7],test_input[-7],train_output[7],k=5)
##
##s<-sample(250,125)
#train<-x[s,]
#test<-x[-s,]
#dim(test)
#dim(train)
#print(test)
#print(train)
#cl<-factor(c(rep("a",25), rep("b",25)))
#cl
#knn(train, test, cl, k = 2, prob=TRUE)
|
/projectknn.R
|
no_license
|
rohitsemwal16/Data-Analytics
|
R
| false | false | 5,136 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/movingaves.R
\name{movingaves}
\alias{movingaves}
\title{Moving Averages}
\usage{
movingaves(x, window, integer = FALSE, max = FALSE)
}
\arguments{
\item{x}{Integer or numeric vector.}
\item{window}{Integer value specifying window length.}
\item{integer}{Logical value for whether \code{x} is an integer vector.}
\item{max}{Logical value for whether to return maximum moving average (as
opposed to vector of moving averages).}
}
\value{
Numeric value or vector depending on \code{max}.
}
\description{
Calculates moving averages or maximum moving average. For optimal speed, use
\code{integer = TRUE} if \code{x} is an integer vector and
\code{integer = FALSE} otherwise.
}
\examples{
# Load accelerometer data for first 5 participants in NHANES 2003-2004
data(unidata)
# Get data from ID number 21005
id.part1 <- unidata[unidata[, "seqn"] == 21005, "seqn"]
counts.part1 <- unidata[unidata[, "seqn"] == 21005, "paxinten"]
# Create vector of all 10-minute moving averages
all.movingaves <- movingaves(x = counts.part1, window = 10, integer = TRUE)
# Calculate maximum 10-minute moving average
max.movingave <- movingaves(x = counts.part1, window = 10, integer = TRUE,
max = TRUE)
}
|
/accelerometry/man/movingaves.Rd
|
no_license
|
akhikolla/InformationHouse
|
R
| false | true | 1,342 |
rd
|
mydata = read.csv(file.choose())
mydata
data_train <- mydata[1:28,1:2]
data_test <- mydata[1:3,3:4]
data_train
data_test
acf(data_train[,2])
pacf(data_train[,2])
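# fit an AR(1) model (ARIMA order c(1,0,0)) by conditional sum of squares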
result <- arima(data_train[,2],order=c(1,0,0),method=c("CSS"))
result
library(forecast)
forecast(result,1)
196.6542/5 # to containers
|
/Supply Chain Analytics to Manage Blood at a Blood Bank/AR model.R
|
no_license
|
changdio/Operations-Analytics
|
R
| false | false | 315 |
r
|
\name{applyFilter}
\alias{applyFilter}
\title{Apply an RPM filter to the data}
\description{Restricts downstream analysis to only those guides with a
specified abundance in terms of mapped reads per million}
\usage{
applyFilter(Data, thresh)
}
\arguments{
\item{Data}{Data (probably should make this a specific class, like "deepn")
from \code{\link{import}} or \code{\link{importFromDeepn}}.}
\item{thresh}{Reads per million (RPM) threshold to apply.}
}
\details{
Guides whose abundance falls below \code{thresh} mapped reads per million are
excluded; only guides meeting the threshold are carried forward into
downstream analysis.
}
\value{
A \code{deepn} object.
}
\author{Patrick Breheny}
\seealso{
\code{\link{import}},
\code{\link{rpm}}
}
\examples{
\dontrun{applyFilter(Data, 3)}
}
|
/man/applyFilter.Rd
|
no_license
|
emptyewer/statmaker
|
R
| false | false | 652 |
rd
|
library(twoBit)
#library(plyr)
#library(ggplot2)
#library(cowplot)
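# collect_sequences(): pull a fixed-length sequence starting start_pos bases
# downstream of each interval's 5' end (reverse-complemented for minus-strand
# entries so every sequence reads 5'->3' relative to the feature)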
collect_sequences <- function(twobit.filename, bed, seq.length = 1000,start_pos=0) {
twobit = twoBit::twobit.load(path.expand(twobit.filename))
N = dim(bed)[1]
result = vector(mode="list", length = N)
is.minus = bed[,6] == '-'
starts = bed[,2] +start_pos
ends = bed[,2] + seq.length +start_pos
starts[is.minus] = bed[is.minus, 3] - seq.length - start_pos
ends[is.minus] = bed[is.minus, 3] - start_pos
chroms = as.character(bed[,1])
for (i in 1:N) {
chrom = chroms[i]
seq = twoBit::twobit.sequence(twobit, chrom, starts[i], ends[i])
if (is.minus[i]){
seq = twoBit::twobit.reverse.complement(seq)
}
result[i] <- seq
}
return(result)
}
final_data<-data.frame()
range_matrix<-data.frame()
bases<-c("A","G","C","T")
hexamers_3mer=vector()
#########################################################
####
#### Generate K-mer index
#### Change for the proper K-mer length
#### number of loops
#### and 'paste' line
####
#########################################################
for (a in bases) {
for (b in bases) {
for (c in bases) {
# for (d in bases) {
# for (e in bases) {
# for (f in bases) {
# hexamer <- paste(a,b,c,d,e,f,sep="")
hexamer <- paste(a,b,c,sep="")
hexamers_3mer<-rbind(hexamers_3mer,hexamer)
# }
# }
# }
}
}
}
hexamers_2mer=vector()
for (a in bases) {
for (b in bases) {
hexamer <- paste(a,b,sep="")
hexamers_2mer<-rbind(hexamers_2mer,hexamer)
}
}
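# prepare_kmer_data(): for windows of length seqLen placed at offsets
# pos_start..pos_end (step `steps`) from the feature start, count overlapping
# k-mer occurrences in the stable and unstable gene sets and return per-gene
# mean counts together with their log2(stable/unstable) ratio by position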
prepare_kmer_data<-function(stable_genes,unstable_genes,pos_start=0,pos_end=1500,steps=500,seqLen=1000,hexamers_in){
seqLen=seqLen
hexamers=hexamers_in
pos_start=pos_start
pos_end=pos_end
steps=steps
FILE_stable=stable_genes
FILE_UNstable=unstable_genes
hg19 <-"/sonas-hs/siepel/nlsas/data/home/ablumber/genomes/hg19.2bit"
lim_range=vector()
for (i in seq(pos_start,pos_end,steps)){
genes<-read.table(FILE_stable)
nam<-paste("plot_",i,sep="")
N=nrow(genes)
seqs<- collect_sequences(hg19, genes, seq.length = seqLen ,start_pos=i)
results_sense<-data.frame()
results_position=1
N=length(seqs)
for (seq_test in seqs){
count_position<-0
hexamer_count=vector()
hexamer_count_as=vector()
#print(results_position)
for (hexamer in hexamers) {
count=0
x <- seq_test
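      # lookahead regex counts overlapping matches (e.g. "AAA" occurs twice in "AAAA")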
m <- gregexpr(paste("(?=(", hexamer, "))", sep=""), x, perl=TRUE)
m <- lapply(m, function(i) {
attr(i,"match.length") <- attr(i,"capture.length")
i
})
count_sense <- length(regmatches(x,m)[[1]])
# hexamer_count[count_position]<-count
count_position=count_position+1
hexamer_count[count_position]<-count_sense
#if (sum(hexamer_count)<=(nchar(seq_test)-(nchar(hexamer))+1)) {
}
results_sense<-rbind(results_sense,hexamer_count)
#}
}
#write.table(results_sense,"hexamer_lincRNA_K562_sense_raw_data.txt",quote=F,row.names=F,col.names=F,sep="\t")
col.sum.stable<-apply(results_sense, 2 ,sum)
N=nrow(results_sense)
col.sum.stable<-col.sum.stable/N
print(N)
print(sum(col.sum.stable))
#col.sum.stable<-as.data.frame(col.sum.stable)
names(col.sum.stable)<-hexamers
genes<-read.table(FILE_UNstable)
n=nrow(genes)
seqs<- collect_sequences(hg19, genes,seq.length = seqLen ,start_pos=i )
results_antisense<-data.frame()
results_position=1
N=length(seqs)
for (seq_test in seqs){
count_position<-0
hexamer_count=vector()
hexamer_count_as=vector()
#print(results_position)
for (hexamer in hexamers) {
count=0
x <- seq_test
m <- gregexpr(paste("(?=(", hexamer, "))", sep=""), x, perl=TRUE)
m <- lapply(m, function(i) {
attr(i,"match.length") <- attr(i,"capture.length")
i
})
count_sense <- length(regmatches(x,m)[[1]])
# hexamer_count[count_position]<-count
count_position=count_position+1
hexamer_count[count_position]<-count_sense
#if (sum(hexamer_count)<=(nchar(seq_test)-(nchar(hexamer))+1)) {
}
results_antisense<-rbind(results_antisense,hexamer_count)
#}
}
#write.table(results_antisense,"hexamer_lincRNA_K562_sense_raw_data.txt",quote=F,row.names=F,col.names=F,sep="\t")
col.sum.unstable<-apply(results_antisense, 2 ,sum)
print(sum(col.sum.unstable))
N=nrow(results_antisense)
col.sum.unstable<-col.sum.unstable/N
print(N)
print(sum(col.sum.unstable))
#col.sum.unstable<-as.data.frame(col.sum.unstable)
names(col.sum.unstable)<-hexamers
col.sum.stable<-as.data.frame(col.sum.stable)
col.sum.unstable<-as.data.frame(col.sum.unstable)
mydata<-cbind(col.sum.stable,col.sum.unstable)
mydata$ratio<-mydata$col.sum.stable/mydata$col.sum.unstable
mydata$V2<-hexamers
mydata$log_ratio<-log(mydata$ratio,2)
mydata<-mydata[order(mydata$log_ratio),]
mydata$V2 <- factor(mydata$V2, levels = mydata$V2[order(mydata$log_ratio)])
mydata$position<-i
lim_range<-c(lim_range, range(mydata$log_ratio))
temp<-range(mydata$log_ratio)
temp[3]<-i
range_matrix<-rbind(range_matrix,temp)
final_data<-rbind(final_data,mydata)
#p1<-ggplot(mydata, aes(x=V2,y=log_ratio)) + geom_point(size=6,color="blue")
#+ geom_bar(stat="identity")
#p1<-p1+xlab("") + ylab("Stable/Unstable (log scale) " ) + ylim(range(lim_range)) + theme(axis.text.x = element_text(angle=45, vjust=0.5))
#assign(nam,p1)
print(i)
print("Hello")
}
return(final_data)
}
###
##
#
#
# mRNA
#
FILE_stable<-"/local1/home/ablumber/K562/updated_data/K562_updated_matched_spliced_protein_class_5.bed"
FILE_UNstable<-"/local1/home/ablumber/K562/updated_data/K562_updated_matched_spliced_protein_class_1.bed"
setwd("/local1/home/ablumber/K562/updated_data/kmer_count/temp_files/")
final_data_3mer<-prepare_kmer_data(stable_genes=FILE_stable,unstable_genes=FILE_UNstable,hexamers_in=hexamers_3mer)
write.table(final_data_3mer,"final_data_from_mRNA_3mer_step1.txt",quote=F,row.names=F,col.names=T,sep="\t")
write.table(range_matrix,"range_matrix_from_step1.txt",quote=F,row.names=F,col.names=T,sep="\t")
final_data_2mer<-prepare_kmer_data(stable_genes=FILE_stable,unstable_genes=FILE_UNstable,hexamers_in=hexamers_2mer)
write.table(final_data_2mer,"final_data_from_mRNA_2mer_step1.txt",quote=F,row.names=F,col.names=T,sep="\t")
FILE_stable<-"/local1/home/ablumber/K562/updated_data/K562_updated_matched_spliced_lincRNA_class_5.bed"
FILE_UNstable<-"/local1/home/ablumber/K562/updated_data/K562_updated_matched_spliced_lincRNA_class_1.bed"
final_data_3mer<-prepare_kmer_data(stable_genes=FILE_stable,unstable_genes=FILE_UNstable,hexamers_in=hexamers_3mer)
write.table(final_data_3mer,"final_data_from_lincs_3mer_step1.txt",quote=F,row.names=F,col.names=T,sep="\t")
final_data_2mer<-prepare_kmer_data(stable_genes=FILE_stable,unstable_genes=FILE_UNstable,hexamers_in=hexamers_2mer)
write.table(final_data_2mer,"final_data_from_lincs_2mer_step1.txt",quote=F,row.names=F,col.names=T,sep="\t")
FILE_stable<-"/local1/home/ablumber/CAGE/TSS_data/final_stable_k562_high_cage_10.srt.mrg.bed"
FILE_UNstable<-"/local1/home/ablumber/CAGE/TSS_data/final_UNstable_k562_high_CAGE_10.match.srt.mrg.bed"
setwd("/local1/home/ablumber/K562/updated_data/kmer_count/temp_files/")
final_data_3mer<-prepare_kmer_data(stable_genes=FILE_stable,unstable_genes=FILE_UNstable,pos_start=0,pos_end=600,steps=200, seqLen=400, hexamers_in=hexamers_3mer)
write.table(final_data_3mer,"final_data_from_eRNA_3mer_step1.txt",quote=F,row.names=F,col.names=T,sep="\t")
final_data_2mer<-prepare_kmer_data(stable_genes=FILE_stable,unstable_genes=FILE_UNstable,pos_start=0,pos_end=600,steps=200,seqLen=400, hexamers_in=hexamers_2mer)
write.table(final_data_2mer,"final_data_from_eRNA_2mer_step1.txt",quote=F,row.names=F,col.names=T,sep="\t")
|
/scripts/fig4_and_5/kmer_count_step1.r
|
no_license
|
EasyPiPi/blumberg_et_al
|
R
| false | false | 7,743 |
r
|
|
data <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
data <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
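# Combine Date and Time into a single POSIXct timestamp for the x-axis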
data$DateTime <- as.POSIXct(paste(data$Date, data$Time))
plot(x=data$DateTime, y=data$Sub_metering_1,
xlab = '', ylab = 'Energy sub metering',
type = 'l')
lines(x=data$DateTime, y=data$Sub_metering_2,col='red')
lines(x=data$DateTime, y=data$Sub_metering_3,col='blue')
legend("topright", col = c('black', 'red', 'blue'), legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), lwd = 1)
dev.copy(png, file="Plot3.png", height=480, width=480)
dev.off()
|
/Plot3.R
|
no_license
|
lannus/ExData_Plotting1
|
R
| false | false | 801 |
r
|
|
# ============ FINAL figure (barplots), June 2020 ================
# Figure 3 of the Seeds paper - different attempts - Zupo et al.
#=======================================================================
# packages
library(plyr)
library(ggplot2)
library(gridExtra)
library(ggpubr)
# read the tables
tali <- read.csv(file = "data/last.csv", sep = ";", dec = ".", header = T)
tali$prop<-tali$prop_sp*100
#germ <- tali[c(1:9),]
#viab <- tali[c(10:18),]
# the only way is to handle germ and viab separately - 2 figures side by side
f0 <- ggplot() +
geom_bar(data=tali, aes(y = prop, x = trat, fill = fate), stat="identity",
position='stack') +
scale_fill_manual(values=c("#E7298A", "#E6AB02", "#2171B5"), labels = c("Decreased", "Stimulated", "Unchanged"))+
scale_x_discrete(limits=c("100-1","100-3", "200 -1" ),
labels=c("100°C-1 min","100°C-3 min", "200°C-1 min" ))+
xlab("") +
ylab("Species (%)") +
theme_classic() +
theme(legend.position="bottom", legend.text = element_text(size =9), legend.key.size = unit(0.35, "cm")) +
facet_grid( ~ x)
f0<- f0+labs(fill ="")
png("figs/figura6_cor2.png", res = 300, width = 1700, height = 1100)
ggarrange(f0,
common.legend = TRUE, legend = "bottom")
dev.off()
## the LONG way of making this figure ####
###########################################
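# NOTE: the code below assumes the germ and viab subsets (commented out above) have been created first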
f1 <- ggplot(data=germ, aes(x=trat, y=prop_sp, fill = fate, width=.5)) + # width makes the bars thinner (or thicker)
geom_bar(position="stack", stat="identity")+
scale_fill_manual(values=c('#CCCCCC','#666666', '#333333'), labels = c("Decreased", "Stimulated", "Unchanged"))+
scale_x_discrete(limits=c("100-1","100-3", "200 -1" ),
labels=c("100 - 1 min","100 - 3 min", "200 - 1 min" ))+
xlab("") +
ylab("Proportion of species") +
theme_classic() +
theme (axis.text = element_text(size = 7), axis.title=element_text(size=8),
axis.text.x=element_text(size=8),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(), panel.border=element_blank()) +
  theme(axis.line.x = element_line(color="black", size = 0), ## draw the x and y axis lines again, since they were removed from the border
axis.line.y = element_line(color="black", size = 0))+
theme(legend.position="bottom", legend.text = element_text(size =9), legend.key.size = unit(0.35, "cm"))
f1<- f1+labs(fill ="")
f2 <- ggplot(data=viab, aes(x=trat, y=prop_sp, fill = fate, width=.5)) + # width makes the bars thinner (or thicker)
geom_bar(position="stack", stat="identity")+
scale_fill_manual(values=c('#CCCCCC','#666666', '#333333'),labels = c("Decreased", "Stimulated", "Unchanged"))+
scale_x_discrete(limits=c("100-1","100-3", "200 -1" ),
labels=c("100°C - 1 min","100°C - 3 min", "200°C - 1 min" ))+
xlab("") +
ylab("Proportion of species") +
theme_classic() +
theme (axis.text = element_text(size = 7), axis.title=element_text(size=8),
axis.text.x=element_text(size=8),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(), panel.border=element_blank()) +
  theme(axis.line.x = element_line(color="black", size = 0), ## draw the x and y axis lines again, since they were removed from the border
axis.line.y = element_line(color="black", size = 0))+
theme(legend.position="bottom", legend.text = element_text(size =9), legend.key.size = unit(0.35, "cm"))
f2<- f2+labs(fill ="")
png("figs/figura6.png", res = 300, width = 2000, height = 800)
ggarrange(f1, f2, labels = c("a", "b"),label.x = c(0.14, 0.14),
common.legend = TRUE, legend = "bottom")
dev.off()
# the other (failed) attempts were: 1 - grouped barplot, 2 - barplot (germ) + line graph (viab), and 3 - a single stacked barplot (which mixed up germ and viab - fail)
|
/R_scripts/last_fig.R
|
no_license
|
talitazupo/seeds
|
R
| false | false | 3,769 |
r
|
|
# Graphing Scripts
library(doBy)
library(plyr)
library(car)
# Graphing function
one.way.plot = function(DV,IV1,SubjNo,x.label ="Add X Label", main.label = "Add main header", y.label = "Add y label", log.test = FALSE){
print("Logit Transform DV")
ylim.grph <- c(0,1)
if(log.test == TRUE){
logit(DV) -> DV
ylim.grph <- c(-4,1)}
tapply(DV, INDEX = list(IV1), FUN = mean,na.rm = T) -> graph.data
tapply(DV, INDEX = list(IV1), FUN = sd, na.rm = T) -> graph.se
graph.se/sqrt(length(unique(SubjNo))) -> graph.se
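	# Convert the SDs to standard errors using the number of unique subjects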
barplot(graph.data, beside = T, col = c("white"), ylim = ylim.grph, ylab = y.label, xlab = x.label, main = main.label, border = NA) -> dat.g
rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col = "lightgray")
abline(h=(seq(-3,1)), col="black", lty="dotted")
legend(4.5,1.3,rownames(graph.data), fill = c("red","blue"))
points(dat.g, graph.data, col = c("red","blue"), bg = c("red","blue"), pch = 22, cex = 6)
arrows(dat.g, (graph.data+graph.se), dat.g, (graph.data-graph.se), angle = 90, lwd = 2, lty = 1, code = 0)
}
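# Minimal hypothetical usage sketch (not part of the original script): it assumes
# a data frame `dat` with a proportion-of-looks column, a condition factor and a
# subject ID; the column names below are placeholders, not the real data.
# one.way.plot(DV = dat$prop_looks, IV1 = dat$condition, SubjNo = dat$subj,
#              x.label = "Condition", main.label = "Looks to target",
#              y.label = "Proportion of looks", log.test = TRUE)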
|
/ChildExperiment/Eyes/Scripts/Graphing.r
|
no_license
|
hughrabagliati/QUD-Kids
|
R
| false | false | 1,106 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{tutorial_3a_table_4}
\alias{tutorial_3a_table_4}
\alias{T3AT4}
\alias{t3at4}
\title{The data used in Tutorial 3A, Table 4}
\format{
An object of class \code{data.frame} with 10 rows and 6 columns.
}
\source{
\url{https://designingexperiments.com/data/}
Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
}
\usage{
data(tutorial_3a_table_4)
}
\description{
Data from Tutorial 3A Table 4 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
}
\details{
\itemize{
\item group.
\item score.
\item X0.
\item X1.
\item X2.
\item x3.}
}
\section{Synonym}{
T3AT4
}
\examples{
# Load the data
data(tutorial_3a_table_4)
# Or, alternatively load the data as
data(T3AT4)
# View the structure
str(tutorial_3a_table_4)
# Brief summary of the data.
summary(tutorial_3a_table_4)
}
\references{
Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
{A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
}
\author{
Ken Kelley \email{kkelley@nd.edu}
}
\keyword{datasets}
|
/man/tutorial_3a_table_4.Rd
|
no_license
|
yelleKneK/AMCP
|
R
| false | true | 1,331 |
rd
|
|
#' A Bagging Prediction Model Using LASSO Selection Algorithm.
#'
#' This function performs bagging prediction for linear and logistic regression models using the LASSO selection algorithm.
#'
#' @param x input matrix. The dimension of the matrix is nobs x nvars; each row is a vector of observations of the variables.
#' @param y response variable. For family="gaussian", y is a vector of quantitative responses. For family="binomial", y should be a factor with two levels '0' and '1', where level '1' is the target class.
#' @param family response type (see above).
#' @param M the number of base-level models (LASSO linear or logistic regression models) to obtain a final prediction. Note that it also corresponds to the number of bootstrap samples to draw. Defaults to 100.
#' @param subspace.size the number of random subspaces to construct an ensemble prediction model. Defaults to 10.
#' @param predictor.subset the subset of randomly selected predictors from the training set to reduce the original p-dimensional feature space. Defaults to (9/10)*ncol(x), where ncol(x) represents the original p-dimensional feature space of the input matrix x.
#' @param boot.scale the scale of the sample size in each bootstrap re-sampling, relative to the original sample size. Defaults to 1.0, i.e. the original training sample size.
#' @param kfold the number of folds of cross validation - default is 10. Although kfold can be as large as the sample size (leave-one-out CV), it is not recommended for large datasets. Smallest value allowable is kfold=3.
#' @param predictor.importance logical. Should the importance of each predictor in the bagging LASSO model be evaluated? Defaults to TRUE. A permutation-based variable importance measure estimated by the out-of-bag error rate is adapted for the bagging model.
#' @param trimmed logical. Should a trimmed bagging strategy be performed? Defaults to FALSE. Traditional bagging draws bootstrap samples from the training sample, applies the base-level model to each bootstrap sample, and then averages over all obtained prediction rules. The idea of trimmed bagging is to exclude the bootstrapped prediction rules that yield the highest error rates and to aggregate over the remaining ones.
#' @param weighted logical. Should a weighted rank aggregation procedure be performed? Defaults to TRUE. This procedure uses a Monte Carlo cross-entropy algorithm combining the ranks of a set of based-level model under consideration via a weighted aggregation that optimizes a distance criterion to determine the best performance base-level model.
#' @param verbose logical. Should the iterative process information of bagging model be presented? Defaults to TRUE.
#' @param seed the seed for random sampling, with the default value 0123.
#' @export
#' @import glmnet
#' @import RankAggreg
#' @import mlbench
#' @references
#' [1] Guo, P., Zeng, F., Hu, X., Zhang, D., Zhu, S., Deng, Y., & Hao, Y. (2015). Improved Variable
#' Selection Algorithm Using a LASSO-Type Penalty, with an Application to Assessing Hepatitis B
#' Infection Relevant Factors in Community Residents. PLoS One, 27;10(7):e0134151.
#' [2] Tibshirani, R. (1996). Regression Shrinkage and Selection via the Lasso. Journal of the Royal
#' Statistical Society: Series B (Methodological), 58(1):267-288.
#' [3] Breiman, L. (2001). Random Forests. Machine Learning, 45(1), 5-32.
#' @examples
#' # Example 1: Bagging LASSO linear regression model.
#' library(mlbench)
#' set.seed(0123)
#' mydata <- mlbench.threenorm(100, d=10)
#' x <- mydata$x
#' y <- mydata$classes
#' mydata <- as.data.frame(cbind(x, y))
#' colnames(mydata) <- c(paste("A", 1:10, sep=""), "y")
#' mydata$y <- ifelse(mydata$y==1, 0, 1)
#' # Split into training and testing data.
#' S1 <- as.vector(which(mydata$y==0))
#' S2 <- as.vector(which(mydata$y==1))
#' S3 <- sample(S1, ceiling(length(S1)*0.8), replace=FALSE)
#' S4 <- sample(S2, ceiling(length(S2)*0.8), replace=FALSE)
#' TrainInd <- c(S3, S4)
#' TestInd <- setdiff(1:length(mydata$y), TrainInd)
#' TrainXY <- mydata[TrainInd, ]
#' TestXY <- mydata[TestInd, ]
#' # Fit a bagging LASSO linear regression model. The parameter M is set to
#' # a small value in this example to reduce the running time; however, the
#' # default value is recommended.
#' Bagging.fit <- Bagging.lasso(x=TrainXY[, -10], y=TrainXY[, 10],
#' family=c("gaussian"), M=2, predictor.subset=round((9/10)*ncol(x)),
#' predictor.importance=TRUE, trimmed=FALSE, weighted=TRUE, seed=0123)
#' # Print a 'bagging' object fitted by the Bagging.fit function.
#' Print.bagging(Bagging.fit)
#' # Make predictions from a bagging LASSO linear regression model.
#' pred <- Predict.bagging(Bagging.fit, newx=TestXY[, -10], y=NULL, trimmed=FALSE)
#' pred
#' # Generate the plot of variable importance.
#' Plot.importance(Bagging.fit)
#' # Example 2: Bagging LASSO logistic regression model.
#' library(mlbench)
#' set.seed(0123)
#' mydata <- mlbench.threenorm(100, d=10)
#' x <- mydata$x
#' y <- mydata$classes
#' mydata <- as.data.frame(cbind(x, y))
#' colnames(mydata) <- c(paste("A", 1:10, sep=""), "y")
#' mydata$y <- ifelse(mydata$y==1, 0, 1)
#' # Split into training and testing data.
#' S1 <- as.vector(which(mydata$y==0))
#' S2 <- as.vector(which(mydata$y==1))
#' S3 <- sample(S1, ceiling(length(S1)*0.8), replace=FALSE)
#' S4 <- sample(S2, ceiling(length(S2)*0.8), replace=FALSE)
#' TrainInd <- c(S3, S4)
#' TestInd <- setdiff(1:length(mydata$y), TrainInd)
#' TrainXY <- mydata[TrainInd, ]
#' TestXY <- mydata[TestInd, ]
#' # Fit a bagging LASSO logistic regression model. The parameter M is set to
#' # a small value in this example to reduce the running time; however, the
#' # default value is recommended.
#' Bagging.fit <- Bagging.lasso(x=TrainXY[, -11], y=TrainXY[, 11],
#' family=c("binomial"), M=2, predictor.subset=round((9/10)*ncol(x)),
#' predictor.importance=TRUE, trimmed=FALSE, weighted=TRUE, seed=0123)
#' # Print a 'bagging' object fitted by the Bagging.fit function.
#' Print.bagging(Bagging.fit)
#' # Make predictions from a bagging LASSO logistic regression model.
#' pred <- Predict.bagging(Bagging.fit, newx=TestXY[, -11], y=NULL, trimmed=FALSE)
#' pred
#' # Generate the plot of variable importance.
#' Plot.importance(Bagging.fit)
Bagging.lasso <- function(x, y, family=c("gaussian", "binomial"), M=100, subspace.size=10, predictor.subset=round((9/10)*ncol(x)), boot.scale=1.0, kfold=10,
predictor.importance=TRUE, trimmed=FALSE, weighted=TRUE, verbose=TRUE, seed=0123){
rmse <- function(truth, predicted){
predicted <- as.numeric(predicted)
mse <- mean((truth-predicted)*(truth-predicted))
rmse <- sqrt(mse)
rmse
}
mae <- function(truth, predicted){
predicted <- as.numeric(predicted)
mae <- mean(abs(predicted-truth))
mae
}
re <- function(truth, predicted){
predicted <- as.numeric(predicted)
mse <- mean(((truth-predicted)/truth)*((truth-predicted)/truth))
re <- sqrt(mse)
re
}
smape <- function(truth, predicted){
predicted <- as.numeric(predicted)
smape <- mean(abs(truth-predicted)/((abs(truth)+abs(predicted))/2))
smape
}
accuracy <- function(truth, predicted){
if(length(truth) > 0)
sum(truth==predicted)/length(truth) else
return(0)
}
sensitivity <- function(truth, predicted){
# 1 means positive (present)
if(sum(truth==1) > 0)
sum(predicted[truth==1]==1)/sum(truth==1) else
return(0)
}
specificity <- function(truth, predicted){
if(sum(truth==0) > 0)
sum(predicted[truth==0]==0)/sum(truth==0) else
return(0)
}
AUC <- function(truth, probs){
# probs - probability of class 1
q <- seq(0, 1, .01)
sens <- rep(0, length(q))
spec <- rep(0, length(q))
ly <- levels(truth)
for(i in 1:length(q)){
pred <- probs >= q[i]
pred[pred] <- 1
pred <- factor(pred, levels=ly)
sens[i] <- sensitivity(truth, pred)
spec[i] <- specificity(truth, pred)
}
# make sure it starts and ends at 0, 1
sens <- c(1, sens, 0)
spec <- c(0, spec, 1)
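  # Integrate sensitivity over (1 - specificity) with the trapezoidal rule to obtain the AUC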
trap.rule <- function(x,y) sum(diff(x)*(y[-1]+y[-length(y)]))/2
auc <- trap.rule(rev(1-spec), rev(sens))
auc
}
kia <- function(truth, predicted){
TP <- sum(predicted[truth==1]==1)
TN <- sum(predicted[truth==0]==0)
FN <- sum(truth==1)-TP
FP <- sum(truth==0)-TN
N <- TP+TN+FN+FP
Pobs <- (TP+TN)/N
Pexp <- ((TP+FN)*(TP+FP)+(FP+TN)*(FN+TN))/N^2
kia <- (Pobs-Pexp)/(1-Pexp)
kia
}
convertScores <- function(scores){
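  # Turn the model-by-metric score matrix into per-metric rank lists and weights for RankAggreg/BruteAggreg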
scores <- t(scores)
ranks <- matrix(0, nrow(scores), ncol(scores))
weights <- ranks
for(i in 1:nrow(scores)){
ms <- sort(scores[i,], decreasing=TRUE, ind=TRUE)
ranks[i,] <- colnames(scores)[ms$ix]
weights[i,] <- ms$x
}
list(ranks = ranks, weights = weights)
}
if (family==c("gaussian")) {
x <- as.matrix(x)
y <- as.numeric(y)
if(!is.null(seed)) {
set.seed(seed) } else
set.seed(0123)
validation <- c("rmse", "mae", "re", "smape")
distance <- c("Spearman")
rownames(x) <- NULL
n <- length(y)
nvm <- length(validation)
fittedModels <- list()
trimmedModels <- list()
x_length <- ncol(x)
RecordM <- matrix(0, M, ncol(x))
colnames(RecordM) <- colnames(x)
model.rmse <- c()
for(k in 1:M){
s <- sample(round(boot.scale*n), replace=TRUE) # Size% of original samples
training <- x[s, ]
testing <- x[-unique(s), ]
trainY <- y[s]
# Random subspace
Res <- list()
predicted <- list()
for(n0 in 1:subspace.size){
s0 <- sample(x=dim(x)[2], size=predictor.subset, replace=FALSE)
training_1 <- training[,s0]
testing_1 <- testing[,s0]
Res[[n0]] <- cv.glmnet(x=as.matrix(training_1), y=trainY, type.measure="mse", nfolds=kfold, family="gaussian")
predicted[[n0]] <- predict(Res[[n0]], newx=as.matrix(testing_1), s=c("lambda.min"), type=c("link"))
}
# Compute validation measures
scores <- matrix(0, subspace.size, nvm)
rownames(scores) <- 1:subspace.size
colnames(scores) <- validation
truth <- y[-unique(s)]
for(i in 1:subspace.size){
for(j in 1:nvm){
scores[i,j] <- switch(validation[j],
"rmse" = 1/rmse(truth, predicted[[i]]),
"mae" = 1/mae(truth, predicted[[i]]),
"re" = 1/re(truth, predicted[[i]]),
"smape" = 1/smape(truth, predicted[[i]])
)
}
}
# Perform rank aggregation
algorithms <- as.character(1:subspace.size)
convScores <- convertScores(scores)
if(nvm > 1 & subspace.size <= 8)
if(weighted)
fittedModels[[k]] <- Res[[which(algorithms == BruteAggreg(convScores$ranks,
subspace.size, convScores$weights, distance=distance)$top.list[1])]]
else
fittedModels[[k]] <- Res[[which(algorithms == BruteAggreg(convScores$ranks, subspace.size,
distance=distance)$top.list[1])]]
else if(nvm > 1 & subspace.size > 8)
if(weighted)
fittedModels[[k]] <- Res[[which(algorithms == RankAggreg(convScores$ranks,
subspace.size, convScores$weights, distance=distance, verbose=FALSE)$top.list[1])]]
else
fittedModels[[k]] <- Res[[which(algorithms == RankAggreg(convScores$ranks, subspace.size,
distance=distance, verbose=FALSE)$top.list[1])]]
else
fittedModels[[k]] <- Res[[which.max(scores[,1])]]
# Variable importance evaluation
if(predictor.importance){
model.final <- fittedModels[[k]]$glmnet.fit
LassoM.coef <- coef(model.final, s=fittedModels[[k]]$lambda.min)
Var_subset <- names(LassoM.coef[as.vector(LassoM.coef[,1]!=0),])
Var_subset <- Var_subset[Var_subset!=c("(Intercept)")]
if(!is.null(Var_subset)){
training_2 <- training[, Var_subset]
Res1 <- cv.glmnet(x=as.matrix(training_2), y=trainY, type.measure="mse", nfolds=kfold, family="gaussian")
model.final1 <- Res1$glmnet.fit
LassoM.coef1 <- coef(model.final1, s=Res1$lambda.min)[-1,]
if (length(LassoM.coef1)!=0){
for(i0 in 1:length(LassoM.coef1)){
for(j0 in 1:x_length){
if (names(LassoM.coef1)[i0]==colnames(RecordM)[j0]){RecordM[k,j0]=LassoM.coef1[i0]}
else {RecordM[k,j0]=RecordM[k,j0]}
}
}
}
}
}
# Trimmed bagging
if(trimmed){
glmFit <- fittedModels[[k]]
model.glmFit <- glmFit$glmnet.fit
model.var <- rownames(as.matrix(coef(model.glmFit, s=glmFit$lambda.min)))
model.testing <- as.matrix(testing[, model.var[-1]])
model.predicted <- predict(glmFit, newx=model.testing, s=c("lambda.min"), type=c("link"))
model.rmse[k] <- rmse(truth, model.predicted)
}
# Running message
if(verbose)
cat("Iter ", k, "\n")
} # Loop end 1:M
  # Variable importance scores
RecordM <- abs(RecordM)
varImportance <- abs(as.matrix(apply(RecordM, 2, mean), ncol(RecordM), 1))
# Trimmed bagging
if(trimmed){
trimmedModels <- fittedModels
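    # Re-order the fitted models by bootstrap RMSE rank; trimming later excludes the highest-error fits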
for(r in 1:M){
trimmedModels[[rank(model.rmse)[r]]] <- fittedModels[[r]]
}
}
} # linear regression model end
if (family==c("binomial")) {
x <- as.matrix(x)
y <- as.factor(y)
ly <- levels(y)
if(length(ly) == 2 && any(ly != c("0", "1"))){
stop("For logistic regression model, levels in y must be 0 and 1")}
if(!is.null(seed)) {
set.seed(seed)} else
set.seed(0123)
validation <- c("accuracy", "sensitivity", "specificity", "auc", "kia")
distance <- c("Spearman")
rownames(x) <- NULL
n <- length(y)
nvm <- length(validation)
fittedModels <- list()
trimmedModels <- list()
RecordM <- matrix(0, M, ncol(x))
colnames(RecordM) <- colnames(x)
model.accuracy <- c()
for(k in 1:M){
repeat{
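      # Resample until both classes appear in the bootstrap sample and in the out-of-bag set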
s <- sample(round(boot.scale*n), replace=TRUE) # Size% of original samples
if(length(table(y[s])) >= 2 & length(table(y[-s])) >= 2)
break
}
training <- x[s, ]
testing <- x[-unique(s), ]
trainY <- y[s]
# Random subspace
Res <- list()
probabilities <- list()
predicted <- list()
for(n0 in 1:subspace.size){
s0 <- sample(length(colnames(x)), replace=FALSE)
s0 <- s0[1:predictor.subset]
training_1 <- training[, s0]
testing_1 <- testing[, s0]
Res[[n0]] <- cv.glmnet(x=as.matrix(training_1), y=as.factor(trainY), type.measure="deviance", nfolds=kfold, family="binomial")
predicted[[n0]] <- predict(Res[[n0]], newx=as.matrix(testing_1), s=c("lambda.min"), type=c("class"))
probabilities[[n0]] <- predict(Res[[n0]], newx=as.matrix(testing_1), s=c("lambda.min"), type=c("response"))
}
# Compute validation measures
scores <- matrix(0, subspace.size, nvm)
rownames(scores) <- 1:subspace.size
colnames(scores) <- validation
truth <- y[-unique(s)]
for(i in 1:subspace.size){
for(j in 1:nvm){
scores[i,j] <- switch(validation[j],
"accuracy" = accuracy(truth, factor(predicted[[i]], levels=ly)),
"sensitivity" = sensitivity(truth, factor(predicted[[i]], levels=ly)),
"specificity" = specificity(truth, factor(predicted[[i]], levels=ly)),
"kia" = kia(truth, factor(predicted[[i]], levels=ly)),
"auc" = AUC(truth, probabilities[[i]])
)
}
}
# Perform rank aggregation
algorithms <- as.character(1:subspace.size)
convScores <- convertScores(scores)
if(nvm > 1 & subspace.size <= 8)
if(weighted)
fittedModels[[k]] <- Res[[which(algorithms == BruteAggreg(convScores$ranks,
subspace.size, convScores$weights, distance=distance)$top.list[1])]]
else
fittedModels[[k]] <- Res[[which(algorithms == BruteAggreg(convScores$ranks, subspace.size,
distance=distance)$top.list[1])]]
else if(nvm > 1 & subspace.size > 8)
if(weighted)
fittedModels[[k]] <- Res[[which(algorithms == RankAggreg(convScores$ranks,
subspace.size, convScores$weights, distance=distance, verbose=FALSE)$top.list[1])]]
else
fittedModels[[k]] <- Res[[which(algorithms == RankAggreg(convScores$ranks, subspace.size,
distance=distance, verbose=FALSE)$top.list[1])]]
else
fittedModels[[k]] <- Res[[which.max(scores[, 1])]]
# Variable importance evaluation
if(predictor.importance){
model.final <- fittedModels[[k]]$glmnet.fit
LassoM.coef <- coef(model.final, s=fittedModels[[k]]$lambda.min)
Var_subset <- names(LassoM.coef[as.vector(LassoM.coef[,1]!=0), ])
Var_subset <- Var_subset[Var_subset!=c("(Intercept)")]
if(!is.null(Var_subset)){
training_2 <- training[,Var_subset]
Res1 <- cv.glmnet(x=as.matrix(training_2), y=as.factor(trainY), type.measure="deviance", nfolds=kfold, family="binomial")
model.final1 <- Res1$glmnet.fit
LassoM.coef1 <- coef(model.final1, s=Res1$lambda.min)[-1,]
if (length(LassoM.coef1)!=0){
for(i0 in 1:length(LassoM.coef1)){
for(j0 in 1:length(colnames(RecordM))){
if (names(LassoM.coef1)[i0]==colnames(RecordM)[j0]){RecordM[k, j0]=LassoM.coef1[i0]}
else {RecordM[k, j0]=RecordM[k, j0]}
}
}
}
}
}
# Trimmed bagging
if(trimmed){
glmFit <- fittedModels[[k]]
model.glmFit <- glmFit$glmnet.fit
model.var <- rownames(as.matrix(coef(model.glmFit, s=glmFit$lambda.min)))
model.testing <- as.matrix(testing[, model.var[-1]])
model.predicted <- predict(glmFit, newx=model.testing, s=c("lambda.min"), type=c("class"))
model.accuracy[k] <- accuracy(truth, model.predicted)
}
# Running message
if(verbose)
cat("Iter ", k, "\n")
} # Loop End 1:M
   # Variable importance scores
RecordM <- abs(RecordM)
varImportance <- as.matrix(apply(RecordM, 2, mean), ncol(RecordM), 1)
# Trimmed bagging
if(trimmed){
trimmedModels <- fittedModels
for(r in 1:M){
trimmedModels[[rank(model.accuracy)[r]]] <- fittedModels[[r]]
}
}
} # logistic regression model end
result <- list(family=family, M=M, predictor.subset=predictor.subset, subspace.size=subspace.size, validation.metric=validation, boot.scale=boot.scale,
distance=distance, models.fitted=fittedModels, models.trimmed=trimmedModels, y.true=y, conv.scores=convScores, importance=varImportance)
class(result) <- "bagging"
result
}
|
/R/Bagging.lasso.R
|
no_license
|
cran/SparseLearner
|
R
| false | false | 21,460 |
r
|
|
#' Extract Diagnostic Quantities of \pkg{OncoBayes2} Models
#'
#' Extract quantities that can be used to diagnose sampling behavior
#' of the algorithms applied by \pkg{Stan} at the back-end of
#' \pkg{OncoBayes2}.
#'
#' @name diagnostic-quantities
#' @aliases log_posterior nuts_params rhat neff_ratio
#'
#' @param object A \code{blrmfit} or \code{blrmtrial} object.
#' @param pars An optional character vector of parameter names.
#' For \code{nuts_params} these will be NUTS sampler parameter
#' names rather than model parameters. If \code{pars} is omitted
#' all parameters are included.
#' @param ... Arguments passed to individual methods.
#'
#' @return The exact form of the output depends on the method.
#'
#' @details For more details see
#' \code{\link[bayesplot:bayesplot-extractors]{bayesplot-extractors}}.
#'
#' @template start-example
#' @examples
#' example_model("single_agent", silent=TRUE)
#'
#' head(log_posterior(blrmfit))
#'
#' np <- nuts_params(blrmfit)
#' str(np)
#' # extract the number of divergence transitions
#' sum(subset(np, Parameter == "divergent__")$Value)
#'
#' head(rhat(blrmfit))
#' head(neff_ratio(blrmfit))
#'
#' @template stop-example
#'
NULL
#' @rdname diagnostic-quantities
#' @method log_posterior blrmfit
#' @importFrom bayesplot log_posterior
#' @export log_posterior
#' @export
log_posterior.blrmfit <- function(object, ...) {
.contains_draws(object)
bayesplot::log_posterior(object$stanfit, ...)
}
#' @rdname diagnostic-quantities
#' @method nuts_params blrmfit
#' @importFrom bayesplot nuts_params
#' @export nuts_params
#' @export
nuts_params.blrmfit <- function(object, pars = NULL, ...) {
.contains_draws(object)
bayesplot::nuts_params(object$stanfit, pars = pars, ...)
}
#' @rdname diagnostic-quantities
#' @method rhat blrmfit
#' @importFrom bayesplot rhat
#' @export rhat
#' @export
rhat.blrmfit <- function(object, pars = NULL, ...) {
.contains_draws(object)
bayesplot::rhat(object$stanfit, pars = pars, ...)
}
#' @rdname diagnostic-quantities
#' @method neff_ratio blrmfit
#' @importFrom bayesplot neff_ratio
#' @export neff_ratio
#' @export
neff_ratio.blrmfit <- function(object, pars = NULL, ...) {
.contains_draws(object)
bayesplot::neff_ratio(object$stanfit, pars = pars, ...)
}
## --- internal
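## Shared guard: the extractor methods above call this to stop early when the fit has no posterior draws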
.contains_draws <- function(object) {
assert_that(nsamples(object) > 0, msg="The model does not contain posterior draws.")
}
|
/R/diagnostics.R
|
no_license
|
cran/OncoBayes2
|
R
| false | false | 2,413 |
r
|
|
## plot1.R
setwd("/Users/amrastog/datasciencecoursera/ExploratoryDataAnalysis")
## Read data
data = read.table("household_power_consumption.txt", na.strings='?', sep=';', header=TRUE)
data$Date=strptime(paste(data$Date,data$Time), "%d/%m/%Y %H:%M:%S")
data=subset(data, (data$Date>=strptime("2007-02-01","%Y-%m-%d")&(data$Date<strptime("2007-02-03","%Y-%m-%d"))))
hist(data$Global_active_power, col="red",main="Global Active Power",xlab="Global Active Power (Kilowatts)")
dev.copy(png, file = "plot1.png", height=480,width=480) ## Copy my plot to a PNG file
dev.off() ## Don't forget to close the PNG device!
|
/plot1.R
|
no_license
|
rusteyz/ExData_Plotting1
|
R
| false | false | 632 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exemplarInpainting.R
\name{exemplarInpainting}
\alias{exemplarInpainting}
\title{Uses example images to inpaint or approximate an existing image.}
\usage{
exemplarInpainting(
img,
paintMask,
imageList,
featureRadius = 2,
scaleInpaintIntensity = 0,
sharpen = FALSE,
feather = 1,
predalgorithm = "lm",
debug = FALSE
)
}
\arguments{
\item{img}{antsImage to be approximated / painted}
\item{paintMask}{painting mask with values 1, or values 1 and 2 - if there is
a 2, then the function will learn from label 1 to paint label 2. The mask should
cover the brain.}
\item{imageList}{a list containing antsImages}
\item{featureRadius}{- radius of image neighborhood e.g. 2}
\item{scaleInpaintIntensity}{- brighter or darker painted voxels, default of
0 sets this parameter automatically}
\item{sharpen}{- sharpen the approximated image}
\item{feather}{- value (e.g. 1) that helps feather the mask for smooth
blending}
\item{predalgorithm}{- string svm or lm}
\item{debug}{- TRUE or FALSE}
}
\value{
inpainted image
}
\description{
Employs a robust regression approach to learn the relationship between a
sample image and a list of images that are mapped to the same space as the
sample image. The regression uses data from an image neighborhood.
}
\examples{
set.seed(123)
fi<-abs(replicate(100, rnorm(100)))
fi[1:10,]<-fi[,1:10]<-fi[91:100,]<-fi[,91:100]<-0
mask<-fi
mask[ mask > 0 ]<-1
mask2<-mask
mask2[11:20,11:20]<-2
mask<-as.antsImage( mask , "float" )
fi<-as.antsImage( fi , "float" )
fi<-smoothImage(fi,3)
mo<-as.antsImage( replicate(100, rnorm(100)) , "float" )
mo2<-as.antsImage( replicate(100, rnorm(100)) , "float" )
ilist<-list(mo,mo2)
painted<-exemplarInpainting(fi,mask,ilist)
mask2<-as.antsImage( mask2 , "float" )
painted2<-exemplarInpainting(fi,mask2,ilist)
# just use 1 image, so no regression is performed
painted3<-exemplarInpainting(fi,mask2, list(ilist[[1]]))
}
\author{
Brian B. Avants
}
\keyword{inpainting}
\keyword{template}
|
/man/exemplarInpainting.Rd
|
permissive
|
ANTsX/ANTsR
|
R
| false | true | 2,030 |
rd
|
#####---------------------------------------------------------------------------
## implement recycling rule for function arguments
#####---------------------------------------------------------------------------
recycle <-
function(...) {
dots <- list(...)
maxL <- max(vapply(dots, length, integer(1)))
lapply(dots, rep, length=maxL)
}
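## Minimal usage sketch (illustrative values only): recycle() pads the shorter
## arguments to the length of the longest one, mirroring R's recycling rule
# recycle(1:2, 5, c(10, 20, 30))
# -> list(c(1, 2, 1), c(5, 5, 5), c(10, 20, 30))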
#####---------------------------------------------------------------------------
## Hoyt / Nakagami-q distribution
## correlated bivariate normal distribution rewritten in polar coordinates
## pdf, cdf, and inverse cdf of the distribution of the radius
#####---------------------------------------------------------------------------
## determine parameters for Hoyt distribution
getHoytParam <-
function(x) {
UseMethod("getHoytParam")
}
## based on data frame with (x,y)-coords
getHoytParam.data.frame <-
function(x) {
sigma <- cov(getXYmat(x)) # covariance matrix
x <- eigen(sigma)$values # eigenvalues
NextMethod("getHoytParam")
}
## based on list of covariance matrices
getHoytParam.list <-
function(x) {
    if(!all(vapply(x, is.matrix, logical(1)))) { stop("all elements of x must be matrices") }
    if(!all(vapply(x, is.numeric, logical(1)))) { stop("all elements of x must be numeric") }
    if(!all(vapply(x, dim, integer(2)) == 2L)) { stop("all elements of x must be (2 x 2)-matrices") }
getEV <- function(sigma) { # eigenvalues from covariance matrix
if(!isTRUE(all.equal(sigma, t(sigma)))) {
stop("x must be symmetric")
}
lambda <- eigen(sigma)$values
if(!all(lambda >= -sqrt(.Machine$double.eps) * abs(lambda[1]))) {
stop("x is numerically not positive definite")
}
lambda
}
ev <- lapply(x, getEV) # eigenvalues for all matrices
ev1 <- vapply(ev, head, FUN.VALUE=numeric(1), n=1) # all first eigenvalues
ev2 <- vapply(ev, tail, FUN.VALUE=numeric(1), n=1) # all second eigenvalues
qpar <- 1/sqrt(((ev1+ev2)/ev2) - 1) # Hoyt q
omega <- ev1+ev2 # Hoyt omega
return(list(q=qpar, omega=omega))
}
## based on covariance matrix
getHoytParam.matrix <-
function(x) {
if(any(dim(x) != 2L)) { stop("x must be a (2 x 2)-matrix") }
if(!isTRUE(all.equal(x, t(x)))) { stop("x must be symmetric") }
x <- eigen(x)$values
NextMethod("getHoytParam")
}
## based on 2-vector of eigenvalues
## not vectorized
getHoytParam.default <-
function(x) {
if(!is.numeric(x)) { stop("x must be numeric") }
if(any(x < 0)) { stop("x must be >= 0") }
if(length(x) != 2L) { stop("x must have length 2") }
if(!all(x >= -sqrt(.Machine$double.eps) * abs(max(x)))) {
stop("x is numerically not positive definite")
}
x <- sort(x, decreasing=TRUE) # largest eigenvalue first
ev1 <- x[1]
ev2 <- x[2]
qpar <- 1 / sqrt(((ev1+ev2) / ev2) - 1) # Hoyt q
omega <- ev1+ev2 # Hoyt omega
return(list(q=qpar, omega=omega))
}
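## Usage sketch (illustrative values): a diagonal covariance matrix with
## eigenvalues 4 and 1 gives Hoyt q = 1/sqrt(((4+1)/1) - 1) = 0.5 and omega = 4+1 = 5
# getHoytParam(cbind(c(4, 0), c(0, 1)))
# -> list(q=0.5, omega=5)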
# determine eigenvalues from Hoyt parameters
getEVfromHoyt <-
function(qpar, omega) {
nnaQ <- which(!is.na(qpar))
nnaO <- which(!is.na(omega))
stopifnot(all(qpar[nnaQ] > 0), all(qpar[nnaQ] < 1), all(omega[nnaO] > 0))
ev2 <- omega / ((1/qpar^2) + 1) # 2nd eigenvalue
ev1 <- omega - ev2 # 1st eigenvalue
## sort each pair of eigenvalues in descending order
ev1ord <- pmax(ev1, ev2)
ev2ord <- pmin(ev1, ev2)
return(list(ev1=ev1ord, ev2=ev2ord))
}
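## Round-trip sketch: recover the eigenvalues from the Hoyt parameters above
# getEVfromHoyt(qpar=0.5, omega=5)
# -> list(ev1=4, ev2=1)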
#####---------------------------------------------------------------------------
## pdf Hoyt distribution
## https://reference.wolfram.com/language/ref/HoytDistribution.html
dHoyt <-
function(x, qpar, omega) {
is.na(x) <- is.nan(x) # replace NaN with NA
is.na(qpar) <- (qpar < 0) | (qpar > 1) | !is.finite(qpar)
is.na(omega) <- (omega <= 0) | !is.finite(omega)
argL <- recycle(x, qpar, omega)
x <- argL[[1]]
qpar <- argL[[2]]
omega <- argL[[3]]
dens <- numeric(length(x)) # initialize density to 0
keep <- which((x >= 0) | !is.finite(x)) # keep non-negative x, NA, -Inf, Inf
if(length(keep) < 1L) { return(dens) } # nothing to do
lfac1 <- log(x[keep]) + log(1 + qpar[keep]^2) - log(qpar[keep]*omega[keep])
lfac2 <- -x[keep]^2*(1+qpar[keep]^2)^2/(4*qpar[keep]^2*omega[keep])
bArg <- (x[keep]^2*(1-qpar[keep]^4) /(4*qpar[keep]^2*omega[keep]))
lfac3 <- log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg
res <- exp(lfac1+lfac2+lfac3) # this may be NaN
dens[keep] <- ifelse(is.nan(res), 0, res) # if so, set to 0
return(dens)
}
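## Sanity-check sketch (illustrative parameters): the density should integrate to ~1
# integrate(dHoyt, lower=0, upper=Inf, qpar=0.5, omega=5)$value
# -> approximately 1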
## Hoyt, RS. 1947. Probability functions for the modulus and angle of the
## normal complex variate. Bell System Technical Journal, 26(2). 318-359.
## Hoyt pdf is for scaled variables with S := 1/sqrt(Su^2+Sv^2), u=U/S, v=V/S
## -> set r to r/S and pdf to pdf/S
# dCNhoyt <- function(r, sigma) {
# ev <- eigen(sigma)$values
# b <- abs(diff(ev)) / sum(ev)
# S <- sqrt(sum(ev))
# r <- r/S
#
# fac1 <- (2*r/sqrt(1-b^2)) * exp(-r^2/(1-b^2))
# bArg <- (b*r^2/(1-b^2))
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# dens <- fac1*fac2 / S
#
# return(dens)
# }
## equivalent
## Greenwalt, CR & Shultz, ME. 1968.
## Principles of Error Theory and Cartographic Applications
## ACIC TR-96, Appendix D-3, eq. 3
# dGreenwalt <- function(r, sigma) {
# ev <- eigen(sigma)$values
# fac1 <- 1/prod(sqrt(ev))
# fac2 <- r*exp(-(r^2/(4*ev[1])) * (1 + (ev[1]/ev[2])))
# bArg <- (r^2/(4*ev[1])) * ((ev[1]/ev[2]) - 1)
# fac3 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# dens <- fac1*fac2*fac3
#
# return(dens)
# }
#####---------------------------------------------------------------------------
## generalized Marcum Q-function from non-central chi^2 distribution
## Nuttall, AH. (1975). Some integrals involving the Q-M function.
## IEEE Transactions on Information Theory, 21 (1), 95-96
marcumQ <-
function(a, b, nu, lower.tail=TRUE) {
pchisq(b^2, df=2*nu, ncp=a^2, lower.tail=lower.tail)
}
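## Spot-check sketch: for a = 0 and nu = 1 the Marcum Q-function reduces to
## Q_1(0, b) = exp(-b^2/2), the survival function of a central chi^2 with 2 df
# marcumQ(0, 2, nu=1, lower.tail=FALSE)   # equals exp(-2), approximately 0.1353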
#####---------------------------------------------------------------------------
## cdf Hoyt distribution in closed form
## Paris, JF. 2009. Nakagami-q (Hoyt) distribution function with applications.
## Electronics Letters, 45(4). 210-211. Erratum: doi:10.1049/el.2009.0828
pHoyt <-
function(q, qpar, omega, lower.tail=TRUE) {
is.na(qpar) <- (qpar < 0) | (qpar > 1) | !is.finite(qpar)
is.na(omega) <- (omega <= 0) | !is.finite(omega)
argL <- recycle(q, qpar, omega)
q <- argL[[1]]
qpar <- argL[[2]]
omega <- argL[[3]]
pp <- numeric(length(q)) # initialize probabilities to 0
keep <- which((q >= 0) | !is.finite(q)) # keep non-negative q, NA, NaN, -Inf, Inf
alphaQ <- (sqrt((1 - qpar[keep]^4))/(2*qpar[keep])) * sqrt((1 + qpar[keep])/(1 - qpar[keep]))
betaQ <- (sqrt((1 - qpar[keep]^4))/(2*qpar[keep])) * sqrt((1 - qpar[keep])/(1 + qpar[keep]))
y <- q[keep] / sqrt(omega[keep])
if(lower.tail) {
pp[keep] <- marcumQ( betaQ*y, alphaQ*y, nu=1, lower.tail=lower.tail) -
marcumQ(alphaQ*y, betaQ*y, nu=1, lower.tail=lower.tail)
## special cases not caught so far
pp[q == -Inf] <- 0
pp[q == Inf] <- 1
} else {
pp[keep] <- 1 + marcumQ( betaQ*y, alphaQ*y, nu=1, lower.tail=lower.tail) -
marcumQ(alphaQ*y, betaQ*y, nu=1, lower.tail=lower.tail)
## special cases not caught so far
pp[q < 0] <- 1
pp[q == Inf] <- 0
}
return(pp)
}
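## Consistency sketch (illustrative parameters): the closed-form cdf should agree
## with the numerically integrated pdf up to integration error
# pHoyt(2, qpar=0.5, omega=5)
# integrate(dHoyt, lower=0, upper=2, qpar=0.5, omega=5)$value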
## equivalent
## Hoyt, RS. 1947. Probability functions for the modulus and angle of the
## normal complex variate. Bell System Technical Journal, 26(2). 318-359.
# pCNhoyt <- function(qq, sigma) {
# ev <- eigen(sigma)$values
# b <- abs(diff(ev)) / sum(ev)
# S <- sqrt(sum(ev))
# qq <- qq/S # rescale
#
# intFun <- function(r, b) {
# fac1 <- r*exp(-(r^2/(1-b^2)))
# bArg <- (b*r^2/(1-b^2))
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# res <- fac1*fac2 # this may be NaN
# ifelse(is.finite(res), res, 0) # if so, return 0
# }
#
# pp <- (1/sqrt(1-b^2)) * sapply(qq, function(x) 2*integrate(intFun, 0, x, b=b)$value)
# return(pp)
# }
## equivalent
## Greenwalt, CR & Shultz, ME. 1968.
## Principles of Error Theory and Cartographic Applications
## ACIC TR-96, Appendix D-3, eq3
# pCNgreenwalt <- function(qq, sigma) {
# intFun <- function(r, ev) {
# fac1 <- r*exp(-(r^2/(4*ev[1])) * (1 + (ev[1]/ev[2])))
# ## modified Bessel function of first kind and order 0
# bArg <- (r^2/(4*ev[1])) * ((ev[1]/ev[2]) - 1)
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# res <- fac1*fac2 # this may be NaN
# return(ifelse(is.finite(res), res, 0)) # if so, return 0
# }
#
# ev <- eigen(sigma)$values
# pp <- (1/prod(sqrt(ev))) * sapply(qq, function(x) integrate(intFun, 0, x, ev=ev)$value)
# return(pp)
# }
## equivalent
## Hoover, WE. 1984. Algorithms For Confidence Circles, and Ellipses.
## Washington, D.C., National Oceanic and Atmospheric Administration.
## NOAA Technical Report NOS 107 C&GS 3, 1-29. p. 9.
# pCNhoover <- function(qq, sigma) {
# ev <- eigen(sigma)$values
# Hk <- qq / sqrt(ev[1])
# Hc <- sqrt(ev[2] / ev[1])
# Hbeta <- 2*Hc / pi
# Hgamma <- (Hk/(2*Hc))^2
#
# Hw <- function(phi, Hc) {
# (Hc^2 - 1)*cos(phi) - (Hc^2 + 1)
# }
#
# Hf <- function(phi, Hc, Hgamma) {
# (exp(Hgamma*Hw(phi, Hc)) - 1) / Hw(phi, Hc)
# }
#
# Hbeta * integrate(Hf, 0, pi, Hc=Hc, Hgamma=Hgamma)$value
# }
#####---------------------------------------------------------------------------
## Hoyt quantile function through root finding of cdf
qHoyt <-
function(p, qpar, omega, lower.tail=TRUE, loUp=NULL) {
is.na(qpar) <- (qpar < 0) | (qpar > 1) | !is.finite(qpar)
is.na(omega) <- (omega <= 0) | !is.finite(omega)
argL <- recycle(p, qpar, omega)
p <- argL[[1]]
qpar <- argL[[2]]
omega <- argL[[3]]
qq <- rep(NA_real_, length(p))
keep <- which((p >= 0) & (p < 1))
if(length(keep) < 1L) { return(qq) } # nothing to do
if(is.null(loUp)) { # no search interval given
## use Grubbs chi^2 quantile for setting root finding interval
## Grubbs-Liu chi^2 and Hoyt can diverge
GP <- getGPfromHP(qpar, omega) # Grubbs parameters
qGrubbs <- qChisqGrubbs(p[keep], m=GP$m, v=GP$v, muX=GP$muX,
varX=GP$varX, l=GP$l, delta=GP$delta,
lower.tail=lower.tail, type="Liu")
qGrubbs.6 <- qChisqGrubbs(0.6, m=GP$m, v=GP$v, muX=GP$muX,
varX=GP$varX, l=GP$l, delta=GP$delta,
lower.tail=lower.tail, type="Liu")
qLo <- ifelse(p[keep] <= 0.5, 0, 0.25*qGrubbs)
qUp <- ifelse(p[keep] <= 0.5, qGrubbs.6, 1.75*qGrubbs)
loUp <- split(cbind(qLo, qUp), seq_along(p))
} else {
if(is.matrix(loUp)) {
loUp <- split(loUp, seq_len(nrow(loUp)))
} else if(is.vector(loUp)) {
loUp <- list(loUp)
} else if(!is.list(loUp)) {
stop("loUp must be a list, a matrix, a vector, or missing entirely")
}
}
cdf <- function(x, p, qpar, omega, lower.tail) {
pHoyt(x, qpar=qpar, omega=omega, lower.tail=lower.tail) - p
}
getQ <- function(p, qpar, omega, loUp, lower.tail) {
tryCatch(uniroot(cdf, interval=loUp, p=p, qpar=qpar, omega=omega,
lower.tail=lower.tail)$root,
error=function(e) return(NA_real_))
}
qq[keep] <- unlist(Map(getQ, p=p[keep], qpar=qpar[keep], omega=omega[keep],
loUp=loUp[keep], lower.tail=lower.tail[1]))
return(qq)
}
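## Inversion sketch: pHoyt() applied to the quantile should return p; an explicit
## search interval loUp is supplied so the sketch does not rely on the
## package-internal Grubbs helpers (getGPfromHP, qChisqGrubbs) used by default
# qq <- qHoyt(0.5, qpar=0.5, omega=5, loUp=c(0, 20))
# pHoyt(qq, qpar=0.5, omega=5)   # approximately 0.5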
#####---------------------------------------------------------------------------
## random numbers from Hoyt distribution
rHoyt <-
function(n, qpar, omega, method=c("eigen", "chol", "cdf"), loUp=NULL) {
is.na(qpar) <- (qpar < 0) | (qpar > 1) | !is.finite(qpar)
is.na(omega) <- (omega <= 0) | !is.finite(omega)
method <- match.arg(method)
## if n is a vector, its length determines number of random variates
n <- if(length(n) > 1L) { length(n) } else { n }
qpar <- qpar[1] # only first shape parameter is used
omega <- omega[1] # only first scale parameter is used
rn <- if(method == "eigen") {
lambda <- unlist(getEVfromHoyt(qpar, omega)) # eigenvalues
## simulated 2D normal vectors with mean 0
X <- matrix(rnorm(n*length(lambda)), nrow=n) # with identity cov-mat
xy <- X %*% diag(sqrt(lambda), length(lambda))
sqrt(rowSums(xy^2)) # distances to center
} else if(method == "chol") {
lambda <- getEVfromHoyt(qpar, omega)
sigma <- cbind(c(lambda$ev1, 0), c(0, lambda$ev2))
CF <- chol(sigma, pivot=TRUE) # Cholesky-factor
idx <- order(attr(CF, "pivot"))
CFord <- CF[, idx]
## simulated 2D normal vectors with mean 0
xy <- matrix(rnorm(n*ncol(sigma)), nrow=n) %*% CFord
sqrt(rowSums(xy^2)) # distances to center
} else if(method == "cdf") {
## root finding of pHoyt() given uniform random probabilities:
## find x such that F(x) - U = 0
cdf <- function(x, u, qpar, omega) {
pHoyt(x, qpar=qpar, omega=omega) - u
}
## find quantile via uniroot() with error handling
getQ <- function(u, qpar, omega, loUp) {
tryCatch(uniroot(cdf, interval=loUp, u=u, qpar=qpar, omega=omega)$root,
error=function(e) return(NA_real_))
}
u <- runif(n) # uniform random numbers
## determine search interval(s) for uniroot()
if(is.null(loUp)) { # no search interval given
## use Grubbs chi^2 quantile for setting root finding interval
## Grubbs-Liu chi^2 and Hoyt can diverge
GP <- getGPfromHP(qpar, omega) # Grubbs parameters and quantiles
qGrubbs <- qChisqGrubbs(u, m=GP$m, v=GP$v, muX=GP$muX,
varX=GP$varX, l=GP$l, delta=GP$delta, type="Liu")
qGrubbs.6 <- qChisqGrubbs(0.6, m=GP$m, v=GP$v, muX=GP$muX,
varX=GP$varX, l=GP$l, delta=GP$delta, type="Liu")
qLo <- ifelse(u <= 0.5, 0, 0.25*qGrubbs)
qUp <- ifelse(u <= 0.5, qGrubbs.6, 1.75*qGrubbs)
loUp <- split(cbind(qLo, qUp), seq_along(u))
} else {
if(is.matrix(loUp)) {
loUp <- split(loUp, seq_len(nrow(loUp)))
} else if(is.vector(loUp)) {
loUp <- list(loUp)
} else if(!is.list(loUp)) {
stop("loUp must be a list, a matrix, a vector, or missing entirely")
}
}
unlist(Map(getQ, u=u, qpar=qpar, omega=omega, loUp=loUp))
}
return(rn)
}
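## Simulation sketch (illustrative parameters): with the default "eigen" method
## the mean squared radius of the draws should be close to omega
# set.seed(1)
# r <- rHoyt(10000, qpar=0.5, omega=5)
# mean(r^2)   # close to 5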
|
/R/hoyt.R
|
no_license
|
cran/shotGroups
|
R
| false | false | 15,858 |
r
|
library(TMDb)
### Name: person_latest
### Title: Retrieve new entry people on TMDb.
### Aliases: person_latest
### Keywords: person_latest
### ** Examples
## Not run:
##D
##D ## An example of an authenticated request,
##D ## where api_key is fictitious.
##D ## You can obtain your own at https://www.themoviedb.org/documentation/api
##D
##D api_key <- "key"
##D
##D person_latest(api_key = api_key)
## End(Not run)
|
/data/genthat_extracted_code/TMDb/examples/person_latest.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 430 |
r
|
##################################################################
### Play-by-play, Drive Summary, and Simple Box Score Function ###
# Author: Maksim Horowitz #
# Code Style Guide: Google R Format #
##################################################################
# Play-by-Play Function
#' Parsed Descriptive Play-by-Play Dataset for a Single Game
#' @description This function intakes the JSON play-by-play data of a single
#' game and parses the play description column into individual variables
#' allowing the user to segment the game in a variety of different ways for
#' model building and analysis.
#' @param GameID (character or numeric) A 10 digit game ID associated with a
#' given NFL game.
#' @details Through list manipulation using the do.call and rbind functions
#' a 10-column dataframe with basic information is populated directly from the NFL
#' JSON API. These columns include the following:
#' \itemize{
#' \item{"Drive"} - Drive number
#' \item{"sp"} - Whether the play resulted in a score (any kind of score)
#' \item{"qrt"} - Quarter of Game
#' \item{"down"} - Down of the given play
#' \item{"time"} - Time at start of play
#' \item{"yrdln"} - Between 0 and 50
#' \item{"ydstogo"} - For a first down
#' \item{"ydsnet"} - Total yards gained on a given drive
#' \item{"posteam"} - The offensive team
#' \item{"desc"} - A detailed description of what occured during the play
#' }
#'
#' Through string manipulation and parsing of the description column using the
#' base R and stringr packages, 51 columns were added to the original dataframe allowing
#' the user to have a detailed breakdown of the events of each play.
#' The added variables are specified below:
#' \itemize{
#' \item{"Date"} - Date of game
#' \item{"GameID"} - The ID of the specified game
#' \item{"TimeSecs"} - Time remaining in game in seconds
#' \item{"PlayTimeDiff"} - The time difference between plays in seconds
#' \item{"DefensiveTeam"} - The defensive team on the play (for punts the
#' receiving team is on defense, for kickoffs the receiving team is on offense)
#' \item{"TimeUnder"} -
#' \item{"SideofField"} - The side of the field that the line of scrimmage
#' is on
#' \item{"yrdline100"} - Distance to the opponent's endzone, ranges from 1-99
#' \item{"GoalToGo"} - Binary variable indicating if the play is in a goal-to-go
#' situation
#' \item{"FirstDown"} - Binary: 0 if the play did not result in a first down
#' and 1 if it did
#' \item{"PlayAttempted"} - A variabled used to count the number of plays in a
#' game (should always be equal to 1)
#' \item{"Yards.Gained"} - Amount of yards gained on the play
#' \item{"Touchdown"} - Binary: 1 if the play resulted in a TD else 0
#' \item{"ExPointResult"} - Result of the extra-point: Made, Missed, Blocked
#' \item{"TwoPointConv"} - Result of two-point conversion: Success of Failure
#' \item{"DefTwoPoint"} - Result of defesnive two-point conversion: Success of Failure
#' \item{"Safety"} - Binary: 1 if safety was recorded else 0
#' \item{"PlayType"} - The type of play that occured. Potential values are:
#' \itemize{
#' \item{Kickoff, Punt, Onside Kick}
#' \item{Pass, Run}
#' \item{Sack}
#' \item{Field Goal, Extra Point}
#' \item{Quarter End, Two Minute Warning, End of Game}
#' \item{No Play, QB Kneel, Spike, Timeout}
#' }
#' \item{"Passer"} - The passer on the play if it was a pass play
#' \item{"PassAttempt"} - Binary variable indicating whether a pass was attempted
#' or not
#' \item{"PassOutcome"} - Pass Result: Complete or Incomplete
#' \item{"PassLength"} - Categorical variable indicating the length of the pass:
#' Short or Deep
#' \item{"PassLocation"} - Categorical variable: left, middle, right
#' \item{"InterceptionThrown"} - Binary variable indicating whether an
#' interception was thrown
#' \item{"Interceptor"} - The player who intercepted the ball
#' \item{"Rusher"} - The runner on the play if it was a running play
#' \item{"RushAttempt"} - Binary variable indicating whether or not a run was
#' attempted.
#' \item{"RunLocation"} - The location of the run - left, middle, right
#' \item{"RunGap"} - The gap that the running back ran through
#' \item{"Receiver"} - The player who recorded the reception on a complete pass
#' \item{"Reception"} - Binary Variable indicating a reception on a completed
#' pass: 1 if a reception was recorded else 0
#' \item{"ReturnResult"} - Result of a punt, kickoff, interception, or
#' fumble return
#' \item{"Returner"} - The punt or kickoff returner
#' \item{"Tackler1"} - The primary tackler on the play
#' \item{"Tackler2"} - The secondary tackler on the play
#' \item{"FieldGoalResult"} - Outcome of a fieldgoal: made, missed, blocked
#' \item{"FieldGoalDistance"} - Field goal length in yards
#' \item{"Fumble"} - Binary variable indicating whether a fumble occured or not:
#' 1 if a fumble occured else no
#' \item{"RecFumbTeam"} - Team that recovered the fumble
#' \item{"RecFumbPlayer"} - Player that recovered the fumble
#' \item{"Sack"} - Binary variable indicating whether a sack was recorded: 1 if
#' a sack was recorded else 0
#' \item{"Challenge.Replay"} - Binary variable indicating whether or not the
#' play was reviewed by the replay official on challenges or replay reviews
#' \item{"ChalReplayResult"} - Result of the replay review: Upheld or Overturned
#' \item{"Accepted.Penalty"} - Binary variable indicating whether a penalty was
#' accepted on the play
#' \item{"PenalizedTeam"} - The team who was penalized on the play
#' \item{"PenaltyType"} - Type of penalty on the play. Values include:
#' \itemize{
#' \item{Unnecessary Roughness, Roughing the Passer}
#' \item{Illegal Formation, Defensive Offside}
#' \item{Delay of Game, False Start, Illegal Shift}
#' \item{Illegal Block Above the Waist, Personal Foul}
#' \item{Unnecessary Roughness, Illegal Blindside Block}
#' \item{Defensive Pass Interference, Offensive Pass Interference}
#' \item{Fair Catch Interference, Unsportsmanlike Conduct}
#' \item{Running Into the Kicker, Illegal Kick}
#' \item{Illegal Contact, Defensive Holding}
#' \item{Illegal Motion, Low Block}
#' \item{Illegal Substitution, Neutral Zone Infraction}
#' \item{Ineligible Downfield Pass, Roughing the Passer}
#' \item{Illegal Use of Hands, Defensive Delay of Game}
#' \item{Defensive 12 On-field, Offensive Offside}
#' \item{Tripping, Taunting, Chop Block}
#' \item{Interference with Opportunity to Catch, Illegal Touch Pass}
#' \item{Illegal Touch Kick, Offside on Free Kick}
#' \item{Intentional Grounding, Horse Collar}
#' \item{Illegal Forward Pass, Player Out of Bounds on Punt}
#' \item{Clipping, Roughing the Kicker, Ineligible Downfield Kick}
#' \item{Offensive 12 On-field, Disqualification}
#' }
#' \item{"PenalizedPlayer"} - The penalized player
#' \item{"Penalty.Yards"} - The number of yards that the penalty resulted in
#' \item{"PosTeamScore"} - The score of the possession team (offensive team)
#' \item{"DefTeamScore"} - The score of the defensive team
#' \item{"ScoreDiff"} - The difference in score between the offensive and
#' defensive teams (offensive.score - def.score)
#' \item{"AbsScoreDiff"} - The absolute score difference on the given play
#'
#' }
#'
#' @return A dataframe with 61 columns specifying various statistics and
#' outcomes associated with each play of the specified NFL game.
#' @examples
#' # Parsed play-by-play of the final game in the 2015 NFL season
#'
#' # Save the gameID into a variable
#' nfl2015.finalregseasongame.gameID <- "2016010310"
#'
#' # Input the variable into the function to output the desired dataframe
#' finalgame2015.pbp <- game_play_by_play(nfl2015.finalregseasongame.gameID)
#'
#' # Subset the dataframe based on passing plays
#' subset(finalgame2015.pbp, PlayType == "Pass")
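#'
#' # A further illustrative summary using columns documented above:
#' # average yards gained by play type
#' aggregate(Yards.Gained ~ PlayType, data = finalgame2015.pbp, FUN = mean)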
#' @export
game_play_by_play <- function(GameID) {
  # Google R style format
#########################
#########################
# Converting JSON data
# Converting GameID into URL string
urlstring <- proper_jsonurl_formatting(GameID)
nfl.json <- RJSONIO::fromJSON(RCurl::getURL(urlstring))
number.drives <- length(nfl.json[[1]]$drives) - 1
PBP <- NULL
for (ii in 1:number.drives) {
PBP <- rbind(PBP, cbind("Drive" = ii,
data.frame(do.call(rbind,
(nfl.json[[1]]$drives[[ii]]$plays))
)[,c(1:9)])
)
}
# Adjusting Possession Team
PBP$posteam <- ifelse(PBP$posteam == "NULL", dplyr::lag(PBP$posteam),
PBP$posteam)
# Fixing Possession team for Kick-Offs
kickoff.index <- which(sapply(PBP$desc, regexpr,
pattern =
"kicks") != -1)
pos.teams <- unlist(unique(PBP$posteam))[1:2]
correct.kickoff.pos <- ifelse(PBP$posteam[kickoff.index] == pos.teams[1],
pos.teams[2], pos.teams[1])
PBP[kickoff.index, "posteam"] <- correct.kickoff.pos
# Yard Line Information
# In the earlier seasons when there was a dead ball (i.e. timeout)
# the yardline info was left blank or NULL. Also if the ball was at midfield then
# there was no team associated so I had to add a space to make the strsplit
# work
yline.info.1 <- ifelse(PBP$yrdln == "50", "MID 50", PBP$yrdln)
yline.info.1 <- ifelse(nchar(PBP$yrdln) == 0 |
PBP$yrdln == "NULL", dplyr::lag(PBP$yrdln),
yline.info.1)
yline.info <- sapply(yline.info.1, strsplit, split = " ")
PBP$SideofField <- sapply(yline.info, FUN = function(x) x[1])
PBP$yrdln <- as.numeric(sapply(yline.info, FUN = function(x) x[2]))
# Yard Line on 100 yards Scale: Distance from Opponent Endzone
PBP$yrdline100 <- ifelse(PBP$SideofField == PBP$posteam | PBP$yrdln == 50,
100 - PBP$yrdln, PBP$yrdln )
# Game Date
date.step1 <- stringr::str_extract(urlstring, pattern = "/[0-9]{10}/")
date.step2 <- stringr::str_extract(date.step1, pattern = "[0-9]{8}")
year <- substr(date.step2, start = 1, stop = 4)
month <- substr(date.step2, start = 5, stop = 6)
day <- substr(date.step2, start = nchar(date.step2)-1,
stop = nchar(date.step2))
date <- as.Date(paste(month, day, year, sep = "/"), format = "%m/%d/%Y")
PBP$Date <- date
PBP$GameID <- stringr::str_extract(date.step1, pattern = "[0-9]{10}")
# Adding Zero time to Quarter End
quarter.end <- which(sapply(PBP$desc, regexpr,
pattern = "END QUARTER|END GAME") != -1)
PBP$time[quarter.end] <- "00:00"
# Time in Seconds
qtr.timeinsecs <- lubridate::period_to_seconds(lubridate::ms(PBP$time))
# Quarter 1
qtr.timeinsecs[which(PBP$qtr == 1)] <- qtr.timeinsecs[
which(PBP$qtr == 1)] + (900*3)
# Quarter 2
qtr.timeinsecs[which(PBP$qtr == 2)] <- qtr.timeinsecs[
which(PBP$qtr == 2)] + (900*2)
# Quarter 3
qtr.timeinsecs[which(PBP$qtr == 3)] <- qtr.timeinsecs[
which(PBP$qtr == 3)] + 900
PBP$TimeSecs <- qtr.timeinsecs
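  # e.g. a play with 14:32 left in the 2nd quarter: 872 seconds in the quarter
  # plus 2 * 900 seconds for the two remaining quarters = 2672 seconds remaining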
# Time Difference (in seconds)
plays.time.diff <- abs(c(0, diff(qtr.timeinsecs)))
PBP$PlayTimeDiff <- plays.time.diff
## Challenge or Replay Review ##
# Binary
PBP$Challenge.Replay <- 0
replay.offic <- grep(PBP$desc, pattern = "Replay Official reviewed")
challenged <- grep(PBP$desc, pattern = "challenge")
PBP$Challenge.Replay[c(replay.offic, challenged)] <- 1
# Results
PBP$ChalReplayResult <- NA
upheld.play <- grep(PBP$desc, pattern = "the play was Upheld")
reversed.play <- grep(PBP$desc, pattern = "the play was REVERSED")
PBP$ChalReplayResult[upheld.play] <- "Upheld"
PBP$ChalReplayResult[reversed.play] <- "Reversed"
######################################
# Picking Apart the Description Column
######################################
# Yards Gained
yards.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = "for (-)?([0-9]{1,2})?")
PBP$Yards.Gained <- as.numeric( ifelse( grepl(x = yards.step1,
pattern = "(-)?([0-9]{1,2})"),
stringr::str_extract(yards.step1,
"(-)?([0-9]{1,2})"),
"0")
)
# Two Point Conversion
PBP$TwoPointConv <- NA
two.point.result.ind <- which(sapply(PBP$desc, regexpr,
pattern =
"TWO-POINT CONVERSION ATTEMPT") != -1)
two.point.result2 <- stringr::str_extract_all(PBP$desc[two.point.result.ind],
pattern = "ATTEMPT FAILS|SUCCEEDS")
two.point.result.final1 <- unlist(lapply(two.point.result2, tail, 1))
two.point.result.final2 <- ifelse(two.point.result.final1 == "ATTEMPT FAILS",
"Failure", "Success")
if (length(two.point.result.final2) != 0) {
PBP$TwoPointConv[two.point.result.ind] <- two.point.result.final2
}
# Penalty - Binary Column
PBP$Accepted.Penalty <- NA
penalty.play <- sapply(PBP$desc, stringr::str_extract, pattern = "PENALTY")
PBP$Accepted.Penalty <- ifelse(!is.na(penalty.play), 1, 0)
# Penalized Team
penalized.team.s1 <- sapply(PBP$desc, stringr::str_extract,
"PENALTY on [A-Z]{2,3}")
PBP$PenalizedTeam <- stringr::str_extract(penalized.team.s1,
"[A-Z]{2,3}$")
# Penalty - What was the penalty?
penalty.type.s1 <- sapply(PBP$desc, stringr::str_extract,
pattern ="PENALTY(.){5,25},.+, [0-9] yard(s)")
penalty.type.s2 <- stringr::str_extract(pattern = ",.+,", penalty.type.s1)
penalty.type.final <- stringr::str_sub(penalty.type.s2, 3, -2)
PBP$PenaltyType <- penalty.type.final
# Penalized Player
PBP$PenalizedPlayer <- NA
penalized.player.int <- sapply(PBP$desc[ which(PBP$Accepted.Penalty == 1) ],
stringr::str_extract,
pattern =
"[A-Z]{2,3}-[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?( (S|J)r)?")
penalized.player2 <- stringr::str_extract(penalized.player.int,
pattern =
"[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?( (S|J)r)?")
PBP$PenalizedPlayer[PBP$Accepted.Penalty == 1] <- penalized.player2
# Penalty Yards
PBP$Penalty.Yards <- NA
penalty.yards.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = ", [0-9]{1,2} yard(s?), enforced")
PBP$Penalty.Yards <- ifelse(!is.na(penalty.yards.step1),
as.numeric(stringr::str_extract(
penalty.yards.step1,
"[0-9]{1,2}")
), 0)
# Modifying Down Column
PBP$down <- unlist(PBP$down)
PBP$down[which(PBP$down == 0)] <- NA
# Defenseive Team Column
PBP$DefensiveTeam <- NA
teams.step1 <- stringr::str_extract(unlist(unique(PBP$posteam)), "[A-Z]{2,3}")
teams <- teams.step1[which(!is.na(teams.step1))]
Team1 <- teams[1]
Team2 <- teams[2]
PBP$DefensiveTeam[which(PBP$posteam == Team1)] <- Team2
PBP$DefensiveTeam[which(PBP$posteam == Team2)] <- Team1
### Type of Play Initialized ###
PBP$PlayType <- NA
## Passer ##
passer.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = "[A-Z]\\.[A-Z][A-z]{1,20} pass")
PBP$Passer <- stringr::str_extract(passer.step1,
pattern = "[A-Z]\\.[A-Z][A-z]{1,20}")
## Receiver ##
receiver.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern =
"pass (incomplete)?( )?[a-z]{4,5} [a-z]{4,6} to [A-Z]\\.[A-Z][A-z]{1,20}")
PBP$Receiver <- stringr::str_extract(receiver.step1,
pattern = "[A-Z]\\.[A-Z][A-z]{1,20}")
## Tacklers ##
tacklers.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = "(yard(s?)|no gain) \\([A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?(;)?( )?([A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?)?\\)\\.")
# Identifying the tacklers on the play (either one or two)
tacklers1 <- stringr::str_extract(tacklers.step1,
pattern =
"\\([A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
tacklers1 <- stringr::str_extract(tacklers1,
pattern =
"[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# Pulling out tacklers names
tacklers2 <- stringr::str_extract(tacklers.step1,
pattern =
";( )[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
tacklers2 <- stringr::str_extract(tacklers2,
pattern =
"[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
PBP$Tackler1 <- tacklers1
PBP$Tackler2 <- tacklers2
# Pass Plays
PBP$PassOutcome <- NA
pass.play <- which(sapply( PBP$desc, regexpr, pattern = "pass") != -1)
incomplete.pass.play <- which(sapply(PBP$desc, regexpr,
pattern =
"(pass incomplete)|INTERCEPTED") != -1)
PBP$PlayType[pass.play] <- "Pass"
# Pass Outcome
PBP$PassOutcome[incomplete.pass.play] <- "Incomplete Pass"
PBP$PassOutcome[ setdiff(pass.play, incomplete.pass.play) ] <- "Complete"
# Pass Length
PBP$PassLength <- NA
short.pass <- which(sapply(PBP$desc, regexpr,
pattern = "pass (incomplete )?short") != -1)
deep.pass <- which(sapply(PBP$desc, regexpr,
pattern = "pass (incomplete )?deep") != -1)
PBP$PassLength[short.pass] <- "Short"
PBP$PassLength[deep.pass] <- "Deep"
# Pass Location
PBP$PassLocation <- NA
pass.left <- which(sapply(PBP$desc, regexpr,
pattern = "(short|deep) left") != -1)
  pass.right <- which(sapply(PBP$desc, regexpr, 
                             pattern = "(short|deep) right") != -1)
pass.middle <- which(sapply(PBP$desc, regexpr,
pattern = "(short|deep) middle") != -1)
PBP$PassLocation[pass.left] <- "left"
  PBP$PassLocation[pass.right] <- "right"
PBP$PassLocation[pass.middle] <- "middle"
# Pass Attempt
PBP$PassAttempt <- NA
PBP$PassAttempt <- ifelse( sapply(PBP$desc, grepl,
pattern = "pass"), 1, 0)
# Reception Made
PBP$Reception <- 0
PBP$Reception[setdiff(pass.play,incomplete.pass.play)] <- 1
# Interception Thrown
PBP$InterceptionThrown <- ifelse(
sapply(PBP$desc, grepl,
pattern = "INTERCEPTED"), 1, 0
)
# Punt
punt.play <- which(sapply(PBP$desc, regexpr, pattern = "punts") != -1)
PBP$PlayType[punt.play] <- "Punt"
# Field Goal
fieldgoal <- which(sapply(PBP$desc, regexpr,
pattern = "field goal") != -1)
fieldgoal.null <- which(sapply(PBP$desc, regexpr,
pattern = "field goal(.)+NULLIFIED") != -1)
fieldgoal.rev <- which(sapply(PBP$desc, regexpr,
pattern = "field goal(.)+REVERSED") != -1)
fieldgoal <- setdiff(fieldgoal, c(fieldgoal.null, fieldgoal.rev))
missed.fg <- which(sapply(PBP$desc, regexpr,
pattern = "field goal is No Good") != -1)
blocked.fg <- which(sapply(PBP$desc, regexpr,
pattern = "field goal is BLOCKED") != -1)
PBP$PlayType[fieldgoal] <- "Field Goal"
# Field Goal Distance
fieldgoaldist.prelim <- sapply(PBP$desc[fieldgoal], stringr::str_extract,
pattern = "[0-9]{1,2} yard field goal")
fieldgoaldist <- sapply(fieldgoaldist.prelim, stringr::str_extract,
pattern = "[0-9]{1,2}")
PBP$FieldGoalDistance <- NA
PBP$FieldGoalDistance[fieldgoal] <- fieldgoaldist
# Field Goal Result
PBP$FieldGoalResult <- NA
PBP$FieldGoalResult[missed.fg] <- "No Good"
PBP$FieldGoalResult[blocked.fg] <- "Blocked"
PBP$FieldGoalResult[setdiff(fieldgoal,c(missed.fg, blocked.fg))] <- "Good"
# Extra Point
extrapoint.good <- which(sapply(PBP$desc, regexpr,
pattern = "extra point is GOOD") != -1)
extrapoint.nogood <- which(sapply(PBP$desc, regexpr,
pattern = "(extra point is No Good)") != -1)
extrapoint.blocked <- which(sapply(PBP$desc, regexpr,
pattern = "(extra point is Blocked)") != -1)
extrapoint.aborted <- which(sapply(PBP$desc, regexpr,
pattern = "(extra point is Aborted)") != -1)
PBP$PlayType[c(extrapoint.good,
extrapoint.nogood,
extrapoint.blocked,
extrapoint.aborted)] <- "Extra Point"
# Extra Point Result
PBP$ExPointResult <- NA
PBP$ExPointResult[extrapoint.good] <- "Made"
PBP$ExPointResult[extrapoint.nogood] <- "Missed"
PBP$ExPointResult[extrapoint.blocked] <- "Blocked"
PBP$ExPointResult[extrapoint.blocked] <- "Aborted"
# Touchdown Play
touchdown.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = "TOUCHDOWN")
nullified <- grep(PBP$desc, pattern = "TOUCHDOWN NULLIFIED")
reversed <- grep(PBP$desc, pattern = "TOUCHDOWN(.)+REVERSED")
touchdown.step1[c(nullified, reversed)] <- NA
PBP$Touchdown <- ifelse(!is.na(touchdown.step1), 1, 0)
TDs.b4.extrapt <- which(PBP$PlayType == "Extra Point" |
!is.na(PBP$TwoPointConv)) - 1
extra.TDs <- setdiff(TDs.b4.extrapt, which(PBP$Touchdown == 1))
if (length(extra.TDs) > 0) {
PBP$Touchdown[TDs.b4.extrapt] <- 1
}
# Defensive 2-pt conversion
def.twopt.suc <- which(sapply(PBP$desc, regexpr,
pattern = "DEFENSIVE TWO-POINT ATTEMPT\\. (.){1,70}\\. ATTEMPT SUCCEEDS") != -1)
def.twopt.fail <- which(sapply(PBP$desc, regexpr,
pattern = "DEFENSIVE TWO-POINT ATTEMPT\\. (.){1,70}\\. ATTEMPT FAILS") != -1)
PBP$DefTwoPoint <- NA
PBP$DefTwoPoint[def.twopt.suc] <- "Success"
PBP$DefTwoPoint[def.twopt.fail] <- "Failure"
all.2pts <- intersect(c(def.twopt.suc, def.twopt.fail), two.point.result.ind)
PBP$TwoPointConv[all.2pts] <- "Failure"
# Fumbles
PBP$Fumble <- 0
fumble.index1 <- which(sapply(PBP$desc, regexpr, pattern = "FUMBLE") != -1)
fumble.overruled <- which(sapply(PBP$desc[fumble.index1],
regexpr,
pattern = "(NULLIFIED)|(Reversed)") != -1)
fumble.index <- setdiff(fumble.index1, fumble.overruled)
PBP$Fumble[fumble.index] <- 1
# Timeouts
timeouts <- which(sapply(PBP$desc, regexpr,
pattern = "[A-z]imeout #[1-5] by") != -1)
PBP$PlayType[timeouts] <- "Timeout"
# Quarter End
end.quarter <- which(sapply(PBP$desc, regexpr,
pattern = "END QUARTER") != -1)
PBP$PlayType[end.quarter] <- "Quarter End"
# 2 Minute Warning
two.minute.warning <- which(sapply(PBP$desc, regexpr,
pattern = "Two-Minute Warning") != -1)
PBP$PlayType[two.minute.warning] <- "Two Minute Warning"
# Sack
sack.plays <- which(sapply(PBP$desc, regexpr, pattern = "sacked") != -1)
PBP$PlayType[sack.plays] <- "Sack"
# Sack- Binary
PBP$Sack <- 0
PBP$Sack[sack.plays] <- 1
# Safety - Binary
safety.plays <- which(sapply(PBP$desc, regexpr, pattern = "SAFETY") != -1)
PBP$Safety <- 0
PBP$Safety[safety.plays] <- 1
# QB Kneel
qb.kneel <- which(sapply(PBP$desc, regexpr, pattern = "kneels") != -1)
PBP$PlayType[qb.kneel] <- "QB Kneel"
# Kick Off
kickoff <- which(sapply(PBP$desc, regexpr,
pattern = "kick(s)? [0-9]{2,3}") != -1)
PBP$PlayType[kickoff] <- "Kickoff"
# Onside Kick
onside <- which(sapply(PBP$desc, regexpr, pattern = "onside") != -1)
PBP$PlayType[onside] <- "Onside Kick"
# Spike
spike.play <- which(sapply(PBP$desc, regexpr, pattern = "spiked") != -1)
PBP$PlayType[spike.play] <- "Spike"
# No Play
no.play <- which(sapply(PBP$desc, regexpr,
pattern = "No Play") != -1)
PBP$PlayType[no.play] <- "No Play"
# End of Game
end.game <- which(sapply(PBP$desc, regexpr, pattern = "END GAME") != -1)
PBP$PlayType[end.game] <- "End of Game"
# First Down
PBP$FirstDown <- 0
first.downplays <- which(PBP$down == 1)
first.downs <- first.downplays-1
PBP$FirstDown[first.downs] <- ifelse(PBP$down[first.downs] ==0, NA, 1)
# Running Play
running.play <- which(is.na(PBP$PlayType))
PBP$PlayType[running.play] <- "Run"
PBP$RushAttempt <- ifelse(PBP$PlayType == "Run", 1,0)
# Run Direction
PBP$RunLocation <- NA
run.left <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "left") != -1)
run.right <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "right") != -1)
run.middle <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "middle") != -1)
PBP[running.play,"RunLocation"][run.left] <- "left"
PBP[running.play,"RunLocation"][run.right] <- "right"
PBP[running.play,"RunLocation"][run.middle] <- "middle"
# Run Gap
PBP$RunGap <- NA
run.guard <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "guard") != -1)
run.tackle <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "tackle") != -1)
run.end <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "end") != -1)
PBP[running.play,"RunGap"][run.guard] <- "guard"
PBP[running.play,"RunGap"][run.tackle] <- "tackle"
PBP[running.play,"RunGap"][run.end] <- "end"
# Rusher
rusherStep1 <- sapply(PBP[which(PBP$PlayType == "Run"),"desc"],
stringr::str_extract,
pattern = "[A-Z]\\.[A-Z][A-z]{1,20}")
PBP[running.play,"Rusher"] <- rusherStep1
## Punt and Kick Return Outcome ##
# Punt Outcome
punt.tds <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "TOUCHDOWN") != -1)
punt.tds.null <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "NULLIFIED") != -1)
punt.tds.rev <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "REVERSED") != -1)
punt.tds <- setdiff(punt.tds, c(punt.tds.null, punt.tds.rev))
punts.touchbacks <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "Touchback") != -1)
punts.faircatch <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "fair catch") != -1)
# Kickoff Outcome
kick.tds <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "TOUCHDOWN") != -1)
kick.tds.null <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "NULLIFIED") != -1)
kick.tds.rev <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "REVERSED") != -1)
  kick.tds <- setdiff(kick.tds, c(kick.tds.null, kick.tds.rev))
kick.touchbacks <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "Touchback") != -1)
kick.faircatch <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "fair catch") != -1)
kick.kneels <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "kneel(s)?") != -1)
# Interception Outcome
intercept.td <- which(sapply(PBP$desc[which(PBP$InterceptionThrown == 1)],
regexpr, pattern = "TOUCHDOWN") != -1)
intercept.td.null <- which(sapply(PBP$desc[which(PBP$InterceptionThrown == 1)]
, regexpr, pattern = "NULLIFIED") != -1)
intercept.td.rev <- which(sapply(PBP$desc[which(PBP$InterceptionThrown == 1)],
regexpr, pattern = "REVERSED") != -1)
intercept.td <- setdiff(intercept.td, c(intercept.td.null, intercept.td.rev))
# Fumble Outcome
fumble.td <- which(sapply(PBP$desc[fumble.index],
regexpr, pattern = "TOUCHDOWN") != -1)
# May be able to remove bottom two lines
fumble.td.null <- which(sapply(PBP$desc[fumble.index],
regexpr, pattern = "NULLIFIED") != -1)
fumble.td.rev <- which(sapply(PBP$desc[fumble.index],
regexpr, pattern = "REVERSED") != -1)
fumble.td <- setdiff(fumble.td, c(fumble.td.null, fumble.td.rev))
PBP$ReturnResult <- NA
PBP$ReturnResult[punt.play][punt.tds] <- "Touchdown"
PBP$ReturnResult[punt.play][punts.touchbacks] <- "Touchback"
PBP$ReturnResult[punt.play][punts.faircatch] <- "Fair Catch"
PBP$ReturnResult[kickoff][kick.tds] <- "Touchdown"
PBP$ReturnResult[kickoff][kick.touchbacks] <- "Touchback"
PBP$ReturnResult[kickoff][c(kick.faircatch,kick.kneels)] <- "Fair Catch"
PBP$ReturnResult[which(PBP$InterceptionThrown == 1)][intercept.td] <- "Touchdown"
PBP$ReturnResult[fumble.index][fumble.td] <- "Touchdown"
## Returner ##
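# Returners are extracted in two passes: fair catches end the description with
# "by F.Lastname.", while returns and return TDs name the returner before
# "to TEAM yard line" or "for N yards"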
# Punt Returner
# Fair Catches
punt.returner1 <- sapply(PBP$desc[punt.play], stringr::str_extract,
pattern = "by [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?\\.$")
punt.returner2 <- sapply(punt.returner1, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# Touchdowns or Returns
punt.returner3 <- sapply(PBP$desc[punt.play], stringr::str_extract,
pattern = "(\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? to [A-Z]{2,3} [0-9]{1,2})|\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? for [0-9]{1,2} yard(s)")
punt.returner4 <- sapply(punt.returner3, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# Kickoff Returner
# Fair Catches
kickret1 <- sapply(PBP$desc[kickoff], stringr::str_extract,
pattern = "by [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?\\.$")
kickret2 <- sapply(kickret1, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# Touchdowns or Returns
kickret3 <- sapply(PBP$desc[kickoff], stringr::str_extract,
pattern = "(\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? to [A-Z]{2,3} [0-9]{1,2})|(\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? for [0-9]{1,2} yard(s))|(\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? pushed)")
kickret4 <- sapply(kickret3, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# All Returners
all.returners <- rep(NA, times = nrow(PBP))
all.returners[kickoff][which(!is.na(kickret2))] <- kickret2[which(!is.na(kickret2))]
all.returners[kickoff][which(!is.na(kickret4))] <- kickret4[which(!is.na(kickret4))]
all.returners[punt.play][which(!is.na(punt.returner2))] <- punt.returner2[which(!is.na(punt.returner2))]
all.returners[punt.play][which(!is.na(punt.returner4))] <- punt.returner4[which(!is.na(punt.returner4))]
PBP$Returner <- all.returners
# Interceptor
interceptor1 <- sapply(PBP$desc[which(PBP$InterceptionThrown == 1)],
stringr::str_extract,
pattern = "INTERCEPTED by [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
interceptor2 <- sapply(interceptor1, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
PBP$Interceptor <- NA
PBP$Interceptor[which(PBP$InterceptionThrown == 1)] <- interceptor2
# Fumbler Recovery Team and Player
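# The recovery is parsed from the "TEAM-F.Lastname" token in the description:
# the team abbreviation comes first, followed by the recovering player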
recover.step1 <- sapply(PBP$desc[fumble.index], stringr::str_extract,
pattern = "[A-Z]{2,3}-[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
recover.team <- sapply(recover.step1, stringr::str_extract,
pattern = "[A-Z]{2,3}")
recover.player <- sapply(recover.step1, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
PBP$RecFumbTeam <- NA
PBP$RecFumbTeam[fumble.index] <- recover.team
PBP$RecFumbPlayer <- NA
PBP$RecFumbPlayer[fumble.index] <- recover.player
# The next few variables are counting variables
# Used to help set up model for predictions
# Plays
PBP$PlayAttempted <- 1
# Time Under
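# TimeUnder is the number of whole minutes remaining in the quarter, rounded
# up (e.g. 14:35 -> 15, 0:42 -> 1, 0:00 -> 0)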
PBP$TimeUnder <- substr(lubridate::ceiling_date(as.POSIXct(paste("00:",
PBP$time,
sep = ""),
format = "%H:%M:%S"
), "minute"),
15, 16)
PBP$TimeUnder <- as.numeric(as.character(PBP$TimeUnder))
# Calculating Score of Game for Possession team and Defensive Team
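# Points are first recorded play-by-play in team.home.score / team.away.score
# and then cumulatively summed, giving the running score at every play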
team.home.score <- rep(0, times = nrow(PBP))
team.away.score <- rep(0, times = nrow(PBP))
away.team.name <- nfl.json[[1]]$away$abbr
home.team.name <- nfl.json[[1]]$home$abbr
## Away Team ##
# Regular offensive passing, rushing
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& !PBP$ReturnResult %in% "Touchdown"
& !PBP$PlayType %in% "Kickoff")] <- 6
# Give points for Kickoff TDs
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$PlayType %in% "Kickoff")] <- 6
# Give points for Punt Return TDs
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$PlayType %in% "Punt")] <- 6
# Give points for Interceptions
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& !is.na(PBP$Interceptor))] <- 6
# Make sure to give away team points for fumble ret for TD
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& !PBP$PlayType %in% "Kickoff"
& PBP$RecFumbTeam == away.team.name)] <- 6
# Fumble and the team that fumbled recovers and scores a TD
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$RecFumbTeam == away.team.name)] <- 6
# Points for two point conversion
team.away.score[which(PBP$TwoPointConv == "Success"
& PBP$posteam == away.team.name)] <- 2
# Points for safeties
team.away.score[which(PBP$Safety == 1
& PBP$posteam == home.team.name)] <- 2
# Points for made extra point
team.away.score[which(PBP$ExPointResult == "Made"
& PBP$posteam == away.team.name)] <- 1
# Points for made field goal
team.away.score[which(PBP$FieldGoalResult == "Good"
& PBP$posteam == away.team.name)] <- 3
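# Convert the per-play away-team points into a running total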
team.away.score <- cumsum(team.away.score)
away.team.pos <- which(PBP$posteam == away.team.name)
away.team.def <- which(PBP$DefensiveTeam == away.team.name)
## Home Team ##
# Regular offensive passing and rushing TDs (kickoff TDs are handled below)
team.home.score[which(PBP$Touchdown == 1
                      & PBP$posteam == home.team.name
                      & !PBP$ReturnResult %in% "Touchdown"
                      & !PBP$PlayType %in% "Kickoff")] <- 6
# Give points for Kickoffs
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$PlayType %in% "Kickoff")] <- 6
# Give points for Punts
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$PlayType %in% "Punt")] <- 6
# Give points for Interceptions
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& !is.na(PBP$Interceptor))] <- 6
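# Give home team points for opponent fumbles returned for TDs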
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& !PBP$PlayType %in% "Kickoff"
& PBP$RecFumbTeam == home.team.name)] <- 6
# Fumble and the team that fumbled recovered and scored a TD
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$RecFumbTeam == home.team.name)] <- 6
# Points for two point conversion
team.home.score[which(PBP$TwoPointConv == "Success"
& PBP$posteam == home.team.name)] <- 2
# Points for safeties
team.home.score[which(PBP$Safety == 1
& PBP$posteam == away.team.name)] <- 2
# Points for made extra point
team.home.score[which(PBP$ExPointResult == "Made"
& PBP$posteam == home.team.name)] <- 1
# Points for made field goal
team.home.score[which(PBP$FieldGoalResult == "Good"
& PBP$posteam == home.team.name)] <- 3
team.home.score <- cumsum(team.home.score)
home.team.pos <- which(PBP$posteam == home.team.name)
home.team.def <- which(PBP$DefensiveTeam == home.team.name)
## Possession and Defensive Team Scores
PBP$PosTeamScore <- NA
PBP$DefTeamScore <- NA
### Inputting Scores
PBP$PosTeamScore[home.team.pos] <- team.home.score[home.team.pos]
PBP$PosTeamScore[away.team.pos] <- team.away.score[away.team.pos]
PBP$DefTeamScore[home.team.def] <- team.home.score[home.team.def]
PBP$DefTeamScore[away.team.def] <- team.away.score[away.team.def]
# Score Differential and Abs Score Differential
PBP$ScoreDiff <- PBP$PosTeamScore - PBP$DefTeamScore
PBP$AbsScoreDiff <- abs(PBP$PosTeamScore - PBP$DefTeamScore)
# Goal to Go
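# Goal to go: the ball is on the opponent's side of the field at or inside
# their 10-yard line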
PBP$GoalToGo <- ifelse(PBP$posteam != PBP$SideofField & PBP$yrdln <= 10, 1, 0)
##################
## Unlisting Listed Columns
PBP$sp <- unlist(PBP$sp)
PBP$qtr <- unlist(PBP$qtr)
PBP$time <- unlist(PBP$time)
PBP$ydstogo <- unlist(PBP$ydstogo)
PBP$ydsnet <- unlist(PBP$ydsnet)
PBP$posteam <- unlist(PBP$posteam)
PBP$desc <- unlist(PBP$desc)
PBP$FieldGoalDistance <- unlist(PBP$FieldGoalDistance)
## Final OutPut ##
PBP[,c("Date", "GameID", "Drive", "qtr", "down", "time", "TimeUnder",
"TimeSecs", "PlayTimeDiff", "SideofField", "yrdln", "yrdline100",
"ydstogo", "ydsnet", "GoalToGo", "FirstDown",
"posteam", "DefensiveTeam", "desc", "PlayAttempted", "Yards.Gained",
"sp", "Touchdown", "ExPointResult", "TwoPointConv", "DefTwoPoint",
"Safety", "PlayType", "Passer", "PassAttempt", "PassOutcome",
"PassLength", "PassLocation", "InterceptionThrown", "Interceptor",
"Rusher", "RushAttempt", "RunLocation", "RunGap", "Receiver",
"Reception", "ReturnResult", "Returner", "Tackler1", "Tackler2",
"FieldGoalResult", "FieldGoalDistance",
"Fumble", "RecFumbTeam", "RecFumbPlayer", "Sack", "Challenge.Replay",
"ChalReplayResult", "Accepted.Penalty", "PenalizedTeam", "PenaltyType",
"PenalizedPlayer", "Penalty.Yards", "PosTeamScore", "DefTeamScore",
"ScoreDiff", "AbsScoreDiff")]
}
##################################################################
#' Parsed Descriptive Play-by-Play Function for a Full Season
#' @description This function outputs all plays of an entire season in one dataframe.
#' It calls the game_play_by_play function and applies it over every
#' game in the season by extracting each game ID and url in the specified season.
#'
#' @param Season (numeric) A 4-digit year corresponding to an NFL season of
#' interest
#'
#' @details This function calls the extracting_gameids,
#' proper_jsonurl_formatting, and game_play_by_play functions to aggregate all
#' the plays from a given season. The resulting dataframe is well suited for
#' use with the dplyr and plyr packages.
#' @return A dataframe containing all the play-by-play information for a single
#' season. This includes all of the variables collected in our
#' game_play_by_play function (see the game_play_by_play documentation for
#' details)
#' @examples
#' # Play-by-Play Data from All games in 2010
#' pbp.data.2010 <- season_play_by_play(2010)
#'
#' # Looking at all Baltimore Ravens Offensive Plays
#' subset(pbp.data.2010, posteam == "BAL")
#' @export
season_play_by_play <- function(Season) {
  # Google R style format
  # Below, the function puts together the proper URL for each game in the
  # season and runs the game_play_by_play function across the entire season
game_ids <- extracting_gameids(Season)
pbp_data_unformatted <- lapply(game_ids, FUN = game_play_by_play)
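  # Row-bind the per-game play-by-play data frames into one season-long table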
df_pbp_data <- do.call(rbind, pbp_data_unformatted)
df_pbp_data
}
##################################################################
# Drive Summary Function
#' Drive Summary and Results
#' @description This function outputs a dataframe summarizing the result of each
#' drive of a given game
#' @param GameID (character or numeric) A 10 digit game ID associated with a
#' given NFL game.
#' @details The outputted dataframe has 16 variables associated with a specific
#' aspect of a drive including the scoring result, number of plays, the duration
#' of the drive, and the offensive and defensive teams. All 16 variables are
#' explained in more detail below:
#' \itemize{
#' \item{"posteam"} - The offensive team on the drive
#' \item{"qtr"} - The quarter at the end of the drive
#' \item{"fds"} - Number of first downs in the drive
#' \item{"result"} - End result of the drive
#' \item{"penyds"} - Net penalty yards of the drive for the offensive team
#' \item{"ydsgained"} - Number of yards gained on the drive
#' \item{"numplays"} - Number of plays on the drive
#' \item{"postime"} - The duration of the drive (time of possession)
#' \item{"StartQrt"} - The quarter at the beginning of the drive
#' \item{"StartTime"} - The time left in the quarter at the start of the drive
#' \item{"StartYardln"} - Yardline at the start of the drive
#' \item{"StartTeam"} - The offensive team at the start of the drive
#' \item{"EndQrt"} - The quarter at the end of the drive
#' \item{"EndTime"} - The time left in the quarter at the end of the drive
#' \item{"EndYardln"} - Yardline at the end of the drive
#' \item{"EndTeam"} - The team with possession at the end of the drive
#' }
#' @return A dataframe that has the summary statistics for each drive. The
#' final output includes first downs, drive result, penalty yards, number
#' of plays, time of possession, quarter at the start of the drive,
#' time at the start of the drive, yard line at the start of the drive,
#' team with possession at the start, end of drive quarter, end of drive time,
#' end of drive yard line, and end of drive team with possession
#' @examples
#' # Parsed drive summary of the final game in the 2015 NFL Season
#' nfl2015.finalregseasongame.gameID <- "2016010310"
#' drive_summary(nfl2015.finalregseasongame.gameID)
#' @export
drive_summary <- function(GameID) {
  # Google R style format
######################
######################
# Generating Game URL
urlstring <- proper_jsonurl_formatting(GameID)
# Converting JSON data
nfl.json.data <- RJSONIO::fromJSON(RCurl::getURL(urlstring))
# Creating Dataframe of Drive Outcomes
drive.data <- data.frame(do.call(rbind, (nfl.json.data[[1]]$drives)))
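  # Each drive entry from the JSON is stacked into one row per drive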
# Gathering Start of Drive Time, Location, and Quarter Info
start.data <- data.frame(do.call(rbind, (drive.data$start)))
colnames(start.data) <- c("StartQrt", "StartTime", "StartYardln", "StartTeam")
# Gathering End of Drive Time, Location, and Quarter Info
end.data <- data.frame(do.call(rbind, (drive.data$end)))
colnames(end.data) <- c("EndQrt", "EndTime", "EndYardln", "EndTeam")
start.index <- which(colnames(drive.data) == "start")
end.index <- which(colnames(drive.data) == "end")
# Combining all datasets into one
drive.data.final <- cbind(drive.data[, -c(start.index,end.index)],
start.data, end.data)
  # Removing the last row and the 3rd and 4th columns of irrelevant information
drive.data.final[-nrow(drive.data),-c(3,4)]
}
##################################################################
# Simple Box Score
#' Simple Game Boxscore
#' @description This function pulls data from an NFL URL and constructs it into
#' a formatted boxscore.
#' @param GameID (character or numeric) A 10 digit game ID associated with a
#' given NFL game.
#' @param home (boolean): home = TRUE will pull home stats,
#' home = FALSE pulls away stats
#' @return A list of player statistics including passing, rushing, receiving,
#' defense, kicking, kick return, and punt return statistics for the specified
#' game.
#' @examples
#' # Simple box score of the final game in the 2015 NFL Season
#' nfl2015.finalregseasongame.gameID <- "2016010310"
#' simple_boxscore(nfl2015.finalregseasongame.gameID, home = TRUE)
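#'
#' # Away team statistics for the same game
#' simple_boxscore(nfl2015.finalregseasongame.gameID, home = FALSE)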
#' @export
simple_boxscore <- function(GameID, home = TRUE) {
  # Google R style format
##################
##################
# Generating Game URL
urlstring <- proper_jsonurl_formatting(GameID)
# Start of Function
nfl.json.data <- RJSONIO::fromJSON(RCurl::getURL(urlstring))
# Date of Game
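  # The first 8 digits of the 10-digit GameID encode the game date as YYYYMMDD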
datestep1 <- stringr::str_extract(urlstring, pattern = "/[0-9]{10}/")
datestep2 <- stringr::str_extract(datestep1, pattern = "[0-9]{8}")
year <- substr(datestep2, start = 1, stop = 4)
month <- substr(datestep2, start = 5, stop = 6)
day <- substr(datestep2, start = nchar(datestep2)-1, stop = nchar(datestep2))
date <- as.Date(paste(month, day, year, sep = "/"), format = "%m/%d/%Y")
# Parsing Data
if (home == TRUE) {
home.team.name <- nfl.json.data[[1]]$home$abbr
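    # For each stat category, t(sapply(..., c)) flattens the per-player stat
    # lists into rows; the JSON player IDs become rownames and are copied into
    # a playerID column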
# Passing Stats
qb.stats <- data.frame(stat = "passing", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$passing, c)))
qb.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$home$stats$passing,
c)))
# Running Stats
rb.stats <- data.frame(stat = "rush", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$rushing, c)))
rb.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$home$stats$rushing,
c)))
# Receiving Stats
wr.stats <- data.frame(stat = "receiving", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$receiving, c)))
wr.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$home$stats$receiving,
c)))
# Defensive Stats
def.stats <- data.frame(stat = "defense", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$defense, c)))
def.stats$playerID <- rownames(
t(sapply(nfl.json.data[[1]]$home$stats$defense
, c)))
# Kicking Stats
kicker.stats <- data.frame(stat = "kicking", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$kicking,
c)))
kicker.stats$playerID <- rownames(t(
sapply(nfl.json.data[[1]]$home$stats$kicking,
c)))
# Fumble Stats
fumb.stats <- data.frame(stat = "fumbles", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$fumbles, c)))
fumb.stats$playerID <- rownames(t(
sapply(nfl.json.data[[1]]$home$stats$fumbles,
c)))
# Kick Return Stats
kr.stats <- data.frame(stat = "kickreturn", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$kickret, c)))
kr.stats$playerID <- rownames(t(
sapply(nfl.json.data[[1]]$home$stats$kickret,
c)))
# Punt Return Stats
pr.stats <- data.frame(stat = "puntreturn", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$puntret, c)))
pr.stats$playerID <- rownames(t(
sapply(nfl.json.data[[1]]$home$stats$puntret,
c)))
# List of Stats
home.team.stats <- list(HomePassing = qb.stats,
HomeRushing = rb.stats,
HomeReceiving = wr.stats,
HomeDef = def.stats,
HomeKicking = kicker.stats,
HomeFumbles = fumb.stats, HomeKR = kr.stats,
HomePR = pr.stats)
home.team.stats
} else {
away.team.name <- nfl.json.data[[1]]$away$abbr
# Passing Away Stats
    qb.away.stats <- data.frame(stat = "passing", date, GameID, away.team.name,
                                t(sapply(nfl.json.data[[1]]$away$stats$passing,
                                         c)))
qb.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$passing,
c)))
# Running Away Stats
rb.away.stats <- data.frame(stat = "rushing", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$rushing,
c)))
rb.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$rushing,
c)))
# Receiving Away Stats
wr.away.stats <- data.frame(stat = "receiving", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$receiving,
c)))
wr.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$receiving,
c)))
# Defensive Away Stats
def.away.stats <- data.frame(stat = "defense", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$defense,
c)))
def.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$defense,
c)))
# Kicking Away Stats
kicker.away.stats <- data.frame(stat = "kicking", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$kicking
, c)))
kicker.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$kicking,
c)))
# Fumble Away Stats
fumb.away.stats <- data.frame(stat = "fumbles", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$fumbles,
c)))
fumb.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$fumbles,
c)))
# Kick Return Away Stats
kr.away.stats <- data.frame(stat = "kickreturn", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$kickret,
c)))
kr.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$kickret,
c)))
# Punt Return Away Stats
pr.away.stats <- data.frame(stat = "puntreturn", date, GameID,
away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$puntret,
c)))
pr.away.stats$playerID <- rownames(t(sapply(
nfl.json.data[[1]]$away$stats$puntret,
c)))
# List of Away Stats
awayTeamStats <- list(AwayPassing = qb.away.stats,
AwayRushing = rb.away.stats,
AwayReceiving = wr.away.stats,
AwayDef = def.away.stats,
AwayKicking = kicker.away.stats,
AwayFumb = fumb.away.stats,
AwayKR = kr.away.stats,
AwayPR = pr.away.stats)
awayTeamStats
}
}
| /R/PlayByPlayBoxScore.R | no_license | paulhendricks/nflscrapR | R | false | false | 55,041 | r |
##################################################################
### Play-by-play, Drive Summary, and Simple Box Score Function ###
# Author: Maksim Horowitz #
# Code Style Guide: Google R Format #
##################################################################
# Play-by Play Function
#' Parsed Descriptive Play-by-Play Dataset for a Single Game
#' @description This function intakes the JSON play-by-play data of a single
#' game and parses the play description column into individual variables
#' allowing the user to segment the game in a variety of different ways for
#' model building and analysis.
#' @param GameID (character or numeric) A 10 digit game ID associated with a
#' given NFL game.
#' @details Through list manipulation using the do.call and rbind functions
#' a 10 column dataframe with basic information populates directly from the NFL
#' JSON API. These columns include the following:
#' \itemize{
#' \item{"Drive"} - Drive number
#' \item{"sp"} - Whether the play resulted in a score (any kind of score)
#' \item{"qrt"} - Quarter of Game
#' \item{"down"} - Down of the given play
#' \item{"time"} - Time at start of play
#' \item{"yrdln"} - Between 0 and 50
#' \item{"ydstogo"} - For a first down
#' \item{"ydsnet"} - Total yards gained on a given drive
#' \item{"posteam"} - The offensive team
#' \item{"desc"} - A detailed description of what occured during the play
#' }
#'
#' Through string manipulation and parsing of the description column using the
#' base R and stringR, 51 columns were added to the original dataframe allowing
#' the user to have a detailed breakdown of the events of each play.
#' The added variables are specified below:
#' \itemize{
#' \item{"Date"} - Date of game
#' \item{"GameID"} - The ID of the specified game
#' \item{"TimeSecs"} - Time remaining in game in seconds
#' \item{"PlayTimeDiff"} - The time difference between plays in seconds
#' \item{"DefensiveTeam"} - The defensive team on the play (for punts the
#' receiving team is on defense, for kickoffs the receiving team is on offense)
#' \item{"TimeUnder"} -
#' \item{"SideofField"} - The side of the field that the line of scrimmage
#' is on
#' \item{yrdline100} - Distance to opponents enzone, ranges from 1-99.
#' situation
#' \item{GoalToGo} - Binary variable indicting if the play is in a goal-to-go
#' situation
#' \item{"FirstDown"} - Binary: 0 if the play did not result in a first down
#' and 1 if it did
#' \item{"PlayAttempted"} - A variabled used to count the number of plays in a
#' game (should always be equal to 1)
#' \item{"Yards.Gained"} - Amount of yards gained on the play
#' \item{"Touchdown"} - Binary: 1 if the play resulted in a TD else 0
#' \item{"ExPointResult"} - Result of the extra-point: Made, Missed, Blocked
#' \item{"TwoPointConv"} - Result of two-point conversion: Success of Failure
#' \item{"DefTwoPoint"} - Result of defesnive two-point conversion: Success of Failure
#' \item{"Safety"} - Binary: 1 if safety was recorded else 0
#' \item{"PlayType"} - The type of play that occured. Potential values are:
#' \itemize{
#' \item{Kickoff, Punt, Onside Kick}
#' \item{Passs, Run}
#' \item{Sack}
#' \item{Field Goal, Extra Point}
#' \item{Quarter End, Two Minute Warning, End of Game}
#' \item{No Play, QB Kneel, Spike, Timeout}
#' }
#' \item{"Passer"} - The passer on the play if it was a pass play
#' \item{"PassAttempt"} - Binary variable indicating whether a pass was attempted
#' or not
#' \item{"PassOutcome"} - Pass Result: Complete or Incomplete
#' \item{"PassLength"} - Categorical variable indicating the length of the pass:
#' Short or Deep
#' \item{"PassLocation"} - Categorical variable: left, middle, right
#' \item{"InterceptionThrown"} - Binary variable indicating whether an
#' interception was thrown
#' \item{"Interceptor"} - The player who intercepted the ball
#' \item{"Rusher"} - The runner on the play if it was a running play
#' \item{"RushAttempt"} - Binary variable indicating whether or not a run was
#' attempted.
#' \item{"RunLocation"} - The location of the run - left, middle, right
#' \item{"RunGap"} - The gap that the running back ran through
#' \item{"Receiver"} - The player who recorded the reception on a complete pass
#' \item{"Reception"} - Binary Variable indicating a reception on a completed
#' pass: 1 if a reception was recorded else 0
#' \item{"ReturnResult"} - Result of a punt, kickoff, interception, or
#' fumble return
#' \item{"Returner"} - The punt or kickoff returner
#' \item{"Tackler1"} - The primary tackler on the play
#' \item{"Tackler2"} - The secondary tackler on the play
#' \item{"FieldGoalResult"} - Outcome of a fieldgoal: made, missed, blocked
#' \item{"FieldGoalDistance"} - Field goal length in yards
#' \item{"Fumble"} - Binary variable indicating whether a fumble occured or not:
#' 1 if a fumble occured else no
#' \item{"RecFumbTeam"} - Team that recovered the fumble
#' \item{"RecFumbPlayer"} - Player that recovered the fumble
#' \item{"Sack"} - Binary variable indicating whether a sack was recorded: 1 if
#' a sack was recorded else 0
#' \item{"Challenge.Replay"} - Binary variable indicating whether or not the
#' play was reviewed by the replay offical on challenges or replay reviews
#' \item{"ChalReplayResult"} - Result of the replay review: Upheld or Overturned
#' \item{"Accepted.Penalty"} - Binary variable indicating whether a penalty was
#' accpeted on the play
#' \item{"PenalizedTeam"} - The team who was penalized on the play
#' \item{"PenaltyType"} - Type of penalty on the play. Values include:
#' \itemize{
#' \item{Unnecessary Roughness, Roughing the Passer}
#' \item{Illegal Formation, Defensive Offside}
#' \item{Delay of Game, False Start, Illegal Shift}
#' \item{Illegal Block Above the Waist, Personal Foul}
#' \item{Unnecessary Roughness, Illegal Blindside Bloc}
#' \item{Defensive Pass Interference, Offensive Pass Interference}
#' \item{Fair Catch Interferenc, Unsportsmanlike Conduct}
#' \item{Running Into the Kicker, Illegal Kick}
#' \item{Illegal Contact, Defensive Holding}
#' \item{Illegal Motion, Low Block}
#' \item{Illegal Substitution, Neutral Zone Infraction}
#' \item{Ineligible Downfield Pass, Roughing the Passer}
#' \item{Illegal Use of Hands, Defensive Delay of Game}
#' \item{Defensive 12 On-field, Offensive Offside}
#' \item{Tripping, Taunting, Chop Block}
#' \item{Interference with Opportunity to Catch, Illegal Touch Pass}
#' \item{Illegal Touch Kick, Offside on Free Kick}
#' \item{Intentional Grounding, Horse Collar}
#' \item{Illegal Forward Pass, Player Out of Bounds on Punt}
#' \item{Clipping, Roughing the Kicker, Ineligible Downfield Kick}
#' \item{Offensive 12 On-field, Disqualification}
#' }
#' \item{"PenalizedPlayer"} - The penalized player
#' \item{"Penalty.Yards"} - The number of yards that the penalty resulted in
#' \item{"PosTeamScore"} - The score of the possession team (offensive team)
#' \item{"DefTeamScore"} - The score of the defensive team
#' \item{"ScoreDiff"} - The difference in score between the offensive and
#' defensive teams (offensive.score - def.score)
#' \item{"AbsScoreDiff"} - The absolute score difference on the given play
#'
#' }
#'
#' @return A dataframe with 61 columns specifying various statistics and
#' outcomes associated with each play of the specified NFL game.
#' @examples
#' # Parsed play-by-play of the final game in the 2015 NFL season
#'
#' # Save the gameID into a variable
#' nfl2015.finalregseasongame.gameID <- "2016010310"
#'
#' # Input the variable into the function to output the desired dataframe
#' finalgame2015.pbp <- game_play_by_play(nfl2015.finalregseasongame.gameID)
#'
#' # Subset the dataframe based on passing plays
#' subset(finalgame2015.pbp, PlayType == "Pass")
#' @export
game_play_by_play <- function(GameID) {
# Google R stlye format
#########################
#########################
# Converting JSON data
# Converting GameID into URL string
urlstring <- proper_jsonurl_formatting(GameID)
nfl.json <- RJSONIO::fromJSON(RCurl::getURL(urlstring))
number.drives <- length(nfl.json[[1]]$drives) - 1
PBP <- NULL
for (ii in 1:number.drives) {
PBP <- rbind(PBP, cbind("Drive" = ii,
data.frame(do.call(rbind,
(nfl.json[[1]]$drives[[ii]]$plays))
)[,c(1:9)])
)
}
# Adjusting Possession Team
PBP$posteam <- ifelse(PBP$posteam == "NULL", dplyr::lag(PBP$posteam),
PBP$posteam)
# Fixing Possession team for Kick-Offs
kickoff.index <- which(sapply(PBP$desc, regexpr,
pattern =
"kicks") != -1)
pos.teams <- unlist(unique(PBP$posteam))[1:2]
correct.kickoff.pos <- ifelse(PBP$posteam[kickoff.index] == pos.teams[1],
pos.teams[2], pos.teams[1])
PBP[kickoff.index, "posteam"] <- correct.kickoff.pos
# Yard Line Information
# In the earlier seasons when there was a dead ball (i.e. timeout)
# the yardline info was left blank or NULL. Also if the ball was at midfield then
# there was no team associated so I had to add a space to make the strsplit
# work
yline.info.1 <- ifelse(PBP$yrdln == "50", "MID 50", PBP$yrdln)
yline.info.1 <- ifelse(nchar(PBP$yrdln) == 0 |
PBP$yrdln == "NULL", dplyr::lag(PBP$yrdln),
yline.info.1)
yline.info <- sapply(yline.info.1, strsplit, split = " ")
PBP$SideofField <- sapply(yline.info, FUN = function(x) x[1])
PBP$yrdln <- as.numeric(sapply(yline.info, FUN = function(x) x[2]))
# Yard Line on 100 yards Scale: Distance from Opponent Endzone
PBP$yrdline100 <- ifelse(PBP$SideofField == PBP$posteam | PBP$yrdln == 50,
100 - PBP$yrdln, PBP$yrdln )
# Game Date
date.step1 <- stringr::str_extract(urlstring, pattern = "/[0-9]{10}/")
date.step2 <- stringr::str_extract(date.step1, pattern = "[0-9]{8}")
year <- substr(date.step2, start = 1, stop = 4)
month <- substr(date.step2, start = 5, stop = 6)
day <- substr(date.step2, start = nchar(date.step2)-1,
stop = nchar(date.step2))
date <- as.Date(paste(month, day, year, sep = "/"), format = "%m/%d/%Y")
PBP$Date <- date
PBP$GameID <- stringr::str_extract(date.step1, pattern = "[0-9]{10}")
# Adding Zero time to Quarter End
quarter.end <- which(sapply(PBP$desc, regexpr,
pattern = "END QUARTER|END GAME") != -1)
PBP$time[quarter.end] <- "00:00"
# Time in Seconds
qtr.timeinsecs <- lubridate::period_to_seconds(lubridate::ms(PBP$time))
# Quarter 1
qtr.timeinsecs[which(PBP$qtr == 1)] <- qtr.timeinsecs[
which(PBP$qtr == 1)] + (900*3)
# Quarter 2
qtr.timeinsecs[which(PBP$qtr == 2)] <- qtr.timeinsecs[
which(PBP$qtr == 2)] + (900*2)
# Quarter 3
qtr.timeinsecs[which(PBP$qtr == 3)] <- qtr.timeinsecs[
which(PBP$qtr == 3)] + 900
PBP$TimeSecs <- qtr.timeinsecs
# Time Difference (in seconds)
plays.time.diff <- abs(c(0, diff(qtr.timeinsecs)))
PBP$PlayTimeDiff <- plays.time.diff
## Challenge or Replay Review ##
# Binary
PBP$Challenge.Replay <- 0
replay.offic <- grep(PBP$desc, pattern = "Replay Official reviewed")
challenged <- grep(PBP$desc, pattern = "challenge")
PBP$Challenge.Replay[c(replay.offic, challenged)] <- 1
# Results
PBP$ChalReplayResult <- NA
upheld.play <- grep(PBP$desc, pattern = "the play was Upheld")
reversed.play <- grep(PBP$desc, pattern = "the play was REVERSED")
PBP$ChalReplayResult[upheld.play] <- "Upheld"
PBP$ChalReplayResult[reversed.play] <- "Reversed"
######################################
# Picking Apart the Description Column
######################################
# Yards Gained
yards.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = "for (-)?([0-9]{1,2})?")
PBP$Yards.Gained <- as.numeric( ifelse( grepl(x = yards.step1,
pattern = "(-)?([0-9]{1,2})"),
stringr::str_extract(yards.step1,
"(-)?([0-9]{1,2})"),
"0")
)
# Two Point Conversion
PBP$TwoPointConv <- NA
two.point.result.ind <- which(sapply(PBP$desc, regexpr,
pattern =
"TWO-POINT CONVERSION ATTEMPT") != -1)
two.point.result2 <- stringr::str_extract_all(PBP$desc[two.point.result.ind],
pattern = "ATTEMPT FAILS|SUCCEEDS")
two.point.result.final1 <- unlist(lapply(two.point.result2, tail, 1))
two.point.result.final2 <- ifelse(two.point.result.final1 == "ATTEMPT FAILS",
"Failure", "Success")
if (length(two.point.result.final2) != 0) {
PBP$TwoPointConv[two.point.result.ind] <- two.point.result.final2
}
# Penalty - Binary Column
PBP$Accepted.Penalty <- NA
penalty.play <- sapply(PBP$desc, stringr::str_extract, pattern = "PENALTY")
PBP$Accepted.Penalty <- ifelse(!is.na(penalty.play), 1, 0)
# Penalized Team
penalized.team.s1 <- sapply(PBP$desc, stringr::str_extract,
"PENALTY on [A-Z]{2,3}")
PBP$PenalizedTeam <- stringr::str_extract(penalized.team.s1,
"[A-Z]{2,3}$")
# Penalty - What was the penalty?
penalty.type.s1 <- sapply(PBP$desc, stringr::str_extract,
pattern ="PENALTY(.){5,25},.+, [0-9] yard(s)")
penalty.type.s2 <- stringr::str_extract(pattern = ",.+,", penalty.type.s1)
penalty.type.final <- stringr::str_sub(penalty.type.s2, 3, -2)
PBP$PenaltyType <- penalty.type.final
# Penalized Player
PBP$PenalizedPlayer <- NA
penalized.player.int <- sapply(PBP$desc[ which(PBP$Accepted.Penalty == 1) ],
stringr::str_extract,
pattern =
"[A-Z]{2,3}-[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?( (S|J)r)?")
penalized.player2 <- stringr::str_extract(penalized.player.int,
pattern =
"[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?( (S|J)r)?")
PBP$PenalizedPlayer[PBP$Accepted.Penalty == 1] <- penalized.player2
# Penalty Yards
PBP$Penalty.Yards <- NA
penalty.yards.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = ", [0-9]{1,2} yard(s?), enforced")
PBP$Penalty.Yards <- ifelse(!is.na(penalty.yards.step1),
as.numeric(stringr::str_extract(
penalty.yards.step1,
"[0-9]{1,2}")
), 0)
# Modifying Down Column
PBP$down <- unlist(PBP$down)
PBP$down[which(PBP$down == 0)] <- NA
# Defenseive Team Column
PBP$DefensiveTeam <- NA
teams.step1 <- stringr::str_extract(unlist(unique(PBP$posteam)), "[A-Z]{2,3}")
teams <- teams.step1[which(!is.na(teams.step1))]
Team1 <- teams[1]
Team2 <- teams[2]
PBP$DefensiveTeam[which(PBP$posteam == Team1)] <- Team2
PBP$DefensiveTeam[which(PBP$posteam == Team2)] <- Team1
### Type of Play Initialized ###
PBP$PlayType <- NA
## Passer ##
passer.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = "[A-Z]\\.[A-Z][A-z]{1,20} pass")
PBP$Passer <- stringr::str_extract(passer.step1,
pattern = "[A-Z]\\.[A-Z][A-z]{1,20}")
## Receiver ##
receiver.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern =
"pass (incomplete)?( )?[a-z]{4,5} [a-z]{4,6} to [A-Z]\\.[A-Z][A-z]{1,20}")
PBP$Receiver <- stringr::str_extract(receiver.step1,
pattern = "[A-Z]\\.[A-Z][A-z]{1,20}")
## Tacklers ##
tacklers.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = "(yard(s?)|no gain) \\([A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?(;)?( )?([A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?)?\\)\\.")
# Identifying the tacklers on the play (either one or two)
tacklers1 <- stringr::str_extract(tacklers.step1,
pattern =
"\\([A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
tacklers1 <- stringr::str_extract(tacklers1,
pattern =
"[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# Pulling out tacklers names
tacklers2 <- stringr::str_extract(tacklers.step1,
pattern =
";( )[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
tacklers2 <- stringr::str_extract(tacklers2,
pattern =
"[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
PBP$Tackler1 <- tacklers1
PBP$Tackler2 <- tacklers2
# Pass Plays
PBP$PassOutcome <- NA
pass.play <- which(sapply( PBP$desc, regexpr, pattern = "pass") != -1)
incomplete.pass.play <- which(sapply(PBP$desc, regexpr,
pattern =
"(pass incomplete)|INTERCEPTED") != -1)
PBP$PlayType[pass.play] <- "Pass"
# Pass Outcome
PBP$PassOutcome[incomplete.pass.play] <- "Incomplete Pass"
PBP$PassOutcome[ setdiff(pass.play, incomplete.pass.play) ] <- "Complete"
# Pass Length
PBP$PassLength <- NA
short.pass <- which(sapply(PBP$desc, regexpr,
pattern = "pass (incomplete )?short") != -1)
deep.pass <- which(sapply(PBP$desc, regexpr,
pattern = "pass (incomplete )?deep") != -1)
PBP$PassLength[short.pass] <- "Short"
PBP$PassLength[deep.pass] <- "Deep"
# Pass Location
PBP$PassLocation <- NA
pass.left <- which(sapply(PBP$desc, regexpr,
pattern = "(short|deep) left") != -1)
pass.reft <- which(sapply(PBP$desc, regexpr,
pattern = "(short|deep) right") != -1)
pass.middle <- which(sapply(PBP$desc, regexpr,
pattern = "(short|deep) middle") != -1)
PBP$PassLocation[pass.left] <- "left"
PBP$PassLocation[pass.reft] <- "right"
PBP$PassLocation[pass.middle] <- "middle"
# Pass Attempt
PBP$PassAttempt <- NA
PBP$PassAttempt <- ifelse( sapply(PBP$desc, grepl,
pattern = "pass"), 1, 0)
# Reception Made
PBP$Reception <- 0
PBP$Reception[setdiff(pass.play,incomplete.pass.play)] <- 1
# Interception Thrown
PBP$InterceptionThrown <- ifelse(
sapply(PBP$desc, grepl,
pattern = "INTERCEPTED"), 1, 0
)
# Punt
punt.play <- which(sapply(PBP$desc, regexpr, pattern = "punts") != -1)
PBP$PlayType[punt.play] <- "Punt"
# Field Goal
fieldgoal <- which(sapply(PBP$desc, regexpr,
pattern = "field goal") != -1)
fieldgoal.null <- which(sapply(PBP$desc, regexpr,
pattern = "field goal(.)+NULLIFIED") != -1)
fieldgoal.rev <- which(sapply(PBP$desc, regexpr,
pattern = "field goal(.)+REVERSED") != -1)
fieldgoal <- setdiff(fieldgoal, c(fieldgoal.null, fieldgoal.rev))
missed.fg <- which(sapply(PBP$desc, regexpr,
pattern = "field goal is No Good") != -1)
blocked.fg <- which(sapply(PBP$desc, regexpr,
pattern = "field goal is BLOCKED") != -1)
PBP$PlayType[fieldgoal] <- "Field Goal"
# Field Goal Distance
fieldgoaldist.prelim <- sapply(PBP$desc[fieldgoal], stringr::str_extract,
pattern = "[0-9]{1,2} yard field goal")
fieldgoaldist <- sapply(fieldgoaldist.prelim, stringr::str_extract,
pattern = "[0-9]{1,2}")
PBP$FieldGoalDistance <- NA
PBP$FieldGoalDistance[fieldgoal] <- fieldgoaldist
# Field Goal Result
PBP$FieldGoalResult <- NA
PBP$FieldGoalResult[missed.fg] <- "No Good"
PBP$FieldGoalResult[blocked.fg] <- "Blocked"
PBP$FieldGoalResult[setdiff(fieldgoal,c(missed.fg, blocked.fg))] <- "Good"
# Extra Point
extrapoint.good <- which(sapply(PBP$desc, regexpr,
pattern = "extra point is GOOD") != -1)
extrapoint.nogood <- which(sapply(PBP$desc, regexpr,
pattern = "(extra point is No Good)") != -1)
extrapoint.blocked <- which(sapply(PBP$desc, regexpr,
pattern = "(extra point is Blocked)") != -1)
extrapoint.aborted <- which(sapply(PBP$desc, regexpr,
pattern = "(extra point is Aborted)") != -1)
PBP$PlayType[c(extrapoint.good,
extrapoint.nogood,
extrapoint.blocked,
extrapoint.aborted)] <- "Extra Point"
# Extra Point Result
PBP$ExPointResult <- NA
PBP$ExPointResult[extrapoint.good] <- "Made"
PBP$ExPointResult[extrapoint.nogood] <- "Missed"
PBP$ExPointResult[extrapoint.blocked] <- "Blocked"
PBP$ExPointResult[extrapoint.blocked] <- "Aborted"
# Touchdown Play
touchdown.step1 <- sapply(PBP$desc, stringr::str_extract,
pattern = "TOUCHDOWN")
nullified <- grep(PBP$desc, pattern = "TOUCHDOWN NULLIFIED")
reversed <- grep(PBP$desc, pattern = "TOUCHDOWN(.)+REVERSED")
touchdown.step1[c(nullified, reversed)] <- NA
PBP$Touchdown <- ifelse(!is.na(touchdown.step1), 1, 0)
TDs.b4.extrapt <- which(PBP$PlayType == "Extra Point" |
!is.na(PBP$TwoPointConv)) - 1
extra.TDs <- setdiff(TDs.b4.extrapt, which(PBP$Touchdown == 1))
if (length(extra.TDs) > 0) {
PBP$Touchdown[TDs.b4.extrapt] <- 1
}
# Defensive 2-pt conversion
def.twopt.suc <- which(sapply(PBP$desc, regexpr,
pattern = "DEFENSIVE TWO-POINT ATTEMPT\\. (.){1,70}\\. ATTEMPT SUCCEEDS") != -1)
def.twopt.fail <- which(sapply(PBP$desc, regexpr,
pattern = "DEFENSIVE TWO-POINT ATTEMPT\\. (.){1,70}\\. ATTEMPT FAILS") != -1)
PBP$DefTwoPoint <- NA
PBP$DefTwoPoint[def.twopt.suc] <- "Success"
PBP$DefTwoPoint[def.twopt.fail] <- "Failure"
all.2pts <- intersect(c(def.twopt.suc, def.twopt.fail), two.point.result.ind)
PBP$TwoPointConv[all.2pts] <- "Failure"
# Fumbles
PBP$Fumble <- 0
fumble.index1 <- which(sapply(PBP$desc, regexpr, pattern = "FUMBLE") != -1)
fumble.overruled <- which(sapply(PBP$desc[fumble.index1],
regexpr,
pattern = "(NULLIFIED)|(Reversed)") != -1)
fumble.index <- setdiff(fumble.index1, fumble.overruled)
PBP$Fumble[fumble.index] <- 1
# Timeouts
timeouts <- which(sapply(PBP$desc, regexpr,
pattern = "[A-z]imeout #[1-5] by") != -1)
PBP$PlayType[timeouts] <- "Timeout"
# Quarter End
end.quarter <- which(sapply(PBP$desc, regexpr,
pattern = "END QUARTER") != -1)
PBP$PlayType[end.quarter] <- "Quarter End"
# 2 Minute Warning
two.minute.warning <- which(sapply(PBP$desc, regexpr,
pattern = "Two-Minute Warning") != -1)
PBP$PlayType[two.minute.warning] <- "Two Minute Warning"
# Sack
sack.plays <- which(sapply(PBP$desc, regexpr, pattern = "sacked") != -1)
PBP$PlayType[sack.plays] <- "Sack"
# Sack- Binary
PBP$Sack <- 0
PBP$Sack[sack.plays] <- 1
# Safety - Binary
safety.plays <- which(sapply(PBP$desc, regexpr, pattern = "SAFETY") != -1)
PBP$Safety <- 0
PBP$Safety[safety.plays] <- 1
# QB Kneel
qb.kneel <- which(sapply(PBP$desc, regexpr, pattern = "kneels") != -1)
PBP$PlayType[qb.kneel] <- "QB Kneel"
# Kick Off
kickoff <- which(sapply(PBP$desc, regexpr,
pattern = "kick(s)? [0-9]{2,3}") != -1)
PBP$PlayType[kickoff] <- "Kickoff"
# Onside Kick
onside <- which(sapply(PBP$desc, regexpr, pattern = "onside") != -1)
PBP$PlayType[onside] <- "Onside Kick"
# Spike
spike.play <- which(sapply(PBP$desc, regexpr, pattern = "spiked") != -1)
PBP$PlayType[spike.play] <- "Spike"
# No Play
no.play <- which(sapply(PBP$desc, regexpr,
pattern = "No Play") != -1)
PBP$PlayType[no.play] <- "No Play"
# End of Game
end.game <- which(sapply(PBP$desc, regexpr, pattern = "END GAME") != -1)
PBP$PlayType[end.game] <- "End of Game"
# First Down
PBP$FirstDown <- 0
first.downplays <- which(PBP$down == 1)
first.downs <- first.downplays-1
PBP$FirstDown[first.downs] <- ifelse(PBP$down[first.downs] ==0, NA, 1)
# Running Play
running.play <- which(is.na(PBP$PlayType))
PBP$PlayType[running.play] <- "Run"
PBP$RushAttempt <- ifelse(PBP$PlayType == "Run", 1,0)
# Run Direction
PBP$RunLocation <- NA
run.left <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "left") != -1)
run.right <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "right") != -1)
run.middle <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "middle") != -1)
PBP[running.play,"RunLocation"][run.left] <- "left"
PBP[running.play,"RunLocation"][run.right] <- "right"
PBP[running.play,"RunLocation"][run.middle] <- "middle"
# Run Gap
PBP$RunGap <- NA
run.guard <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "guard") != -1)
run.tackle <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "tackle") != -1)
run.end <- which(sapply(PBP[which(PBP$PlayType == "Run"),"desc"], regexpr,
pattern = "end") != -1)
PBP[running.play,"RunGap"][run.guard] <- "guard"
PBP[running.play,"RunGap"][run.tackle] <- "tackle"
PBP[running.play,"RunGap"][run.end] <- "end"
# Rusher
rusherStep1 <- sapply(PBP[which(PBP$PlayType == "Run"),"desc"],
stringr::str_extract,
pattern = "[A-Z]\\.[A-Z][A-z]{1,20}")
PBP[running.play,"Rusher"] <- rusherStep1
## Punt and Kick Return Outcome ##
# Punt Outcome
punt.tds <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "TOUCHDOWN") != -1)
punt.tds.null <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "NULLIFIED") != -1)
punt.tds.rev <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "REVERSED") != -1)
punt.tds <- setdiff(punt.tds, c(punt.tds.null, punt.tds.rev))
punts.touchbacks <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "Touchback") != -1)
punts.faircatch <- which(sapply(PBP$desc[punt.play], regexpr,
pattern = "fair catch") != -1)
# Kickoff Outcome
kick.tds <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "TOUCHDOWN") != -1)
kick.tds.null <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "NULLIFIED") != -1)
kick.tds.rev <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "REVERSED") != -1)
kick.tds <- setdiff(kick.tds, c(kick.tds.null, kick.tds.rev))
kick.tds.null <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "NULLIFIED") != -1)
kick.tds.rev <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "REVERSED") != -1)
kick.tds <- setdiff(kick.tds, c(kick.tds.null, kick.tds.rev))
kick.touchbacks <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "Touchback") != -1)
kick.faircatch <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "fair catch") != -1)
kick.kneels <- which(sapply(PBP$desc[kickoff], regexpr,
pattern = "kneel(s)?") != -1)
# Interception Outcome
intercept.td <- which(sapply(PBP$desc[which(PBP$InterceptionThrown == 1)],
regexpr, pattern = "TOUCHDOWN") != -1)
intercept.td.null <- which(sapply(PBP$desc[which(PBP$InterceptionThrown == 1)]
, regexpr, pattern = "NULLIFIED") != -1)
intercept.td.rev <- which(sapply(PBP$desc[which(PBP$InterceptionThrown == 1)],
regexpr, pattern = "REVERSED") != -1)
intercept.td <- setdiff(intercept.td, c(intercept.td.null, intercept.td.rev))
# Fumble Outcome
fumble.td <- which(sapply(PBP$desc[fumble.index],
regexpr, pattern = "TOUCHDOWN") != -1)
# May be able to remove bottom two lines
fumble.td.null <- which(sapply(PBP$desc[fumble.index],
regexpr, pattern = "NULLIFIED") != -1)
fumble.td.rev <- which(sapply(PBP$desc[fumble.index],
regexpr, pattern = "REVERSED") != -1)
fumble.td <- setdiff(fumble.td, c(fumble.td.null, fumble.td.rev))
PBP$ReturnResult <- NA
PBP$ReturnResult[punt.play][punt.tds] <- "Touchdown"
PBP$ReturnResult[punt.play][punts.touchbacks] <- "Touchback"
PBP$ReturnResult[punt.play][punts.faircatch] <- "Fair Catch"
PBP$ReturnResult[kickoff][kick.tds] <- "Touchdown"
PBP$ReturnResult[kickoff][kick.touchbacks] <- "Touchback"
PBP$ReturnResult[kickoff][c(kick.faircatch,kick.kneels)] <- "Fair Catch"
PBP$ReturnResult[which(PBP$InterceptionThrown == 1)][intercept.td] <- "Touchdown"
PBP$ReturnResult[fumble.index][fumble.td] <- "Touchdown"
## Returner ##
# Punt Returner
# Fair Catches
punt.returner1 <- sapply(PBP$desc[punt.play], stringr::str_extract,
pattern = "by [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?\\.$")
punt.returner2 <- sapply(punt.returner1, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# Touchdowns or Returns
punt.returner3 <- sapply(PBP$desc[punt.play], stringr::str_extract,
pattern = "(\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? to [A-Z]{2,3} [0-9]{1,2})|\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? for [0-9]{1,2} yard(s)")
punt.returner4 <- sapply(punt.returner3, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# Kickoff Returner
# Fair Catches
kickret1 <- sapply(PBP$desc[kickoff], stringr::str_extract,
pattern = "by [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?\\.$")
kickret2 <- sapply(kickret1, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# Touchdowns or Returns
kickret3 <- sapply(PBP$desc[kickoff], stringr::str_extract,
pattern = "(\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? to [A-Z]{2,3} [0-9]{1,2})|(\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? for [0-9]{1,2} yard(s))|(\\. [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})? pushed)")
kickret4 <- sapply(kickret3, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
# All Returners
all.returners <- rep(NA, time = nrow(PBP))
all.returners[kickoff][which(!is.na(kickret2))] <- kickret2[which(!is.na(kickret2))]
all.returners[kickoff][which(!is.na(kickret4))] <- kickret4[which(!is.na(kickret4))]
all.returners[punt.play][which(!is.na(punt.returner2))] <- punt.returner2[which(!is.na(punt.returner2))]
all.returners[punt.play][which(!is.na(punt.returner4))] <- punt.returner4[which(!is.na(punt.returner4))]
PBP$Returner <- all.returners
# Interceptor
interceptor1 <- sapply(PBP$desc[which(PBP$InterceptionThrown == 1)],
stringr::str_extract,
pattern = "INTERCEPTED by [A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
interceptor2 <- sapply(interceptor1, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
PBP$Interceptor <- NA
PBP$Interceptor[which(PBP$InterceptionThrown == 1)] <- interceptor2
# Fumbler Recovery Team and Player
recover.step1 <- sapply(PBP$desc[fumble.index], stringr::str_extract,
pattern = "[A-Z]{2,3}-[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
recover.team <- sapply(recover.step1, stringr::str_extract,
pattern = "[A-Z]{2,3}")
recover.player <- sapply(recover.step1, stringr::str_extract,
pattern = "[A-z]{1,3}\\.( )?[A-Z][A-z]{1,20}(('|-)?[A-z]{1,15})?")
PBP$RecFumbTeam <- NA
PBP$RecFumbTeam[fumble.index] <- recover.team
PBP$RecFumbPlayer <- NA
PBP$RecFumbPlayer[fumble.index] <- recover.player
# The next few variables are counting variables
# Used to help set up model for predictions
# Plays
PBP$PlayAttempted <- 1
# Time Under
PBP$TimeUnder <- substr(lubridate::ceiling_date(as.POSIXct(paste("00:",
PBP$time,
sep = ""),
format = "%H:%M:%S"
), "minute"),
15, 16)
PBP$TimeUnder <- as.numeric(as.character(PBP$TimeUnder))
# Calculating Score of Game for Possesion team and Defensive Team
team.home.score <- rep(0, times = nrow(PBP))
team.away.score <- rep(0, times = nrow(PBP))
away.team.name <- nfl.json[[1]]$away$abbr
home.team.name <- nfl.json[[1]]$home$abbr
## Away Team ##
# Regular offensive passing, rushing
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& !PBP$ReturnResult %in% "Touchdown"
& !PBP$PlayType %in% "Kickoff")] <- 6
# Give points for Kickoff TDs
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$PlayType %in% "Kickoff")] <- 6
# Give points for Punt Return TDs
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$PlayType %in% "Punt")] <- 6
# Give points for Interceptions
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& !is.na(PBP$Interceptor))] <- 6
# Make sure to give away team points for fumble ret for TD
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& !PBP$PlayType %in% "Kickoff"
& PBP$RecFumbTeam == away.team.name)] <- 6
# Fumble and the team that fumbled recovers and scores a TD
team.away.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$RecFumbTeam == away.team.name)] <- 6
# Points for two point conversion
team.away.score[which(PBP$TwoPointConv == "Success"
& PBP$posteam == away.team.name)] <- 2
# Points for safeties
team.away.score[which(PBP$Safety == 1
& PBP$posteam == home.team.name)] <- 2
# Points for made extra point
team.away.score[which(PBP$ExPointResult == "Made"
& PBP$posteam == away.team.name)] <- 1
# Points for made field goal
team.away.score[which(PBP$FieldGoalResult == "Good"
& PBP$posteam == away.team.name)] <- 3
team.away.score <- cumsum(team.away.score)
away.team.pos <- which(PBP$posteam == away.team.name)
away.team.def <- which(PBP$DefensiveTeam == away.team.name)
## Home Team ##
# Regular offensive passing, rushing, or kickoff TD
team.home.score[PBP$Touchdown == 1
& PBP$posteam == home.team.name
& !PBP$ReturnResult %in% "Touchdown"
& !PBP$PlayType %in% "Kickoff"] <- 6
# Give points for Kickoffs
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$PlayType %in% "Kickoff")] <- 6
# Give points for Punts
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$PlayType %in% "Punt")] <- 6
# Give points for Interceptions
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& !is.na(PBP$Interceptor))] <- 6
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == away.team.name
& PBP$ReturnResult %in% "Touchdown"
& !PBP$PlayType %in% "Kickoff"
& PBP$RecFumbTeam == home.team.name)] <- 6
# Fumble and the team that fumbled recovered and scored a TD
team.home.score[which(PBP$Touchdown == 1
& PBP$posteam == home.team.name
& PBP$ReturnResult %in% "Touchdown"
& PBP$RecFumbTeam == home.team.name)] <- 6
# Points for two point conversion
team.home.score[which(PBP$TwoPointConv == "Success"
& PBP$posteam == home.team.name)] <- 2
# Points for safeties
team.home.score[which(PBP$Safety == 1
& PBP$posteam == away.team.name)] <- 2
# Points for made extra point
team.home.score[which(PBP$ExPointResult == "Made"
& PBP$posteam == home.team.name)] <- 1
# Points for made field goal
team.home.score[which(PBP$FieldGoalResult == "Good"
& PBP$posteam == home.team.name)] <- 3
team.home.score <- cumsum(team.home.score)
home.team.pos <- which(PBP$posteam == home.team.name)
home.team.def <- which(PBP$DefensiveTeam == home.team.name)
## Possesion and Defensive Team Scores
PBP$PosTeamScore <- NA
PBP$DefTeamScore <- NA
### Inputting Scores
PBP$PosTeamScore[home.team.pos] <- team.home.score[home.team.pos]
PBP$PosTeamScore[away.team.pos] <- team.away.score[away.team.pos]
PBP$DefTeamScore[home.team.def] <- team.home.score[home.team.def]
PBP$DefTeamScore[away.team.def] <- team.away.score[away.team.def]
# Score Differential and Abs Score Differential
PBP$ScoreDiff <- PBP$PosTeamScore - PBP$DefTeamScore
PBP$AbsScoreDiff <- abs(PBP$PosTeamScore - PBP$DefTeamScore)
# Goal to Go
PBP$GoalToGo <- ifelse(PBP$posteam != PBP$SideofField & PBP$yrdln <= 10, 1, 0)
##################
## Unlisting Listed Columns
PBP$sp <- unlist(PBP$sp)
PBP$qtr <- unlist(PBP$qtr)
PBP$time <- unlist(PBP$time)
PBP$ydstogo <- unlist(PBP$ydstogo)
PBP$ydsnet <- unlist(PBP$ydsnet)
PBP$posteam <- unlist(PBP$posteam)
PBP$desc <- unlist(PBP$desc)
PBP$FieldGoalDistance <- unlist(PBP$FieldGoalDistance)
## Final OutPut ##
PBP[,c("Date", "GameID", "Drive", "qtr", "down", "time", "TimeUnder",
"TimeSecs", "PlayTimeDiff", "SideofField", "yrdln", "yrdline100",
"ydstogo", "ydsnet", "GoalToGo", "FirstDown",
"posteam", "DefensiveTeam", "desc", "PlayAttempted", "Yards.Gained",
"sp", "Touchdown", "ExPointResult", "TwoPointConv", "DefTwoPoint",
"Safety", "PlayType", "Passer", "PassAttempt", "PassOutcome",
"PassLength", "PassLocation", "InterceptionThrown", "Interceptor",
"Rusher", "RushAttempt", "RunLocation", "RunGap", "Receiver",
"Reception", "ReturnResult", "Returner", "Tackler1", "Tackler2",
"FieldGoalResult", "FieldGoalDistance",
"Fumble", "RecFumbTeam", "RecFumbPlayer", "Sack", "Challenge.Replay",
"ChalReplayResult", "Accepted.Penalty", "PenalizedTeam", "PenaltyType",
"PenalizedPlayer", "Penalty.Yards", "PosTeamScore", "DefTeamScore",
"ScoreDiff", "AbsScoreDiff")]
}
##################################################################
#' Parsed Descriptive Play-by-Play Function for a Full Season
#' @description This function outputs all plays of an entire season in one dataframe.
#' It calls the game_play_by_play function and applies it over every
#' game in the season by extracting each game ID and url in the specified season.
#'
#' @param Season (numeric) A 4-digit year corresponding to an NFL season of
#' interest
#'
#' @details This function calls the extracting_gameids,
#' proper_jsonurl_formatting, and game_play_by_play to aggregate all the plays
#' from a given season. This dataframe is prime for use with the dplyr and
#' plyr packages.
#' @return A dataframe contains all the play-by-play information for a single
#' season. This includes all the 52 variables collected in our
#' game_play_by_play function (see documentation for game_play_by_play for
#' details)
#' @examples
#' # Play-by-Play Data from All games in 2010
#' pbp.data.2010 <- season_play_by_play(2010)
#'
#' # Looking at all Baltimore Ravens Offensive Plays
#' subset(pbp.data.2010, posteam = "BAL")
#' @export
season_play_by_play <- function(Season) {
# Google R stlye format
# Below the function put together the proper URLs for each game in each
# season and runs the game_play_by_play function across the entire season
game_ids <- extracting_gameids(Season)
pbp_data_unformatted <- lapply(game_ids, FUN = game_play_by_play)
df_pbp_data <- do.call(rbind, pbp_data_unformatted)
df_pbp_data
}
##################################################################
# Drive Summary Function
#' Drive Summary and Results
#' @description This function outputs the results dataframe of each drive of a
#' given game
#' @param GameID (character or numeric) A 10 digit game ID associated with a
#' given NFL game.
#' @details The outputted dataframe has 16 variables associated with a specific
#' aspect of a drive including the scoring result, number of plays, the duration
#' of the drive, and the offensive and defensive teams. All 16 variable are
#' explained in more detail below:
#' \itemize{
#' \item{"posteam"} - The offensive team on the drive
#' \item{"qrt"} - The quarter at the end of the drive
#' \item{"fs"} - Number of first downs in the drive
#' \item{"result"} - End result of the drive
#' \item{"penyds"} - Net penalty yards of the drive for the offensive team
#' \item{"ydsgained"} - Number of yards gained on the drive
#' \item{"numplaus"} - Number of plays on the drive
#' \item{"postime"} - The duration of the
#' \item{"Startqrt"} - The quarter at the beginning of the drive
#' \item{"StartTime} - The time left in the quarter at the start of the drive
#' \item{"StartYardln"} - Yardline at the start of the drive
#' \item{"StartTeam"} - The offensive team on the drive
#' }
#' @return A dataframe that has the summary statistics for each drive. The
#' final output includes first downs, drive result, penalty yards, number
#' of plays, time of possession, quarter at the start of the drive,
#' time at start of drive, yardline at start of drive,
#' team with possession at start, end of drive quarter, end of drive time,
#' end of drive yard line, and end of drive team with possession
#' @examples
#' # Parsed drive summary of the final game in the 2015 NFL Season
#' nfl2015.finalregseasongame.gameID <- "2016010310"
#' drive_summary(nfl2015.finalregseasongame.gameID)
#' @export
drive_summary <- function(GameID) {
  # Google R style format
######################
######################
# Generating Game URL
urlstring <- proper_jsonurl_formatting(GameID)
# Converting JSON data
nfl.json.data <- RJSONIO::fromJSON(RCurl::getURL(urlstring))
# Creating Dataframe of Drive Outcomes
drive.data <- data.frame(do.call(rbind, (nfl.json.data[[1]]$drives)))
# Gathering Start of Drive Time, Location, and Quarter Info
start.data <- data.frame(do.call(rbind, (drive.data$start)))
colnames(start.data) <- c("StartQrt", "StartTime", "StartYardln", "StartTeam")
# Gathering End of Drive Time, Location, and Quarter Info
end.data <- data.frame(do.call(rbind, (drive.data$end)))
colnames(end.data) <- c("EndQrt", "EndTime", "EndYardln", "EndTeam")
start.index <- which(colnames(drive.data) == "start")
end.index <- which(colnames(drive.data) == "end")
# Combining all datasets into one
drive.data.final <- cbind(drive.data[, -c(start.index,end.index)],
start.data, end.data)
  # Removing the last row and columns 3-4 of irrelevant information
drive.data.final[-nrow(drive.data),-c(3,4)]
}
##################################################################
# Simple Box Score
#' Simple Game Boxscore
#' @description This function pulls data from an NFL url and constructs it into a formatted
#' boxscore.
#' @param GameID (character or numeric) A 10 digit game ID associated with a
#' given NFL game.
#' @param home (boolean): home = TRUE will pull home stats,
#' home = FALSE pulls away stats
#' @return A list of playerstatistics including passing, rushing, receiving,
#' defense, kicking, kick return, and punt return statistics for the specified
#' game.
#' @examples
#' # Simple boxscore of the final game in the 2015 NFL Season
#' nfl2015.finalregseasongame.gameID <- "2016010310"
#' simple_boxscore(nfl2015.finalregseasongame.gameID, home = TRUE)
#' @export
simple_boxscore <- function(GameID, home = TRUE) {
  # Google R style format
##################
##################
# Generating Game URL
urlstring <- proper_jsonurl_formatting(GameID)
# Start of Function
nfl.json.data <- RJSONIO::fromJSON(RCurl::getURL(urlstring))
# Date of Game
datestep1 <- stringr::str_extract(urlstring, pattern = "/[0-9]{10}/")
datestep2 <- stringr::str_extract(datestep1, pattern = "[0-9]{8}")
year <- substr(datestep2, start = 1, stop = 4)
month <- substr(datestep2, start = 5, stop = 6)
day <- substr(datestep2, start = nchar(datestep2)-1, stop = nchar(datestep2))
date <- as.Date(paste(month, day, year, sep = "/"), format = "%m/%d/%Y")
# Parsing Data
if (home == TRUE) {
home.team.name <- nfl.json.data[[1]]$home$abbr
# Passing Stats
qb.stats <- data.frame(stat = "passing", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$passing, c)))
qb.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$home$stats$passing,
c)))
# Running Stats
rb.stats <- data.frame(stat = "rush", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$rushing, c)))
rb.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$home$stats$rushing,
c)))
# Receiving Stats
wr.stats <- data.frame(stat = "receiving", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$receiving, c)))
wr.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$home$stats$receiving,
c)))
# Defensive Stats
def.stats <- data.frame(stat = "defense", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$defense, c)))
def.stats$playerID <- rownames(
t(sapply(nfl.json.data[[1]]$home$stats$defense
, c)))
# Kicking Stats
kicker.stats <- data.frame(stat = "kicking", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$kicking,
c)))
kicker.stats$playerID <- rownames(t(
sapply(nfl.json.data[[1]]$home$stats$kicking,
c)))
# Fumble Stats
fumb.stats <- data.frame(stat = "fumbles", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$fumbles, c)))
fumb.stats$playerID <- rownames(t(
sapply(nfl.json.data[[1]]$home$stats$fumbles,
c)))
# Kick Return Stats
kr.stats <- data.frame(stat = "kickreturn", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$kickret, c)))
kr.stats$playerID <- rownames(t(
sapply(nfl.json.data[[1]]$home$stats$kickret,
c)))
# Punt Return Stats
pr.stats <- data.frame(stat = "puntreturn", date, GameID, home.team.name,
t(sapply(nfl.json.data[[1]]$home$stats$puntret, c)))
pr.stats$playerID <- rownames(t(
sapply(nfl.json.data[[1]]$home$stats$puntret,
c)))
# List of Stats
home.team.stats <- list(HomePassing = qb.stats,
HomeRushing = rb.stats,
HomeReceiving = wr.stats,
HomeDef = def.stats,
HomeKicking = kicker.stats,
HomeFumbles = fumb.stats, HomeKR = kr.stats,
HomePR = pr.stats)
home.team.stats
} else {
away.team.name <- nfl.json.data[[1]]$away$abbr
# Passing Away Stats
qb.away.stats <- data.frame(stat = "passing", GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$passing,
c)))
qb.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$passing,
c)))
# Running Away Stats
rb.away.stats <- data.frame(stat = "rushing", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$rushing,
c)))
rb.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$rushing,
c)))
# Receiving Away Stats
wr.away.stats <- data.frame(stat = "receiving", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$receiving,
c)))
wr.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$receiving,
c)))
# Defensive Away Stats
def.away.stats <- data.frame(stat = "defense", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$defense,
c)))
def.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$defense,
c)))
# Kicking Away Stats
kicker.away.stats <- data.frame(stat = "kicking", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$kicking
, c)))
kicker.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$kicking,
c)))
# Fumble Away Stats
fumb.away.stats <- data.frame(stat = "fumbles", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$fumbles,
c)))
fumb.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$fumbles,
c)))
# Kick Return Away Stats
kr.away.stats <- data.frame(stat = "kickreturn", date, GameID, away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$kickret,
c)))
kr.away.stats$playerID <- rownames(t(sapply(nfl.json.data[[1]]$away$stats$kickret,
c)))
# Punt Return Away Stats
pr.away.stats <- data.frame(stat = "puntreturn", date, GameID,
away.team.name,
t(sapply(nfl.json.data[[1]]$away$stats$puntret,
c)))
pr.away.stats$playerID <- rownames(t(sapply(
nfl.json.data[[1]]$away$stats$puntret,
c)))
# List of Away Stats
awayTeamStats <- list(AwayPassing = qb.away.stats,
AwayRushing = rb.away.stats,
AwayReceiving = wr.away.stats,
AwayDef = def.away.stats,
AwayKicking = kicker.away.stats,
AwayFumb = fumb.away.stats,
AwayKR = kr.away.stats,
AwayPR = pr.away.stats)
awayTeamStats
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen_two_gaussian_mixture.R
\name{gen_two_gaussian_mixture}
\alias{gen_two_gaussian_mixture}
\title{Generate a sample from a two Gaussian mixture}
\usage{
gen_two_gaussian_mixture(n, p1, mu1, s1, mu2, s2, rn_seed = 42)
}
\arguments{
\item{n}{- the number of samples to generate}
\item{p1}{- the fractional probability of the first Gaussian (< 1.0)}
\item{mu1}{- the mean of the first Gaussian}
\item{s1}{- the standard deviation of the first Gaussian}
\item{mu2}{- the mean of the second Gaussian}
\item{s2}{- the standard deviation of the second Gaussian}
\item{rn_seed}{- the random number seed (default 42)}
}
\value{
rand.samples - a vector of n samples
}
\description{
Generate a sample from a two Gaussian mixture
}
\examples{
library(particlesizeR)
samples <- gen_two_gaussian_mixture(10000, 0.5, 0.5, 1.0, 10.0, 3.0)
}
|
/man/gen_two_gaussian_mixture.Rd
|
no_license
|
jrminter/particlesizeR
|
R
| false | true | 911 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen_two_gaussian_mixture.R
\name{gen_two_gaussian_mixture}
\alias{gen_two_gaussian_mixture}
\title{Generate a sample from a two Gaussian mixture}
\usage{
gen_two_gaussian_mixture(n, p1, mu1, s1, mu2, s2, rn_seed = 42)
}
\arguments{
\item{n}{- the number of samples to generate}
\item{p1}{- the fractional probability of the first Gaussian (< 1.0)}
\item{mu1}{- the mean of the first Gaussian}
\item{s1}{- the standard deviation of the first Gaussian}
\item{mu2}{- the mean of the second Gaussian}
\item{s2}{- the standard deviation of the second Gaussian}
\item{rn_seed}{- the random number seed (default 42)}
}
\value{
rand.samples - a vector of n samples
}
\description{
Generate a sample from a two Gaussian mixture
}
\examples{
library(particlesizeR)
samples <- gen_two_gaussian_mixture(10000, 0.5, 0.5, 1.0, 10.0, 3.0)
}
|
\name{IsReg.ts}
\alias{IsReg.ts}
\title{
Wrapper function for function \code{is.regular} from \code{zoo} package for \code{data.frame} objects
}
\description{
"IsReg.ts" is a wrapping Function for Function "is.regular" from "zoo" package. Given a time series (ts) a "data.frame" object it is converted into a "xts" object, while the regularity of the object is checked. The first column of the "data.frame" should contain a character string vector to be converted via as.POSIXct
accordingly with the date format (format) and time zone (tz).
}
\usage{IsReg.ts(data, format, tz)}
\arguments{
\item{data}{an object of class \code{data.frame} containing in its first column a character string vector
to be converted via as.POSIXct into a date vector according to the date format (format) and time zone (tz)
defined}
\item{format}{character string giving a date-time format as used by \code{strptime}.}
\item{tz}{a time zone specification to be used for the conversion, if one is required.
System-specific, but "" is the current time zone, and "GMT" is UTC (Universal Time, Coordinated).
Invalid values are most commonly treated as UTC, on some platforms with a warning.}
}
\value{Object of class \code{"list"}. This object contains 2 elements,
the first one contains a character string "_TSregular" if the xts object created is strict regular,
or "_TSirregular" if it is strict irregular. More details can be found in the "is.regular" function of the
"zoo" package.}
\details{
"IsReg" calls the as.POSIXct function from \code{base} package to convert an object to one of the two
classes used to represent date/times (calendar dates plus time to the nearest second).
More details can be found in the "is.regular" function of the "zoo" package.
}
%\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
%}
%\references{
%% ~~ possibly secondary sources and usages ~~
%}
\author{
J.A. Torres-Matallana
}
\examples{
library(EmiStatR)
data("P1")
class(P1)
head(P1)
ts <- IsReg.ts(data = P1, format = "\%Y-\%m-\%d \%H:\%M:\%S", tz = "UTC")
str(ts)
ts[[1]]
head(ts[[2]]); tail(ts[[2]])
plot(ts[[2]], ylab = "Precipitation [mm]")
}
\keyword{IsReg.ts }
\keyword{Is a time series regular}
|
/man/IsReg_ts.Rd
|
no_license
|
cran/stUPscales
|
R
| false | false | 2,251 |
rd
|
\name{IsReg.ts}
\alias{IsReg.ts}
\title{
Wrapper function for function \code{is.regular} from \code{zoo} package for \code{data.frame} objects
}
\description{
"IsReg.ts" is a wrapping Function for Function "is.regular" from "zoo" package. Given a time series (ts) a "data.frame" object it is converted into a "xts" object, while the regularity of the object is checked. The first column of the "data.frame" should contain a character string vector to be converted via as.POSIXct
accordingly with the date format (format) and time zone (tz).
}
\usage{IsReg.ts(data, format, tz)}
\arguments{
\item{data}{an object of class \code{data.frame} containing in its first column a character string vector
to be converted via as.POSIXct into a date vector according to the date format (format) and time zone (tz)
defined}
\item{format}{character string giving a date-time format as used by \code{strptime}.}
\item{tz}{a time zone specification to be used for the conversion, if one is required.
System-specific, but "" is the current time zone, and "GMT" is UTC (Universal Time, Coordinated).
Invalid values are most commonly treated as UTC, on some platforms with a warning.}
}
\value{Object of class \code{"list"}. This object contains 2 elements,
the first one contains a character string "_TSregular" if the xts object created is strict regular,
or "_TSirregular" if it is strict irregular. More details can be found in the "is.regular" function of the
"zoo" package.}
\details{
"IsReg" calls the as.POSIXct function from \code{base} package to convert an object to one of the two
classes used to represent date/times (calendar dates plus time to the nearest second).
More details can be found in the "is.regular" function of the "zoo" package.
}
%\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
%}
%\references{
%% ~~ possibly secondary sources and usages ~~
%}
\author{
J.A. Torres-Matallana
}
\examples{
library(EmiStatR)
data("P1")
class(P1)
head(P1)
ts <- IsReg.ts(data = P1, format = "\%Y-\%m-\%d \%H:\%M:\%S", tz = "UTC")
str(ts)
ts[[1]]
head(ts[[2]]); tail(ts[[2]])
plot(ts[[2]], ylab = "Precipitation [mm]")
}
\keyword{IsReg.ts }
\keyword{Is a time series regular}
|
isoline <- function(latt2Ns2) { ## util. to add Nb levels to an existing (2Nm, g) surface plot
seq2Nm <- seq(blackbox.getOption("FONKgLow")["twoNm"], latt2Ns2/blackbox.getOption("mincondS2"), length.out=100)
islog2Ns2 <- latt2Ns2
if (islogscale("latt2Ns2")) islog2Ns2 <- log(islog2Ns2) ## because tofullKrigingspace then assumes that latt2Ns2 is logscale
seqg <- sapply(seq2Nm, ## the twoNmu value because tofullK catches (twoNmu=NA & Nratio=NA)
function(v) {tofullKrigingspace(list(twoNmu=0, twoNm=v), fixedlist=list(latt2Ns2=islog2Ns2))["g"]}
)
lines(seq2Nm, seqg, type="l", lty=2)
}
|
/fuzzedpackages/blackbox/R/isoline.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 622 |
r
|
isoline <- function(latt2Ns2) { ## util. to add Nb levels to an existing (2Nm, g) surface plot
seq2Nm <- seq(blackbox.getOption("FONKgLow")["twoNm"], latt2Ns2/blackbox.getOption("mincondS2"), length.out=100)
islog2Ns2 <- latt2Ns2
if (islogscale("latt2Ns2")) islog2Ns2 <- log(islog2Ns2) ## because tofullKrigingspace then assumes that latt2Ns2 is logscale
seqg <- sapply(seq2Nm, ## the twoNmu value because tofullK catches (twoNmu=NA & Nratio=NA)
function(v) {tofullKrigingspace(list(twoNmu=0, twoNm=v), fixedlist=list(latt2Ns2=islog2Ns2))["g"]}
)
lines(seq2Nm, seqg, type="l", lty=2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/structuringdocument.R
\name{IsWellSectioned}
\alias{IsWellSectioned}
\title{IsWellSectioned}
\usage{
IsWellSectioned(u, v)
}
\arguments{
\item{u}{Vector, assumed to be ordered in ascending order}
\item{v}{Vector, assumed to be ordered in ascending order}
}
\value{
Logical value, TRUE if it is well ordered, FALSE if it is not
}
\description{
Function to assure a set of sections is well sectioned.
}
\details{
Basically it makes sure that, \eqn{u[1]<v[1]<u[2]<v[2]}, etc
}
\seealso{
Other Structuring Document:
\code{\link{CompileDocument}()},
\code{\link{DivideFile}()},
\code{\link{FindStructure}},
\code{\link{StructureDocument}()}
}
\author{
Alejandro Recuenco \email{alejandrogonzalezrecuenco@gmail.com}
}
\concept{Structuring Document}
\keyword{internal}
|
/man/IsWellSectioned.Rd
|
permissive
|
jsgro/TexExamRandomizer
|
R
| false | true | 849 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/structuringdocument.R
\name{IsWellSectioned}
\alias{IsWellSectioned}
\title{IsWellSectioned}
\usage{
IsWellSectioned(u, v)
}
\arguments{
\item{u}{Vector, assumed to be ordered in ascending order}
\item{v}{Vector, assumed to be ordered in ascending order}
}
\value{
Logical value, TRUE if it is well ordered, FALSE if it is not
}
\description{
Function to assure a set of sections is well sectioned.
}
\details{
Basically it makes sure that, \eqn{u[1]<v[1]<u[2]<v[2]}, etc
}
\seealso{
Other Structuring Document:
\code{\link{CompileDocument}()},
\code{\link{DivideFile}()},
\code{\link{FindStructure}},
\code{\link{StructureDocument}()}
}
\author{
Alejandro Recuenco \email{alejandrogonzalezrecuenco@gmail.com}
}
\concept{Structuring Document}
\keyword{internal}
|
library(dascombat)
library(reshape2)
library(dplyr)
library(testthat)
library(sva)
df = dascombat::combat_testdf
Y = acast(df, rowSeq~colSeq, value.var = "value")
bx = acast(df, rowSeq~colSeq, value.var = "RunID")[1,]
bx = factor(bx)
bx
a = factor(c('a','a','b','c','c'))
b = factor(c('a','a','cs'))
bIdx = (1:nlevels(bx))[bx[i] == levels(bx)]
sva.cMod = sva::ComBat(Y, bx, mean.only = TRUE, ref.batch = NULL)
model = dascombat::fit(Y, bx, mean.only = TRUE)
cMod = dascombat::applyModel(Y,model)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
sva.cMod = sva::ComBat(Y, bx, mean.only = FALSE, ref.batch = NULL)
model = dascombat::fit(Y, bx, mean.only = FALSE)
cMod = dascombat::applyModel(Y,model)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
sva.cMod = sva::ComBat(Y, bx, mean.only = TRUE, ref.batch = "1")
model = dascombat::fit(Y, bx, mean.only = TRUE, ref.batch = "1")
cMod = dascombat::applyModel(Y,model)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
sva.cMod = sva::ComBat(Y, bx, mean.only = FALSE, ref.batch = "1")
model = dascombat::fit(Y, bx, mean.only = FALSE, ref.batch = "1")
cMod = dascombat::applyModel(Y,model)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
expect_true(is.data.frame(df))
Y = acast(df, rowSeq~colSeq, value.var = "value")
bx = acast(df, rowSeq~colSeq, value.var = "RunID")[1,]
bx = factor(bx)
cMod = dascombat::LS.NoRef(Y, bx, mean.only = TRUE)
cMod = dascombat::LS.NoRef(Y, bx, mean.only = F)
plot(Y)
plot(cMod)
sva.cMod = sva::ComBat(Y, bx, mean.only = TRUE, ref.batch = NULL)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
cMod = dascombat::LS.Ref(Y, bx, mean.only = TRUE, ref.batch = "1")
sva.cMod = sva::ComBat(Y, bx, mean.only = TRUE, ref.batch = "1")
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
ref.batch = "1"
bx = relevel(factor(bx), ref = ref.batch) # ref will be the first level of bx
bx
lvbx = levels(bx)
nObsPerBatch = summary(bx)
nObsPerBatch
nObs = sum(nObsPerBatch)
nObs
Y = t(Y)
Y
# scaling based on ref.batch
B = model.matrix(~bx)
lamb.hat = solve( t(B)%*% B, t(B) %*% Y)
alpha_g = lamb.hat[1,]
siggsq = t(t((Y[bx == lvbx[1], ] - (B[bx == lvbx[1],] %*% lamb.hat))^2) %*% rep(1/nObsPerBatch[1], nObsPerBatch[1]))
siggsq = t(t((Y - (B %*% lamb.hat))^2) %*% rep(1/nObs, nObs))
Z = scale(Y, center = alpha_g, scale = sqrt(siggsq))
lambda.hat = solve( t(B)%*% B, t(B) %*% Z) # unadjusted location
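# (added note, not in the original script) Z now holds the data standardized
# with the reference-batch means (alpha_g) and pooled variances (siggsq);
# lambda.hat estimates the batch location effects (intercept plus batch
# offsets, since B = model.matrix(~bx)) on that standardized scale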
df = dascombat::combat_testdf
data = df %>% group_by(RunID) %>% do({
data.frame(groupRowIndex=group_rows(.))
})
class(df %>% group_by("RunID"))
length(df %>% group_by("RunID"))
df %>% group_by(RunID) %>% summarise(groupRowIndex=list(group_rows(.)))
df %>% group_by(RunID) %>% group_data() %>% summarise(groupRowIndex=max(.$.rows))
df <- tibble(x = c(1,1,2,2,1))
gf <- group_by(df, x)
group_vars(gf)
group_rows(gf)
ccc = c(1,1,1,1,2,2,5)
ccc[as.integer(list(1,2))]
ccc[as.integer(list(1,2))]
|
/tests/archive/integration/TST-INT-001-compare result to known output.R
|
no_license
|
pamgene/dascombat
|
R
| false | false | 2,957 |
r
|
library(dascombat)
library(reshape2)
library(dplyr)
library(testthat)
library(sva)
df = dascombat::combat_testdf
Y = acast(df, rowSeq~colSeq, value.var = "value")
bx = acast(df, rowSeq~colSeq, value.var = "RunID")[1,]
bx = factor(bx)
bx
a = factor(c('a','a','b','c','c'))
b = factor(c('a','a','cs'))
bIdx = (1:nlevels(bx))[bx[i] == levels(bx)]
sva.cMod = sva::ComBat(Y, bx, mean.only = TRUE, ref.batch = NULL)
model = dascombat::fit(Y, bx, mean.only = TRUE)
cMod = dascombat::applyModel(Y,model)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
sva.cMod = sva::ComBat(Y, bx, mean.only = FALSE, ref.batch = NULL)
model = dascombat::fit(Y, bx, mean.only = FALSE)
cMod = dascombat::applyModel(Y,model)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
sva.cMod = sva::ComBat(Y, bx, mean.only = TRUE, ref.batch = "1")
model = dascombat::fit(Y, bx, mean.only = TRUE, ref.batch = "1")
cMod = dascombat::applyModel(Y,model)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
sva.cMod = sva::ComBat(Y, bx, mean.only = FALSE, ref.batch = "1")
model = dascombat::fit(Y, bx, mean.only = FALSE, ref.batch = "1")
cMod = dascombat::applyModel(Y,model)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
expect_true(is.data.frame(df))
Y = acast(df, rowSeq~colSeq, value.var = "value")
bx = acast(df, rowSeq~colSeq, value.var = "RunID")[1,]
bx = factor(bx)
cMod = dascombat::LS.NoRef(Y, bx, mean.only = TRUE)
cMod = dascombat::LS.NoRef(Y, bx, mean.only = F)
plot(Y)
plot(cMod)
sva.cMod = sva::ComBat(Y, bx, mean.only = TRUE, ref.batch = NULL)
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
cMod = dascombat::LS.Ref(Y, bx, mean.only = TRUE, ref.batch = "1")
sva.cMod = sva::ComBat(Y, bx, mean.only = TRUE, ref.batch = "1")
expect_true(all(round(cMod,8) - round(sva.cMod,8) == 0))
ref.batch = "1"
bx = relevel(factor(bx), ref = ref.batch) # ref will be the first level of bx
bx
lvbx = levels(bx)
nObsPerBatch = summary(bx)
nObsPerBatch
nObs = sum(nObsPerBatch)
nObs
Y = t(Y)
Y
# scaling based on ref.batch
B = model.matrix(~bx)
lamb.hat = solve( t(B)%*% B, t(B) %*% Y)
alpha_g = lamb.hat[1,]
siggsq = t(t((Y[bx == lvbx[1], ] - (B[bx == lvbx[1],] %*% lamb.hat))^2) %*% rep(1/nObsPerBatch[1], nObsPerBatch[1]))
siggsq = t(t((Y - (B %*% lamb.hat))^2) %*% rep(1/nObs, nObs))
Z = scale(Y, center = alpha_g, scale = sqrt(siggsq))
lambda.hat = solve( t(B)%*% B, t(B) %*% Z) # unadjusted location
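# (added note, not in the original script) Z now holds the data standardized
# with the reference-batch means (alpha_g) and pooled variances (siggsq);
# lambda.hat estimates the batch location effects (intercept plus batch
# offsets, since B = model.matrix(~bx)) on that standardized scale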
df = dascombat::combat_testdf
data = df %>% group_by(RunID) %>% do({
data.frame(groupRowIndex=group_rows(.))
})
class(df %>% group_by("RunID"))
length(df %>% group_by("RunID"))
df %>% group_by(RunID) %>% summarise(groupRowIndex=list(group_rows(.)))
df %>% group_by(RunID) %>% group_data() %>% summarise(groupRowIndex=max(.$.rows))
df <- tibble(x = c(1,1,2,2,1))
gf <- group_by(df, x)
group_vars(gf)
group_rows(gf)
ccc = c(1,1,1,1,2,2,5)
ccc[as.integer(list(1,2))]
ccc[as.integer(list(1,2))]
|
#' Initialize bllflow object from provided config file
#'
#' Uses the provided config file and matching name to load the correct config
#' type
#'
#' @param config_env_name = NULL name of the config environment to use for
#' initialization
#'
#' @return constructed bllflow object
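#' @examples
#' \dontrun{
#' # illustrative only: assumes a config file with variables, variable_details
#' # and modules entries is available to config::get()
#' model <- bllflow_config_init()
#' }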
#' @export
bllflow_config_init <-
function(config_env_name = NULL) {
if (!is.null(config_env_name)) {
set_config_env_name(config_env_name)
}
config <- config::get()
ret_bllflow <-
build_bllflow(
variables = as.data.frame(config$variables),
variable_details = as.data.frame(config$variable_details),
modules = as.data.frame(config$modules)
)
return(ret_bllflow)
}
#' Read in data according to config specified data type
#'
#' Uses bllflow object to read_csv_data based on config specifications.
#' Currently supported formats are: .RData, .csv
#'
#' @param bllflow_object passed bllflow object to read variables from
#' @param config_env_name = NULL optional passing of config if you wish to load data
#' from a specific config
#'
#' @return NULL since no modifications are made and read data is just stored in
#' pre specified location that is read from the config
#' @export
bllflow_config_read_data <- function(bllflow_object, config_env_name = NULL) {
if (!is.null(config_env_name)) {
set_config_env_name(config_env_name)
}
config <- config::get()
# use variables to only read the specified variables??
for (data_name in names(config$data)) {
if (config$data_type == ".RData") {
load(config$data[[data_name]])
}
else if (config$data_type == ".csv") {
tmp_data <-
read_csv_data(
variables = bllflow_object$variables,
data_name = data_name,
path_to_data = config$data[[data_name]]
)
assign(data_name, tmp_data)
}
save(list = data_name,
file = file.path(config$data_dir,
paste0(data_name, ".RData")))
}
return(bllflow_object)
}
#' Recode data using config data
#'
#' Recodes data according to the config then saves it as RData file at a
#' specified location
#'
#' @param bllflow_object passed bllflow object to read variables from
#' @param config_env_name = NULL optional passing of config if you wish to load data
#' from a specific config
#'
#' @return NULL since no modifications are made and read data is just stored in
#' pre specified location
#' @export
bllflow_config_rec_data <- function(bllflow_object, config_env_name = NULL) {
# Consider making this into a function or let user pass loaded config
if (!is.null(config_env_name)) {
set_config_env_name(config_env_name)
}
config <- config::get()
for (data_name in names(config$data)) {
load(file.path(config$data_dir, paste0(data_name, ".RData")))
tmp_rec_data <- rec_with_table(
base::get(data_name),
variables = bllflow_object$variables,
variable_details = bllflow_object$variable_details,
database_name = data_name)
assign(data_name, tmp_rec_data)
save(list = data_name,
file = file.path(config$data_dir,
paste0(data_name,
"_recoded",
".RData")))
}
return(bllflow_object)
}
#' Combine data based on config specified location
#'
#' Combines recoded data and applies labels before attaching
#' the data to bllflow object
#'
#' @param bllflow_object passed bllflow object to read variables from
#' @param config_env_name = NULL optional passing of config if you wish to load data
#' from a specific config
#'
#' @return modified bllflow object containing labeled combined data
#' @export
bllflow_config_combine_data <- function(bllflow_object, config_env_name = NULL) {
if (!is.null(config_env_name)) {
set_config_env_name(config_env_name)
}
config <- config::get()
tmp_working_data <- NULL
for (data_name in names(config$data)) {
load(file.path(config$data_dir, paste0(data_name, "_recoded", ".RData")))
tmp_mod_data <- base::get(data_name)
tmp_mod_data[["data_name"]] <- data_name
if (is.null(tmp_working_data)) {
tmp_working_data <- tmp_mod_data
} else {
tmp_working_data <- dplyr::bind_rows(tmp_working_data, tmp_mod_data)
}
}
tmp_working_data <- bllflow::set_data_labels(
tmp_working_data,
bllflow_object$variable_details,
bllflow_object$variables)
bllflow_object[[pkg.globals$bllFlowContent.WorkingData]] <- tmp_working_data
bllflow_object[[pkg.globals$bllFlowContent.PreviousData]] <- tmp_working_data
attr(bllflow_object[[pkg.globals$bllFlowContent.WorkingData]],
pkg.globals$bllFlowContent.Sequence) <-
0
return(bllflow_object)
}
|
/R/bll-flow-constructor-utility.R
|
permissive
|
Big-Life-Lab/bllflow
|
R
| false | false | 4,737 |
r
|
#' Initialize bllflow object from provided config file
#'
#' Uses the provided config file and matching name to load the correct config
#' type
#'
#' @param config_env_name = NULL name of the config environment to use for
#' initialization
#'
#' @return constructed bllflow object
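#' @examples
#' \dontrun{
#' # illustrative only: assumes a config file with variables, variable_details
#' # and modules entries is available to config::get()
#' model <- bllflow_config_init()
#' }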
#' @export
bllflow_config_init <-
function(config_env_name = NULL) {
if (!is.null(config_env_name)) {
set_config_env_name(config_env_name)
}
config <- config::get()
ret_bllflow <-
build_bllflow(
variables = as.data.frame(config$variables),
variable_details = as.data.frame(config$variable_details),
modules = as.data.frame(config$modules)
)
return(ret_bllflow)
}
#' Read in data according to config specified data type
#'
#' Uses bllflow object to read_csv_data based on config specifications.
#' Currently supported formats are: .RData, .csv
#'
#' @param bllflow_object passed bllflow object to read variables from
#' @param config_env_name = NULL optional passing of config if you wish to load data
#' from a specific config
#'
#' @return NULL since no modifications are made and read data is just stored in
#' pre specified location that is read from the config
#' @export
bllflow_config_read_data <- function(bllflow_object, config_env_name = NULL) {
if (!is.null(config_env_name)) {
set_config_env_name(config_env_name)
}
config <- config::get()
# use variables to only read the specified variables??
for (data_name in names(config$data)) {
if (config$data_type == ".RData") {
load(config$data[[data_name]])
}
else if (config$data_type == ".csv") {
tmp_data <-
read_csv_data(
variables = bllflow_object$variables,
data_name = data_name,
path_to_data = config$data[[data_name]]
)
assign(data_name, tmp_data)
}
save(list = data_name,
file = file.path(config$data_dir,
paste0(data_name, ".RData")))
}
return(bllflow_object)
}
#' Recode data using config data
#'
#' Recodes data according to the config then saves it as RData file at a
#' specified location
#'
#' @param bllflow_object passed bllflow object to read variables from
#' @param config_env_name = NULL optional passing of config if you wish to load data
#' from a specific config
#'
#' @return NULL since no modifications are made and read data is just stored in
#' pre specified location
#' @export
bllflow_config_rec_data <- function(bllflow_object, config_env_name = NULL) {
# Consider making this into a function or let user pass loaded config
if (!is.null(config_env_name)) {
set_config_env_name(config_env_name)
}
config <- config::get()
for (data_name in names(config$data)) {
load(file.path(config$data_dir, paste0(data_name, ".RData")))
tmp_rec_data <- rec_with_table(
base::get(data_name),
variables = bllflow_object$variables,
variable_details = bllflow_object$variable_details,
database_name = data_name)
assign(data_name, tmp_rec_data)
save(list = data_name,
file = file.path(config$data_dir,
paste0(data_name,
"_recoded",
".RData")))
}
return(bllflow_object)
}
#' Combine data based on config specified location
#'
#' Combines recoded data and applies labels before attaching
#' the data to bllflow object
#'
#' @param bllflow_object passed bllflow object to read variables from
#' @param config_env_name = NULL optional passing of config if you wish to load data
#' from a specific config
#'
#' @return modified bllflow object containing labeled combined data
#' @export
bllflow_config_combine_data <- function(bllflow_object, config_env_name = NULL) {
if (!is.null(config_env_name)) {
set_config_env_name(config_env_name)
}
config <- config::get()
tmp_working_data <- NULL
for (data_name in names(config$data)) {
load(file.path(config$data_dir, paste0(data_name, "_recoded", ".RData")))
tmp_mod_data <- base::get(data_name)
tmp_mod_data[["data_name"]] <- data_name
if (is.null(tmp_working_data)) {
tmp_working_data <- tmp_mod_data
} else {
tmp_working_data <- dplyr::bind_rows(tmp_working_data, tmp_mod_data)
}
}
tmp_working_data <- bllflow::set_data_labels(
tmp_working_data,
bllflow_object$variable_details,
bllflow_object$variables)
bllflow_object[[pkg.globals$bllFlowContent.WorkingData]] <- tmp_working_data
bllflow_object[[pkg.globals$bllFlowContent.PreviousData]] <- tmp_working_data
attr(bllflow_object[[pkg.globals$bllFlowContent.WorkingData]],
pkg.globals$bllFlowContent.Sequence) <-
0
return(bllflow_object)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/change_nf_file_names.R
\name{crssi_change_nf_file_names}
\alias{crssi_change_nf_file_names}
\alias{copyAndChangeNFFileNames}
\title{Rename CRSS Input Files}
\usage{
crssi_change_nf_file_names(iFolder, oFolder, nTrace, fromNames, toNames)
copyAndChangeNFFileNames(iFolder, oFolder, nTrace, fromNames, toNames)
}
\arguments{
\item{iFolder}{Path to the input folder containing trace folders.}
\item{oFolder}{Path to the output folder containing trace folders.}
\item{nTrace}{Number of traces to process}
\item{fromNames}{A vector of the file names found in iFolder/traceN.}
\item{toNames}{A vector of the file names to create in oFolder/traceN.}
}
\value{
Nothing is returned from function.
}
\description{
Rename the CRSS natural flow or salt input files.
}
\details{
Because multiple versions of CRSS exist and the inflow locations have had
their names changed in recent years, it is necessary to create files with
different file names. It might be easier to copy existing files instead of
creating files from the source data, as \code{\link[=crssi_create_dnf_files]{crssi_create_dnf_files()}} does.
\code{crssi_change_nf_file_names()} assumes the folders
are structured for CRSS input, e.g., C:/CRSS/dmi/NFSinput/trace1/...
}
\examples{
# load the common old and new natural flow file names included with the
# CRSSIO package.
\dontrun{
iFolder <- 'C:/CRSS/dmi/NFSinputOrig/'
oFolder <- 'C:/CRSS/dmi/NFSinputNew/'
oldFileNames <- nf_file_names(version = 1)
newFileNames <- nf_file_names(version = 2)
crssi_change_nf_file_names(iFolder, oFolder, 107,oldFileNames, newFileNames)
}
}
|
/man/crssi_change_nf_file_names.Rd
|
no_license
|
rabutler/CRSSIO
|
R
| false | true | 1,665 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/change_nf_file_names.R
\name{crssi_change_nf_file_names}
\alias{crssi_change_nf_file_names}
\alias{copyAndChangeNFFileNames}
\title{Rename CRSS Input Files}
\usage{
crssi_change_nf_file_names(iFolder, oFolder, nTrace, fromNames, toNames)
copyAndChangeNFFileNames(iFolder, oFolder, nTrace, fromNames, toNames)
}
\arguments{
\item{iFolder}{Path to the input folder containing trace folders.}
\item{oFolder}{Path to the output folder containing trace folders.}
\item{nTrace}{Number of traces to process}
\item{fromNames}{A vector of the file names found in iFolder/traceN.}
\item{toNames}{A vector of the file names to create in oFolder/traceN.}
}
\value{
Nothing is returned from function.
}
\description{
Rename the CRSS natural flow or salt input files.
}
\details{
Because multiple versions of CRSS exist and the inflow locations have had
their names changed in recent years, it is necessary to create files with
different file names. It might be easier to copy existing files instead of
creating files from the source data, as \code{\link[=crssi_create_dnf_files]{crssi_create_dnf_files()}} does.
\code{crssi_change_nf_file_names()} assumes the folders
are structured for CRSS input, e.g., C:/CRSS/dmi/NFSinput/trace1/...
}
\examples{
# load the common old and new natural flow file names included with the
# CRSSIO package.
\dontrun{
iFolder <- 'C:/CRSS/dmi/NFSinputOrig/'
oFolder <- 'C:/CRSS/dmi/NFSinputNew/'
oldFileNames <- nf_file_names(version = 1)
newFileNames <- nf_file_names(version = 2)
crssi_change_nf_file_names(iFolder, oFolder, 107,oldFileNames, newFileNames)
}
}
|
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
param <- list(max.depth=2,eta=1,nthread = 2, silent=1)
watchlist <- list(eval = dtest)
num_round <- 20
# user-defined objective function: given predictions, return the gradient and second-order gradient
# this is loglikelihood loss
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
# user-defined evaluation function, returns a pair (metric_name, result)
# NOTE: when you use a customized loss function, the default prediction value is the margin.
# This may keep the built-in evaluation metrics from working properly.
# For example, we are doing logistic loss here, so the prediction is the score before the logistic transformation,
# while the built-in evaluation error assumes the input is after the logistic transformation.
# Keep this in mind when you use the customization; you may need to write a customized evaluation function.
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
return(list(metric = "error", value = err))
}
print ('start training with early Stopping setting')
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
bst <- xgb.train(param, dtrain, num_round, watchlist, logregobj, evalerror, maximize = FALSE,
early.stop.round = 3)
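# illustrative addition (not part of the original demo): because the custom
# objective works on margins, predictions from the early-stopped booster are
# margin scores and must be pushed through the logistic function by hand
margin.preds <- predict(bst, dtest)
prob.preds <- 1 / (1 + exp(-margin.preds))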
bst <- xgb.cv(param, dtrain, num_round, nfold=5, obj=logregobj, feval = evalerror,
maximize = FALSE, early.stop.round = 3)
|
/tools/xgboost-0.40/R-package/demo/early_stopping.R
|
permissive
|
hezila/kdd2015
|
R
| false | false | 2,008 |
r
|
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
param <- list(max.depth=2,eta=1,nthread = 2, silent=1)
watchlist <- list(eval = dtest)
num_round <- 20
# user-defined objective function: given predictions, return the gradient and second-order gradient
# this is loglikelihood loss
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
# user-defined evaluation function, returns a pair (metric_name, result)
# NOTE: when you use a customized loss function, the default prediction value is the margin.
# This may keep the built-in evaluation metrics from working properly.
# For example, we are doing logistic loss here, so the prediction is the score before the logistic transformation,
# while the built-in evaluation error assumes the input is after the logistic transformation.
# Keep this in mind when you use the customization; you may need to write a customized evaluation function.
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
return(list(metric = "error", value = err))
}
print ('start training with early Stopping setting')
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
bst <- xgb.train(param, dtrain, num_round, watchlist, logregobj, evalerror, maximize = FALSE,
early.stop.round = 3)
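# illustrative addition (not part of the original demo): because the custom
# objective works on margins, predictions from the early-stopped booster are
# margin scores and must be pushed through the logistic function by hand
margin.preds <- predict(bst, dtest)
prob.preds <- 1 / (1 + exp(-margin.preds))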
bst <- xgb.cv(param, dtrain, num_round, nfold=5, obj=logregobj, feval = evalerror,
maximize = FALSE, early.stop.round = 3)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phyBranchAL.r
\name{phyBranchAL_Abu}
\alias{phyBranchAL_Abu}
\title{R code for phylo to Chaophyabu function, speed up performance}
\usage{
phyBranchAL_Abu(phylo, data, datatype = "abundance", refT = 0,
rootExtend = T, remove0 = T)
}
\arguments{
\item{phylo}{a phylo object}
\item{data}{a vector with names}
}
\value{
a Chaophyabu object
}
\description{
R code for phylo to Chaophyabu function, speed up performance
}
\examples{
data(AbuALdata)
adata<-AbuALdata$abudata
atree<-AbuALdata$tree
vdata<-adata$EM
names(vdata)<-rownames(adata)
refTs<-c(400,325,250)
result<-phyBranchAL_Abu(atree,vdata,datatype="abundance",refTs)
result$treeNabu
result$treeH
result$BLbyT
}
|
/man/phyBranchAL_Abu.Rd
|
no_license
|
chaolab2019/chaoUtility
|
R
| false | true | 751 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phyBranchAL.r
\name{phyBranchAL_Abu}
\alias{phyBranchAL_Abu}
\title{R code for phylo to Chaophyabu function, speed up performance}
\usage{
phyBranchAL_Abu(phylo, data, datatype = "abundance", refT = 0,
rootExtend = T, remove0 = T)
}
\arguments{
\item{phylo}{a phylo object}
\item{data}{a vector with names}
}
\value{
a Chaophyabu object
}
\description{
R code for phylo to Chaophyabu function, speed up performance
}
\examples{
data(AbuALdata)
adata<-AbuALdata$abudata
atree<-AbuALdata$tree
vdata<-adata$EM
names(vdata)<-rownames(adata)
refTs<-c(400,325,250)
result<-phyBranchAL_Abu(atree,vdata,datatype="abundance",refTs)
result$treeNabu
result$treeH
result$BLbyT
}
|
# loading the packages
# source("~/GitHub/r-continuous-network/R/package_to_load.R")
# source("~/GitHub/r-continuous-network/R/utils.R")
# source("~/go/src/r-continuous-network/R/adjacency_generation.R")
#
# source("~/GitHub/r-continuous-network/R/levy_recovery_v2.R")
# source("~/GitHub/r-continuous-network/R/utils.R")
# source("~/GitHub/r-continuous-network/R/path_generation.R")
# source("~/GitHub/r-continuous-network/R/grou_mle.R")
library(ntwk)
# We do not use the load data but wind since load is too correlated and not very random
AS_SPARSE <- F
#######################################################################
##################### DATA PREPARATION ################################
#######################################################################
# Functions and procedures to clean the data
data_path <- "~/Downloads/RE-Europe_dataset_package/"
n_df_load <- 25000
n_nodes <- 50
df_load <- data.table::fread(paste(data_path, "Nodal_TS/wind_signal_COSMO.csv", sep=""), nrows = n_df_load+10)[,2:(n_nodes+1)]
df_load <- df_load[-c(1:10),]
df_load <- as.matrix(df_load)
clean_wind_data <- CleanData(df_load, frequency = 24, s.window = 24, t.window = 24)
core_wind <- clean_wind_data$remainders
plot(clean_wind_data$stl_obj$V2)
plot(core_wind[,1])
# Network topology
load_nodes <- read.csv(file=paste(data_path, "Metadata/network_nodes.csv", sep=""))
load_nodes <- load_nodes[1:n_nodes,]
topo_nodes <- data.frame("name"=load_nodes$ID,
"lon"= load_nodes$longitude,
"lat"=load_nodes$latitude)
load_edges <- read.csv(file=paste(data_path, "Metadata/network_edges.csv", sep=""))
load_edges <- load_edges[which(load_edges$fromNode %in% 1:n_nodes &
load_edges$toNode %in% 1:n_nodes),]
topo_edges <- data.frame("from" = load_edges$fromNode,
"to" = load_edges$toNode)
topo_graph <- igraph::graph.data.frame(d = topo_edges, directed = FALSE, vertices = topo_nodes)
adj_grid <- igraph::as_adjacency_matrix(topo_graph, sparse = AS_SPARSE)
adj_grid <- as.matrix(adj_grid)
mesh_size <- 2/24
europeanUnion <- c("Spain", "Portugal")
maps::map(region=europeanUnion, col="grey80", fill=TRUE, bg="white", lwd=0.1)
igraph::plot.igraph(x = topo_graph, add=T, rescale=F,
layout=topo_nodes[,2:3], vertex.label=NA, arrow.size=0.4, edge.color='black')
visNetwork::visNetwork(
nodes=topo_nodes, edges=topo_edges
)
plot(igraph::graph_from_adjacency_matrix(adjmatrix = adj_grid))
igraph::layout_on_grid(igraph::graph_from_adjacency_matrix(PolymerNetwork(10, 2, 4)))
plot(igraph::layout_on_grid(igraph::graph_from_adjacency_matrix(PolymerNetwork(50, 2, 4)), width = 7, height = 8))
plot(igraph::graph_from_adjacency_matrix(PolymerNetwork(50, 2, 4)))
pdf("data/pictures/pdf_network_configurations_v2.pdf", width = 7, height = 2)
par(mfrow=c(1,4))
node.w <- 1
node.h <- 1
tmp <- PolymerNetwork(50, 2, 4)
tmp[lower.tri(tmp, diag = F)] <- 0
qgraph::qgraph(tmp, directed=F, parallelEdge=T, weighted=F, vTrans=255,
labels=F, node.width=node.w, node.height=node.h, layout='circular', edge.color='black',
edge.width=1.5, esize=2, title='Polymer', title.cex=1.5)
tmp <- LatticeNetwork(50, 2, 4)
tmp[lower.tri(tmp, diag = F)] <- 0
qgraph::qgraph(tmp, directed=F, parallelEdge=T, weighted=F, vTrans=255,
labels=F, node.width=node.w, node.height=node.h, layout='spring', edge.color='black',
edge.width=1.5, esize=2, title='Lattice', title.cex=1.5)
tmp <- FullyConnectedNetwork(50, 2, 4)
tmp[lower.tri(tmp, diag = F)] <- 0
qgraph::qgraph(tmp, directed=F, parallelEdge=T, weighted=F, vTrans=255,
labels=F, node.width=node.w, node.height=node.h, layout='spring', trans=0.2,
edge.width=1.5, esize=2, title='Fully-Connected', title.cex=1.5)
qgraph::qgraph(adj_grid,
directed=F, parallelEdge=T, weighted=F, vTrans=255,
labels=F, node.width=node.w, node.height=node.h, title.cex=1.5, edge.color='black',
edge.width=1.5, esize=2, title='RE-Europe 50')
dev.off()
|
/R/end_to_end_graph_plots.R
|
permissive
|
valcourgeau/r-continuous-network
|
R
| false | false | 4,115 |
r
|
# loading the packages
# source("~/GitHub/r-continuous-network/R/package_to_load.R")
# source("~/GitHub/r-continuous-network/R/utils.R")
# source("~/go/src/r-continuous-network/R/adjacency_generation.R")
#
# source("~/GitHub/r-continuous-network/R/levy_recovery_v2.R")
# source("~/GitHub/r-continuous-network/R/utils.R")
# source("~/GitHub/r-continuous-network/R/path_generation.R")
# source("~/GitHub/r-continuous-network/R/grou_mle.R")
library(ntwk)
# We do not use the load data but wind since load is too correlated and not very random
AS_SPARSE <- F
#######################################################################
##################### DATA PREPARATION ################################
#######################################################################
# Functions and procedures to clean the data
data_path <- "~/Downloads/RE-Europe_dataset_package/"
n_df_load <- 25000
n_nodes <- 50
df_load <- data.table::fread(paste(data_path, "Nodal_TS/wind_signal_COSMO.csv", sep=""), nrows = n_df_load+10)[,2:(n_nodes+1)]
df_load <- df_load[-c(1:10),]
df_load <- as.matrix(df_load)
clean_wind_data <- CleanData(df_load, frequency = 24, s.window = 24, t.window = 24)
core_wind <- clean_wind_data$remainders
plot(clean_wind_data$stl_obj$V2)
plot(core_wind[,1])
# Network topology
load_nodes <- read.csv(file=paste(data_path, "Metadata/network_nodes.csv", sep=""))
load_nodes <- load_nodes[1:n_nodes,]
topo_nodes <- data.frame("name"=load_nodes$ID,
"lon"= load_nodes$longitude,
"lat"=load_nodes$latitude)
load_edges <- read.csv(file=paste(data_path, "Metadata/network_edges.csv", sep=""))
load_edges <- load_edges[which(load_edges$fromNode %in% 1:n_nodes &
load_edges$toNode %in% 1:n_nodes),]
topo_edges <- data.frame("from" = load_edges$fromNode,
"to" = load_edges$toNode)
topo_graph <- igraph::graph.data.frame(d = topo_edges, directed = FALSE, vertices = topo_nodes)
adj_grid <- igraph::as_adjacency_matrix(topo_graph, sparse = AS_SPARSE)
adj_grid <- as.matrix(adj_grid)
mesh_size <- 2/24
europeanUnion <- c("Spain", "Portugal")
maps::map(region=europeanUnion, col="grey80", fill=TRUE, bg="white", lwd=0.1)
igraph::plot.igraph(x = topo_graph, add=T, rescale=F,
layout=topo_nodes[,2:3], vertex.label=NA, arrow.size=0.4, edge.color='black')
visNetwork::visNetwork(
nodes=topo_nodes, edges=topo_edges
)
plot(igraph::graph_from_adjacency_matrix(adjmatrix = adj_grid))
igraph::layout_on_grid(igraph::graph_from_adjacency_matrix(PolymerNetwork(10, 2, 4)))
plot(igraph::layout_on_grid(igraph::graph_from_adjacency_matrix(PolymerNetwork(50, 2, 4)), width = 7, height = 8))
plot(igraph::graph_from_adjacency_matrix(PolymerNetwork(50, 2, 4)))
pdf("data/pictures/pdf_network_configurations_v2.pdf", width = 7, height = 2)
par(mfrow=c(1,4))
node.w <- 1
node.h <- 1
tmp <- PolymerNetwork(50, 2, 4)
tmp[lower.tri(tmp, diag = F)] <- 0
qgraph::qgraph(tmp, directed=F, parallelEdge=T, weighted=F, vTrans=255,
labels=F, node.width=node.w, node.height=node.h, layout='circular', edge.color='black',
edge.width=1.5, esize=2, title='Polymer', title.cex=1.5)
tmp <- LatticeNetwork(50, 2, 4)
tmp[lower.tri(tmp, diag = F)] <- 0
qgraph::qgraph(tmp, directed=F, parallelEdge=T, weighted=F, vTrans=255,
labels=F, node.width=node.w, node.height=node.h, layout='spring', edge.color='black',
edge.width=1.5, esize=2, title='Lattice', title.cex=1.5)
tmp <- FullyConnectedNetwork(50, 2, 4)
tmp[lower.tri(tmp, diag = F)] <- 0
qgraph::qgraph(tmp, directed=F, parallelEdge=T, weighted=F, vTrans=255,
labels=F, node.width=node.w, node.height=node.h, layout='spring', trans=0.2,
edge.width=1.5, esize=2, title='Fully-Connected', title.cex=1.5)
qgraph::qgraph(adj_grid,
directed=F, parallelEdge=T, weighted=F, vTrans=255,
labels=F, node.width=node.w, node.height=node.h, title.cex=1.5, edge.color='black',
edge.width=1.5, esize=2, title='RE-Europe 50')
dev.off()
|
attach(mtcars)
summary(mtcars)
plot(wt,mpg,
main="Mileage vs. Car Weight",
xlab="Weight",ylab="Mileage",
pch=18,col="blue")
text(wt,mpg,row.names(mtcars),
cex=0.6,pos=4,col="red")
detach(mtcars)
|
/learn/test20180206.R
|
no_license
|
duyux/R
|
R
| false | false | 215 |
r
|
attach(mtcars)
summary(mtcars)
plot(wt,mpg,
main="Mileage vs. Car Weight",
xlab="Weight",ylab="Mileage",
pch=18,col="blue")
text(wt,mpg,row.names(mtcars),
cex=0.6,pos=4,col="red")
detach(mtcars)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{recode_race_regex}
\alias{recode_race_regex}
\title{A function}
\usage{
recode_race_regex(vec)
}
\description{
A function
}
\examples{
recode_race_regex()
}
|
/man/recode_race_regex.Rd
|
no_license
|
srhoads/srhoads
|
R
| false | true | 253 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{recode_race_regex}
\alias{recode_race_regex}
\title{A function}
\usage{
recode_race_regex(vec)
}
\description{
A function
}
\examples{
recode_race_regex()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meowR.R
\name{meowR}
\alias{meowR}
\title{Play a short kitty meow sound}
\usage{
meowR(sound = 3)
}
\arguments{
\item{sound}{A character string or a number specifying what sound to be
played by either specifying one of the built in sounds, specifying the path
to a \code{.wav} file or specifying an url. There are currently 6 meows
included. The default is \code{3}.}
}
\description{
Play a short kitty meow sound
}
\examples{
\dontrun{
# play Eno's meow
kittyR::meowR(sound = 4)
}
}
\author{
\href{https://github.com/IndrajeetPatil/}{Indrajeet Patil}
}
|
/man/meowR.Rd
|
permissive
|
SantoshSrinivas79/kittyR
|
R
| false | true | 632 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meowR.R
\name{meowR}
\alias{meowR}
\title{Play a short kitty meow sound}
\usage{
meowR(sound = 3)
}
\arguments{
\item{sound}{A character string or a number specifying what sound to be
played by either specifying one of the built in sounds, specifying the path
to a \code{.wav} file or specifying an url. There are currently 6 meows
included. The default is \code{3}.}
}
\description{
Play a short kitty meow sound
}
\examples{
\dontrun{
# play Eno's meow
kittyR::meowR(sound = 4)
}
}
\author{
\href{https://github.com/IndrajeetPatil/}{Indrajeet Patil}
}
|
#' Calculates the smallest distance between two modular numbers
#'
#' This function takes the difference b-a and returns the signed difference with the smallest absolute value in modulus base.
#' The function will compute b mod base and a mod base before subtracting.
#'
#' @param diff difference: (b mod base)-(a mod base)
#' @param base base of the modulus numbers a and b.
#' @return signed difference of b and a with the smallest possible absolute value in modulus base
#'
#' @export
better.subtraction <- function(diff, base=2*pi) {
# Check for smaller distances for every element in vector
diff <- sapply(diff, function(elem) {
if (elem > +base/2) elem <- elem-base
else if (elem < -base/2) elem <- elem+base
return(elem)
})
# Return result
return(diff)
}
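# illustrative usage (added, not part of the package source): with the default
# base of 2*pi, a raw difference of 3*pi/2 is folded back to the shorter
# signed distance of -pi/2
# better.subtraction(3 * pi / 2)   # returns approximately -1.5707963 (-pi/2)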
|
/RHotStuff/R/better_subtraction.R
|
permissive
|
AlreadyTakenJonas/bachelorThesisSummary
|
R
| false | false | 793 |
r
|
#' Calculates the smallest distance between two modular numbers
#'
#' This function takes the difference b-a and returns the signed difference with the smallest absolute value in modulus base.
#' The function will compute b mod base and a mod base before subtracting.
#'
#' @param diff difference: (b mod base)-(a mod base)
#' @param base base of the modulus numbers a and b.
#' @return signed difference of b and a with the smallest possible absolute value in modulus base
#'
#' @export
better.subtraction <- function(diff, base=2*pi) {
# Check for smaller distances for every element in vector
diff <- sapply(diff, function(elem) {
if (elem > +base/2) elem <- elem-base
else if (elem < -base/2) elem <- elem+base
return(elem)
})
# Return result
return(diff)
}
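# illustrative usage (added, not part of the package source): with the default
# base of 2*pi, a raw difference of 3*pi/2 is folded back to the shorter
# signed distance of -pi/2
# better.subtraction(3 * pi / 2)   # returns approximately -1.5707963 (-pi/2)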
|
/man/multicostring.Rd
|
no_license
|
hillhillll/LSAfun
|
R
| false | false | 2,220 |
rd
| ||
library(dplyr)
#download dataset
zipfile <- file.path("data","dataset.zip")
#create data directory
if (!file.exists("data")) {
dir.create("data")
}
#download zip file
if (!file.exists(zipfile)) {
url1 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url1,zipfile,method ="libcurl")
}
#get activity labels
actLabels <- read.table(
unz(zipfile,file.path("UCI HAR Dataset","activity_labels.txt")),
sep=" ")
#get the feature descriptions
featureNames <- read.table(
unz(zipfile,file.path("UCI HAR Dataset","features.txt")),
sep=" ")
#find indices of "mean" and "std" features
ix <- grep(".*(mean|std).*",featureNames[[2]])
colNames <- featureNames[[2]][ix]
#read the test data
fpath <- file.path("UCI HAR Dataset","test","subject_test.txt")
subjecttest <- read.table(unz(zipfile,fpath), header = FALSE) %>% tbl_df()
fpath <- file.path("UCI HAR Dataset","test","X_test.txt")
xtest <- read.table(unz(zipfile,fpath), header = FALSE)[,ix] %>% tbl_df()
fpath <- file.path("UCI HAR Dataset","test","y_test.txt")
ytest <- read.table(unz(zipfile,fpath), header = FALSE) %>% tbl_df()
#read the training data
fpath <- file.path("UCI HAR Dataset","train","subject_train.txt")
subjecttrain <- read.table(unz(zipfile,fpath), header = FALSE) %>% tbl_df()
fpath <- file.path("UCI HAR Dataset","train","X_train.txt")
xtrain <- read.table(unz(zipfile,fpath), header = FALSE)[,ix] %>% tbl_df()
fpath <- file.path("UCI HAR Dataset","train","y_train.txt")
ytrain <- read.table(unz(zipfile,fpath), header = FALSE) %>% tbl_df()
#add column names
names(xtest) <- colNames
names(xtrain) <- colNames
#add subject and activity information
xtrain <- cbind( as.data.frame( actLabels[[2]][ytrain[[1]]] ),
as.data.frame( subjecttrain[[1]] ),
xtrain)
names(xtrain)[1:2] <- c("activity","subject")
xtest <- cbind( as.data.frame( actLabels[[2]][ytest[[1]]] ),
as.data.frame( subjecttest[[1]] ),
xtest)
names(xtest)[1:2] <- c("activity","subject")
#merge the test and training datasets
motionData <- bind_rows(xtrain,xtest)
#create a second data set with the average of each variable
#for each activity and each subject.
motionDataAveraged <- group_by(motionData,activity,subject) %>%
summarise_each(funs(mean(., na.rm=TRUE)) )
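# note (added): summarise_each() is superseded in current dplyr; an equivalent is
# summarise(across(everything(), ~ mean(.x, na.rm = TRUE)))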
write.csv(motionData,file.path("data","motionData.csv"))
write.csv(motionDataAveraged,file.path("data","motionDataAveraged.csv"))
|
/run_analysis.R
|
no_license
|
rschweiz/GettingAndCleaningDataProject
|
R
| false | false | 2,570 |
r
|
library(dplyr)
#download dataset
zipfile <- file.path("data","dataset.zip")
#create data directory
if (!file.exists("data")) {
dir.create("data")
}
#download zip file
if (!file.exists(zipfile)) {
url1 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url1,zipfile,method ="libcurl")
}
#get activity labels
actLabels <- read.table(
unz(zipfile,file.path("UCI HAR Dataset","activity_labels.txt")),
sep=" ")
#get the feature descriptions
featureNames <- read.table(
unz(zipfile,file.path("UCI HAR Dataset","features.txt")),
sep=" ")
#find indices of "mean" and "std" features
ix <- grep(".*(mean|std).*",featureNames[[2]])
colNames <- featureNames[[2]][ix]
#read the test data
fpath <- file.path("UCI HAR Dataset","test","subject_test.txt")
subjecttest <- read.table(unz(zipfile,fpath), header = FALSE) %>% tbl_df()
fpath <- file.path("UCI HAR Dataset","test","X_test.txt")
xtest <- read.table(unz(zipfile,fpath), header = FALSE)[,ix] %>% tbl_df()
fpath <- file.path("UCI HAR Dataset","test","y_test.txt")
ytest <- read.table(unz(zipfile,fpath), header = FALSE) %>% tbl_df()
#read the training data
fpath <- file.path("UCI HAR Dataset","train","subject_train.txt")
subjecttrain <- read.table(unz(zipfile,fpath), header = FALSE) %>% tbl_df()
fpath <- file.path("UCI HAR Dataset","train","X_train.txt")
xtrain <- read.table(unz(zipfile,fpath), header = FALSE)[,ix] %>% tbl_df()
fpath <- file.path("UCI HAR Dataset","train","y_train.txt")
ytrain <- read.table(unz(zipfile,fpath), header = FALSE) %>% tbl_df()
#add column names
names(xtest) <- colNames
names(xtrain) <- colNames
#add subject and activity information
xtrain <- cbind( as.data.frame( actLabels[[2]][ytrain[[1]]] ),
as.data.frame( subjecttrain[[1]] ),
xtrain)
names(xtrain)[1:2] <- c("activity","subject")
xtest <- cbind( as.data.frame( actLabels[[2]][ytest[[1]]] ),
as.data.frame( subjecttest[[1]] ),
xtest)
names(xtest)[1:2] <- c("activity","subject")
#merge the test and training datasets
motionData <- bind_rows(xtrain,xtest)
#create a second data set with the average of each variable
#for each activity and each subject.
motionDataAveraged <- group_by(motionData,activity,subject) %>%
summarise_each(funs(mean(., na.rm=TRUE)) )
write.csv(motionData,file.path("data","motionData.csv"))
write.csv(motionDataAveraged,file.path("data","motionDataAveraged.csv"))
|
library(tidyverse)
library(gt)
descrip <- tribble(
~method, ~parameter, ~description,
"rMATS", "annotation", "Uses annotation and library reads to create a database of splice graphs (+-)",
"JunctionSeq", "annotation", " Uses annotation to name locus (-)",
"Leafcutter", "annotation", "Uses annotation and library to selected testable features (introns) (+)",
"Majiq", "annotation", "Uses annotation and library to selected testable events (+)",
#
"rMATS", "coverage_filter", "None",
"JunctionSeq", "coverage_filter", "--minCount at `qorts mergeNovelSplices` default: 9",
"Leafcutter", "coverage_filter", "-m at `leafcutter_cluster_regtools.py`` default: 50",
"Leafcutter", "coverage_filter", "--min_coverage at `leafcutter_ds.R` default: 20",
"Majiq", "coverage", "--minreads `majiq build` default: 10)",
#
"rMATS", "replicate_filtering", "None",
"JunctionSeq", "replicate_filtering", "None",
"Leafcutter", "replicate_filtering", "--min_samples_per_intron at `leafcutter_ds.R` default: 5)",
"Leafcutter", "replicate_filtering", "--min_samples_per_group at `leafcutter_ds.R` default: 3",
"Majiq", "replicate_filtering", "--min-experiments at `majiq build` default: 0.5)",
#
"rMATS", "experimental_design", "Case vs control",
"JunctionSeq", "experimental_design", "Case vs control, supports covariates",
"Leafcutter", "experimental_design", "Supports design matrix",
"Majiq", "experimental_design", "Case control, paired-design"
)
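# The gt package is attached above but never used. A minimal rendering sketch
# (an assumption about the intended output, not part of the original script)
# that groups the description rows by method:
descrip %>%
  group_by(method) %>%
  gt() %>%
  tab_header(title = "Differential splicing methods and their parameters")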
/scripts/baltica_parameters_table.R | permissive | dieterich-lab/Baltica | R
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/token.R
\name{create_card_token}
\alias{create_card_token}
\title{Create a card token}
\usage{
create_card_token(number, exp_month, exp_year, cvc, name = NULL)
}
\arguments{
\item{number}{The card number}
\item{exp_month}{Expiry month (two digits)}
\item{exp_year}{Expiry year (two or four digits)}
\item{cvc}{Card security code}
\item{name}{Cardholder's full name}
}
\description{
Store this token instead of card details
}
\details{
Not implemented: address
}
\seealso{
Other tokens: \code{\link{create_bank_token}},
\code{\link{get_token}}
}
\concept{tokens}
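% Hypothetical usage sketch (added for illustration, not generated by roxygen2);
% the card values below are placeholders.
\examples{
\dontrun{
tok <- create_card_token(
  number = "4242424242424242",
  exp_month = "12",
  exp_year = "2030",
  cvc = "123",
  name = "Jane Doe"
)
}
}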
/man/create_card_token.Rd | no_license | fdrennan/stripeR | R