content (large_string, lengths 0-6.46M) | path (large_string, lengths 3-331) | license_type (large_string, 2 values) | repo_name (large_string, lengths 5-125) | language (large_string, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.46M) | extension (large_string, 75 values) | text (string, lengths 0-6.46M) |
---|---|---|---|---|---|---|---|---|---|
#' @param ... Not used. Forces remaining arguments to be specified by name.
|
/man-roxygen/args-dots-barrier.R
|
no_license
|
jpritikin/gwsem
|
R
| false | false | 78 |
r
|
|
# Exercise 2: a basic Shiny app
# Load the `shiny` package
library("shiny")
# Define a new `ui` variable. This variable should be assigned a `fluidPage()` layout
# The `fluidPage()` layout should be passed the following:
ui <- fluidPage(
# A `titlePanel()` layout with the text "Cost Calculator"
titlePanel("Cost Calculator"),
# A `numericInput()` widget with the label "Price (in dollars)"
# It should have a default value of 0 and a minimum value of 0
# Hint: look up the function's arguments in the documentation!
numericInput("Price", label = "Price (in dollars)", value = 0, min = 0),
# A second `numericInput()` widget with the label "Quantity"
# It should have a default value of 1 and a minimum value of 1
numericInput("Quantity", label = "Quantity", value = 1, min = 1),
# The word "Cost", strongly bolded
strong("Cost"),
# A `textOutput()` output of a calculated value labeled `cost`
textOutput("cost")
)
# Define a `server` function (with appropriate arguments)
# This function should perform the following:
server <- function(input, output) {
# Assign a reactive `renderText()` function to the output's `cost` value
# The reactive expression should return the input `price` times the `quantity`
# So it looks nice, paste a "$" in front of it!
output$cost <- renderText({
return(paste0("$", input$Price * input$Quantity))
})
}
# Create a new `shinyApp()` using the above ui and server
shinyApp(ui = ui, server = server)
|
/chapter-19-exercises/exercise-2/app.R
|
permissive
|
info-201a-sp21/exercises-avaghela-1925502
|
R
| false | false | 1,514 |
r
|
|
library(stringr)
## Data prep
zip_name <- "names.zip" # Downloaded from http://catalog.data.gov/dataset/baby-names-from-social-security-card-applications-national-level-data
temp_dir <- tempdir()
unzip(zip_name, exdir=temp_dir)
l <- list.files(temp_dir)
yob_files <- l[grepl("yob.*", l)]
names_per_year <- 20
males <- data.frame()
females <- data.frame()
for(i in yob_files){
cat(paste("Reading file:", i, "\n"))
year <- as.numeric(str_match(i, "yob([0-9]{4}).txt")[,2])
if(year >= 1960){
year_df <- read.csv(file.path(temp_dir, i), header=F)
names(year_df) <- c("name", "gender", "count")
year_df["year"] = year
tmp_males <- subset(year_df, gender=="M")
tmp_males$rank <- seq_len(nrow(tmp_males))
tmp_females <- subset(year_df, gender=="F")
tmp_females$rank <- seq_len(nrow(tmp_females))
#males <- rbind(males, tmp_males)
#females <- rbind(females, tmp_females)
males <- rbind(males, tmp_males[1:names_per_year, ])
females <- rbind(females, tmp_females[1:names_per_year, ])
}
}
full_df <- rbind(females, males)
write.csv(full_df, "shiny_app/all_names.csv")
|
/prep_data.R
|
no_license
|
balladeer/coursera_data_products_shiny_app
|
R
| false | false | 1,174 |
r
|
|
#
# WhoWillLeaveCompany.R, 21 Jun 20
# Data from:
# Who will leave the company?: {A} large-scale industry study of developer turnover by mining monthly work report
# Lingfeng Bao and Zhenchang Xing and Xin Xia and David Lo and Shanping Li
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG developer_employment project_hours project_staffing
source("ESEUR_config.r")
library("plyr")
# Convert a data.frame row to a vector
row2vec=function(df)
{
return(as.vector(t(df)))
}
mk_long=function(df)
{
return(data.frame(id=rep(df$id, 6),
hours=row2vec(subset(df, select=grepl("hour[1-6]", colnames(df)))),
p_person=row2vec(subset(df, select=grepl("person$", colnames(df)))),
p_hour_mean=row2vec(subset(df, select=grepl("_hour_mean", colnames(df)))),
p_hour_sum=row2vec(subset(df, select=grepl("_hour_sum", colnames(df)))),
p_hour_std=row2vec(subset(df, select=grepl("_hour_std", colnames(df)))),
p_person_change=row2vec(subset(df, select=grepl("[1-6]_person_change", colnames(df)))),
project_num=rep(df$project_num, 6),
multi_project=rep(df$mutli_project, 6),
is_leave=rep(df$is_leave, 6))
)
}
plot_sd=function(mon_mean, mon_sd, x_offset)
{
arrows(x_offset, mon_mean,
x_offset, mon_mean-mon_sd, col=pal_col[2],
length=0.1, angle=90, lwd=1.3)
arrows(x_offset, mon_mean,
x_offset, mon_mean+mon_sd, col=pal_col[2],
length=0.1, angle=90, lwd=1.3)
}
month_msd=function(df)
{
return(data.frame(month_mean=c(mean(df$hour1),
mean(df$hour2),
mean(df$hour3),
mean(df$hour4),
mean(df$hour5),
mean(df$hour6)),
month_sd=c(sd(df$hour1),
sd(df$hour2),
sd(df$hour3),
sd(df$hour4),
sd(df$hour5),
sd(df$hour6)))
)
}
mrhrs=read.csv(paste0(ESEUR_dir, "projects/WhoWillLeaveCompany.csv.xz"), as.is=TRUE)
# Remove what look like incorrect entries
mrhrs=subset(mrhrs, hour1 != 0)
# hrs=ddply(mrhrs, .(id), mk_long)
p1=subset(mrhrs, project_num == 1)
p2=subset(mrhrs, project_num == 2)
p1_mon=month_msd(p1)
p2_mon=month_msd(p2)
plot(0, type="n",
xlim=c(1, 6), ylim=c(45, 210),
xlab="Month", ylab="Project work (hours)\n")
x_range=1:6
lines(x_range-0.1, p1_mon$month_mean, col=pal_col[1])
d=sapply(1:6, function(X) plot_sd(p1_mon$month_mean[X], p1_mon$month_sd[X], X-0.1))
lines(x_range+0.1, p2_mon$month_mean, col=pal_col[1])
d=sapply(1:6, function(X) plot_sd(p2_mon$month_mean[X], p2_mon$month_sd[X], X+0.1))
# proj_1=subset(mrhrs, project_num == 1)
#
# l_mod=glm(is_leave=="yes" ~
# # hour1+hour2+hour3+hour4+hour5+hour6+
# hour_sum+
# # hour_mean+
# hour_median+
# # hour_std+
# # hour_max+
# # task_len_sum+task_len_mean+
# # task_len_median+
# # task_len_std+task_len_max+
# task_zero+
# # token_sum+token_mean+
# # token_median+
# # token_std+
# # token_max+
# # flesch+smog+kincaid+
# # coleman_liau+automated_readability_index+
# # dale_chall+difficult_words+
# # linsear_write+gunning_fog+
# mutli_project+
# p1_person+
# I(p1_hour_mean/hour_mean)+
# # p1_hour_sum+p1_hour_std+
# # p1_person_change+
# # p2_person+
# I(p2_hour_mean/hour_mean)+
# # p2_hour_sum+p2_hour_std+
# p2_person_change+
# # p3_person+
# I(p3_hour_mean/hour_mean)+
# # p3_hour_sum+p3_hour_std+
# p3_person_change+
# # p4_person+
# I(p4_hour_mean/hour_mean)+
# # p4_hour_sum+p4_hour_std+
# p4_person_change+
# # p5_person+
# I(p5_hour_mean/hour_mean)+
# # p5_hour_sum+p5_hour_std+
# p5_person_change+
# # p6_person+
# I(p6_hour_mean/hour_mean)+
# # p6_hour_sum+
# p6_hour_std+
# p6_person_change+
# avg_person_change
# # +less_zero+equal_zero
# # +larger_zero
# , data=proj_1, family=binomial)
# summary(l_mod)
#
|
/projects/WhoWillLeaveCompany.R
|
no_license
|
Derek-Jones/ESEUR-code-data
|
R
| false | false | 3,818 |
r
|
|
SetUpAndSaveOptRun <- function(df,
sim_opt,
helpers,
which_model) {
### Sets up for an optimization run, runs i iterations of optimization,
# packages up the results and saves both all opts and best fit opts ###
# Notes: Returns the results to s.R but more typically invoked just for side effect of saving #
# out results #
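# Assumes the dplyr verbs (`group_by()`, `slice()`, `bind_rows()`) and the `%>%` pipe are #
# available (e.g. via library(dplyr)), and that `opt_paths` is defined in the calling environment #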
### Take just the lowest NLL for this subj ###
GetBestFit <- function(x) data.frame(x %>% group_by(ID) %>% slice(which.min(nll)))
# Dynamically set up for the optimization run for this model. (This enables #
# multiple models to be passed in during sequential pass through mutliple models #
model_settings <- SetUpModelSpecStructsAndLabels(which_model, helpers)
model_label <- model_settings[["model_label"]]
param_labels <- model_settings[["param_labels"]]
helpers[["param_labels"]] <- param_labels
# Pass to iter through opt
iters <- 1:helpers[["iterations"]]
all_iters_df <- lapply(iters, function(x) {
# Run one iter of optimization all subjects in the df..
subj_res <- IterateThroughSubjs(df, helpers, sim_opt)
res_w_iter_label <- data.frame(subj_res, "iter"=x)
res_w_iter_label
}) %>% bind_rows() # .. and bind up
# Create df with just the best fit
best_fits <- GetBestFit(all_iters_df)
## Save results
all_iters_path <- opt_paths[["all_iters"]]
best_fit_path <- opt_paths[["best_fit_path"]]
write.csv(best_fits, paste0(best_fit_path, model_label, ".csv"), row.names=FALSE)
write.csv(all_iters_df, paste0(all_iters_path, model_label, ".csv"), row.names=FALSE)
# Return both best fits and all iters in a labeled list
list("best_fits"=best_fits, "all_iters"=all_iters_df)
}
|
/Functions/Model/Optimize/SetUpAndSaveOptRun.R
|
no_license
|
peter-hitchcock/rum_derails_rl
|
R
| false | false | 1,774 |
r
|
|
## Put comments here that give an overall description of what your
## functions do
## A pair of functions that cache the inverse of a matrix
makeCacheMatrix <- function( m = matrix() ) {
## Initialize the inverse property
i <- NULL
## Method to set the matrix
set <- function( matrix ) {
m <<- matrix
i <<- NULL
}
## Method to get the matrix
get <- function() {
## Return the matrix
m
}
## Method to set the inverse of the matrix
setInverse <- function(inverse) {
i <<- inverse
}
## Method to get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
i
}
## Return a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Just return the inverse if its already set
if( !is.null(m) ) {
message("getting cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
## Calculate the inverse of the matrix
m <- solve(data)
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
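## Minimal usage sketch (illustrative only, not part of the assignment; the example
## matrix values below are arbitrary, so it is left commented out):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(cm)   # computes the inverse and caches it
## cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse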
|
/cachematrix.R
|
no_license
|
stkipb1/ProgrammingAssignment2
|
R
| false | false | 1,221 |
r
|
|
# Quiz 1 - Background
# -------------------
# This matrix represents the connected graph from pagerank_the_matrix_formulation (week 1)
m <- matrix( c(0.5,0.5,0,0.5,0,1,0,0.5,0), nrow=3, ncol=3, byrow = TRUE )
r <- c(1/3,1/3,1/3)
# > m
# [,1] [,2] [,3]
# [1,] 0.5 0.5 0
# [2,] 0.5 0.0 1
# [3,] 0.0 0.5 0
iter1 <- m %*% r
# > iter1
# [,1]
# [1,] 0.3333333
# [2,] 0.5000000
# [3,] 0.1666667
r <- iter1
iter2 <- m %*% r
# > iter2
# [,1]
# [1,] 0.4166667
# [2,] 0.3333333
# [3,] 0.2500000
r <- iter2
iter3 <- m %*% r
# [1,] 0.3750000
# [2,] 0.4583333
# [3,] 0.1666667
r <- iter3
iter4 <- m %*% r
# > iter4
# [,1]
# [1,] 0.4166667
# [2,] 0.3541667
# [3,] 0.2291667
r <- iter4
iter5 <- m %*% r
# > iter5
# [,1]
# [1,] 0.3854167
# [2,] 0.4375000
# [3,] 0.1770833
# eventually finishing at 6/15, 6/15,3/15
delta <- 1
r <- c(1/3,1/3,1/3)
while( delta > 0.001 )
{
r2 <- m %*% r
delta <- sum(abs(r2-r))
r <- r2
cat("\ndelta=",delta," ,r=",r)
}
cat("\nDone! final r=",r)
# Done! final r= 0.4000714 0.3998131 0.2001155
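# The convergence loop above is reused for each example below; the same pattern can be
# wrapped in a small helper (an illustrative sketch, not part of the original quiz code):
power_iterate <- function(m, r, tol = 0.001) {
  delta <- 1
  while (delta > tol) {
    r2 <- m %*% r
    delta <- sum(abs(r2 - r))
    r <- r2
  }
  r
}
# e.g. power_iterate(m, c(1/3, 1/3, 1/3)) reproduces the result above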
# Example - Spider Trap
# ---------------------
# simulate spider trap, from pagerank_the_matrix_formulation page 36
# ry = ry / 2 + ra /2
# ra = ry / 2
# rm = ra / 2 + rm # this is the trap: node m's only out-link is to itself, so a random walker who reaches m will be trapped in a loop and can't get out of m
m <- matrix( c(0.5,0.5,0, 0.5,0,0, 0,0.5,1), nrow=3, ncol=3, byrow = TRUE )
delta <- 1
r <- c(1/3,1/3,1/3)
while( delta > 0.001 )
{
r2 <- m %*% r
delta <- sum(abs(r2-r))
r <- r2
cat("\ndelta=",delta," ,r=",r)
}
cat("\nDone! final r=",r)
# Done! final r= 0.001277102 0.0007892922 0.9979336
# As expected, the importance of node m is 1, and the importance of the rest is zero
# As soon as the random walker crosses to m, it will be stuck in an infinite loop
# Example - Why teleport solve the problem (why_teleports_solve_the_problem page 47)
# ----------------------------------------------------------------------------------
# starting from the same matrix as the last example
beta <- 0.8
m <- matrix( c(0.5,0.5,0, 0.5,0,0, 0,0.5,1), nrow=3, ncol=3, byrow = TRUE )
tele <- matrix( rep(1/3,9),nrow=3,ncol=3 )
m.tele <- beta*m + (1-beta)*tele
# > m.tele
# [,1] [,2] [,3]
# [1,] 0.46666667 0.46666667 0.06666667
# [2,] 0.46666667 0.06666667 0.06666667
# [3,] 0.06666667 0.46666667 0.86666667
delta <- 1
r <- c(1/3,1/3,1/3)
while( delta > 0.001 )
{
r2 <- m.tele %*% r
delta <- sum(abs(r2-r))
r <- r2
cat("\ndelta=",delta," ,r=",r)
}
cat("\nDone! final r=",r)
# Done! final r= 0.2125037 0.1517515 0.6357448
# As we can see, the problem is now solved: node m's rank is no longer 1 with the rest at 0 - it's a more balanced solution
# Quiz 1 - Question 1
# -------------------
# Consider three Web pages with the following links:
# ra is dead end
# rb = 0.5a
# rc = 0.5a + b + c
#
# Suppose we compute PageRank with a β of 0.7, and we introduce the additional constraint that the sum of the PageRanks of the three pages must be 3,
# to handle the problem that otherwise any multiple of a solution will also be a solution.
# Compute the PageRanks a, b, and c of the three pages A, B, and C, respectively. Then, identify from the list below, the true statement.
beta <- 0.7
m <- matrix( c(0,0,0, 0.5,0,0, 0.5,1,1), nrow=3, ncol=3, byrow = TRUE )
tele <- matrix( rep(1/3,9),nrow=3,ncol=3 )
m.tele <- beta*m + (1-beta)*tele
delta <- 1
r <- c(1/3,1/3,1/3)
while( delta > 0.0001 )
{
r2 <- m.tele %*% r
delta <- sum(abs(r2-r))
r <- r2
cat("\ndelta=",delta," ,r=",r)
}
cat("\nDone! final r=",r)
# Done! final r= 0.1 0.135 0.765
sum(r) # 1
r3 = 3.0 * r # ... we introduce the additional constraint that the sum of the PageRanks of the three pages must be 3,
#
# [1,] 0.300
# [2,] 0.405
# [3,] 2.295
a <- r3[1]
b <- r3[2]
c <- r3[3]
# check the two options in the multiple choice options
a + c # 2.595
a + b # 0.705
# Quiz 1 - Question 2
# -------------------
# Consider three Web pages with the following links:
# a = c
# b = 0.5a
# c = 0.5a + b
# Suppose we compute PageRank with β=0.85.
# Write the equations for the PageRanks a, b, and c of the three pages A, B, and C, respectively.
# Then, identify in the list below, one of the equations.
beta <- 0.85
m <- matrix( c(0,0,1, 0.5,0,0, 0.5,1,0), nrow=3, ncol=3, byrow = TRUE )
tele <- matrix( rep(1/3,9),nrow=3,ncol=3 )
m.tele <- beta*m + (1-beta)*tele
# > m.tele
# [,1] [,2] [,3]
# [1,] 0.050 0.05 0.90
# [2,] 0.475 0.05 0.05
# [3,] 0.475 0.90 0.05
# which is actually
# a b c
# a 0.050 0.05 0.90
# b 0.475 0.05 0.05
# c 0.475 0.90 0.05
# it's clear that
# b = 0.475a + 0.05b + 0.05c <--> 0.95b = 0.475a + 0.05c
# Quiz 1 - Question 3
# -------------------
# Consider three Web pages with the following links:
# a = c
# b = 0.5a
# c = 0.5a + b
# Assuming no "taxation," compute the PageRanks a, b, and c of the three pages A, B, and C, using iteration,
# starting with the "0th" iteration where all three pages have rank a = b = c = 1.
# Compute as far as the 5th iteration, and also determine what the PageRanks are in the limit.
# Then, identify the true statement from the list below.
m <- matrix( c(0,0,1, 0.5,0,0, 0.5,1,0), nrow=3, ncol=3, byrow = TRUE )
# pagerank in the limit
# ---------------------
delta <- 1
r <- c(1,1,1)
while( delta > 0.001 )
{
r2 <- m %*% r
delta <- sum(abs(r2-r))
r <- r2
cat("\ndelta=",delta," ,r=",r)
}
cat("\nDone! final r=",r)
# Done! final r= 1.200195 0.6000977 1.199707
# Now, over 5 iterations
# ----------------------
r <- c(1,1,1)
for( i in 1:5)
{
r2 <- m %*% r
delta <- sum(abs(r2-r))
r <- r2
cat("\ni=",i," ,delta=",delta," ,r=",r)
}
# i= 1 ,delta= 1 ,r= 1 0.5 1.5
# i= 2 ,delta= 1 ,r= 1.5 0.5 1
# i= 3 ,delta= 1 ,r= 1 0.75 1.25
# i= 4 ,delta= 0.5 ,r= 1.25 0.5 1.25
# i= 5 ,delta= 0.25 ,r= 1.25 0.625 1.125
# It's clear that the right answer is that: After iteration 4, b = 1/2
# Question 4
# ----------
# Suppose our input data to a map-reduce operation consists of integer values (the keys are not important).
# The map function takes an integer i and produces the list of pairs (p,i) such that p is a prime divisor of i. For example, map(12) = [(2,12), (3,12)].
# The reduce function is addition. That is, reduce(p, [i1, i2, ...,ik]) is (p,i1+i2+...+ik).
#
# Compute the output, if the input is the set of integers 15, 21, 24, 30, 49. Then, identify, in the list below, one of the pairs in the output.
library(gmp)
nums <- c(15, 21, 24, 30, 49)
s <- rep(0,10)
for(n in nums)
{
# cat("\nn=",n)
a <- unique(as.numeric(factorize(n)))
for(i in a)
{
cat("\n(",i,",",n,")")
s[i] <- s[i] + n
}
}
# ( 3 , 15 )
# ( 5 , 15 )
# ( 3 , 21 )
# ( 7 , 21 )
# ( 2 , 24 )
# ( 3 , 24 )
# ( 2 , 30 )
# ( 3 , 30 )
# ( 5 , 30 )
# ( 7 , 49 )
s
# [1] 0 54 90 0 45 0 70 0 0 0
# From this vector it's clear that the sum of the factor 7 is 70
|
/pageRank.R
|
no_license
|
ofirsh/MiningMassiveDatasets
|
R
| false | false | 7,004 |
r
|
|
#' Draw_names
#'
#' @param x list of the names you want to draw
#'
#' @import ggplot2
#' @import dplyr
#' @import prenoms
#'
#' @return a graph
#' @export
#'
#' @examples draw_names(c("Louis","Paul"))
draw_names <- function(x) {
data_name <- prenoms
data_name <- data_name %>% filter(name %in% x) %>% group_by(year,name) %>% summarize(total=sum(n))
ggplot(data = data_name,
aes(x = year, y = total, color = name)
) +
geom_line()
}
|
/R/draw_names.R
|
no_license
|
iordanoffff/homework3
|
R
| false | false | 496 |
r
|
|
\name{latFormat}
\alias{latFormat}
\title{Format a latitude}
\description{Format a latitude, using "S" for negative latitude.}
\usage{latFormat(lat, digits=max(6, getOption("digits") - 1))}
\arguments{
\item{lat}{latitude in \eqn{^\circ}{deg}N north of the equator.}
\item{digits}{the number of significant digits to use when printing.}
}
\value{A character string.}
\seealso{\code{\link{lonFormat}} and \code{\link{latlonFormat}}.}
\author{Dan Kelley}
\keyword{misc}
|
/man/latFormat.Rd
|
no_license
|
marie-geissler/oce
|
R
| false | false | 481 |
rd
|
|
##### Written by us
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
# a. set the value of the matrix
# b. get the value of the matrix
inversa <- NULL
set <- function(y) {
x <<- y
inversa <<- NULL
}
get <- function() x
# c. set the value of inverse of the matrix
# d. get the value of inverse of the matrix
setinversa <- function(inversaT) inversa <<- inversaT
getinversa <- function() inversa
list(set=set, get=get, setinversa=setinversa, getinversa=getinversa)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# This function assumes that the matrix is always invertible.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inversa <- x$getinversa()
if(!is.null(inversa)) {
message("...obteniendo datos de cache.")
return(inversa)
}
datos <- x$get()
inversa <- solve(datos)
x$setinversa(inversa)
inversa
}
|
/cachematrix.R
|
no_license
|
xiopepa/ProgrammingAssignment2
|
R
| false | false | 1,526 |
r
|
|
\name{ram}
\alias{ram}
\title{RAM Matrix for a Structural-Equation Model}
\description{
Print the labelled RAM definition matrix for a structural-equation
model fit by \code{sem}.
}
\usage{
ram(object, digits=getOption("digits"), startvalues=FALSE)
}
\arguments{
\item{object}{an object of class \code{sem} returned by the \code{sem} function.}
\item{digits}{number of digits for printed output.}
\item{startvalues}{if \code{TRUE}, start values for parameters are printed;
otherwise, the parameter estimates are printed; the default is \code{FALSE}.}
}
\value{
A data frame containing the labelled RAM definition matrix, which is normally
just printed.
}
\author{John Fox \email{jfox@mcmaster.ca}}
\seealso{\code{\link{sem}}}
\examples{
# ------------- assumes that Duncan, Haller and Portes peer-influences model
# ------------- has been fit and is in sem.dhp
\dontrun{
ram(sem.dhp)
}
}
\keyword{models}
|
/man/ram.Rd
|
no_license
|
cran/sem
|
R
| false | false | 985 |
rd
|
|
#!/usr/bin/env Rscript
# coding=utf-8
# ==============================================================================
# description : processing pipeline for qgraph from Twitter matrix
# date : 2021-01-19
# version : 5 (Guillaume Dumas)
# ==============================================================================
rm(list=ls())
input_file1 <- "../data/twitter_tdm.Rdata"
input_file2 <- "../data/twitter_tdm_group.Rdata"
output_file1 <- "../fig/fig3a.pdf"
output_file2 <- "../data/twitter_qgraph_50.Rdata"
library(tidyverse)
library(qgraph)
library(NbClust)
library(dplyr)
library(tidyr)
library(ggplot2)
# load the pubmed tidy object and the terms order by group after topic modeling
load(input_file1)
load(input_file2)
# definition of the reduced matrix with 50 nodes
nNode <- 50
freq_word <- dplyr::top_n(dplyr::count(tidy_twitter2, word), nNode, n)
matrix_reduite <- matrix_twitter[, freq_word$word]
cor_matrix_reduite <- cor(matrix_reduite)
# choose the best numbers of clusters
rest<-NbClust(cor_matrix_reduite, distance = "euclidean", min.nc=2, max.nc=8,
method = "complete", index = "ch")
rest$All.index
rest$Best.nc
rest$Best.partition
# determine the number of groups
group1_reduit <- which(colnames(cor_matrix_reduite) %in% group[[1]])
group2_reduit <- which(colnames(cor_matrix_reduite) %in% group[[2]])
group3_reduit <- which(colnames(cor_matrix_reduite) %in% group[[3]])
group4_reduit <- which(colnames(cor_matrix_reduite) %in% group[[4]])
group_matrix_reduite <- list(group1_reduit,group2_reduit,group3_reduit,group4_reduit)
# determine the names of the clusters
types_names <- str_to_upper(c("A.Integration and Social Support",
"B.Understanding and Mental Health",
"C.Child Welfare",
"D.Daily Challenges and Difficulties"))
names(group_matrix_reduite) <- types_names
# create a qgraph object
pdf(file = output_file1)
Q <- qgraph(cor_matrix_reduite, layout = "groups", posCol = "black", negCol = "NA",
nodeNames = colnames(cor_matrix_reduite), legend.cex = 0.2,
groups = group_matrix_reduite,
label.scale = TRUE,
label.norm = "OOOOOOOOOO",
curveAll = TRUE,
vsize = 3,
label.cex = 3,
label.prop = 1,
shape = "circle",
labels = colnames(cor_matrix_reduite),
minimum = 0.08,
repulsion = 1.,
legend.mode = "groups",
sampleSize = nrow(cor_matrix_reduite),
alpha = 0.05, # controls alpha inflation from multiple testing via the Bonferroni method.
palette = "pastel",
cut = 0.3)
dev.off()
# save
save(Q, file = output_file2)
rm(list=ls())
input_file1 <- "../data/twitter_tdm.Rdata"
input_file2 <- "../data/twitter_tdm_group.Rdata"
output_file1 <- "../fig/twitter_graph_500.pdf"
output_file2 <- "../data/twitter_qgraph_500.Rdata"
# load the pubmed tidy object and the terms order by group after topic modeling
load(input_file1)
load(input_file2)
# definition of the reduced matrix with 500 nodes
nNode <- 500
freq_word <- dplyr::top_n(dplyr::count(tidy_twitter2, word), nNode, n)
matrix_reduite <- matrix_twitter[, freq_word$word]
cor_matrix_reduite <- cor(matrix_reduite)
# determine the number of groups
group1_reduit <- which(colnames(cor_matrix_reduite) %in% group[[1]])
group2_reduit <- which(colnames(cor_matrix_reduite) %in% group[[2]])
group3_reduit <- which(colnames(cor_matrix_reduite) %in% group[[3]])
group4_reduit <- which(colnames(cor_matrix_reduite) %in% group[[4]])
group_matrix_reduite <- list(group1_reduit,group2_reduit,group3_reduit,group4_reduit)
# create a qgraph object
pdf(file = output_file1, width=50, height=50)
Q <- qgraph(cor_matrix_reduite, layout = "spring", posCol = "black", negCol = "NA",
nodeNames = colnames(cor_matrix_reduite), legend.cex = 0.2,
groups = group_matrix_reduite,
vsize = 1,
curveAll = FALSE,
esize = 1.,
label.cex = 1,
label.scale = TRUE,
label.fill.horizontal = 0.6,
labels = colnames(cor_matrix_reduite),
borders = FALSE,
minimum = 0.15, # 0.15 when using 500 nodes
repulsion = 1, # Increase the distance between nodes to improve the "cluster" visualisation
legend.mode = "groups",
sampleSize = nrow(cor_matrix_reduite),
alpha = 0.05, # Only keep statistically significant correlations, accounting for alpha inflation from multiple testing via the Bonferroni method.
palette = "pastel",
cut = 0.3)
dev.off()
# save
save(Q, file = output_file2)
|
/src/run4_twitter_qgraph.R
|
permissive
|
ChristopheGauld/TwiMed
|
R
| false | false | 4,892 |
r
|
|
L2_distance <- function(a, b, df = 0){
# a and b are k*n matrices, representing n k-dimensional points
# D(i,j) is the L2 distance between a(i) and b(j)
k = dim(a)[1]
n = dim(a)[2]
aa = colSums(a^2)
bb = colSums(b^2)
ab = t(a) %*% b
D = matrix(aa, n, n) + matrix(bb, n, n, byrow = TRUE) - 2 * ab
D = sqrt(D)
return(D)
# example
# a = b = [1,2,3]  (k = 1, n = 3)
# D = [0 1 2
#      1 0 1
#      2 1 0]   (sqrt is applied, so these are L2 distances, not squared distances)
}
|
/R/L2_distance.R
|
no_license
|
emanuel996/maniwarp
|
R
| false | false | 429 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adjBy2ptReg.R
\name{.datSlope}
\alias{.datSlope}
\title{Model linear regression and optional plot}
\usage{
.datSlope(
dat,
typeOfPlot = "sort",
toNinX = FALSE,
plotData = FALSE,
silent = FALSE,
debug = FALSE,
callFrom = NULL
)
}
\arguments{
\item{dat}{(vector or matrix) main input}
\item{typeOfPlot}{(character)}
\item{toNinX}{(logical)}
\item{plotData}{(logical)}
\item{silent}{(logical) suppress messages}
\item{debug}{(logical) display additional messages for debugging}
\item{callFrom}{(character) allow easier tracking of messages produced}
}
\value{
numeric vector with intercept and slope, optional plot
}
\description{
This function allows to model a linear regression and optionally to plot the results
}
\examples{
.datSlope(c(3:6))
}
\seealso{
\code{\link[base]{append}}; \code{\link{lrbind}}
}
|
/man/dot-datSlope.Rd
|
no_license
|
cran/wrMisc
|
R
| false | true | 905 |
rd
|
|
## Programming in R
## Week 4
##
# Given a condition and a rank, this function returns a data frame whose
# rows are the state and hospital of the rank (compared to others in that state)
# This version uses dplyr instead -- the better choice
library(R.utils)
library(dplyr)
library(plyr)
rankall2 <- function(condition, rank = "best"){
# ------ Load data -----
setwd("~/github/datasciencecoursera/assignments/hospital")
dataSet <- "outcome-of-care-measures.csv"
outcome <- read.csv(dataSet)
outcome <- tbl_df(outcome)
# ------ Outcome/condition constants ------
possCondition <- c("heart attack", "heart failure", "pneumonia")
colName <- "Hospital.30.Day.Death..Mortality..Rates.from."
# ----- Error Handles for bad user input -----
if(!any(condition == possCondition)){
stop("invalid outcome")
}
# ----- Query data and output hospital name -----
# Format end part of column name ("heart attack" --> "Heart.Attack")
end <- strsplit(condition, " ")[[1]]
end <- paste(capitalize(end), collapse=".")
colName <- paste(colName, end, sep="")
# Select and rename necessary columns
tbl_out <- select(outcome, hospital = Hospital.Name, state = State,
rate = starts_with(colName))
# Remove rows without ratings and convert the rate to numeric so ranking is not lexicographic
tbl_out <- filter(tbl_out, rate != "Not Available")
tbl_out <- mutate(tbl_out, rate = as.numeric(as.character(rate)))
# Sort by state, then rating, then hospital name
tbl_out <- arrange(tbl_out, state, rate, hospital)
# Get hospital of specified rank from each state
res <- if(rank == "best"){
ddply(tbl_out, .(state), function(x){ x[1, ]})
}else if(rank == "worst"){
ddply(tbl_out, .(state), function(x){ x[nrow(x), ]})
}else{
ddply(tbl_out, .(state), function(x){ x[rank, ]})
}
# Prepare Final Form
states <- select(tbl_out, state) %>% distinct(state)
final <- cbind(select(res, hospital), states)
rownames(final) <- final$state
final
}
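## Hypothetical example calls (assume the outcome CSV is present in the working directory):
## rankall2("heart attack", 20)    # 20th-ranked hospital in each state
## rankall2("pneumonia", "worst")  # worst-ranked hospital in each state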
|
/assignments/hospital/rankall2.R
|
no_license
|
FigNewtons/datasciencecoursera
|
R
| false | false | 2,036 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selector-specificity.R
\name{selector_specificity}
\alias{selector_specificity}
\title{Calculate selector specificity}
\usage{
selector_specificity(sel)
}
\arguments{
\item{sel}{selector}
}
\value{
numeric value
}
\description{
Algorithm defined here: \url{https://drafts.csswg.org/selectors-3/#specificity}
}
\details{
A selector's specificity is calculated as follows:
\itemize{
\item{count the number of ID selectors in the selector (= a)}
\item{count the number of class selectors, attributes selectors, and
pseudo-classes in the selector (= b)}
\item{count the number of type selectors and pseudo-elements in the selector (= c)}
\item{ignore the universal selector}
}
Selectors inside the negation pseudo-class are counted like any other,
but the negation itself does not count as a pseudo-class.
Concatenating the three numbers a-b-c (in a number system with a large base)
gives the specificity.
Since we only really have base-10 numbers (rather than numbers in
arbitrarily large bases), just use 2 digits for each of the counts. Hopefully it
is really unlikely that any of the counts exceeds 99!
}
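% A standalone illustration of the concatenation rule described above (not the package's
% implementation): with two base-10 digits reserved for each count,
%   specificity <- function(a, b, c) a * 100^2 + b * 100 + c
%   specificity(0, 1, 1)  # e.g. "p.note": 0 IDs, 1 class, 1 type selector -> 101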
|
/man/selector_specificity.Rd
|
permissive
|
coolbutuseless/cssparser
|
R
| false | true | 1,204 |
rd
|
|
#' plot of drainage network during creation process
#' @keywords internal
plot_drainage_network <- function(coords, outfalls, conduits_sf, main, col_arrow){
# initiate plot
graphics::plot(coords$X, coords$Y, xlab = "X", ylab = "Y", main = main)
coords_O <- as.data.frame(sf::st_coordinates(outfalls))
coords_O$Name <- outfalls$Name
graphics::points(coords_O$X, coords_O$Y, col = "green", pch = 19)
for(i in 1:nrow(conduits_sf)){
p0 <- conduits_sf$FromNode[i]
p1 <- conduits_sf$ToNode[i]
# plot arrow:
if(all(list(p0,p1) %in% coords$Name)){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = col_arrow, length = 0.1)
}
if(all(list(p0,p1) %in% coords_O$Name)){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = col_arrow, length = 0.1)
}
if(p0 %in% coords$Name & p1 %in% coords_O$Name){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = col_arrow, length = 0.1)
}
if(p0 %in% coords_O$Name & p1 %in% coords$Name){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = col_arrow, length = 0.1)
}
}
}
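# Example call (sketch; assumes sf objects `junctions`, `outfalls` and `conduits_sf` exist,
# mirroring how the helper functions below prepare their inputs):
# coords <- as.data.frame(sf::st_coordinates(junctions))
# coords$Name <- junctions$Name
# plot_drainage_network(coords, outfalls, conduits_sf, main = "drainage network", col_arrow = "black")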
#' plot tagged junctions
#' @keywords internal
plot_tagged_junctions <- function(junctions, outfalls, conduits_sf, main){
coords <- as.data.frame(sf::st_coordinates(junctions))
coords$Name <- junctions$Name
coords$tag <- junctions$tag
dic <- data.frame(tag = c("hill", "sink", "outfall_artificial", "loop", "normal", "start", "crossing"),
pch = c(17,15,19,19,1,19,19), col = c("red", "red", "red", "lightblue", "black", "black", "blue"),
cex = c(1.3,1.3,1.3,1,1,1,1),
stringsAsFactors = F)
graphics::par(mar=c(5.1, 4.1, 4.1, 9), xpd=TRUE)
plot_drainage_network(coords, outfalls, conduits_sf, main = main, col_arrow = "black")
for(tag in unique(coords$tag)){
graphics::points(coords$X[coords$tag == tag], coords$Y[coords$tag == tag], pch = dic$pch[dic$tag == tag],
col = dic$col[dic$tag == tag], cex = dic$cex[dic$tag == tag])
}
graphics::legend("topright",inset = c(-0.4,0), pch = c(19, dic$pch), col = c("green", dic$col), c("outfalls", dic$tag))
}
#' plot catchment of outfalls
#' @keywords internal
plot_path_to_outfall <- function(junctions_u, outfalls, oid, conduits_u, main, col_arrow){
# preparation default plot:
coords <- as.data.frame(sf::st_coordinates(junctions_u))
coords$Name <- junctions_u$Name
# default plot:
plot_drainage_network(coords, outfalls, conduits_u, main = main, col_arrow = "black")
# colour selected outfall:
coords_O <- as.data.frame(sf::st_coordinates(outfalls[oid,]))
coords_O$Name <- outfalls$Name[oid]
graphics::points(coords_O$X, coords_O$Y, col = "red", pch = 19)
# paths to outfall
list_O <- track_outfall_to_start_connectivity(outfalls = outfalls[oid,], junctions = junctions_u, conduits_sf = conduits_u)
# add coloured arrows to show catchment of a defined outfall
for(t in 1:length(list_O)){
for(j in 1:length(list_O[[t]])){
to_O <- c(outfalls$Name[oid], names(list_O[[t]][[j]]))
for(i in 1:(length(to_O)-1)){
p0 <- to_O[i+1]
p1 <- to_O[i]
# plot arrows:
if(all(list(p0,p1) %in% coords$Name)){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = col_arrow, length = 0.1)
}
if(all(list(p0,p1) %in% coords_O$Name)){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = col_arrow, length = 0.1)
}
if(p0 %in% coords$Name & p1 %in% coords_O$Name){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = col_arrow, length = 0.1)
}
if(p0 %in% coords_O$Name & p1 %in% coords$Name){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = col_arrow, length = 0.1)
}
}
}
}
}
#' cross section plot when adjusting pipe slopes and junction depths
#' @keywords internal
plot_cross_section_junctions <- function(junctions, conduits, connections){
cross_section_data <- data.frame(Name = connections, Length = 0, Offset = 0, stringsAsFactors = F)
for(i in 1:length(connections)){
name <- connections[i]
cross_section_data$Top[i] <- junctions$Top[junctions$Name == name]
cross_section_data$Bottom[i] <- junctions$Bottom[junctions$Name == name]
cross_section_data$max_bottom[i] <- junctions$max_bottom[junctions$Name == name]
cross_section_data$min_bottom[i] <- junctions$min_bottom[junctions$Name == name]
}
for(i in 1:(length(connections)-1)){
length_part <- conduits$Length[conduits$FromNode == connections[i+1] & conduits$ToNode == connections[i]]
cross_section_data$Length[i+1] <- cross_section_data$Length[i] + length_part
cross_section_data$Offset[i] <- conduits$OutOffset[conduits$FromNode == connections[i+1] & conduits$ToNode == connections[i]]
}
cross_section_data$Bottom_pipe <- cross_section_data$Bottom + cross_section_data$Offset
graphics::plot(cross_section_data$Length, cross_section_data$Bottom_pipe,
ylim = c(min(cross_section_data$Bottom)-5, max(cross_section_data$Top)+1),
xlab = "Length [m]",
ylab = "Height [m a.s.l.]",
type = "b", pch = 19)
graphics::text(cross_section_data$Length, cross_section_data$Top + 0.3, cross_section_data$Name, cex = 0.5, col = "grey")
graphics::rect(xleft = cross_section_data$Length - 1, ybottom = cross_section_data$Bottom,
xright = cross_section_data$Length + 1, ytop = cross_section_data$Top,
col = "lightgrey", border = NA)
graphics::points(cross_section_data$Length, cross_section_data$Top, type = "b", pch = 19)
graphics::points(cross_section_data$Length, cross_section_data$Bottom, pch = 19, col = "grey")
graphics::points(cross_section_data$Length, cross_section_data$Bottom_pipe, type = "b", pch = 19)
graphics::points(cross_section_data$Length, cross_section_data$max_bottom, col = "red", pch = 4)
graphics::points(cross_section_data$Length, cross_section_data$min_bottom, col = "red", pch = 4)
graphics::legend("bottomright", c("top and bottom ", "possible range of bottom", "offset"), pch = c(19, 4, 19), col = c("black", "red", "grey"))
}
#' plot slopes
#' @keywords internal
plot_slopes <- function(junctions, outfalls, conduits_sf, min_slope, max_slope, main, roundValue = 3){
conduits_sf$Slope <- round(conduits_sf$Slope, roundValue)
conduits_sf$col_arrow <- "black"
conduits_sf$col_arrow[conduits_sf$Slope < min_slope] <- "red"
conduits_sf$col_arrow[conduits_sf$Slope > max_slope] <- "purple"
  # coordinates from junctions:
coords <- as.data.frame(sf::st_coordinates(junctions))
coords$Name <- junctions$Name
# initiate plot:
graphics::par(mar=c(5.1, 4.1, 4.1, 9), xpd=TRUE)
graphics::plot(coords$X, coords$Y, xlab = "X", ylab = "Y", main = main)
coords_O <- as.data.frame(sf::st_coordinates(outfalls))
coords_O$Name <- outfalls$Name
graphics::points(coords_O$X, coords_O$Y, col = "black", pch = 19)
for(i in 1:nrow(conduits_sf)){
p0 <- conduits_sf$FromNode[i]
p1 <- conduits_sf$ToNode[i]
# plot arrow:
if(all(list(p0,p1) %in% coords$Name)){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = conduits_sf$col_arrow[i], length = 0.1)
}
if(all(list(p0,p1) %in% coords_O$Name)){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = conduits_sf$col_arrow[i], length = 0.1)
}
if(p0 %in% coords$Name & p1 %in% coords_O$Name){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = conduits_sf$col_arrow[i], length = 0.1)
}
if(p0 %in% coords_O$Name & p1 %in% coords$Name){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = conduits_sf$col_arrow[i], length = 0.1)
}
}
# add plot legend:
graphics::legend("topright", inset = c(-0.4,0), legend = c("slope < min_slope", "slope > max_slope"), col = c("red", "purple"), lwd = 1)
}
#' plot conduits that are not connected to an outfall
#' @keywords internal
plot_disconnected_conduits <- function(junctions, conduits, outfalls, col_arrow, main = "conduits disconnected from outfalls"){
# preparation default plot:
coords <- as.data.frame(sf::st_coordinates(junctions))
coords$Name <- junctions$Name
# extract outfall coordinates:
coords_O <- as.data.frame(sf::st_coordinates(outfalls))
coords_O$Name <- outfalls$Name
# default plot:
plot_drainage_network(coords, outfalls, conduits, main = main, col_arrow = "black")
# paths to outfall
network_paths <- track_outfall_to_start_connectivity(outfalls, junctions, conduits)
all_conduits_connected <- NULL
for(j in 1:length(network_paths)){
for(i in 1:length(network_paths[[j]])){
names_junc <- names(network_paths[[j]][[i]])
conduits_connected <- NULL
for(n in 2:length(names_junc)){
to <- names_junc[n-1]
from <- names_junc[n]
c_name <- conduits$Name[conduits$FromNode == from & conduits$ToNode == to]
if(length(c_name) > 1){
message(paste("junctions are connected more than once:", paste(c_name, collapse = " ")))
c_name <- c_name[1]
}
conduits_connected[n-1] <- c_name
}
if(is.null(all_conduits_connected)){
all_conduits_connected <- conduits_connected
}else{
all_conduits_connected <- c(all_conduits_connected, conduits_connected)
}
}
}
conduits_disconnected <- conduits$Name[!(conduits$Name %in% unique(all_conduits_connected) | conduits$ToNode %in% outfalls$Name)]
if(length(conduits_disconnected)>0){
# add coloured arrows to mark disconnected conduits
for(i in 1:length(conduits_disconnected)){
p0 <- conduits$FromNode[conduits$Name == conduits_disconnected[i]]
p1 <- conduits$ToNode[conduits$Name == conduits_disconnected[i]]
# plot arrows:
if(all(list(p0,p1) %in% coords$Name)){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = col_arrow, length = 0.1)
}
if(all(list(p0,p1) %in% coords_O$Name)){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = col_arrow, length = 0.1)
}
if(p0 %in% coords$Name & p1 %in% coords_O$Name){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = col_arrow, length = 0.1)
}
if(p0 %in% coords_O$Name & p1 %in% coords$Name){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = col_arrow, length = 0.1)
}
}
}
}
#' plot short cuts
#' @keywords internal
plot_short_cuts <- function(junctions, conduits, outfalls, col_arrow){
# preparation default plot:
coords <- as.data.frame(sf::st_coordinates(junctions))
coords$Name <- junctions$Name
# extract outfall coordinates:
coords_O <- as.data.frame(sf::st_coordinates(outfalls))
coords_O$Name <- outfalls$Name
# default plot:
plot_drainage_network(coords, outfalls, conduits, main = "conduits added to drain sinks", col_arrow = "black")
# added short cuts:
added_conduits <- conduits$Name[grep("n", conduits$Name)]
  if(length(added_conduits) > 0){
# add coloured arrows to mark disconnected conduits
for(i in 1:length(added_conduits)){
p0 <- conduits$FromNode[conduits$Name == added_conduits[i]]
p1 <- conduits$ToNode[conduits$Name == added_conduits[i]]
# plot arrows:
if(all(list(p0,p1) %in% coords$Name)){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = col_arrow, length = 0.1)
}
if(all(list(p0,p1) %in% coords_O$Name)){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = col_arrow, length = 0.1)
}
if(p0 %in% coords$Name & p1 %in% coords_O$Name){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = col_arrow, length = 0.1)
}
if(p0 %in% coords_O$Name & p1 %in% coords$Name){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = col_arrow, length = 0.1)
}
}
}
}
#' plot pipe diameters
#' @keywords internal
plot_drainage_network_pipe_diameters <- function(junctions, outfalls, conduits_sf, main, DN_m){
# scale arrow line thickness according to diameter:
dic_scaling <- data.frame(value = DN_m[order(DN_m, decreasing = T)], stringsAsFactors = F)
dic_scaling$col_arrow <- viridis::viridis(nrow(dic_scaling))
dic_scaling$lwd <- round(seq(10, 1, length.out = nrow(dic_scaling)),1) # maximum line width 10 ?
# initialize columns:
conduits_sf$lwd <- NA
conduits_sf$col_arrow <- NA
# add colors to conduits:
for(i in 1:nrow(conduits_sf)){
conduits_sf$lwd[i] <- dic_scaling$lwd[as.character(dic_scaling$value) == as.character(conduits_sf$Geom1[i])]
conduits_sf$col_arrow[i] <- dic_scaling$col_arrow[as.character(dic_scaling$value) == as.character(conduits_sf$Geom1[i])]
}
  # coordinates from junctions:
coords <- as.data.frame(sf::st_coordinates(junctions))
coords$Name <- junctions$Name
# initiate plot:
graphics::par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE)
graphics::plot(coords$X, coords$Y, xlab = "X", ylab = "Y", main = main)
coords_O <- as.data.frame(sf::st_coordinates(outfalls))
coords_O$Name <- outfalls$Name
graphics::points(coords_O$X, coords_O$Y, col = "black", pch = 19)
for(i in 1:nrow(conduits_sf)){
p0 <- conduits_sf$FromNode[i]
p1 <- conduits_sf$ToNode[i]
# plot arrow:
if(all(list(p0,p1) %in% coords$Name)){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = conduits_sf$col_arrow[i], length = 0.1, lwd = conduits_sf$lwd[i])
}
if(all(list(p0,p1) %in% coords_O$Name)){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = conduits_sf$col_arrow[i], length = 0.1, lwd = conduits_sf$lwd[i])
}
if(p0 %in% coords$Name & p1 %in% coords_O$Name){
graphics::arrows(x0 = coords$X[coords$Name == p0], y0 = coords$Y[coords$Name == p0],
x1 = coords_O$X[coords_O$Name == p1], y1 = coords_O$Y[coords_O$Name == p1],
col = conduits_sf$col_arrow[i], length = 0.1, lwd = conduits_sf$lwd[i])
}
if(p0 %in% coords_O$Name & p1 %in% coords$Name){
graphics::arrows(x0 = coords_O$X[coords_O$Name == p0], y0 = coords_O$Y[coords_O$Name == p0],
x1 = coords$X[coords$Name == p1], y1 = coords$Y[coords$Name == p1],
col = conduits_sf$col_arrow[i], length = 0.1, lwd = conduits_sf$lwd[i])
}
}
# add plot legend:
content_legend <- dic_scaling[dic_scaling$value %in% unique(conduits_sf$Geom1),]
graphics::legend("topright", inset = c(-0.3,0), legend = content_legend$value, lwd = content_legend$lwd, col = content_legend$col_arrow)
}
|
/R/plot_drainage_network_workflow.R
|
no_license
|
rarygit/urbandrain
|
R
| false | false | 17,549 |
r
|
library(tidyverse)
options(stringsAsFactors = FALSE)
rawdata <- read_csv(file = 'C:/Users/Banhlam/Documents/Data/tennis_MatchChartingProject-master/charting-m-matches.csv')
summary(rawdata)
rawdata %>% filter(is.na(`Best of`)) %>% View()
lapply(rawdata, function(x) sum(is.na(x)))
cleandata <- janitor::clean_names(rawdata) %>%
filter(!is.na(charted_by))
lapply(cleandata, function(x) sum(is.na(x)))
View(head(cleandata))
cleandata %>% group_by(match_id) %>% tally() %>% summary()
# No matches charted by multiple people
|
/code/scratch.R
|
no_license
|
leilacodes/tennis
|
R
| false | false | 528 |
r
|
parallel <- function() {
  require(doParallel)
  cl <- makeCluster(detectCores())
  registerDoParallel(cl)
}
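# Usage sketch (hypothetical; %dopar% requires library(foreach)):
# parallel()
# library(foreach)
# foreach(i = 1:4, .combine = c) %dopar% sqrt(i)
# Note: `cl` is local to parallel(), so the cluster cannot be stopped with stopCluster()
# afterwards; returning it (e.g. invisible(cl)) would make that possible.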
|
/R-code/parallel.R
|
no_license
|
jiayouchrislin/R-code
|
R
| false | false | 104 |
r
|
spark_dependencies <- function(spark_version, scala_version, ...) {
sparklyr::spark_dependency(
jars = c(
system.file(
sprintf("java/sparkhail-%s-%s.jar", spark_version, scala_version),
package = "sparkhail"
),
system.file(
sprintf("java/hail-all-spark.jar"),
package = "sparkhail"
)
),
packages = c(
)
)
}
#' @import sparklyr
.onLoad <- function(libname, pkgname) {
sparklyr::register_extension(pkgname)
}
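# Usage sketch (assumes a local Spark installation is available):
# library(sparklyr)
# library(sparkhail)
# sc <- spark_connect(master = "local")  # the registered extension adds the Hail jars to the session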
|
/R/dependencies.R
|
no_license
|
javierluraschi/sparkhail
|
R
| false | false | 486 |
r
|
## Creates a special "matrix" object that can cache inverse of the input matrix
## x is the input matrix
## inverseMat is the inverse of matrix x
makeCacheMatrix <- function(x = matrix()) {
inverseMat <- matrix(0,0,0)
# setMatrix sets the new matrix and resets the inverse matrix as the inverse needs to be calculated for new data
setMatrix<- function(y){
x <<- y
inverseMat <<- matrix(0,0,0)
}
# getMatrix gets the value of matrix.
getMatrix<-function()x
# setInverse sets the values of inverse matrix
setInverse<- function(inverseMatrix){
inverseMat <<- inverseMatrix
}
# getInverse gets the values of inverse matrix
getInverse<-function()inverseMat
list(setMatrix = setMatrix, getMatrix = getMatrix, setInverse = setInverse, getInverse = getInverse)
}
## Takes the special "matrix" object created by makeCacheMatrix() as input and, if the inverse
## has already been calculated, retrieves the inverse from the cache. Otherwise, it calculates the inverse.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        invMat <- x$getInverse()
        ## check if inverse has already been calculated.
        ## if inverse has been calculated for the given matrix, then fetch the inverse from cache
        if (!(nrow(invMat)==0 && ncol(invMat)==0)){
          print("retrieving cached inverse matrix")
          return(invMat)
        }
        else{
          # Calculate inverse of the matrix
          calInvMat <- solve(x$getMatrix(),...)
          x$setInverse(calInvMat)
          calInvMat
        }
}
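## Example usage (sketch):
## matX <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(matX)   # computes the inverse via solve() and stores it in the cache
## cacheSolve(matX)   # second call retrieves the cached inverse and prints the message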
|
/cachematrix.R
|
no_license
|
datacurls/ProgrammingAssignment2
|
R
| false | false | 1,502 |
r
|
# Script to check general trends in bleaching drivers
# Authors: Steph and Emily
# Date: December 2017
# Clean environment
rm(list=ls())
# Load packages
#source(file="R code/brt.functions.R")
library(here)
library(PerformanceAnalytics)
library(MASS)
library(tree)
library(gbm)
library(rpart)
library(dismo)
# Set directory
wd <- "/Users/stephdagata/Documents/GitHub/2016-Bleaching-Analysis/"
setwd(wd)
# Load data
rm(BVAR)
BVAR <- read.csv("Data/masterfile - analysis variables_20Dec2017.csv",header=T,sep=";",dec=".",check.names = F, stringsAsFactors = T)
head(BVAR); dim(BVAR)
# Summary bleaching data
### There are 226 sites (rows)
### Location is a random effect, as a grouping variables of sites.
### The response variable is "bleach_intensity".
### And then the predictor variables are all the columns from X (longitude) through CA1 (community axis).
### They're mostly independent and not collinear.
summary(BVAR)
# Check for normality of bleaching intensity
hist(BVAR$bleach_intensity)
# quick correlogram
BVAR.num <- BVAR[,sapply(BVAR,is.numeric) | sapply(BVAR,is.integer)] # Subset only numeric columns from BVAR
chart.Correlation(BVAR.num, histogram=TRUE, pch=16) # numeric variables only
# quick boxplot for qualitative/quantitative variables
BVAR_Q <- BVAR[,sapply(BVAR,is.factor)]
for (i in seq_along(BVAR_Q)){
  for (j in seq_along(BVAR.num)){
    boxplot(BVAR.num[, j] ~ BVAR_Q[, i],
            xlab = names(BVAR_Q)[i], ylab = names(BVAR.num)[j])
  }
}
### Simple BRT model
# The response variable we are interested in
rm(Resp)
Resp <- which(colnames(BVAR) == "bleach_intensity"); Resp
# Predictors
rm(Pred)
Pred=c("management","habitat","X","Y","average.dhw.90days","dip.Statistic.sst",
"bimodality.coefficient","bimodality.ratio","avg.high.spell.duration.90days",
"avg.high.spell.rise.90days","avg.spell.peak","sd.spell.peak",
"avg.low.spell.duration.90dys","dhd_mmmplus1","CA1")
# removed due to correlation:
## "max.dhw.90days"
## "max.high.spell.duration.90days"
## "n.h.spell.events_90Days"
# Ncol = the vector containing the column number of each variable use to predict S
rm(Ncol); Ncol<- which(colnames(BVAR) %in% Pred); Ncol
# check
length(Pred) == length(Ncol)
# 1st Step: Built the full brt model and check for goodness of fit
rm(BLEACH_brt)
BLEACH_brt <- gbm.step(gbm.y=Resp,gbm.x=Ncol,data=BVAR,tree.complexity=10,learning.rate=0.005,bag.fraction=0.7,n.trees=50,family="gaussian",n.folds=10,max.trees=10000)
# output of the model:
ls(BLEACH_brt)
# Contributions of each variable
BLEACH_brt$contributions
# summary brt
summary(BLEACH_brt)
abline(v=5,col="red")
# marginal distributions
windows()
plot.gbm(BLEACH_brt,5)
# CV discrimination statistic (AUC; mainly meaningful for presence/absence models)
BLEACH_brt$cv.statistics$discrimination.mean
# plot observed vs residuals
plot(BVAR$bleach_intensity,BLEACH_brt$fit,
xlim=c(0,100),ylim=c(0,100),
xlab="Observed Bleaching Int.",ylab="Predicted Bleaching Int.",pch=16)
abline(a=0,b=1)
# linear model between predicted vs fitted
lm_BlEACH <- lm(BLEACH_brt$fit~BVAR$bleach_intensity)
abline(lm_BlEACH$coefficients[1],lm_BlEACH$coefficients[2],lty=2,col="red")
# plot residuals vs observed data
plot(BVAR$bleach_intensity,BLEACH_brt$residuals,
xlim=c(0,100),ylim=c(-10,10),
xlab="Observed Bleaching Int",ylab="Residuals",pch=16)
abline(h=0)
hist(BLEACH_brt$residuals)
# plot residuals vs fitted data
plot(BLEACH_brt$fit,BLEACH_brt$residuals,
xlim=c(0,100),
xlab="Fitted Biomass",ylab="Residuals",pch=16)
abline(h=0)
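# Quick goodness-of-fit summary (sketch): squared correlation between observed and fitted values
# cor(BVAR$bleach_intensity, BLEACH_brt$fit)^2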
# 2. Look for interactions
rm(BLEACH_int)
BLEACH_int <- gbm.interactions(BLEACH_brt)
BLEACH_int$rank.list
# plot the 3D plot between the most interacting variables
gbm.perspec(BLEACH_brt, 1, 11,z.range=c(0,60))
gbm.perspec(BLEACH_brt, 9, 10,z.range=c(0,60)) # sd spell and avg spell
gbm.perspec(BLEACH_brt, 9, 5,z.range=c(0,60)) # bimod. coef and avg spell
gbm.perspec(BLEACH_brt, 9, 3,z.range=c(0,60)) # bimod. coef and avg spell
gbm.perspec(BLEACH_brt, 8, 3,z.range=c(0,60)) # bimod. coef and avg spell
#etc.
# 3. Simplify the model
BLEACH_brt_simpl <- gbm.simplify(BLEACH_brt,n.drops=10,n.folds=10) # 7 last variables to remove
# New model with only significant explanatory variables
rm(BLEACH_brt_SIMPL)
BLEACH_brt_SIMPL <- gbm.step(gbm.y=Resp,gbm.x=BLEACH_brt_simpl$pred.list[[7]],data=BVAR,tree.complexity=10,learning.rate=0.005,bag.fraction=0.7,n.trees=50,family="gaussian",n.folds=10,max.trees=10000)
# Contributions
BLEACH_brt_SIMPL$contributions
# Interactions
rm(BLEACH_int_SIMPL)
BLEACH_int_SIMPL <- gbm.interactions(BLEACH_brt_SIMPL)
BLEACH_int_SIMPL$rank.list
gbm.perspec(BLEACH_brt_SIMPL, 5, 4,z.range=c(0,60)) # sd spell and avg spell
gbm.perspec(BLEACH_brt_SIMPL, 6, 3,z.range=c(0,60)) # bimod. coef and avg spell
|
/code/03-BRT_Global_Bleaching.R
|
no_license
|
WCS-Marine/2016-bleaching-patterns
|
R
| false | false | 4,972 |
r
|
#' @import zoo
#' @import reshape2
#' @import graphics
#' @import stats
#' @import utils
NULL
|
/R/import.R
|
no_license
|
cran/zoocat
|
R
| false | false | 108 |
r
|
testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81541609400951e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615832113-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 421 |
r
|
library(dplyr)
r = data.frame(
t = c(0.0833, 0.25, 1, 2, 3, 5, 10),
R = 0.01 * c(5.56, 5.64, 5.68, 5.64, 5.62, 5.66, 5.76)
)
r %>% mutate(B_0_T = 1 * exp(-R * t)) %>% mutate(phi_t = log(B_0_T) / -t) %>% select(t, B_0_T, phi_t) -> data_for_modelling
opt=function(x)
{
a=x[1]
b=x[2]
c=x[3]
rho=x[4]
sum(
(
a +
(b + c) * (1 - exp(-rho * data_for_modelling$t)) / (rho * data_for_modelling$t) -
c * exp(-rho * data_for_modelling$t) -
data_for_modelling$B_0_T
)
^ 2)
}
answer=nlm(opt, c(1,1,1,0.5))$estimate
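# Sanity check (sketch): recompute the fitted discount factors from the estimated parameters
# a <- answer[1]; b <- answer[2]; c_ <- answer[3]; rho <- answer[4]
# fitted_B <- a + (b + c_) * (1 - exp(-rho * data_for_modelling$t)) / (rho * data_for_modelling$t) -
#   c_ * exp(-rho * data_for_modelling$t)
# cbind(observed = data_for_modelling$B_0_T, fitted = fitted_B)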
|
/cw12.R
|
no_license
|
devidjons/credit_risk
|
R
| false | false | 596 |
r
|
library(shiny)
library(DT)
library(DBI)
library(RSQLite)
library(shinyjs)
library(shinycssloaders)
library(lubridate)
library(shinyFeedback)
library(dplyr)
library(dbplyr)
#library(RMySQL)
db_config <- config::get()$db
# How it could work in future
#if(Sys.getenv("DB_TECH") == 'sqlite'){
# # Create database connection to SQLite DB
# conn <- dbConnect(
# RSQLite::SQLite(),
# dbname = db_config$dbname
# )
#}else{
# # Create database connection to MySql DB
# conn <- dbConnect(
# RMySQL::MySQL(),
# dbname = db_config$databaseName,
# host = db_config$host,
# port = db_config$port,
# user = db_config$user,
# password = db_config$password
# )
#}
# Create database connection to SQLite DB
conn <- dbConnect(
RSQLite::SQLite(),
dbname = db_config$dbname
)
# Stop database connection when application stops
shiny::onStop(function() {
dbDisconnect(conn)
})
# Turn off scientific notation
options(scipen = 999)
# Set spinner type (for loading)
options(spinner.type = 8)
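# Usage sketch (hypothetical table/column names): downstream server code can query the shared
# connection lazily via dbplyr, e.g.
# dplyr::tbl(conn, "my_table") %>% dplyr::filter(id == 1) %>% dplyr::collect()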
|
/global.R
|
no_license
|
TrisKast/crudR-test
|
R
| false | false | 1,009 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dm_data.R
\docType{data}
\name{dm}
\alias{dm}
\title{DM data for Analysis}
\format{
A \code{tibble} with 33 columns
\describe{
\item{studyid}{ Study Id }
\item{domain}{ Domain}
\item{usubjid}{ Unique subjid}
\item{aeseq}{ AE Seq}
\item{aespid}{ aespid}
\item{aeterm}{ AE Unique TERM }
\item{aellt}{ AE Low level term}
\item{aelltcd}{ AE Low level Term CD}
}
}
\usage{
dm
}
\description{
A simple \code{tibble}
}
\keyword{datasets}
|
/man/dm.Rd
|
permissive
|
Princeton2021/clinical_fd
|
R
| false | true | 508 |
rd
|
# ui of statistical app for Proefcentrum Hoogstraten ----------------------
ui <- fluidPage(
tags$head(
tags$link(rel = "shortcut icon", type = "image/png", href = "logopch_black.png"),
tags$title("PCH - Statistical application")
),
navbarPage(
title = tags$div(img(src="image.png", height = '30px', width = '40px'),"Statistical application"),
id = "mainnavbarpage",
collapsible = TRUE,
fluid = TRUE,
footer = p("(C) 2020 - Dieter Baets (", a("Github",href = "http://github.com/dbaets"), ") for ",
a("Proefcentrum Hoogstraten", href = "http://www.proefcentrum.be"),"."),
inverse = TRUE,
windowTitle = "Statistical application Proefcentrum Hoogstraten",
tabPanel("Input", page_input, value = "page_input"),
tabPanel("Statistics", page_statistics, value = "page_statistics"),
tabPanel("Output", page_output, value = "page_output"),
tabPanel("About", page_about, value = "page_about")
)
)
|
/statisticalapp/UI/ui.R
|
permissive
|
dbaets/PCH_StatisticalApplication
|
R
| false | false | 966 |
r
|
testlist <- list(latLongs = structure(c(1.68342448282407e-307, 9.3633527093844e-97, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 8.28904605845809e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(9L, 7L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result)
|
/MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612726939-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 557 |
r
|
#'Functional principal component analysis of a dfrr fit
#'
#'\code{fpca()} returns estimations of the smooth principal components/eigen-functions
#' and the corresponding eigen-values of the residual function in the \code{dfrr} model.
#' The result is a named list containing the vector of eigen-values and the matrix of Fourier coefficients. See Details.
#'
#'@details The Fourier coefficients which are reported are
#' based on a set of basis functions which can be determined by \code{\link{basis}(dfrr_fit)}.
#' Thus the evaluation of a principal component/eigen-function on the set of time points specified by the vector \code{time}
#' equals \code{fpca(dfrr_fit)\%*\%t(\link[fda]{eval.basis}(time,\link{basis}(dfrr_fit)))}.
#'
#' Note that the unstandardized estimations are not identifiable, so it is recommended to
#' extract and report the standardized estimations.
#'
#'
#'
#'@return
#' \code{fpca(dfrr_fit)} returns a list containing the following components:
#' \item{values}{a vector containing the eigen-values of the standardized/unstandardized covariance operator of
#'              the residual function term in the \code{dfrr} model,
#'               sorted in decreasing order.}
#' \item{vectors}{a matrix whose columns contain the Fourier coefficients of the
#'               principal components/eigen-functions of the standardized/unstandardized covariance operator of
#'               the residual function term in the \code{dfrr} model,
#'                sorted based on the corresponding eigen-values.}
#'
#'
#'@inheritParams fitted.dfrr
#'@param standardized,unstandardized a \code{boolean} indicating whether standardized/unstandardized principal components/eigen-functions are reported.
#' Only standardized principal components/eigen-functions are identifiable, thus the argument defaults to \code{standardized=TRUE}.
#'
#'
#'@seealso \code{\link{plot.fpca.dfrr}}
#'
#'@examples
#' set.seed(2000)
#' \donttest{N<-50;M<-24}
#' \dontshow{N<-30;M<-12}
#' X<-rnorm(N,mean=0)
#' time<-seq(0,1,length.out=M)
#' Y<-simulate_simple_dfrr(beta0=function(t){cos(pi*t+pi)},
#' beta1=function(t){2*t},
#' X=X,time=time)
#' \donttest{dfrr_fit<-dfrr(Y~X,yind=time)}
#' \dontshow{dfrr_fit<-dfrr(Y~X,yind=time,T_E=1)}
#' fpcs<-fpca(dfrr_fit)
#'\donttest{plot(fpcs,plot.eigen.functions=TRUE,plot.contour=TRUE,plot.3dsurface = TRUE)}
#'
#'@export
fpca <-
function(object,standardized=NULL,unstandardized=!standardized){
dfrr_fit<-object
standardized<-paired.args.check(standardized,
ifelse(missing(unstandardized),NA,unstandardized),
"Please specify 'standardized' or 'unstandardizedd' coefficients must be reported",
TRUE)
if(standardized)
res<-list(values=dfrr_fit$nus_std,vectors=t(dfrr_fit$Theta_std))
else
res<-list(values=dfrr_fit$nus,vectors=t(dfrr_fit$Theta))
class(res)<-"fpca.dfrr"
attr(res,"standardized")<-standardized
attr(res,"dfrr_fit")<-dfrr_fit
res
}
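# Evaluation sketch (mirrors the formula given in @details, using the `vectors` element;
# assumes `dfrr_fit` and `time` as in the examples above):
# fpcs <- fpca(dfrr_fit)
# eigen_funcs <- t(fpcs$vectors) %*% t(fda::eval.basis(time, basis(dfrr_fit)))
# each row of eigen_funcs is one principal component evaluated at the points in `time`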
|
/R/fpca.R
|
no_license
|
asgari-fatemeh/dfrr
|
R
| false | false | 3,042 |
r
|
#'Functional principal component analysis of a dfrr fit
#'
#'\code{fpca()} returns estimations of the smooth principal components/eigen-functions
#' and the corresponding eigen-values of the residual function in the \code{dfrr} model.
#' The result is a named list containing the vector of eigen-values and the matrix of Fourier coefficients. See Details.
#'
#'@details The reported Fourier coefficients are
#' based on the set of basis functions that can be determined by \code{\link{basis}(dfrr_fit)}.
#' Thus the evaluation of a principal component/eigen-function on the set of time points specified by the vector \code{time}
#' equals \code{fpca(dfrr_fit)\%*\%t(\link[fda]{eval.basis}(time,\link{basis}(dfrr_fit)))}.
#'
#' Note that the unstandardized estimates are not identifiable, so it is recommended to
#' extract and report the standardized estimates.
#'
#'
#'
#'@return
#' \code{fpca(dfrr_fit)} returns a list containing the following components:
#' \item{values}{a vector containing the eigen-values of the standardized/unstandardized covariance operator of
#' the residual function term in \code{dfrr} model,
#' sorted in decreasing order.}
#' \item{vectors}{a matrix whose columns contain the Fourier coefficients of the
#'  principal components/eigen-functions of the standardized/unstandardized covariance operator of
#' the residual function term in \code{dfrr} model,
#' sorted based on the corresponding eigen-values.}
#'
#'
#'@inheritParams fitted.dfrr
#'@param standardized,unstandardized a \code{boolean} indicating whether standardized/unstandardized principal components/eigen-functions are reported.
#'  Only standardized principal components/eigen-functions are identifiable, thus the argument defaults to \code{standardized=TRUE}.
#'
#'
#'@seealso \code{\link{plot.fpca.dfrr}}
#'
#'@examples
#' set.seed(2000)
#' \donttest{N<-50;M<-24}
#' \dontshow{N<-30;M<-12}
#' X<-rnorm(N,mean=0)
#' time<-seq(0,1,length.out=M)
#' Y<-simulate_simple_dfrr(beta0=function(t){cos(pi*t+pi)},
#' beta1=function(t){2*t},
#' X=X,time=time)
#' \donttest{dfrr_fit<-dfrr(Y~X,yind=time)}
#' \dontshow{dfrr_fit<-dfrr(Y~X,yind=time,T_E=1)}
#' fpcs<-fpca(dfrr_fit)
#'\donttest{plot(fpcs,plot.eigen.functions=TRUE,plot.contour=TRUE,plot.3dsurface = TRUE)}
#'
#'@export
fpca <-
function(object,standardized=NULL,unstandardized=!standardized){
dfrr_fit<-object
standardized<-paired.args.check(standardized,
ifelse(missing(unstandardized),NA,unstandardized),
"Please specify 'standardized' or 'unstandardizedd' coefficients must be reported",
TRUE)
if(standardized)
res<-list(values=dfrr_fit$nus_std,vectors=t(dfrr_fit$Theta_std))
else
res<-list(values=dfrr_fit$nus,vectors=t(dfrr_fit$Theta))
class(res)<-"fpca.dfrr"
attr(res,"standardized")<-standardized
attr(res,"dfrr_fit")<-dfrr_fit
res
}
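# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original source file. It assumes a
# fitted 'dfrr' object named dfrr_fit (as produced in the @examples block
# above) and that the 'fda' package is installed; depending on the orientation
# of the $vectors matrix, a transpose may be needed in the last step.
fpcs <- fpca(dfrr_fit)
# proportion of variance attributed to each eigen-function
round(fpcs$values / sum(fpcs$values), 3)
# evaluate the eigen-functions on a time grid, following the @details formula
time_grid <- seq(0, 1, length.out = 100)
pc_curves <- fpcs$vectors %*% t(fda::eval.basis(time_grid, basis(dfrr_fit)))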
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{scatterplot}
\alias{scatterplot}
\title{Clustering Scatter Plots}
\usage{
scatterplot(
d,
clusters,
centers = NULL,
labels = FALSE,
ellipses = FALSE,
legend = c("auto1", "auto2"),
...
)
}
\arguments{
\item{d}{The dataset (\code{matrix} or \code{data.frame}).}
\item{clusters}{Cluster labels of the training set (\code{vector} or \code{factor}).}
\item{centers}{Coordinates of the cluster centers.}
\item{labels}{Indicates whether or not labels (row names) should be shown on the plot.}
\item{ellipses}{Indicates whether or not ellipses should be drawn around clusters.}
\item{legend}{Indicates where the legend is placed on the graphics.}
\item{...}{Other parameters.}
}
\description{
Produce a scatter plot for clustering results. If the dataset has more than two dimensions, the scatter plot will show the first two PCA axes.
}
\examples{
require (datasets)
data (iris)
km = KMEANS (iris [, -5], k = 3)
scatterplot (iris [, -5], km$cluster)
}
|
/man/scatterplot.Rd
|
no_license
|
cran/fdm2id
|
R
| false | true | 1,066 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{scatterplot}
\alias{scatterplot}
\title{Clustering Scatter Plots}
\usage{
scatterplot(
d,
clusters,
centers = NULL,
labels = FALSE,
ellipses = FALSE,
legend = c("auto1", "auto2"),
...
)
}
\arguments{
\item{d}{The dataset (\code{matrix} or \code{data.frame}).}
\item{clusters}{Cluster labels of the training set (\code{vector} or \code{factor}).}
\item{centers}{Coordinates of the cluster centers.}
\item{labels}{Indicates whether or not labels (row names) should be shown on the plot.}
\item{ellipses}{Indicates whether or not ellipses should be drawn around clusters.}
\item{legend}{Indicates where the legend is placed on the graphics.}
\item{...}{Other parameters.}
}
\description{
Produce a scatter plot for clustering results. If the dataset has more than two dimensions, the scatter plot will show the first two PCA axes.
}
\examples{
require (datasets)
data (iris)
km = KMEANS (iris [, -5], k = 3)
scatterplot (iris [, -5], km$cluster)
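# Illustrative sketch (not in the original .Rd): also draw cluster centers and
# ellipses; it assumes the KMEANS() result exposes a 'centers' component.
scatterplot (iris [, -5], km$cluster, centers = km$centers, ellipses = TRUE)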
}
|
#' Affine_grid
#'
#' Generates a 2D or 3D flow field (sampling grid), given a batch of
#' affine matrices `theta`.
#'
#' @section Note:
#'
#' This function is often used in conjunction with [`grid_sample`]
#' to build `Spatial Transformer Networks`_ .
#'
#'
#' @param theta (Tensor) input batch of affine matrices with shape
#' (\eqn{N \times 2 \times 3}) for 2D or (\eqn{N \times 3 \times 4}) for 3D
#' @param size (torch.Size) the target output image size. (\eqn{N \times C \times H \times W}
#' for 2D or \eqn{N \times C \times D \times H \times W} for 3D)
#' Example: torch.Size((32, 3, 24, 24))
#' @param align_corners (bool, optional) if ``True``, consider ``-1`` and ``1``
#' to refer to the centers of the corner pixels rather than the image corners.
#' Refer to [`grid_sample`] for a more complete description. A grid generated by
#' [`affine_grid`] should be passed to [`grid_sample`] with the same setting for
#' this option. Default: ``False``
#'
#' @export
nnf_affine_grid <- function(theta, size, align_corners = FALSE) {
torch_affine_grid_generator(theta, size, align_corners)
}
#' Grid_sample
#'
#' Given an `input` and a flow-field `grid`, computes the
#' ``output`` using `input` values and pixel locations from `grid`.
#'
#' Currently, only spatial (4-D) and volumetric (5-D) `input` are
#' supported.
#'
#' In the spatial (4-D) case, for `input` with shape
#' \eqn{(N, C, H_\text{in}, W_\text{in})} and `grid` with shape
#' \eqn{(N, H_\text{out}, W_\text{out}, 2)}, the output will have shape
#' \eqn{(N, C, H_\text{out}, W_\text{out})}.
#'
#' For each output location ``output[n, :, h, w]``, the size-2 vector
#' ``grid[n, h, w]`` specifies `input` pixel locations ``x`` and ``y``,
#' which are used to interpolate the output value ``output[n, :, h, w]``.
#' In the case of 5D inputs, ``grid[n, d, h, w]`` specifies the
#' ``x``, ``y``, ``z`` pixel locations for interpolating
#' ``output[n, :, d, h, w]``. `mode` argument specifies ``nearest`` or
#' ``bilinear`` interpolation method to sample the input pixels.
#'
#' `grid` specifies the sampling pixel locations normalized by the
#' `input` spatial dimensions. Therefore, it should have most values in
#' the range of ``[-1, 1]``. For example, values ``x = -1, y = -1`` is the
#' left-top pixel of `input`, and values ``x = 1, y = 1`` is the
#' right-bottom pixel of `input`.
#'
#' If `grid` has values outside the range of ``[-1, 1]``, the corresponding
#' outputs are handled as defined by `padding_mode`. Options are
#'
#' * ``padding_mode="zeros"``: use ``0`` for out-of-bound grid locations,
#' * ``padding_mode="border"``: use border values for out-of-bound grid locations,
#' * ``padding_mode="reflection"``: use values at locations reflected by
#' the border for out-of-bound grid locations. For location far away
#' from the border, it will keep being reflected until becoming in bound,
#' e.g., (normalized) pixel location ``x = -3.5`` reflects by border ``-1``
#' and becomes ``x' = 1.5``, then reflects by border ``1`` and becomes
#' ``x'' = -0.5``.
#'
#' @section Note:
#'
#' This function is often used in conjunction with [`affine_grid`]
#' to build `Spatial Transformer Networks`_ .
#'
#' @param input (Tensor) input of shape \eqn{(N, C, H_\text{in}, W_\text{in})} (4-D case) or \eqn{(N, C, D_\text{in}, H_\text{in}, W_\text{in})} (5-D case)
#' @param grid (Tensor) flow-field of shape \eqn{(N, H_\text{out}, W_\text{out}, 2)} (4-D case) or \eqn{(N, D_\text{out}, H_\text{out}, W_\text{out}, 3)} (5-D case)
#' @param mode (str) interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
#' Default: ``'bilinear'``
#' @param padding_mode (str) padding mode for outside grid values ``'zeros'`` | ``'border'``
#' | ``'reflection'``. Default: ``'zeros'``
#' @param align_corners (bool, optional) Geometrically, we consider the pixels of the
#' input as squares rather than points. If set to ``True``, the extrema (``-1`` and
#' ``1``) are considered as referring to the center points of the input's corner pixels.
#' If set to ``False``, they are instead considered as referring to the corner
#' points of the input's corner pixels, making the sampling more resolution
#' agnostic. This option parallels the ``align_corners`` option in [`interpolate`], and
#' so whichever option is used here should also be used there to resize the input
#' image before grid sampling. Default: ``False``
#'
#' @export
nnf_grid_sample <- function(input, grid, mode = c("bilinear", "nearest"),
padding_mode = c("zeros", "border", "reflection"),
align_corners = FALSE) {
  # use the first option when the full default vector is passed through unchanged
  if (length(mode) > 1) mode <- mode[1]
  if (length(padding_mode) > 1) padding_mode <- padding_mode[1]
  if (mode == "bilinear")
mode_enum <- 0
else if (mode == "nearest")
mode_enum <- 1
else
value_error("Unknown mode name '{mode}'. Supported modes are 'bilinear'",
"and 'nearest'.")
if (padding_mode == "zeros")
padding_mode_enum <- 0
else if (padding_mode == "border")
padding_mode_enum <- 1
else if (padding_mode == "reflection")
padding_mode_enum <- 2
else
value_error("Unknown padding mode name '{padding_mode}'. Supported modes are",
"'zeros', 'border' and 'reflection'.")
torch_grid_sampler(input = input, grid = grid, interpolation_mode = mode_enum,
padding_mode = padding_mode_enum, align_corners = align_corners)
}
|
/R/nnf-vision.R
|
permissive
|
qykong/torch
|
R
| false | false | 5,451 |
r
|
#' Affine_grid
#'
#' Generates a 2D or 3D flow field (sampling grid), given a batch of
#' affine matrices `theta`.
#'
#' @section Note:
#'
#' This function is often used in conjunction with [`grid_sample`]
#' to build `Spatial Transformer Networks`_ .
#'
#'
#' @param theta (Tensor) input batch of affine matrices with shape
#' (\eqn{N \times 2 \times 3}) for 2D or (\eqn{N \times 3 \times 4}) for 3D
#' @param size (torch.Size) the target output image size. (\eqn{N \times C \times H \times W}
#' for 2D or \eqn{N \times C \times D \times H \times W} for 3D)
#' Example: torch.Size((32, 3, 24, 24))
#' @param align_corners (bool, optional) if ``True``, consider ``-1`` and ``1``
#' to refer to the centers of the corner pixels rather than the image corners.
#' Refer to [`grid_sample`] for a more complete description. A grid generated by
#' [`affine_grid`] should be passed to [`grid_sample`] with the same setting for
#' this option. Default: ``False``
#'
#' @export
nnf_affine_grid <- function(theta, size, align_corners = FALSE) {
torch_affine_grid_generator(theta, size, align_corners)
}
#' Grid_sample
#'
#' Given an `input` and a flow-field `grid`, computes the
#' ``output`` using `input` values and pixel locations from `grid`.
#'
#' Currently, only spatial (4-D) and volumetric (5-D) `input` are
#' supported.
#'
#' In the spatial (4-D) case, for `input` with shape
#' \eqn{(N, C, H_\text{in}, W_\text{in})} and `grid` with shape
#' \eqn{(N, H_\text{out}, W_\text{out}, 2)}, the output will have shape
#' \eqn{(N, C, H_\text{out}, W_\text{out})}.
#'
#' For each output location ``output[n, :, h, w]``, the size-2 vector
#' ``grid[n, h, w]`` specifies `input` pixel locations ``x`` and ``y``,
#' which are used to interpolate the output value ``output[n, :, h, w]``.
#' In the case of 5D inputs, ``grid[n, d, h, w]`` specifies the
#' ``x``, ``y``, ``z`` pixel locations for interpolating
#' ``output[n, :, d, h, w]``. `mode` argument specifies ``nearest`` or
#' ``bilinear`` interpolation method to sample the input pixels.
#'
#' `grid` specifies the sampling pixel locations normalized by the
#' `input` spatial dimensions. Therefore, it should have most values in
#' the range of ``[-1, 1]``. For example, values ``x = -1, y = -1`` is the
#' left-top pixel of `input`, and values ``x = 1, y = 1`` is the
#' right-bottom pixel of `input`.
#'
#' If `grid` has values outside the range of ``[-1, 1]``, the corresponding
#' outputs are handled as defined by `padding_mode`. Options are
#'
#' * ``padding_mode="zeros"``: use ``0`` for out-of-bound grid locations,
#' * ``padding_mode="border"``: use border values for out-of-bound grid locations,
#' * ``padding_mode="reflection"``: use values at locations reflected by
#' the border for out-of-bound grid locations. For location far away
#' from the border, it will keep being reflected until becoming in bound,
#' e.g., (normalized) pixel location ``x = -3.5`` reflects by border ``-1``
#' and becomes ``x' = 1.5``, then reflects by border ``1`` and becomes
#' ``x'' = -0.5``.
#'
#' @section Note:
#'
#' This function is often used in conjunction with [`affine_grid`]
#' to build `Spatial Transformer Networks`_ .
#'
#' @param input (Tensor) input of shape \eqn{(N, C, H_\text{in}, W_\text{in})} (4-D case) or \eqn{(N, C, D_\text{in}, H_\text{in}, W_\text{in})} (5-D case)
#' @param grid (Tensor) flow-field of shape \eqn{(N, H_\text{out}, W_\text{out}, 2)} (4-D case) or \eqn{(N, D_\text{out}, H_\text{out}, W_\text{out}, 3)} (5-D case)
#' @param mode (str) interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
#' Default: ``'bilinear'``
#' @param padding_mode (str) padding mode for outside grid values ``'zeros'`` | ``'border'``
#' | ``'reflection'``. Default: ``'zeros'``
#' @param align_corners (bool, optional) Geometrically, we consider the pixels of the
#' input as squares rather than points. If set to ``True``, the extrema (``-1`` and
#' ``1``) are considered as referring to the center points of the input's corner pixels.
#' If set to ``False``, they are instead considered as referring to the corner
#' points of the input's corner pixels, making the sampling more resolution
#' agnostic. This option parallels the ``align_corners`` option in [`interpolate`], and
#' so whichever option is used here should also be used there to resize the input
#' image before grid sampling. Default: ``False``
#'
#' @export
nnf_grid_sample <- function(input, grid, mode = c("bilinear", "nearest"),
padding_mode = c("zeros", "border", "reflection"),
align_corners = FALSE) {
  # use the first option when the full default vector is passed through unchanged
  if (length(mode) > 1) mode <- mode[1]
  if (length(padding_mode) > 1) padding_mode <- padding_mode[1]
  if (mode == "bilinear")
mode_enum <- 0
else if (mode == "nearest")
mode_enum <- 1
else
value_error("Unknown mode name '{mode}'. Supported modes are 'bilinear'",
"and 'nearest'.")
if (padding_mode == "zeros")
padding_mode_enum <- 0
else if (padding_mode == "border")
padding_mode_enum <- 1
else if (padding_mode == "reflection")
padding_mode_enum <- 2
else
value_error("Unknown padding mode name '{padding_mode}'. Supported modes are",
"'zeros', 'border' and 'reflection'.")
torch_grid_sampler(input = input, grid = grid, interpolation_mode = mode_enum,
padding_mode = padding_mode_enum, align_corners = align_corners)
}
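# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original source file, assuming the
# 'torch' R package is installed. An identity affine matrix should reproduce
# the input image up to floating point error.
library(torch)
img   <- torch_rand(1, 1, 8, 8)                       # N x C x H x W input
theta <- torch_tensor(array(c(1, 0, 0, 0, 1, 0), dim = c(1, 2, 3)),
                      dtype = torch_float())          # identity affine, N x 2 x 3
grid  <- nnf_affine_grid(theta, size = c(1, 1, 8, 8), align_corners = FALSE)
out   <- nnf_grid_sample(img, grid, mode = "bilinear",
                         padding_mode = "zeros", align_corners = FALSE)
max(abs(as_array(out - img)))                         # should be close to 0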
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data-ch5.R
\docType{data}
\name{BB}
\alias{BB}
\title{Blood Alcohol Contents}
\format{A data frame with 16 rows and 2 variables:
\describe{
\item{Beers}{number of cans of beer consumed}
\item{BAC}{blood alcohol content, in grams per decaliter}
}}
\source{
In progress
}
\usage{
BB
}
\description{
A group of n=16 student volunteers at The Ohio State University drank a
randomly assigned number of cans of beer. Thirty minutes later, a police
officer measured their BAC.
}
\keyword{datasets}
|
/man/BB.Rd
|
no_license
|
kferris10/catstats
|
R
| false | false | 580 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data-ch5.R
\docType{data}
\name{BB}
\alias{BB}
\title{Blood Alcohol Contents}
\format{A data frame with 16 rows and 2 variables:
\describe{
\item{Beers}{number of cans of beer consumed}
\item{BAC}{blood alcohol content, in grams per decaliter}
}}
\source{
In progress
}
\usage{
BB
}
\description{
A group of n=16 student volunteers at The Ohio State University drank a
randomly assigned number of cans of beer. Thirty minutes later, a police
officer measured their BAC.
}
\keyword{datasets}
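% Illustrative example added as a sketch (not in the original .Rd); it assumes
% the package is attached so that 'BB' is available.
\examples{
plot(BAC ~ Beers, data = BB)
summary(lm(BAC ~ Beers, data = BB))
}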
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assess_convergence.R
\name{assess_convergence}
\alias{assess_convergence}
\title{Trace Plots from Metropolis-Hastings Algorithm}
\usage{
assess_convergence(model_fit, parameter = "alpha", items = NULL,
assessors = NULL, ...)
}
\arguments{
\item{model_fit}{A fitted model object of class \code{BayesMallows} returned from
\code{\link{compute_mallows}} or an object of class \code{BayesMallowsMixtures}
returned from \code{\link{compute_mallows_mixtures}}.}
\item{parameter}{Character string specifying which parameter to plot. Available
options are \code{"alpha"}, \code{"rho"}, \code{"Rtilde"},
\code{"cluster_probs"}, or \code{"theta"}.}
\item{items}{The items to study in the diagnostic plot for \code{rho}. Either
a vector of item names, corresponding to \code{model_fit$items} or a
vector of indices. If NULL, five items are selected randomly. Only used when
\code{parameter = "rho"} or \code{parameter = "Rtilde"}.}
\item{assessors}{Numeric vector specifying the assessors to study in
the diagnostic plot for \code{"Rtilde"}.}
\item{...}{Additional arguments passed on to \code{cowplot::plot_grid} when
\code{model_fit} is of class \code{BayesMallowsMixtures} or to \code{ggplot2::facet_wrap}.}
}
\description{
\code{assess_convergence} provides trace plots for the parameters of the
Mallows Rank model, in order to study the convergence of the Metropolis-Hastings
algorithm.
}
\seealso{
\code{\link{compute_mallows}}, \code{\link{plot.BayesMallows}}
}
|
/man/assess_convergence.Rd
|
no_license
|
lengzi/BayesMallows
|
R
| false | true | 1,542 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assess_convergence.R
\name{assess_convergence}
\alias{assess_convergence}
\title{Trace Plots from Metropolis-Hastings Algorithm}
\usage{
assess_convergence(model_fit, parameter = "alpha", items = NULL,
assessors = NULL, ...)
}
\arguments{
\item{model_fit}{A fitted model object of class \code{BayesMallows} returned from
\code{\link{compute_mallows}} or an object of class \code{BayesMallowsMixtures}
returned from \code{\link{compute_mallows_mixtures}}.}
\item{parameter}{Character string specifying which parameter to plot. Available
options are \code{"alpha"}, \code{"rho"}, \code{"Rtilde"},
\code{"cluster_probs"}, or \code{"theta"}.}
\item{items}{The items to study in the diagnostic plot for \code{rho}. Either
a vector of item names, corresponding to \code{model_fit$items} or a
vector of indices. If NULL, five items are selected randomly. Only used when
\code{parameter = "rho"} or \code{parameter = "Rtilde"}.}
\item{assessors}{Numeric vector specifying the assessors to study in
the diagnostic plot for \code{"Rtilde"}.}
\item{...}{Additional arguments passed on to \code{cowplot::plot_grid} when
\code{model_fit} is of class \code{BayesMallowsMixtures} or to \code{ggplot2::facet_wrap}.}
}
\description{
\code{assess_convergence} provides trace plots for the parameters of the
Mallows Rank model, in order to study the convergence of the Metropolis-Hastings
algorithm.
}
\seealso{
\code{\link{compute_mallows}}, \code{\link{plot.BayesMallows}}
}
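% Illustrative example added as a sketch (not in the original .Rd); it assumes
% the 'potato_visual' example data shipped with BayesMallows and a
% compute_mallows() interface that accepts an 'nmc' argument.
\examples{
model_fit <- compute_mallows(potato_visual, nmc = 1000)
assess_convergence(model_fit, parameter = "alpha")
}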
|
load("data/gaussian.RData")
d$kmeans <- factor(d$kmeans)
library(ggplot2)
library(directlabels)
p <- ggplot(,aes(x,y))+
geom_path(aes(group=row),data=res,col="grey")+
geom_point(aes(colour=kmeans),data=d)+
geom_dl(aes(label=kmeans,colour=kmeans),data=d,method="smart.grid")+
guides(colour="none")+
theme_bw()
##pdf("figure-clusterpath-gaussian.pdf")
library(tikzDevice);options(tikzDocumentDeclaration="\\documentclass[11pt]{memoir}",tikzMetricsDictionary="tikzMetrics")
png("figure-clusterpath-gaussian.png",h=6,w=6,units="in",res=600)
print(p)
dev.off()
|
/figure-clusterpath-gaussian.R
|
no_license
|
tdhock/breakpointError-orig
|
R
| false | false | 567 |
r
|
load("data/gaussian.RData")
d$kmeans <- factor(d$kmeans)
library(ggplot2)
library(directlabels)
p <- ggplot(,aes(x,y))+
geom_path(aes(group=row),data=res,col="grey")+
geom_point(aes(colour=kmeans),data=d)+
geom_dl(aes(label=kmeans,colour=kmeans),data=d,method="smart.grid")+
guides(colour="none")+
theme_bw()
##pdf("figure-clusterpath-gaussian.pdf")
library(tikzDevice);options(tikzDocumentDeclaration="\\documentclass[11pt]{memoir}",tikzMetricsDictionary="tikzMetrics")
png("figure-clusterpath-gaussian.png",h=6,w=6,units="in",res=600)
print(p)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fertilizer.R
\name{NPK_TargetYield_forOutput}
\alias{NPK_TargetYield_forOutput}
\title{Using the output of the function "NPK_TargetYield_forinput" and a data frame of longitude and latitude for the intended NPK input,
this function calculates the yield that can be obtained for the intended NPK rate.}
\usage{
NPK_TargetYield_forOutput(NutrUse_soilNPK, N_rate, P_rate, K_rate)
}
\arguments{
\item{NutrUse_soilNPK}{Update description here}
\item{NPKdata:}{needs to be provided}
}
\value{
}
\description{
Using the output of the function "NPK_TargetYield_forinput" and a data frame of longitude and latitude for the intended NPK input,
this function calculates the yield that can be obtained for the intended NPK rate.
}
\author{
Meklit
}
|
/man/NPK_TargetYield_forOutput.Rd
|
no_license
|
masgeek/fertilizer-estimate
|
R
| false | true | 772 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fertilizer.R
\name{NPK_TargetYield_forOutput}
\alias{NPK_TargetYield_forOutput}
\title{Using the output of the function "NPK_TargetYield_forinput" and a data frame of longitude and latitude for the intended NPK input,
this function calculates the yield that can be obtained for the intended NPK rate.}
\usage{
NPK_TargetYield_forOutput(NutrUse_soilNPK, N_rate, P_rate, K_rate)
}
\arguments{
\item{NutrUse_soilNPK}{Update description here}
\item{NPKdata:}{needs to be provided}
}
\value{
}
\description{
Using the output of the function "NPK_TargetYield_forinput" and a data frame of longitude and latitude for the intended NPK input,
this function calculates the yield that can be obtained for the intended NPK rate.
}
\author{
Meklit
}
|
## ---- eval=FALSE, include=TRUE-------------------------------------------------------
## "Protocol:
## 
##  1. Daniel Felipe Villa Rengifo
## 
##  2. Language: R
## 
##  3. Topic: LINEAR AND QUADRATIC DISCRIMINANT ANALYSIS IN R [Part 3]
## 
##  4. Sources:
##      https://www.r-bloggers.com/2018/11/linear-quadratic-and-regularized-discriminant-analysis/"
## ------------------------------------------------------------------------------------
# Load the data sets from the previous repl:
Auto <- read.csv(file = "Auto.csv")
Auto2 <- read.csv(file = "Auto2.csv")
## ------------------------------------------------------------------------------------
# Save the OUTPUTS:
sink("OUTPUTS.txt", split = T)
# Covariance matrices:
# load the library that contains the "boxM()" function:
#install.packages("biotools")
library(biotools)
# Run Box's M test on the covariance matrices
boxM(data = Auto2[, 2:4], grouping = Auto2[, 6])
# Results:
print("Box's M test shows significant evidence that the covariance matrix is not constant across all groups")
print("which would make QDA more appropriate than LDA, although in this case, given the lack of multivariate normality in the data, the test may have been affected by it.")
## ------------------------------------------------------------------------------------
# Set a seed:
set.seed(1)
# Draw a pseudo-random sample of size (number of rows * 0.8)
entrenamiento <- sample(x = nrow(Auto), size = nrow(Auto)*0.8, replace = FALSE)
# Training data subset
Auto.train <- Auto[entrenamiento,]
# Test data subset
Auto.test <- Auto[-entrenamiento,]
# Check that the observations of the two subsets add up to the original data set
print("# Check that the observations of the two subsets add up to the original data set")
print(nrow(Auto.train)) #1.
print(nrow(Auto.test)) #2.
# nrow(Auto.train) + nrow(Auto.test) == nrow(Auto)
print("nrow(Auto.train) + nrow(Auto.test) == nrow(Auto)")
print("OUTPUT:")
print(nrow(Auto.train) + nrow(Auto.test) == nrow(Auto))
## ------------------------------------------------------------------------------------
# Load the package:
#install.packages("MASS")
library(MASS)
# LDA model on the training data
print("# LDA model on the training data")
modelo.lda <- lda(formula = mpg01 ~ cylinders + displacement + horsepower + weight, data = Auto.train)
print(modelo.lda)
# Result:
"The model automatically computes the prior probabilities"
"(PI_0 = 0.514, PI_1 = 0.485)"
" and the mean of each predictor within each class, used by the model as estimates of µk."
"The coefficients provide the combination of the predictors"
"(- 0.4183 cylinders - 0.0017 displacement + 0.0028 - 0.0009) used to generate the linear discriminants for each of the training observations."
# QDA model on the training data
print("# QDA model on the training data")
modelo.qda <- qda(formula = mpg01 ~ cylinders + displacement + horsepower + weight, data = Auto.train)
print(modelo.qda)
# Explanation:
"Unlike LDA, QDA does not contain the coefficients of the linear discriminants, since the QDA classifier is based on a quadratic, not a linear, function of the predictors."
## ------------------------------------------------------------------------------------
# Load the "caret" package
#install.packages("caret")
library(caret)
# Only the LDA model is fitted with this package:
print("# Only the LDA model is fitted with this package:")
modelo.lda.caret <- train(as.factor(mpg01) ~ cylinders + displacement + horsepower + weight, method ='lda',data=Auto.train)
print(modelo.lda.caret)
sink()
|
/main.r
|
no_license
|
dvillaunal/Dia_91_15junio
|
R
| false | false | 3,782 |
r
|
## ---- eval=FALSE, include=TRUE-------------------------------------------------------
## "Protocol:
## 
##  1. Daniel Felipe Villa Rengifo
## 
##  2. Language: R
## 
##  3. Topic: LINEAR AND QUADRATIC DISCRIMINANT ANALYSIS IN R [Part 3]
## 
##  4. Sources:
##      https://www.r-bloggers.com/2018/11/linear-quadratic-and-regularized-discriminant-analysis/"
## ------------------------------------------------------------------------------------
# Load the data sets from the previous repl:
Auto <- read.csv(file = "Auto.csv")
Auto2 <- read.csv(file = "Auto2.csv")
## ------------------------------------------------------------------------------------
# Save the OUTPUTS:
sink("OUTPUTS.txt", split = T)
# Covariance matrices:
# load the library that contains the "boxM()" function:
#install.packages("biotools")
library(biotools)
# Run Box's M test on the covariance matrices
boxM(data = Auto2[, 2:4], grouping = Auto2[, 6])
# Results:
print("Box's M test shows significant evidence that the covariance matrix is not constant across all groups")
print("which would make QDA more appropriate than LDA, although in this case, given the lack of multivariate normality in the data, the test may have been affected by it.")
## ------------------------------------------------------------------------------------
# Set a seed:
set.seed(1)
# Draw a pseudo-random sample of size (number of rows * 0.8)
entrenamiento <- sample(x = nrow(Auto), size = nrow(Auto)*0.8, replace = FALSE)
# Training data subset
Auto.train <- Auto[entrenamiento,]
# Test data subset
Auto.test <- Auto[-entrenamiento,]
# Check that the observations of the two subsets add up to the original data set
print("# Check that the observations of the two subsets add up to the original data set")
print(nrow(Auto.train)) #1.
print(nrow(Auto.test)) #2.
# nrow(Auto.train) + nrow(Auto.test) == nrow(Auto)
print("nrow(Auto.train) + nrow(Auto.test) == nrow(Auto)")
print("OUTPUT:")
print(nrow(Auto.train) + nrow(Auto.test) == nrow(Auto))
## ------------------------------------------------------------------------------------
# Load the package:
#install.packages("MASS")
library(MASS)
# LDA model on the training data
print("# LDA model on the training data")
modelo.lda <- lda(formula = mpg01 ~ cylinders + displacement + horsepower + weight, data = Auto.train)
print(modelo.lda)
# Result:
"The model automatically computes the prior probabilities"
"(PI_0 = 0.514, PI_1 = 0.485)"
" and the mean of each predictor within each class, used by the model as estimates of µk."
"The coefficients provide the combination of the predictors"
"(- 0.4183 cylinders - 0.0017 displacement + 0.0028 - 0.0009) used to generate the linear discriminants for each of the training observations."
# QDA model on the training data
print("# QDA model on the training data")
modelo.qda <- qda(formula = mpg01 ~ cylinders + displacement + horsepower + weight, data = Auto.train)
print(modelo.qda)
# Explanation:
"Unlike LDA, QDA does not contain the coefficients of the linear discriminants, since the QDA classifier is based on a quadratic, not a linear, function of the predictors."
## ------------------------------------------------------------------------------------
# Load the "caret" package
#install.packages("caret")
library(caret)
# Only the LDA model is fitted with this package:
print("# Only the LDA model is fitted with this package:")
modelo.lda.caret <- train(as.factor(mpg01) ~ cylinders + displacement + horsepower + weight, method ='lda',data=Auto.train)
print(modelo.lda.caret)
sink()
|
#' @title plot barplot
#'
#' @description An internal helper function to create a ggplot barplot from the
#' data provided to the function. Takes data that has been created
#' with the IlluminaAnalysis function.
#'
#' @param genera a list data object containing a dataframe called "Abundance"
#' genera lists are created with the IlluminaAnalysis function.
#' Defines the dataframe from which to take the data to plot.
#' @param samples a character string or a vector/list containing multiple
#'        strings. Each string must represent a column name within the
#' genera$Abundance dataframe.
#' Defines which data to plot.
#' @param cutoff an integer value to determine how many of the most abundant
#' values (rownames of the genera$Abundance) of the selected samples are
#' to be displayed.
#' Defines how much of the data is plotted
#' @param modus a character string. Either "total", "hide_others" or "absolute"
#' everything else will result in the default "relative" modus.
#' Defines how the data is plotted.
#' @return plot - a plotly bar chart, or a pie chart when a single sample or \code{modus = "total"} is selected
#' @import ggplot2
#' @import dplyr
#' @import tidyr
#' @import plotly
#'
#' @examples
#'
#' @export
plot_barplot <- function(
genera,
samples,
cutoff,
modus
){
if (length(samples) == 0){
messagetext <- list(
x = 1,
y = 1,
text = "No sample selected \nPlease select a sample",
showarrow = FALSE,
font = list(size = 28)
)
plot <- plot_ly(as.data.frame(NULL))
plot <- plot %>% layout(annotations = messagetext,
yaxis = list(visible = FALSE),
xaxis = list(visible = FALSE))
} else {
plotdata <- dplyr::select(genera$Abundance, all_of(samples))
if (ncol(plotdata) > 1) {
plotdata$Total <- rowSums(plotdata)
circ = F
} else if (ncol(plotdata) == 1) {
plotdata$copy <- plotdata[1]
plotdata$Total <- rowSums(plotdata) / 2
circ = T
}
plotdata <- plotdata[order(plotdata$Total,
decreasing = T),]
plotdata <- rbind(plotdata[1:cutoff,],
colSums(plotdata[(cutoff + 1):nrow(plotdata),]))
row.names(plotdata)[nrow(plotdata)] <- "#others"
if (circ == T | modus == "total"){
plotdata <- dplyr::select(plotdata, Total)
plotdata$Name <- row.names(plotdata)
plot <- plotly::plot_ly(plotdata,
labels = ~Name,
values = ~Total,
type = "pie")
} else {
plotdata <- dplyr::select(plotdata, -Total)
if (modus == "hide_others"){
plotdata <- dplyr::slice_head(plotdata, n = (nrow(plotdata) - 1))
}
datapoints <- colnames(plotdata)
plotdata$Name <- row.names(plotdata)
plotdata <- tidyr::pivot_longer(plotdata,
datapoints,
names_to = "Sample",
values_to = "Abundance")
if (modus %in% c("absolute", "hide_others")){
plot <- ggplot2::ggplot(plotdata,
ggplot2::aes(fill = Name,
y = Abundance,
x = Sample))
plot <- plot + ggplot2::geom_bar(position = "stack",
stat = "identity")
plot <- plot + ggplot2::theme_classic()
plot <- plotly::ggplotly(plot)
} else {
plot <- ggplot2::ggplot(plotdata,
ggplot2::aes(fill = Name,
y = Abundance,
x = Sample))
plot <- plot + ggplot2::geom_bar(position = "fill",
stat = "identity")
plot <- plot + ggplot2::theme_classic()
plot <- plotly::ggplotly(plot)
}
}
}
return(plot)
}
|
/R/plot_barplot.R
|
no_license
|
GATempel/NGS.shiny.helper
|
R
| false | false | 4,040 |
r
|
#' @title plot barplot
#'
#' @description An internal helper function to create a ggplot barplot from the
#' data provided to the function. Takes data that has been created
#' with the IlluminaAnalysis function.
#'
#' @param genera a list data object containing a dataframe called "Abundance"
#' genera lists are created with the IlluminaAnalysis function.
#' Defines the dataframe from which to take the data to plot.
#' @param samples a character string or a vector/list containing multiple
#'        strings. Each string must represent a column name within the
#' genera$Abundance dataframe.
#' Defines which data to plot.
#' @param cutoff an integer value to determine how many of the most abundant
#' values (rownames of the genera$Abundance) of the selected samples are
#' to be displayed.
#' Defines how much of the data is plotted
#' @param modus a character string. Either "total", "hide_others" or "absolute"
#' everything else will result in the default "relative" modus.
#' Defines how the data is plotted.
#' @return plot - a plotly bar chart, or a pie chart when a single sample or \code{modus = "total"} is selected
#' @import ggplot2
#' @import dplyr
#' @import tidyr
#' @import plotly
#'
#' @examples
#'
#' @export
plot_barplot <- function(
genera,
samples,
cutoff,
modus
){
if (length(samples) == 0){
messagetext <- list(
x = 1,
y = 1,
text = "No sample selected \nPlease select a sample",
showarrow = FALSE,
font = list(size = 28)
)
plot <- plot_ly(as.data.frame(NULL))
plot <- plot %>% layout(annotations = messagetext,
yaxis = list(visible = FALSE),
xaxis = list(visible = FALSE))
} else {
plotdata <- dplyr::select(genera$Abundance, all_of(samples))
if (ncol(plotdata) > 1) {
plotdata$Total <- rowSums(plotdata)
circ = F
} else if (ncol(plotdata) == 1) {
plotdata$copy <- plotdata[1]
plotdata$Total <- rowSums(plotdata) / 2
circ = T
}
plotdata <- plotdata[order(plotdata$Total,
decreasing = T),]
plotdata <- rbind(plotdata[1:cutoff,],
colSums(plotdata[(cutoff + 1):nrow(plotdata),]))
row.names(plotdata)[nrow(plotdata)] <- "#others"
if (circ == T | modus == "total"){
plotdata <- dplyr::select(plotdata, Total)
plotdata$Name <- row.names(plotdata)
plot <- plotly::plot_ly(plotdata,
labels = ~Name,
values = ~Total,
type = "pie")
} else {
plotdata <- dplyr::select(plotdata, -Total)
if (modus == "hide_others"){
plotdata <- dplyr::slice_head(plotdata, n = (nrow(plotdata) - 1))
}
datapoints <- colnames(plotdata)
plotdata$Name <- row.names(plotdata)
plotdata <- tidyr::pivot_longer(plotdata,
datapoints,
names_to = "Sample",
values_to = "Abundance")
if (modus %in% c("absolute", "hide_others")){
plot <- ggplot2::ggplot(plotdata,
ggplot2::aes(fill = Name,
y = Abundance,
x = Sample))
plot <- plot + ggplot2::geom_bar(position = "stack",
stat = "identity")
plot <- plot + ggplot2::theme_classic()
plot <- plotly::ggplotly(plot)
} else {
plot <- ggplot2::ggplot(plotdata,
ggplot2::aes(fill = Name,
y = Abundance,
x = Sample))
plot <- plot + ggplot2::geom_bar(position = "fill",
stat = "identity")
plot <- plot + ggplot2::theme_classic()
plot <- plotly::ggplotly(plot)
}
}
}
return(plot)
}
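# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original source file: a minimal call
# with a made-up 'genera' list. It assumes plot_barplot() has been sourced and
# that dplyr, tidyr, ggplot2 and plotly are installed.
library(plotly)
set.seed(42)
abund <- data.frame(SampleA = rpois(12, 40), SampleB = rpois(12, 25),
                    row.names = paste0("Genus_", 1:12))
genera_demo <- list(Abundance = abund)
p_rel <- plot_barplot(genera_demo, samples = c("SampleA", "SampleB"),
                      cutoff = 5, modus = "relative")
p_pie <- plot_barplot(genera_demo, samples = c("SampleA", "SampleB"),
                      cutoff = 5, modus = "total")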
|
# Load the two data frames
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Gather the subset of NEI data which contains vehicles data
vehicles <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case=TRUE)
vehiclesSCC <- SCC[vehicles,]$SCC
vehiclesNEI <- NEI[NEI$SCC %in% vehiclesSCC,]
# Subset the vehicles NEI data to Baltimore's fips
baltimoreVehiclesNEI <- vehiclesNEI[vehiclesNEI$fips=="24510",]
png("plot5.png",width=480,height=480,units="px",bg="transparent")
library(ggplot2)
ggp <- ggplot(baltimoreVehiclesNEI,aes(factor(year),Emissions)) +
geom_bar(stat="identity",fill="grey",width=0.75) +
theme_bw() + guides(fill=FALSE) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (10^5 Tons)")) +
labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore from 1999-2008"))
print(ggp)
dev.off()
|
/plot5.R
|
no_license
|
davidnea3/Exploratory-data-analysis
|
R
| false | false | 876 |
r
|
# Load the two data frames
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Gather the subset of NEI data which contains vehicles data
vehicles <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case=TRUE)
vehiclesSCC <- SCC[vehicles,]$SCC
vehiclesNEI <- NEI[NEI$SCC %in% vehiclesSCC,]
# Subset the vehicles NEI data to Baltimore's fips
baltimoreVehiclesNEI <- vehiclesNEI[vehiclesNEI$fips=="24510",]
png("plot5.png",width=480,height=480,units="px",bg="transparent")
library(ggplot2)
ggp <- ggplot(baltimoreVehiclesNEI,aes(factor(year),Emissions)) +
geom_bar(stat="identity",fill="grey",width=0.75) +
theme_bw() + guides(fill=FALSE) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (10^5 Tons)")) +
labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore from 1999-2008"))
print(ggp)
dev.off()
|
dataFile <- "household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]

#str(subSetData)
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot1.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
/Plot1.R
|
no_license
|
bjk127/ExData_Plotting1
|
R
| false | false | 463 |
r
|
dataFile <- "household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]

#str(subSetData)
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot1.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
shiny::runApp('c:/Shinyapp/TBMMaterialCode/',
port = 1234,
host = getOption('shiny.host','0.0.0.0'))
|
/runApp.R
|
no_license
|
NegativeDearc/TBMMaterialCode
|
R
| false | false | 129 |
r
|
shiny::runApp('c:/Shinyapp/TBMMaterialCode/',
port = 1234,
host = getOption('shiny.host','0.0.0.0'))
|
library(textTinyR)
### Name: select_predictors
### Title: Exclude highly correlated predictors
### Aliases: select_predictors
### ** Examples
library(textTinyR)
set.seed(1)
resp = runif(100)
set.seed(2)
col = runif(100)
matr = matrix(c(col, col^4, col^6, col^8, col^10), nrow = 100, ncol = 5)
out = select_predictors(resp, matr, predictors_upper_thresh = 0.75)
|
/data/genthat_extracted_code/textTinyR/examples/select_predictors.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 373 |
r
|
library(textTinyR)
### Name: select_predictors
### Title: Exclude highly correlated predictors
### Aliases: select_predictors
### ** Examples
library(textTinyR)
set.seed(1)
resp = runif(100)
set.seed(2)
col = runif(100)
matr = matrix(c(col, col^4, col^6, col^8, col^10), nrow = 100, ncol = 5)
out = select_predictors(resp, matr, predictors_upper_thresh = 0.75)
|
require(bio.survey)
require(bio.lobster)
require(bio.groundfish)
la()
p = bio.lobster::load.environment()
p$libs = NULL
fp = file.path(project.datadirectory('bio.lobster'),"analysis")
load_all('~/git/bio.survey/')
p$series =c('summer')# p$series =c('georges');p$series =c('fall')
p$define.by.polygons = F
p$lobster.subunits=F
p$area = 'LFA41'
p$years.to.estimate = c(1970:2016)
p$length.based = F
p$by.sex = F
p$bootstrapped.ci=T
p$strata.files.return=F
p$vessel.correction.fixed=1.2
p$strat = NULL
p$clusters = c( rep( "localhost", 7) )
p$strata.efficiencies = F
p = make.list(list(yrs=p$years.to.estimate),Y=p)
#DFO restratified to lfa40
p$define.by.polygons = T
p$lobster.subunits=F
p$area = 'LFA40'
p$reweight.strata = T #this subsets
aout= dfo.rv.analysis(DS='stratified.estimates.redo',p=p)
#Figure
p$add.reference.lines = F
p$time.series.start.year = p$years.to.estimate[1]
p$time.series.end.year = p$years.to.estimate[length(p$years.to.estimate)]
p$metric = 'numbers' #weights
p$measure = 'stratified.mean' #'stratified.total'
p$figure.title = ""
p$reference.measure = 'median' # mean, geomean
p$file.name = 'lfa40DFOrestratifiednumbers.png'
p$y.maximum = NULL # NULL # if ymax is too high for one year
p$show.truncated.numbers = F #if using ymax and want to show the numbers that are cut off as values on figure
p$legend = FALSE
p$running.median = T
p$running.length = 3
p$running.mean = F #can only have rmedian or rmean
p$error.polygon=F
p$error.bars=T
p$ylim=c(0,70)
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p,save=T)
p$box=NULL
p$ylim=c(0,32)
p$metric = 'weights'
p$file.name = 'lfa40DFOrestratifiedweights.png'
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p)
p$box=T
p$ylim=NULL
p$metric = 'weights'
p$file.name = 'lfa40DFOrestratifiedweightsNOY.png'
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p)
p$box=NULL
p$ylim=NULL
p$file.name = 'lfa40DFOrestratifiedDWAO.png'
p$metric = 'dwao'
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p)
p$file.name = 'lfa40DFOrestratifiedgini.png'
p$metric = 'gini'
p$ylim =c(0,1)
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p)
p$ylim = NULL
aout$subset = 'DFO.restratified.All'
write.csv(aout,file=file.path(fp,'indicators','DFO.restratified.All.csv'))
|
/inst/Frameworks/LFA3438Framework/LFA34-38Framework/2h.LFA40StratifiedAnalysis.r
|
no_license
|
LobsterScience/bio.lobster
|
R
| false | false | 3,555 |
r
|
require(bio.survey)
require(bio.lobster)
require(bio.groundfish)
la()
p = bio.lobster::load.environment()
p$libs = NULL
fp = file.path(project.datadirectory('bio.lobster'),"analysis")
load_all('~/git/bio.survey/')
p$series =c('summer')# p$series =c('georges');p$series =c('fall')
p$define.by.polygons = F
p$lobster.subunits=F
p$area = 'LFA41'
p$years.to.estimate = c(1970:2016)
p$length.based = F
p$by.sex = F
p$bootstrapped.ci=T
p$strata.files.return=F
p$vessel.correction.fixed=1.2
p$strat = NULL
p$clusters = c( rep( "localhost", 7) )
p$strata.efficiencies = F
p = make.list(list(yrs=p$years.to.estimate),Y=p)
#DFO restratified to lfa40
p$define.by.polygons = T
p$lobster.subunits=F
p$area = 'LFA40'
p$reweight.strata = T #this subsets
aout= dfo.rv.analysis(DS='stratified.estimates.redo',p=p)
#Figure
p$add.reference.lines = F
p$time.series.start.year = p$years.to.estimate[1]
p$time.series.end.year = p$years.to.estimate[length(p$years.to.estimate)]
p$metric = 'numbers' #weights
p$measure = 'stratified.mean' #'stratified.total'
p$figure.title = ""
p$reference.measure = 'median' # mean, geomean
p$file.name = 'lfa40DFOrestratifiednumbers.png'
p$y.maximum = NULL # NULL # if ymax is too high for one year
p$show.truncated.numbers = F #if using ymax and want to show the numbers that are cut off as values on figure
p$legend = FALSE
p$running.median = T
p$running.length = 3
p$running.mean = F #can only have rmedian or rmean
p$error.polygon=F
p$error.bars=T
p$ylim=c(0,70)
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p,save=T)
p$box=NULL
p$ylim=c(0,32)
p$metric = 'weights'
p$file.name = 'lfa40DFOrestratifiedweights.png'
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p)
p$box=T
p$ylim=NULL
p$metric = 'weights'
p$file.name = 'lfa40DFOrestratifiedweightsNOY.png'
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p)
p$box=NULL
p$ylim=NULL
p$file.name = 'lfa40DFOrestratifiedDWAO.png'
p$metric = 'dwao'
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p)
p$file.name = 'lfa40DFOrestratifiedgini.png'
p$metric = 'gini'
p$ylim =c(0,1)
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p)
p$ylim = NULL
aout$subset = 'DFO.restratified.All'
write.csv(aout,file=file.path(fp,'indicators','DFO.restratified.All.csv'))
|
library(quantmod)
tickers <-c("AAPL",
"GOOGL")
#1. Using the quantmod package, download the Apple data from 01-01-2011 to 01-01-2013.
startDate = '2011-01-01'
endDate = '2013-01-01'
getSymbols(tickers, from=startDate, to=endDate)
#2. For the data downloaded above, check what class type is the downloaded data and convert it into a data frame and name it Apple_dataframe.
class(AAPL)
Apple_dataframe=data.frame(AAPL)
class(Apple_dataframe)
Apple_dataframe=na.omit(Apple_dataframe)
#3. Create a new column called Cum_Price and the values in this column should be the product of closing price and the volume.
Apple_dataframe$AAPL.Cum_Price=Apple_dataframe$AAPL.Close*Apple_dataframe$AAPL.Volume
# 4. Create two new columns called LMA and SMA and calculate exponential moving average choosing moving average window of 60 and 20 respectively.
ESMAdays = 20
ELMAdays = 60
Apple_dataframe$SMA=EMA(Apple_dataframe$AAPL.Close,ESMAdays)
Apple_dataframe$LMA=EMA(Apple_dataframe$AAPL.Close,ELMAdays)
#5. Create two columns called LMA2 and SMA2 and calculate moving average using roll apply function.
SMAdays = 20
LMAdays = 60
Apple_dataframe$SMA2=SMA(Apple_dataframe$AAPL.Close,SMAdays)
Apple_dataframe$LMA2=SMA(Apple_dataframe$AAPL.Close,LMAdays)
#6. Download google stock data from 01-01-2011 to 01-01-2013 and perform a linear regression between apple and google. Tabulate the result.
Data <- merge(AAPL,GOOGL)
Data <- na.omit(Data)
regression<-lm(Data$GOOGL.Close~Data$AAPL.Close)
print(summary(regression))
#7. Calculate moving average crossover strategy for apple with SMA and LMA pair of 20-60, 30-90, 50-200. Choose the duration from 01-01-2007 to 01-01-2014. Document the returns, positive trade and negative trades. Which pair provides maximum returns?
startDate = '2007-01-01'
endDate = '2014-01-01'
getSymbols("AAPL", from=startDate, to=endDate)
SMAdays = c(20,30,50)
LMAdays = c(60,90,200)
# data frame tmp to store return, positive and negative trade details
tmp<-data.frame(SMA=SMAdays, LMA=LMAdays)
tmp$Return<-0
tmp$PositiveTrades<-0
tmp$NegativeTrades<-0
J=length(SMAdays)
# Loop to run the program for the given number of MA's
for(r in 1:J)
{
# Computes Short term moving average (SMA) and long-term moving average(LMA) using
# the SMA function
data=Cl(AAPL)
data=na.omit(data)
data$SMA=SMA(data$AAPL.Close,tmp[r,1])
data$LMA=SMA(data$AAPL.Close,tmp[r,2])
data$Signal = 0
data$E_Price = 0
data$PL = 0
# To count number of rows in the data set.
n = nrow(data)
ntotal = n
nstart = tmp[r,2] + 1
# The rule to enter into the trade and to exit is coded below
for (i in nstart:ntotal){
if ((data$SMA[i-1] <= data$LMA[i-1]) && (data$SMA[i] > data$LMA[i])) {
data$Signal[i] = "Buy"
data$E_Price[i] = data$AAPL.Close[i]
} else if ((data$SMA[i-1] >= data$LMA[i-1]) && (data$SMA[i] < data$LMA[i])) {
data$Signal[i] = "Sell"
data$E_Price[i] = data$AAPL.Close[i]
}
}
# This will remove all the zeros from the Signal and the E_Price column
for (i in 1:ntotal){
if ((data$Signal[i] == 0 ) && (data$E_Price[i] == 0)){
data$Signal[i] = NA
data$E_Price[i] = ""
}
}
#data$Signal <- ifelse(((data$Signal== 0 ) && (data$E_Price== 0)),NA)
# This will keep only those rows of the data frame which entered or exited a trade
data = data[!is.na(data$Signal),]
head(data)
# This will compute the PL for each trade entered using the revised data frame
n = nrow(data)
ntotal = n
print(n)
as.numeric(data$E_Price)
condition1 = data$Signal== "Buy"
condition2 = data$Signal == "Sell"
for (i in 2:ntotal){
if (condition1[i]) {
data$PL[i] = (as.numeric(data$E_Price[i-1]) - as.numeric(data$E_Price[i]))*100/as.numeric(data$E_Price[i])
} else if (condition2[i]) {
data$PL[i] = (as.numeric(data$E_Price[i]) - as.numeric(data$E_Price[i-1]))*100/as.numeric(data$E_Price[i-1])
}
}
# This will compute the total number of trades entered
no_of_trades = ntotal - 1
tmp[r,4] = length(data$PL[data$PL>0])
tmp[r,5] = no_of_trades - tmp[r,4]
# This will compute the total return made on all trades
k=as.numeric(data$PL)
#k=data$PL
tmp[r,3] = sum(k)
}
tmp
#Displays the pair with Maximum Returns:
tmp[which.max(tmp$Return),]
|
/R2_assignment.R
|
no_license
|
savio2928/R-Code
|
R
| false | false | 4,334 |
r
|
library(quantmod)
tickers <-c("AAPL",
"GOOGL")
#1. Using the quantmod package, download the Apple data from 01-01-2011 to 01-01-2013.
startDate = '2011-01-01'
endDate = '2013-01-01'
getSymbols(tickers, from=startDate, to=endDate)
#2. For the data downloaded above, check what class type is the downloaded data and convert it into a data frame and name it Apple_dataframe.
class(AAPL)
Apple_dataframe=data.frame(AAPL)
class(Apple_dataframe)
Apple_dataframe=na.omit(Apple_dataframe)
#3. Create a new column called Cum_Price and the values in this column should be the product of closing price and the volume.
Apple_dataframe$AAPL.Cum_Price=Apple_dataframe$AAPL.Close*Apple_dataframe$AAPL.Volume
# 4. Create two new columns called LMA and SMA and calculate exponential moving average choosing moving average window of 60 and 20 respectively.
ESMAdays = 20
ELMAdays = 60
Apple_dataframe$SMA=EMA(Apple_dataframe$AAPL.Close,ESMAdays)
Apple_dataframe$LMA=EMA(Apple_dataframe$AAPL.Close,ELMAdays)
#5. Create two columns called LMA2 and SMA2 and calculate moving average using roll apply function.
SMAdays = 20
LMAdays = 60
Apple_dataframe$SMA2=SMA(Apple_dataframe$AAPL.Close,SMAdays)
Apple_dataframe$LMA2=SMA(Apple_dataframe$AAPL.Close,LMAdays)
#6. Download google stock data from 01-01-2011 to 01-01-2013 and perform a linear regression between apple and google. Tabulate the result.
Data <- merge(AAPL,GOOGL)
Data <- na.omit(Data)
regression<-lm(Data$GOOGL.Close~Data$AAPL.Close)
print(summary(regression))
#7. Calculate moving average crossover strategy for apple with SMA and LMA pair of 20-60, 30-90, 50-200. Choose the duration from 01-01-2007 to 01-01-2014. Document the returns, positive trade and negative trades. Which pair provides maximum returns?
startDate = '2007-01-01'
endDate = '2014-01-01'
getSymbols("AAPL", from=startDate, to=endDate)
SMAdays = c(20,30,50)
LMAdays = c(60,90,200)
# data frame tmp to store return, positive and negative trade details
tmp<-data.frame(SMA=SMAdays, LMA=LMAdays)
tmp$Return<-0
tmp$PositiveTrades<-0
tmp$NegativeTrades<-0
J=length(SMAdays)
# Loop to run the program for the given number of MA's
for(r in 1:J)
{
# Computes Short term moving average (SMA) and long-term moving average(LMA) using
# the SMA function
data=Cl(AAPL)
data=na.omit(data)
data$SMA=SMA(data$AAPL.Close,tmp[r,1])
data$LMA=SMA(data$AAPL.Close,tmp[r,2])
data$Signal = 0
data$E_Price = 0
data$PL = 0
# To count number of rows in the data set.
n = nrow(data)
ntotal = n
nstart = tmp[r,2] + 1
# The rule to enter into the trade and to exit is coded below
for (i in nstart:ntotal){
if ((data$SMA[i-1] <= data$LMA[i-1]) && (data$SMA[i] > data$LMA[i])) {
data$Signal[i] = "Buy"
data$E_Price[i] = data$AAPL.Close[i]
} else if ((data$SMA[i-1] >= data$LMA[i-1]) && (data$SMA[i] < data$LMA[i])) {
data$Signal[i] = "Sell"
data$E_Price[i] = data$AAPL.Close[i]
}
}
# This will remove all the zeros from the Signal and the E_Price column
for (i in 1:ntotal){
if ((data$Signal[i] == 0 ) && (data$E_Price[i] == 0)){
data$Signal[i] = NA
data$E_Price[i] = ""
}
}
#data$Signal <- ifelse(((data$Signal== 0 ) && (data$E_Price== 0)),NA)
# This will keep only those rows of the data frame which entered or exited a trade
data = data[!is.na(data$Signal),]
head(data)
# This will compute the PL for each trade entered using the revised data frame
n = nrow(data)
ntotal = n
print(n)
as.numeric(data$E_Price)
condition1 = data$Signal== "Buy"
condition2 = data$Signal == "Sell"
for (i in 2:ntotal){
if (condition1[i]) {
data$PL[i] = (as.numeric(data$E_Price[i-1]) - as.numeric(data$E_Price[i]))*100/as.numeric(data$E_Price[i])
} else if (condition2[i]) {
data$PL[i] = (as.numeric(data$E_Price[i]) - as.numeric(data$E_Price[i-1]))*100/as.numeric(data$E_Price[i-1])
}
}
# This will compute the total number of trades entered
no_of_trades = ntotal - 1
tmp[r,4] = length(data$PL[data$PL>0])
tmp[r,5] = no_of_trades - tmp[r,4]
# This will compute the total return made on all trades
k=as.numeric(data$PL)
#k=data$PL
tmp[r,3] = sum(k)
}
tmp
#Displays the pair with Maximum Returns:
tmp[which.max(tmp$Return),]
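# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original assignment: a compact,
# vectorized alternative for locating SMA/LMA crossover days, assuming the
# AAPL xts object downloaded above is still in the workspace.
cl <- as.numeric(Cl(AAPL))
s  <- as.numeric(SMA(cl, 20))
l  <- as.numeric(SMA(cl, 60))
cross <- diff(sign(s - l))              # +2 at an upward cross, -2 at a downward cross
buy_days  <- index(AAPL)[which(cross > 0) + 1]
sell_days <- index(AAPL)[which(cross < 0) + 1]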
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qh.eSIR.R
\name{qh.eSIR}
\alias{qh.eSIR}
\title{Extended state-space SIR with quarantine}
\usage{
qh.eSIR(Y, R, phi0 = NULL, change_time = NULL,
begin_str = "01/13/2020", T_fin = 200, nchain = 4,
nadapt = 10000, M = 500, thn = 10, nburnin = 200, dic = FALSE,
death_in_R = 0.02, casename = "qh.eSIR", beta0 = 0.2586,
gamma0 = 0.0821, R0 = beta0/gamma0, gamma0_sd = 0.1, R0_sd = 1,
file_add = character(0), save_files = FALSE, save_mcmc = FALSE,
save_plot_data = FALSE)
}
\arguments{
\item{Y}{the time series of daily observed infected compartment proportions.}
\item{R}{the time series of daily observed removed compartment proportions, including death and recovered.}
\item{phi0}{a vector of values of the Dirac delta function \eqn{\phi_t}. Each entry denotes the proportion that will be quarantined at each change time point. Note that all the entries lie between 0 and 1; the default is \code{NULL}.}
\item{change_time}{the change points over time corresponding to \code{phi0}, used to formulate the Dirac delta function \eqn{\phi_t}; the default value is \code{NULL}.}
\item{begin_str}{the character of starting time, the default is "01/13/2020".}
\item{T_fin}{the end of follow-up time after the beginning date \code{begin_str}, the default is 200.}
\item{nchain}{the number of MCMC chains generated by \code{\link[rjags]{rjags}}, the default is 4.}
\item{nadapt}{the iteration number of adaptation in the MCMC. We recommend using at least the default value 1e4 to obtain fully adapted chains.}
\item{M}{the number of draws in each chain, with no thinning. The default is M=5e2, but using 5e5 is suggested.}
\item{thn}{the thinning interval between mixing. The total number of draws thus would become \code{round(M/thn)*nchain}. The default is 10.}
\item{nburnin}{the burn-in period. The default is 2e2, but 2e5 is suggested.}
\item{dic}{logical, whether to compute the DIC (deviance information criterion) for model selection.}
\item{death_in_R}{the numeric value of the average of cumulative deaths in the removed compartments. The default is 0.4 within Hubei and 0.02 outside Hubei.}
\item{casename}{the string of the job's name. The default is "qh.eSIR".}
\item{beta0}{the hyperparameter of average transmission rate, the default is the one estimated from the SARS first-month outbreak (0.2586).}
\item{gamma0}{the hyperparameter of average removed rate, the default is the one estimated from the SARS first-month outbreak (0.0821).}
\item{R0}{the hyperparameter of the mean reproduction number R0. The default is thus the ratio of \code{beta0/gamma0}, which can be specified directly.}
\item{gamma0_sd}{the standard deviation for the prior distribution of the removed rate \eqn{\gamma}, the default is 0.1.}
\item{R0_sd}{the standard deviation for the prior distribution of R0, the default is 1.}
\item{file_add}{the string to denote the location of saving output files and tables.}
\item{save_mcmc}{logical, whether to save (\code{TRUE}) all the MCMC outputs or not (\code{FALSE}). The output file will be an \code{.RData} file named by the \eqn{casename}. We include arrays of prevalence values of the three compartments with their matrices of posterior draws up to the last date of the collected data as \code{theta_p[,,1]} and afterwards as \code{theta_pp[,,1]} for \eqn{\theta_t^S}, \code{theta_p[,,2]} and \code{theta_pp[,,2]} for \eqn{\theta_t^I}, and \code{theta_p[,,3]} and \code{theta_pp[,,3]} for \eqn{\theta_t^R}. The posterior draws of the prevalence process of the quarantine compartment can be obtained via \code{thetaQ_p} and \code{thetaQ_pp}. Moreover, the input and predicted proportions \code{Y}, \code{Y_pp}, \code{R} and \code{R_pp} can also be retrieved. The prevalence and predicted proportion matrices have rows for MCMC replicates, and columns for days. The MCMC posterior draws of other parameters including \code{beta}, \code{gamma}, \code{R0}, and variance controllers \code{k_p}, \code{lambdaY_p}, \code{lambdaR_p} are also available.}
\item{save_plot_data}{logical, whether save the plotting data or not.}
}
\value{
\item{casename}{the predefined \code{casename}.}
\item{incidence_mean}{mean incidence.}
\item{incidence_ci}{2.5\%, 50\%, and 97.5\% quantiles of the incidences.}
\item{out_table}{summary tables including the posterior means of the prevalence processes of the compartments (\eqn{\theta_t^S,\theta_t^I,\theta_t^R,\theta_t^H}) at the last date of data collection (\eqn{t^\prime}, determined by the lengths of your input data \code{Y} and \code{R}), and their respective credible intervals (ci); also the respective means and ci's of the reproduction number (R0), removed rate (\eqn{\gamma}), and transmission rate (\eqn{\beta}).}
\item{plot_infection}{plot summarizing and forecasting the infection compartment, in which the vertical blue line denotes the last date of data collection (\eqn{t^\prime}), the vertical darkgray line denotes the deceleration point (first turning point) at which the posterior mean first derivative of the infection prevalence \eqn{\dot{\theta}_t^I} achieves its maximum, the vertical purple line denotes the second turning point at which the posterior mean first derivative of the infection proportion \eqn{\dot{\theta}_t^I} equals zero, the darkgray line denotes the posterior mean of the infection prevalence \eqn{\theta_t^I}, and the red line denotes its posterior median.}
\item{plot_removed}{plot summarizing and forecasting the removed compartment, with lines similar to those in \code{plot_infection}. The vertical lines are identical, but the horizontal mean and median correspond to the posterior mean and median of the removed process \eqn{\theta_t^R}. An additional line indicates the estimated death prevalence from the input \code{death_in_R}.}
\item{spaghetti_plot}{20 randomly selected MCMC draws of the first-order derivative of the posterior prevalence of infection, namely \eqn{\dot{\theta}_t^I}. The black curve is the posterior mean of the derivative, and the vertical lines mark the times of turning points corresponding respectively to those shown in \code{plot_infection} and \code{plot_removed}. Moreover, the 95\% credible intervals of these turning points are also highlighted by semi-transparent rectangles.}
\item{first_tp_mean}{the date t at which \eqn{\ddot{\theta}_t^I=0}, calculated as the average of the time points with maximum posterior first-order derivatives \eqn{\dot{\theta}_t^I}; this value may be slightly different from the one labeled by the "darkgreen" lines in the two plots \code{plot_infection} and \code{plot_removed}, which indicate the stationary point such that the first-order derivative of the averaged posterior of \eqn{\theta_t^I} reaches its maximum.}
\item{first_tp_ci}{with \code{first_tp_mean}, it reports the corresponding credible interval and median.}
\item{second_tp_mean}{the date t at which \eqn{\dot{\theta}_t^I=0}, calculated as the average of the stationary points of all of the posterior first-order derivatives \eqn{\dot{\theta}_t^I}; this value may be slightly different from the one labeled by the "purple" lines in the plots \code{plot_infection} and \code{plot_removed}. The latter indicate the stationary t at which the first-order derivative of the averaged posterior of \eqn{\theta_t^I} equals zero.}
\item{second_tp_ci}{with \code{second_tp_mean}, it reports the corresponding credible interval and median.}
\item{dic_val}{the output of \code{dic.sample()} in \code{\link[rjags]{dic.sample}}, computing deviance information criterion for model comparison.}
}
\description{
Fit an extended state-space SIR model in which the susceptible compartment is reduced by in-home hospitalization (quarantine).
}
\details{
This function characterizes time-varying proportions of susceptible individuals due to government-enforced stringent in-home isolation. It expands the SIR model by adding a quarantine compartment with a time-varying quarantine rate \eqn{\phi_t}, the chance of a susceptible person being willing to take in-home isolation at time t.
}
\examples{
NI_complete <- c( 41,41,41,45,62,131,200,270,375,444,549, 729,
1052,1423,2714,3554,4903,5806,7153,9074,11177,
13522,16678,19665,22112,24953,27100,29631,31728,33366)
RI_complete <- c(1,1,7,10,14,20,25,31,34,45,55,71,94,121,152,213,
252,345,417,561,650,811,1017,1261,1485,1917,2260,
2725,3284,3754)
N=58.5e6
R <- RI_complete/N
Y <- NI_complete/N- R #Jan13->Feb 11
change_time <- c("01/23/2020","02/04/2020","02/08/2020")
phi0 <- c(0.1,0.4,0.4)
res.q <- qh.eSIR (Y,R,begin_str="01/13/2020",death_in_R = 0.4,
phi0=phi0,change_time=change_time,
casename="Hubei_q",save_files = T,save_mcmc = F,
M=5e2,nburnin = 2e2)
res.q$plot_infection
#res.q$plot_removed
res.noq <- qh.eSIR (Y,R,begin_str="01/13/2020",death_in_R = 0.4,
T_fin=200,casename="Hubei_noq",
M=5e2,nburnin = 2e2)
res.noq$plot_infection
}
|
/man/qh.eSIR.Rd
|
permissive
|
ynsxx/eSIR
|
R
| false | true | 9,452 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qh.eSIR.R
\name{qh.eSIR}
\alias{qh.eSIR}
\title{Extended state-space SIR with quarantine}
\usage{
qh.eSIR(Y, R, phi0 = NULL, change_time = NULL,
begin_str = "01/13/2020", T_fin = 200, nchain = 4,
nadapt = 10000, M = 500, thn = 10, nburnin = 200, dic = FALSE,
death_in_R = 0.02, casename = "qh.eSIR", beta0 = 0.2586,
gamma0 = 0.0821, R0 = beta0/gamma0, gamma0_sd = 0.1, R0_sd = 1,
file_add = character(0), save_files = FALSE, save_mcmc = FALSE,
save_plot_data = FALSE)
}
\arguments{
\item{Y}{the time series of daily observed infected compartment proportions.}
\item{R}{the time series of daily observed removed compartment proportions, including death and recovered.}
\item{phi0}{a vector of values of the Dirac delta function \eqn{\phi_t}. Each entry denotes the proportion that will be quarantined at each change time point. Note that all entries must lie between 0 and 1; the default is \code{NULL}.}
\item{change_time}{the change points over time corresponding to \code{phi0}, used to formulate the Dirac delta function \eqn{\phi_t}; its default value is \code{NULL}.}
\item{begin_str}{the character of starting time, the default is "01/13/2020".}
\item{T_fin}{the end of follow-up time after the beginning date \code{begin_str}, the default is 200.}
\item{nchain}{the number of MCMC chains generated by \code{\link[rjags]{rjags}}, the default is 4.}
\item{nadapt}{the number of adaptation iterations in the MCMC. We recommend using at least the default value 1e4 to obtain fully adapted chains.}
\item{M}{the number of draws in each chain, with no thinning. The default is M=5e2, but we suggest using 5e5.}
\item{thn}{the thinning interval between mixing. The total number of draws thus becomes \code{round(M/thn)*nchain}. The default is 10.}
\item{nburnin}{the burn-in period. The default is 2e2, but we suggest 2e5.}
\item{dic}{logical, whether to compute the DIC (deviance information criterion) for model selection.}
\item{death_in_R}{the numeric value of the average of cumulative deaths in the removed compartments. The default is 0.4 within Hubei and 0.02 outside Hubei.}
\item{casename}{the string of the job's name. The default is "qh.eSIR".}
\item{beta0}{the hyperparameter of average transmission rate, the default is the one estimated from the SARS first-month outbreak (0.2586).}
\item{gamma0}{the hyperparameter of average removed rate, the default is the one estimated from the SARS first-month outbreak (0.0821).}
\item{R0}{the hyperparameter of the mean reproduction number R0. The default is thus the ratio of \code{beta0/gamma0}, which can be specified directly.}
\item{gamma0_sd}{the standard deviation for the prior distribution of the removed rate \eqn{\gamma}, the default is 0.1.}
\item{R0_sd}{the standard deviation for the prior distribution of R0, the default is 1.}
\item{file_add}{the string denoting the location for saving output files and tables.}
\item{save_mcmc}{logical, whether to save (\code{TRUE}) all the MCMC outputs or not (\code{FALSE}). The output file will be an \code{.RData} file named by the \eqn{casename}. We include arrays of prevalence values of the three compartments with their matrices of posterior draws up to the last date of the collected data as \code{theta_p[,,1]} and afterwards as \code{theta_pp[,,1]} for \eqn{\theta_t^S}, \code{theta_p[,,2]} and \code{theta_pp[,,2]} for \eqn{\theta_t^I}, and \code{theta_p[,,3]} and \code{theta_pp[,,3]} for \eqn{\theta_t^R}. The posterior draws of the prevalence process of the quarantine compartment can be obtained via \code{thetaQ_p} and \code{thetaQ_pp}. Moreover, the input and predicted proportions \code{Y}, \code{Y_pp}, \code{R} and \code{R_pp} can also be retrieved. The prevalence and predicted proportion matrices have rows for MCMC replicates, and columns for days. The MCMC posterior draws of other parameters including \code{beta}, \code{gamma}, \code{R0}, and variance controllers \code{k_p}, \code{lambdaY_p}, \code{lambdaR_p} are also available.}
\item{save_plot_data}{logical, whether save the plotting data or not.}
}
\value{
\item{casename}{the predefined \code{casename}.}
\item{incidence_mean}{mean incidence.}
\item{incidence_ci}{2.5\%, 50\%, and 97.5\% quantiles of the incidences.}
\item{out_table}{summary tables including the posterior means of the prevalence processes of the compartments (\eqn{\theta_t^S,\theta_t^I,\theta_t^R,\theta_t^H}) at the last date of data collection (\eqn{t^\prime}, determined by the lengths of your input data \code{Y} and \code{R}), and their respective credible intervals (ci); also the respective means and ci's of the reproduction number (R0), removed rate (\eqn{\gamma}), and transmission rate (\eqn{\beta}).}
\item{plot_infection}{plot summarizing and forecasting the infection compartment, in which the vertical blue line denotes the last date of data collection (\eqn{t^\prime}), the vertical darkgray line denotes the deceleration point (first turning point) at which the posterior mean first derivative of the infection prevalence \eqn{\dot{\theta}_t^I} achieves its maximum, the vertical purple line denotes the second turning point at which the posterior mean first derivative of the infection proportion \eqn{\dot{\theta}_t^I} equals zero, the darkgray line denotes the posterior mean of the infection prevalence \eqn{\theta_t^I}, and the red line denotes its posterior median.}
\item{plot_removed}{plot summarizing and forecasting the removed compartment, with lines similar to those in \code{plot_infection}. The vertical lines are identical, but the horizontal mean and median correspond to the posterior mean and median of the removed process \eqn{\theta_t^R}. An additional line indicates the estimated death prevalence from the input \code{death_in_R}.}
\item{spaghetti_plot}{20 randomly selected MCMC draws of the first-order derivative of the posterior prevalence of infection, namely \eqn{\dot{\theta}_t^I}. The black curve is the posterior mean of the derivative, and the vertical lines mark the times of turning points corresponding respectively to those shown in \code{plot_infection} and \code{plot_removed}. Moreover, the 95\% credible intervals of these turning points are also highlighted by semi-transparent rectangles.}
\item{first_tp_mean}{the date t at which \eqn{\ddot{\theta}_t^I=0}, calculated as the average of the time points with maximum posterior first-order derivatives \eqn{\dot{\theta}_t^I}; this value may be slightly different from the one labeled by the "darkgreen" lines in the two plots \code{plot_infection} and \code{plot_removed}, which indicate the stationary point such that the first-order derivative of the averaged posterior of \eqn{\theta_t^I} reaches its maximum.}
\item{first_tp_ci}{with \code{first_tp_mean}, it reports the corresponding credible interval and median.}
\item{second_tp_mean}{the date t at which \eqn{\dot{\theta}_t^I=0}, calculated as the average of the stationary points of all of the posterior first-order derivatives \eqn{\dot{\theta}_t^I}; this value may be slightly different from the one labeled by the "purple" lines in the plots \code{plot_infection} and \code{plot_removed}. The latter indicate the stationary t at which the first-order derivative of the averaged posterior of \eqn{\theta_t^I} equals zero.}
\item{second_tp_ci}{with \code{second_tp_mean}, it reports the corresponding credible interval and median.}
\item{dic_val}{the output of \code{dic.sample()} in \code{\link[rjags]{dic.sample}}, computing deviance information criterion for model comparison.}
}
\description{
Fit an extended state-space SIR model in which the susceptible compartment is reduced by in-home hospitalization (quarantine).
}
\details{
This function characterizes time-varying proportions of susceptible individuals due to government-enforced stringent in-home isolation. It expands the SIR model by adding a quarantine compartment with a time-varying quarantine rate \eqn{\phi_t}, the chance of a susceptible person being willing to take in-home isolation at time t.
}
\examples{
NI_complete <- c( 41,41,41,45,62,131,200,270,375,444,549, 729,
1052,1423,2714,3554,4903,5806,7153,9074,11177,
13522,16678,19665,22112,24953,27100,29631,31728,33366)
RI_complete <- c(1,1,7,10,14,20,25,31,34,45,55,71,94,121,152,213,
252,345,417,561,650,811,1017,1261,1485,1917,2260,
2725,3284,3754)
N=58.5e6
R <- RI_complete/N
Y <- NI_complete/N- R #Jan13->Feb 11
change_time <- c("01/23/2020","02/04/2020","02/08/2020")
phi0 <- c(0.1,0.4,0.4)
res.q <- qh.eSIR (Y,R,begin_str="01/13/2020",death_in_R = 0.4,
phi0=phi0,change_time=change_time,
casename="Hubei_q",save_files = T,save_mcmc = F,
M=5e2,nburnin = 2e2)
res.q$plot_infection
#res.q$plot_removed
res.noq <- qh.eSIR (Y,R,begin_str="01/13/2020",death_in_R = 0.4,
T_fin=200,casename="Hubei_noq",
M=5e2,nburnin = 2e2)
res.noq$plot_infection
}
|
##Plot 1
library(sqldf)
filename="household_power_consumption.txt"
## Read the file only for 1/2/2007 and 2/2/2007
data<-read.csv.sql(filename,sep=";",sql='select * from file where Date="1/2/2007" OR Date="2/2/2007"')
## Transform dates
data$Date<-as.Date(data$Date, "%d/%m/%Y")
## Create the Histogram
png("plot1.png")
hist(data$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
/Plot1.R
|
no_license
|
amitgupta0791/ExData_Plotting1
|
R
| false | false | 453 |
r
|
##Plot 1
library(sqldf)
filename="household_power_consumption.txt"
## Read the file only for 1/2/2007 and 2/2/2007
data<-read.csv.sql(filename,sep=";",sql='select * from file where Date="1/2/2007" OR Date="2/2/2007"')
## Transform dates
data$Date<-as.Date(data$Date, "%d/%m/%Y")
## Create the Histogram
png("plot1.png")
hist(data$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Phyloinfer.R
\name{mcmc_sampling}
\alias{mcmc_sampling}
\title{MCMC Sampling}
\usage{
mcmc_sampling(dataset, alg, nsamp, nburnin = 0, nsubsamp = 1, ngrid = 100,
nugget = "1,1", prec_alpha = 0.01, prec_beta = 0.01, TrjL = NULL,
Nleap = NULL, szkappa = NULL, rand_leap = NULL, f_init = rep(1, ngrid
- 1), kappa = 1, covariates = NULL, betas = rep(0, 2 +
length(covariates)), samp_alg = "none", kappa_alg = "gibbs",
beta_vars = rep(100, length(betas)), printevery = 100)
}
\arguments{
\item{dataset}{\code{phylo} object or list containing vectors of coalescent
times \code{coal_times}, sampling times \code{samp_times}, and number
sampled per sampling time \code{n_sampled}.}
\item{alg}{string selecting which MCMC sampler to use. Options are "HMC",
"splitHMC", "MALA", "aMALA", and "ESS".}
\item{nsamp}{integer number of MCMC steps to compute.}
\item{nburnin}{integer number of MCMC steps to discard as burn-in.}
\item{nsubsamp}{integer after burn-in, how often to record a step to the
output.}
\item{ngrid}{integer number of grid points in the latent field.}
\item{nugget}{string selecting which "nugget" adjustment to apply to the
precision matrix to make it full-rank. Options are '1,1' for an adjustment
to the first element, 'diag' for an adjustment to the entire main diagonal,
or 'none' which may result in a non-full-rank precision matrix.}
\item{prec_alpha, prec_beta}{numeric shape and rate parameters for the prior
on precision.}
\item{TrjL}{numeric tuning parameter.}
\item{Nleap}{integer tuning parameter.}
\item{szkappa}{numeric tuning parameter.}
\item{rand_leap}{logical tuning parameter.}
\item{f_init}{numeric vector starting log effective population size values.}
\item{kappa}{numeric starting kappa.}
\item{covariates}{list of functions representing covariate trajectories that
(may) influence sampling frequency.}
\item{betas}{numeric vector of starting values for the beta hyperparameters.}
\item{samp_alg}{string selecting sampling algorithm for sampling time
intensity coefficients. One of "none" (default), "fixed", "MH", and "ESS".}
\item{kappa_alg}{selects sampling algorithm for kappa. One of "gibbs"
(default) or "whiten".}
\item{beta_vars}{numeric vector prior variances of the beta hyperparameters.}
\item{printevery}{integer how many MCMC steps between writing output to the
console.}
}
\description{
MCMC Sampling
}
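\examples{
## Minimal illustrative sketch (not run). The toy coalescent data below are
## hypothetical and only demonstrate the call signature documented above.
\dontrun{
dat <- list(coal_times = c(0.5, 1.2, 2.0),
            samp_times = c(0, 0, 0, 1),
            n_sampled = c(1, 1, 1, 1))
fit <- mcmc_sampling(dat, alg = "ESS", nsamp = 1000, nburnin = 100)
}
}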
|
/man/mcmc_sampling.Rd
|
no_license
|
cran/phylodyn
|
R
| false | true | 2,465 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Phyloinfer.R
\name{mcmc_sampling}
\alias{mcmc_sampling}
\title{MCMC Sampling}
\usage{
mcmc_sampling(dataset, alg, nsamp, nburnin = 0, nsubsamp = 1, ngrid = 100,
nugget = "1,1", prec_alpha = 0.01, prec_beta = 0.01, TrjL = NULL,
Nleap = NULL, szkappa = NULL, rand_leap = NULL, f_init = rep(1, ngrid
- 1), kappa = 1, covariates = NULL, betas = rep(0, 2 +
length(covariates)), samp_alg = "none", kappa_alg = "gibbs",
beta_vars = rep(100, length(betas)), printevery = 100)
}
\arguments{
\item{dataset}{\code{phylo} object or list containing vectors of coalescent
times \code{coal_times}, sampling times \code{samp_times}, and number
sampled per sampling time \code{n_sampled}.}
\item{alg}{string selecting which MCMC sampler to use. Options are "HMC",
"splitHMC", "MALA", "aMALA", and "ESS".}
\item{nsamp}{integer number of MCMC steps to compute.}
\item{nburnin}{integer number of MCMC steps to discard as burn-in.}
\item{nsubsamp}{integer after burn-in, how often to record a step to the
output.}
\item{ngrid}{integer number of grid points in the latent field.}
\item{nugget}{string selecting which "nugget" adjustment to apply to the
precision matrix to make it full-rank. Options are '1,1' for an adjustment
to the first element, 'diag' for an adjustment to the entire main diagonal,
or 'none' which may result in a non-full-rank precision matrix.}
\item{prec_alpha, prec_beta}{numeric shape and rate parameters for the prior
on precision.}
\item{TrjL}{numeric tuning parameter.}
\item{Nleap}{integer tuning parameter.}
\item{szkappa}{numeric tuning parameter.}
\item{rand_leap}{logical tuning parameter.}
\item{f_init}{numeric vector starting log effective population size values.}
\item{kappa}{numeric starting kappa.}
\item{covariates}{list of functions representing covariate trajectories that
(may) influence sampling frequency.}
\item{betas}{numeric vector of starting values for the beta hyperparameters.}
\item{samp_alg}{string selecting sampling algorithm for sampling time
intensity coefficients. One of "none" (default), "fixed", "MH", and "ESS".}
\item{kappa_alg}{selects sampling algorithm for kappa. One of "gibbs"
(default) or "whiten".}
\item{beta_vars}{numeric vector prior variances of the beta hyperparameters.}
\item{printevery}{integer how many MCMC steps between writing output to the
console.}
}
\description{
MCMC Sampling
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doubleclickbidmanager_objects.R
\name{Query}
\alias{Query}
\title{Query Object}
\usage{
Query(metadata = NULL, params = NULL, queryId = NULL,
reportDataEndTimeMs = NULL, reportDataStartTimeMs = NULL,
schedule = NULL, timezoneCode = NULL)
}
\arguments{
\item{metadata}{Query metadata}
\item{params}{Query parameters}
\item{queryId}{Query ID}
\item{reportDataEndTimeMs}{The ending time for the data that is shown in the report}
\item{reportDataStartTimeMs}{The starting time for the data that is shown in the report}
\item{schedule}{Information on how often and when to run a query}
\item{timezoneCode}{Canonical timezone code for report data time}
}
\value{
Query object
}
\description{
Query Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Represents a query.
}
\seealso{
Other Query functions: \code{\link{queries.createquery}}
}
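\examples{
## Illustrative sketch only (not run): the field values below are placeholders,
## not real report parameters.
\dontrun{
q <- Query(queryId = "12345", timezoneCode = "America/New_York")
}
}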
|
/googledoubleclickbidmanagerv1.auto/man/Query.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false | true | 959 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doubleclickbidmanager_objects.R
\name{Query}
\alias{Query}
\title{Query Object}
\usage{
Query(metadata = NULL, params = NULL, queryId = NULL,
reportDataEndTimeMs = NULL, reportDataStartTimeMs = NULL,
schedule = NULL, timezoneCode = NULL)
}
\arguments{
\item{metadata}{Query metadata}
\item{params}{Query parameters}
\item{queryId}{Query ID}
\item{reportDataEndTimeMs}{The ending time for the data that is shown in the report}
\item{reportDataStartTimeMs}{The starting time for the data that is shown in the report}
\item{schedule}{Information on how often and when to run a query}
\item{timezoneCode}{Canonical timezone code for report data time}
}
\value{
Query object
}
\description{
Query Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Represents a query.
}
\seealso{
Other Query functions: \code{\link{queries.createquery}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistic_functions.R
\name{describeDataFrame}
\alias{describeDataFrame}
\title{A convenient method to describe a full data.frame.}
\usage{
describeDataFrame(
data,
variables = colnames(data),
applicable = NULL,
group = NULL,
group_str = NULL,
p_value = FALSE,
all = FALSE,
desc = c("Mean", "Median", "Range"),
round = 3,
confint = FALSE,
NA_asModality = FALSE,
NA_group_AsModality = FALSE
)
}
\arguments{
\item{data}{a data.frame containing the data to describe}
\item{variables}{a character vector containing the names of the columns to describe. Defaults to colnames(data).}
\item{applicable}{a list of boolean vectors generated by manageNotApplicable(). Used in case of non-applicable data.}
\item{group}{a character vector of length 1. The name of the factor column to use as the comparison group. Defaults to NULL.}
\item{group_str}{a numeric vector. The indices of the levels of the group variable to use. Defaults to NULL.}
\item{p_value}{a boolean. If TRUE, comparison tests are performed.}
\item{all}{a boolean. If TRUE, a total column will be displayed. Defaults to FALSE.}
\item{desc}{a character vector. Can contain "Mean", "Median", "Range" and/or "Mode".}
\item{round}{an integer, the maximal number of decimals. Defaults to 3.}
\item{confint}{a boolean. If TRUE, the confidence interval of the mean will be displayed. Defaults to FALSE.}
\item{NA_asModality}{a boolean. If TRUE, missing data of the factor variable to describe will be considered as levels.
Defaults to FALSE.}
\item{NA_group_AsModality}{a boolean. If TRUE, missing data of the group variable will be considered as levels. Defaults to FALSE.}
}
\value{
a data.frame containing the description of the variables
}
\description{
A convenient method to describe a full data.frame.
}
\examples{
data(mtcars)
labels <- data.frame(Variable = c("vs", "vs", "am", "am"),
Modality = c(0, 1, 0, 1),
Label = c("V-shaped", "Straight", "Automatic", "Manual"))
labelVariable <- data.frame(Variable = c("mpg", "cyl", "disp", "hp", "drat", "wt",
"qsec", "vs", "am", "gear", "carb"),
Label = c("Miles/(US) gallon", "Number of cylinders", "Displacement (cu.in.)",
"Gross horsepower ", "Rear axle ratio", "Weight (1000 lbs)",
"1/4 mile time", "Engine", "Transmission", "Number of forward gears",
"Number of carburetors"))
labelledData <- statsBordeaux::labellisationDataFrame(mtcars, labels)
labelledData <- statsBordeaux::setLabelToVariable(labelledData, labelVariable)
comparaison <- describeDataFrame(mtcars, group = "vs", p_value = TRUE)
}
|
/man/describeDataFrame.Rd
|
permissive
|
rgriffier/statsBordeaux
|
R
| false | true | 2,817 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistic_functions.R
\name{describeDataFrame}
\alias{describeDataFrame}
\title{A convenient method to describe a full data.frame.}
\usage{
describeDataFrame(
data,
variables = colnames(data),
applicable = NULL,
group = NULL,
group_str = NULL,
p_value = FALSE,
all = FALSE,
desc = c("Mean", "Median", "Range"),
round = 3,
confint = FALSE,
NA_asModality = FALSE,
NA_group_AsModality = FALSE
)
}
\arguments{
\item{data}{a data.frame containing the data to describe}
\item{variables}{a character vector containing the names of the columns to describe. Defaults to colnames(data).}
\item{applicable}{a list of boolean vectors generated by manageNotApplicable(). Used in case of non-applicable data.}
\item{group}{a character vector of length 1. The name of the factor column to use as the comparison group. Defaults to NULL.}
\item{group_str}{a numeric vector. The indices of the levels of the group variable to use. Defaults to NULL.}
\item{p_value}{a boolean. If TRUE, comparison tests are performed.}
\item{all}{a boolean. If TRUE, a total column will be displayed. Defaults to FALSE.}
\item{desc}{a character vector. Can contain "Mean", "Median", "Range" and/or "Mode".}
\item{round}{an integer, the maximal number of decimals. Defaults to 3.}
\item{confint}{a boolean. If TRUE, the confidence interval of the mean will be displayed. Defaults to FALSE.}
\item{NA_asModality}{a boolean. If TRUE, missing data of the factor variable to describe will be considered as levels.
Defaults to FALSE.}
\item{NA_group_AsModality}{a boolean. If TRUE, missing data of the group variable will be considered as levels. Defaults to FALSE.}
}
\value{
a data.frame containing the description of the variables
}
\description{
A convenient method to describe a full data.frame.
}
\examples{
data(mtcars)
labels <- data.frame(Variable = c("vs", "vs", "am", "am"),
Modality = c(0, 1, 0, 1),
Label = c("V-shaped", "Straight", "Automatic", "Manual"))
labelVariable <- data.frame(Variable = c("mpg", "cyl", "disp", "hp", "drat", "wt",
"qsec", "vs", "am", "gear", "carb"),
Label = c("Miles/(US) gallon", "Number of cylinders", "Displacement (cu.in.)",
"Gross horsepower ", "Rear axle ratio", "Weight (1000 lbs)",
"1/4 mile time", "Engine", "Transmission", "Number of forward gears",
"Number of carburetors"))
labelledData <- statsBordeaux::labellisationDataFrame(mtcars, labels)
labelledData <- statsBordeaux::setLabelToVariable(labelledData, labelVariable)
comparaison <- describeDataFrame(mtcars, group = "vs", p_value = TRUE)
}
|
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889883576688e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615828357-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 734 |
r
|
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889883576688e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
# File src/library/graphics/R/title.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
title <- function(main=NULL, sub=NULL, xlab=NULL, ylab=NULL,
line=NA, outer=FALSE, ...)
{
main <- as.graphicsAnnot(main)
sub <- as.graphicsAnnot(sub)
xlab <- as.graphicsAnnot(xlab)
ylab <- as.graphicsAnnot(ylab)
.External.graphics(C_title, main, sub, xlab, ylab, line, outer, ...)
invisible()
}
|
/bin/R-3.5.1/src/library/graphics/R/title.R
|
permissive
|
lifebit-ai/exomedepth
|
R
| false | false | 1,077 |
r
|
# File src/library/graphics/R/title.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
title <- function(main=NULL, sub=NULL, xlab=NULL, ylab=NULL,
line=NA, outer=FALSE, ...)
{
main <- as.graphicsAnnot(main)
sub <- as.graphicsAnnot(sub)
xlab <- as.graphicsAnnot(xlab)
ylab <- as.graphicsAnnot(ylab)
.External.graphics(C_title, main, sub, xlab, ylab, line, outer, ...)
invisible()
}
|
#' @title The First Iteration of the EM Step
#'
#' @description This function returns one iteration of the EM
#' step for Exercise 6.6.8. The initial estimate for the step
#' is the input vector theta0, while the outputs are the one-step
#' estimates as given on page 409 of the text.
#'
#' @param x a vector of quantiles.
#' @param theta0 a vector, first 2 elements are means, next 2
#' elements are standard deviations, and the last element is
#' the proportion of data from the first population
#'
#' @examples x <- c(5, 7, 2, 1,9)
#' theta0 <- c(.2, .6, .8, .9, .95)
#' mixnormal(x, theta0)
#'
#' @return returns a vector, with mu1, mu2, sig1, sig2 and p
#'
#' @references Hogg, R. McKean, J. Craig, A. (2018) Introduction to
#' Mathematical Statistics, 8th Ed. Boston: Pearson.
#'
#' @export mixnormal
mixnormal <- function(x, theta0) {
# checking arguments
errors <- makeAssertCollection()
    # argument 1: x
errors$push(is_numvector(x, 1))
errors$push(has_nonan(x, 1))
errors$push(has_noinf(x, 1))
# argument 2 theta0
errors$push(has_elements(theta0, 2, 5))
errors$push(has_noinf(theta0, 2))
errors$push(has_nonan(theta0, 2))
errors$push(is_numvector(theta0, 2))
reportAssertions(errors)
# theta0[3]
errors$push(is_nonzero(theta0[3], 2, "element 3 in argument 2 cannot be zero"))
errors$push(is_positive(theta0[3], 2, "element 3 in argument 2 must be positive"))
# theta0[4]
errors$push(is_nonzero(theta0[4], 2, "element 4 in argument 2 cannot be zero"))
errors$push(is_positive(theta0[4], 2, "element 4 in argument 2 must be positive"))
# theta0[5]
errors$push(is_xrange(theta0[5], 2, 0, 1, "element 5 in argument 2 must be greater than 0 and less than 1"))
reportAssertions(errors)
    # function starts: calculate gamma (posterior membership probabilities)
part1 <- (1 - theta0[5]) * dnorm(x, theta0[1], theta0[3])
part2 <- theta0[5] * dnorm(x, theta0[2], theta0[4])
gam <- part2/(part1 + part2)
    # calculate denominators (effective group sizes)
denom1 <- sum(1 - gam)
denom2 <- sum(gam)
# calculate mu1 and mu2
mu1 <- sum((1 - gam) * x)/denom1
mu2 <- sum(gam * x)/denom2
# calculate sig1 and sig2
sig1 <- sqrt(sum((1 - gam) * ((x - mu1)^2))/denom1)
sig2 <- sqrt(sum(gam * ((x - mu2)^2))/denom2)
    # calculate the mixing proportion p
p <- mean(gam)
mixnormal <- c(mu1, mu2, sig1, sig2, p)
return(mixnormal)
}
|
/R/mixnormal.R
|
no_license
|
joemckean/mathstat
|
R
| false | false | 2,352 |
r
|
#' @title The First Iteration of the EM Step
#'
#' @description This function returns one iteration of the EM
#' step for Exercise 6.6.8. The initial estimate for the step
#' is the input vector theta0, while the outputs are the one-step
#' estimates as given on page 409 of the text.
#'
#' @param x a vector of quantiles.
#' @param theta0 a vector, first 2 elements are means, next 2
#' elements are standard deviations, and the last element is
#' the proportion of data from the first population
#'
#' @examples x <- c(5, 7, 2, 1,9)
#' theta0 <- c(.2, .6, .8, .9, .95)
#' mixnormal(x, theta0)
#'
#' @return returns a vector, with mu1, mu2, sig1, sig2 and p
#'
#' @references Hogg, R. McKean, J. Craig, A. (2018) Introduction to
#' Mathematical Statistics, 8th Ed. Boston: Pearson.
#'
#' @export mixnormal
mixnormal <- function(x, theta0) {
# checking arguments
errors <- makeAssertCollection()
    # argument 1: x
errors$push(is_numvector(x, 1))
errors$push(has_nonan(x, 1))
errors$push(has_noinf(x, 1))
# argument 2 theta0
errors$push(has_elements(theta0, 2, 5))
errors$push(has_noinf(theta0, 2))
errors$push(has_nonan(theta0, 2))
errors$push(is_numvector(theta0, 2))
reportAssertions(errors)
# theta0[3]
errors$push(is_nonzero(theta0[3], 2, "element 3 in argument 2 cannot be zero"))
errors$push(is_positive(theta0[3], 2, "element 3 in argument 2 must be positive"))
# theta0[4]
errors$push(is_nonzero(theta0[4], 2, "element 4 in argument 2 cannot be zero"))
errors$push(is_positive(theta0[4], 2, "element 4 in argument 2 must be positive"))
# theta0[5]
errors$push(is_xrange(theta0[5], 2, 0, 1, "element 5 in argument 2 must be greater than 0 and less than 1"))
reportAssertions(errors)
    # function starts: calculate gamma (posterior membership probabilities)
part1 <- (1 - theta0[5]) * dnorm(x, theta0[1], theta0[3])
part2 <- theta0[5] * dnorm(x, theta0[2], theta0[4])
gam <- part2/(part1 + part2)
    # calculate denominators (effective group sizes)
denom1 <- sum(1 - gam)
denom2 <- sum(gam)
# calculate mu1 and mu2
mu1 <- sum((1 - gam) * x)/denom1
mu2 <- sum(gam * x)/denom2
# calculate sig1 and sig2
sig1 <- sqrt(sum((1 - gam) * ((x - mu1)^2))/denom1)
sig2 <- sqrt(sum(gam * ((x - mu2)^2))/denom2)
    # calculate the mixing proportion p
p <- mean(gam)
mixnormal <- c(mu1, mu2, sig1, sig2, p)
return(mixnormal)
}
|
library(BART)
### Name: mc.surv.pwbart
### Title: Predicting new observations with a previously fitted BART model
### Aliases: surv.pwbart mc.surv.pwbart recur.pwbart mc.recur.pwbart
### Keywords: nonparametric tree regression nonlinear
### ** Examples
## load the advanced lung cancer example
data(lung)
group <- -which(is.na(lung[ , 7])) ## remove missing row for ph.karno
times <- lung[group, 2] ##lung$time
delta <- lung[group, 3]-1 ##lung$status: 1=censored, 2=dead
##delta: 0=censored, 1=dead
## this study reports time in days rather than months like other studies
## coarsening from days to months will reduce the computational burden
times <- ceiling(times/30)
summary(times)
table(delta)
x.train <- as.matrix(lung[group, c(4, 5, 7)]) ## matrix of observed covariates
## lung$age: Age in years
## lung$sex: Male=1 Female=2
## lung$ph.karno: Karnofsky performance score (dead=0:normal=100:by=10)
## rated by physician
dimnames(x.train)[[2]] <- c('age(yr)', 'M(1):F(2)', 'ph.karno(0:100:10)')
summary(x.train[ , 1])
table(x.train[ , 2])
table(x.train[ , 3])
x.test <- matrix(nrow=84, ncol=3) ## matrix of covariate scenarios
dimnames(x.test)[[2]] <- dimnames(x.train)[[2]]
i <- 1
for(age in 5*(9:15)) for(sex in 1:2) for(ph.karno in 10*(5:10)) {
x.test[i, ] <- c(age, sex, ph.karno)
i <- i+1
}
## this x.test is relatively small, but often you will want to
## predict for a large x.test matrix which may cause problems
## due to consumption of RAM so we can predict separately
## mcparallel/mccollect do not exist on windows
if(.Platform$OS.type=='unix') {
##test BART with token run to ensure installation works
set.seed(99)
post <- surv.bart(x.train=x.train, times=times, delta=delta, nskip=5, ndpost=5, keepevery=1)
pre <- surv.pre.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
pred <- mc.surv.pwbart(pre$tx.test, post$treedraws, post$binaryOffset)
}
## Not run:
##D ## run one long MCMC chain in one process
##D set.seed(99)
##D post <- surv.bart(x.train=x.train, times=times, delta=delta)
##D
##D ## run "mc.cores" number of shorter MCMC chains in parallel processes
##D ## post <- mc.surv.bart(x.train=x.train, times=times, delta=delta,
##D ## mc.cores=8, seed=99)
##D
##D pre <- surv.pre.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
##D
##D pred <- surv.pwbart(pre$tx.test, post$treedraws, post$binaryOffset)
##D
##D ## let's look at some survival curves
##D ## first, a younger group with a healthier KPS
##D ## age 50 with KPS=90: males and females
##D ## males: row 17, females: row 23
##D x.test[c(17, 23), ]
##D
##D low.risk.males <- 16*post$K+1:post$K ## K=unique times including censoring
##D low.risk.females <- 22*post$K+1:post$K
##D
##D plot(post$times, pred$surv.test.mean[low.risk.males], type='s', col='blue',
##D main='Age 50 with KPS=90', xlab='t', ylab='S(t)', ylim=c(0, 1))
##D points(post$times, pred$surv.test.mean[low.risk.females], type='s', col='red')
##D
## End(Not run)
|
/data/genthat_extracted_code/BART/examples/mc.surv.pwbart.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 3,086 |
r
|
library(BART)
### Name: mc.surv.pwbart
### Title: Predicting new observations with a previously fitted BART model
### Aliases: surv.pwbart mc.surv.pwbart recur.pwbart mc.recur.pwbart
### Keywords: nonparametric tree regression nonlinear
### ** Examples
## load the advanced lung cancer example
data(lung)
group <- -which(is.na(lung[ , 7])) ## remove missing row for ph.karno
times <- lung[group, 2] ##lung$time
delta <- lung[group, 3]-1 ##lung$status: 1=censored, 2=dead
##delta: 0=censored, 1=dead
## this study reports time in days rather than months like other studies
## coarsening from days to months will reduce the computational burden
times <- ceiling(times/30)
summary(times)
table(delta)
x.train <- as.matrix(lung[group, c(4, 5, 7)]) ## matrix of observed covariates
## lung$age: Age in years
## lung$sex: Male=1 Female=2
## lung$ph.karno: Karnofsky performance score (dead=0:normal=100:by=10)
## rated by physician
dimnames(x.train)[[2]] <- c('age(yr)', 'M(1):F(2)', 'ph.karno(0:100:10)')
summary(x.train[ , 1])
table(x.train[ , 2])
table(x.train[ , 3])
x.test <- matrix(nrow=84, ncol=3) ## matrix of covariate scenarios
dimnames(x.test)[[2]] <- dimnames(x.train)[[2]]
i <- 1
for(age in 5*(9:15)) for(sex in 1:2) for(ph.karno in 10*(5:10)) {
x.test[i, ] <- c(age, sex, ph.karno)
i <- i+1
}
## this x.test is relatively small, but often you will want to
## predict for a large x.test matrix which may cause problems
## due to consumption of RAM so we can predict separately
## mcparallel/mccollect do not exist on windows
if(.Platform$OS.type=='unix') {
##test BART with token run to ensure installation works
set.seed(99)
post <- surv.bart(x.train=x.train, times=times, delta=delta, nskip=5, ndpost=5, keepevery=1)
pre <- surv.pre.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
pred <- mc.surv.pwbart(pre$tx.test, post$treedraws, post$binaryOffset)
}
## Not run:
##D ## run one long MCMC chain in one process
##D set.seed(99)
##D post <- surv.bart(x.train=x.train, times=times, delta=delta)
##D
##D ## run "mc.cores" number of shorter MCMC chains in parallel processes
##D ## post <- mc.surv.bart(x.train=x.train, times=times, delta=delta,
##D ## mc.cores=8, seed=99)
##D
##D pre <- surv.pre.bart(x.train=x.train, times=times, delta=delta, x.test=x.test)
##D
##D pred <- surv.pwbart(pre$tx.test, post$treedraws, post$binaryOffset)
##D
##D ## let's look at some survival curves
##D ## first, a younger group with a healthier KPS
##D ## age 50 with KPS=90: males and females
##D ## males: row 17, females: row 23
##D x.test[c(17, 23), ]
##D
##D low.risk.males <- 16*post$K+1:post$K ## K=unique times including censoring
##D low.risk.females <- 22*post$K+1:post$K
##D
##D plot(post$times, pred$surv.test.mean[low.risk.males], type='s', col='blue',
##D main='Age 50 with KPS=90', xlab='t', ylab='S(t)', ylim=c(0, 1))
##D points(post$times, pred$surv.test.mean[low.risk.females], type='s', col='red')
##D
## End(Not run)
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/soft_tissue.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.6,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/soft_tissue/soft_tissue_068.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
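# Illustrative follow-up (not part of the original pipeline): coefficients at the
# cross-validation-selected penalty lambda.min.
coef(glm, s = "lambda.min")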
|
/Model/EN/Correlation/soft_tissue/soft_tissue_068.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 375 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/soft_tissue.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.6,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/soft_tissue/soft_tissue_068.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Data analyzed by Guttal et al in 2008 Eco Let paper
# Note that data is not assumed to contain a warning signal, as Sandusky Bay in
# Lake Erie has not exhibited a crash / eutrophication yet.
# Care to make a prediction about its phosphorus levels?
# (Even so, these may not be expected to oscillate, but could rather be the
# constant, linear driver of a transition...)
# Note that data is not sampled at constant interval
require(warningsignals)
lake <- read.csv("../data/SanduskyData.csv")
phospho <- data.frame(time = lake[[1]], P=lake[[6]])
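# The sampling times are irregular, as noted above. As a quick illustrative check
# (assuming the time column is numeric), one could inspect the gaps between
# consecutive observations, e.g.:
# summary(diff(phospho$time))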
|
/demo/SanduskyData.R
|
no_license
|
cboettig/warningsignals
|
R
| false | false | 557 |
r
|
# Data analyzed by Guttal et al in 2008 Eco Let paper
# Note that data is not assumed to contain a warning signal, as Sandusky Bay in
# Lake Erie has not exhibited a crash / eutrophication yet.
# Care to make a prediction about its phosphorus levels?
# (Even so, these may not be expected to oscillate, but could rather be the
# constant, linear driver of a transition...)
# Note that data is not sampled at constant interval
require(warningsignals)
lake <- read.csv("../data/SanduskyData.csv")
phospho <- data.frame(time = lake[[1]], P=lake[[6]])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dlm_model.R
\name{make_y}
\alias{make_y}
\title{make y vectors for calibration}
\usage{
make_y(file_path)
}
\arguments{
\item{file_path}{route file path}
}
\value{
dataframe of observations
}
\description{
make y vectors for calibration
}
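\examples{
## Illustrative sketch only (not run): the route file path below is hypothetical.
\dontrun{
obs <- make_y("data/route_file.csv")
head(obs)
}
}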
|
/man/make_y.Rd
|
permissive
|
issactoast/ikhyd
|
R
| false | true | 317 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dlm_model.R
\name{make_y}
\alias{make_y}
\title{make y vectors for calibration}
\usage{
make_y(file_path)
}
\arguments{
\item{file_path}{route file path}
}
\value{
dataframe of observations
}
\description{
make y vectors for calibration
}
|
source("../../Functions/networkFunctions-extras-20.R")
source("../../Functions/labelPoints2-01.R");
source("../../Functions/heatmap.wg.R");
source("../../Functions/outlierRemovalFunctions.R")
source("../../Functions/preprocessing-General-013.R")
source("../../Functions/GNVFunctions-015.R")
source("../../Functions/individualAnalysis-General-007-02.R");
#dir.create("RData", recursive = TRUE);
dir.create("Results", recursive = TRUE);
#dir.create("Plots", recursive = TRUE);
library(anRichment)
# Load data
#====================================================================================================
#
# Evaluate concordance with our own WGCNA modules.
#
#====================================================================================================
refNet = read.csv("../../030-NetworkAnalysis/Results/networkAnalysisResults-Liver.sub.csv.gz", check.names = FALSE)
eLabels = loadAsList("../../030-NetworkAnalysis/RData/eLabels.RData")[[1]][["Liver.sub"]]$data
modData = loadAsList("../../030-NetworkAnalysis/RData/labels.x.etc.RData");
labelTranslationForColor = data.frame(label = sort(unique(refNet$module)),
colorLabel = modData$subLabelsForColors[[1]]$data[ match(sort(unique(refNet$module)), refNet$module)]);
nGenes = nrow(refNet)
inSameModule = outer(refNet$module, refNet$module, `==`) + 0;
inModule = as.numeric(inSameModule * matrix(refNet$module, nGenes, nGenes));
# Read BioGRID PPI network
bgNodes = read.csv(gzfile("../../Data/Annotation/PPI/BioGRID-physical-mouse-4.0.189-nodes.csv.gz"))
bgEdges = read.csv(gzfile("../../Data/Annotation/PPI/BioGRID-physical-mouse-4.0.189-edges.csv.gz"))
dup = duplicated(spaste(bgEdges[[1]], "-", bgEdges[[2]]));
bgEdges = bgEdges[!dup, ];
table(bgNodes$entrez %in% refNet$Entrez)
bgNodes2 = bgNodes[bgNodes$entrez %in% refNet$Entrez, ];
bgEdges2 = bgEdges[ bgEdges$index.1 %in% bgNodes2$index & bgEdges$index.2 %in% bgNodes2$index, ];
bgNodes2$index.expr = match(bgNodes2$entrez, refNet$Entrez)
bgEdges2$index.expr.1 = translateUsingTable(bgEdges2$index.1, bgNodes2[c("index", "index.expr")]);
bgEdges2$index.expr.2 = translateUsingTable(bgEdges2$index.2, bgNodes2[c("index", "index.expr")]);
bgMat = matrix(0, nGenes, nGenes);
bgMat[as.matrix(bgEdges2[c("index.expr.1", "index.expr.2")])] = 1;
sum(bgMat)/length(bgMat)
#[1] 0.0001435566
max(bgMat - t(bgMat)) ## The matrix is not symmetric.
# [1] 1
sum(bgMat==1 & t(bgMat)==1)/length(bgMat)
# [1] 1.1419e-05
## It's also not approximately symmetric: only less than 1/10 of the non-zero entries have a corresponding transpose
## non-zero.
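## Aside (not used in the analysis below): if a symmetric interaction indicator
## were preferred, one option would be the element-wise maximum with the transpose,
## e.g. bgMat.sym = pmax(bgMat, t(bgMat))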
bgMat.flat = as.numeric(bgMat)
ids = 1:length(inModule)
lst = list(`BioGRID interactions` = ids[as.logical(bgMat)])
bgCollection = collectionFromGeneLists(entrez = lst, organism = "mouse");
gc();
# Run enrichment analysis
moduleLevels = setdiff(sort(unique(refNet$module)), 0);
nModLevels = length(moduleLevels)
overlapSizes = numeric(nModLevels);
modSizes = numeric(nModLevels);
modNGenes = sapply(moduleLevels, function(m) sum(refNet$module==m))
for (m in 1:nModLevels)
{
printFlush(moduleLevels[m]);
overlapSizes[m] = sum(inModule== moduleLevels[m] & bgMat.flat==1);
modSizes[m] = modNGenes[m] * (modNGenes[m] - 1)
#if (m%%5==0) print(gc());
}
pValues = lpValues = numeric(nModLevels)
n.bg = sum(bgMat.flat==1);
nAll = nGenes * (nGenes-1)
for (m in 1:nModLevels)
{
pValues[m] = phyper(overlapSizes[m]-1, m = n.bg, n = nAll - n.bg, k = modSizes[m], lower.tail = FALSE, log.p = FALSE)
lpValues[m] = phyper(overlapSizes[m]-1, m = n.bg, n = nAll - n.bg, k = modSizes[m], lower.tail = FALSE, log.p = TRUE)
}
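## Illustrative aside (not added to the results table): multiplicity-adjusted
## p-values could be obtained with, e.g., p.adjust(pValues, method = "BH").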
enrTab = data.frame.ncn(Module = moduleLevels, `Module size` = modNGenes,
`BioGRID ln P-value` = lpValues, `BioGRID p-value` = pValues,
#nPotentialInteractions = modSizes,
`BioGRID overlap size` = overlapSizes,
`BioGRID expected overlap size` = modSizes * n.bg/nAll,
`BioGRID enrichment ratio` = overlapSizes/(modSizes * n.bg/nAll))
enrTab.BG = enrTab;
write.csv.nr(signifNumeric(enrTab, 3), file = "Results/enrichmentOfMouseLiverModulesInBioGRIDInteractions.csv");
#=======================================================================================================================
#
# Run a similar analysis for STRING
#
#=======================================================================================================================
# Read STRING network
stringNodes = read.csv(gzfile("../../Data/Annotation/PPI/STRING-PPI-mouse-v11.0-nodes.csv.gz"))
stringEdges = read.csv(gzfile("../../Data/Annotation/PPI/STRING-PPI-mouse-v11.0-edges.csv.gz"));
dup = duplicated(spaste(stringEdges[[1]], "-", stringEdges[[2]]));
if (any(dup)) stringEdges = stringEdges[!dup, ];
sum(stringNodes$Entrez %in% refNet$Entrez)
stringNodes2 = stringNodes[stringNodes$Entrez %in% refNet$Entrez, ];
stringEdges2 = stringEdges[ stringEdges$Index.1 %in% stringNodes2$Index & stringEdges$Index.2 %in% stringNodes2$Index, ];
stringNodes2$Index.expr = match(stringNodes2$Entrez, refNet$Entrez)
stringEdges2$Index.expr.1 = translateUsingTable(stringEdges2$Index.1, stringNodes2[c("Index", "Index.expr")]);
stringEdges2$Index.expr.2 = translateUsingTable(stringEdges2$Index.2, stringNodes2[c("Index", "Index.expr")]);
stringMat = matrix(0, nGenes, nGenes);
stringMat[as.matrix(stringEdges2[c("Index.expr.1", "Index.expr.2")])] = stringEdges2$combined_score;
sum(stringMat>0)/length(stringMat)
# [1] 0.0251522
max(stringMat - t(stringMat)) ## The matrix is not symmetric.
# [1] 756
stringMat.flat = as.numeric(stringMat)
thresholds = c(1, 400, 600, 800);
ids = 1:length(inModule)
lst = lapply(thresholds, function(th) ids[stringMat.flat >= th])
names(lst) = spaste("STRING (confidence >= ", thresholds, ")");
stringCollection = collectionFromGeneLists(entrez = lst, organism = "mouse");
gc();
nThresholds = length(thresholds);
# Run enrichment analysis
moduleLevels = setdiff(sort(unique(refNet$module)), 0);
nModLevels = length(moduleLevels)
overlapSizes = listRep(numeric(nModLevels), nThresholds);
modSizes = numeric(nModLevels);
modNGenes = sapply(moduleLevels, function(m) sum(refNet$module==m))
n.bg = numeric(nThresholds);
for (th in 1:nThresholds)
{
n.bg[th] = sum(stringMat.flat>=thresholds[th]);
for (m in 1:nModLevels)
{
printFlush(moduleLevels[m]);
overlapSizes[[th]][m] = sum(inModule== moduleLevels[m] & stringMat.flat >= thresholds[th]);
modSizes[m] = modNGenes[m] * (modNGenes[m] - 1)
#if (m%%5==0) print(gc());
}
gc();
}
pValues = lpValues = matrix(NA, nModLevels, nThresholds)
nAll = nGenes * (nGenes-1)
for (th in 1:nThresholds) for (m in 1:nModLevels)
{
pValues[m, th] = phyper(overlapSizes[[th]][m]-1, m = n.bg[th], n = nAll - n.bg[th], k = modSizes[m],
lower.tail = FALSE, log.p = FALSE)
lpValues[m, th] = phyper(overlapSizes[[th]][m]-1, m = n.bg[th], n = nAll - n.bg[th], k = modSizes[m],
lower.tail = FALSE, log.p = TRUE)
}
colnames(pValues) = spaste("STRING p-value at threshold ", thresholds);
colnames(lpValues) = spaste("STRING log p-value at threshold ", thresholds);
overlapSizes.mat = do.call(cbind, overlapSizes);
colnames(overlapSizes.mat) = spaste("STRING overlap at threshold ", thresholds);
expected.mat = outer(modSizes, n.bg, `*`)/nAll
colnames(expected.mat) = spaste("STRING expected overlap at threshold ", thresholds);
enrichmentRatio.mat = overlapSizes.mat/expected.mat;
colnames(enrichmentRatio.mat) = spaste("STRING enrichment ratio at threshold ", thresholds);
enrTab = data.frame.ncn(Module = moduleLevels,
`Module size` = modNGenes,
interleave(list(lpValues, pValues, overlapSizes.mat, expected.mat, enrichmentRatio.mat), nameBase = rep("", 5), sep = ""));
dir.create("Results", recursive = TRUE);
write.csv.nr(signifNumeric(enrTab, 3), file = "Results/enrichmentOfMouseLiverModulesInSTRINGInteractions.csv");
|
/210-EnrichmentInPPINetworks/010-MouseLiver/010-enrichmentInPPINetworks.R
|
no_license
|
plangfelder/Core-liver-homeostatic-networks
|
R
| false | false | 7,961 |
r
|
source("../../Functions/networkFunctions-extras-20.R")
source("../../Functions/labelPoints2-01.R");
source("../../Functions/heatmap.wg.R");
source("../../Functions/outlierRemovalFunctions.R")
source("../../Functions/preprocessing-General-013.R")
source("../../Functions/GNVFunctions-015.R")
source("../../Functions/individualAnalysis-General-007-02.R");
#dir.create("RData", recursive = TRUE);
dir.create("Results", recursive = TRUE);
#dir.create("Plots", recursive = TRUE);
library(anRichment)
# Load data
#====================================================================================================
#
# Evaluate concordance with our own WGCNA modules.
#
#====================================================================================================
refNet = read.csv("../../030-NetworkAnalysis/Results/networkAnalysisResults-Liver.sub.csv.gz", check.names = FALSE)
eLabels = loadAsList("../../030-NetworkAnalysis/RData/eLabels.RData")[[1]][["Liver.sub"]]$data
modData = loadAsList("../../030-NetworkAnalysis/RData/labels.x.etc.RData");
labelTranslationForColor = data.frame(label = sort(unique(refNet$module)),
colorLabel = modData$subLabelsForColors[[1]]$data[ match(sort(unique(refNet$module)), refNet$module)]);
nGenes = nrow(refNet)
inSameModule = outer(refNet$module, refNet$module, `==`) + 0;
inModule = as.numeric(inSameModule * matrix(refNet$module, nGenes, nGenes));
# Read BioGRID PPI network
bgNodes = read.csv(gzfile("../../Data/Annotation/PPI/BioGRID-physical-mouse-4.0.189-nodes.csv.gz"))
bgEdges = read.csv(gzfile("../../Data/Annotation/PPI/BioGRID-physical-mouse-4.0.189-edges.csv.gz"))
dup = duplicated(spaste(bgEdges[[1]], "-", bgEdges[[2]]));
bgEdges = bgEdges[!dup, ];
table(bgNodes$entrez %in% refNet$Entrez)
bgNodes2 = bgNodes[bgNodes$entrez %in% refNet$Entrez, ];
bgEdges2 = bgEdges[ bgEdges$index.1 %in% bgNodes2$index & bgEdges$index.2 %in% bgNodes2$index, ];
bgNodes2$index.expr = match(bgNodes2$entrez, refNet$Entrez)
bgEdges2$index.expr.1 = translateUsingTable(bgEdges2$index.1, bgNodes2[c("index", "index.expr")]);
bgEdges2$index.expr.2 = translateUsingTable(bgEdges2$index.2, bgNodes2[c("index", "index.expr")]);
bgMat = matrix(0, nGenes, nGenes);
bgMat[as.matrix(bgEdges2[c("index.expr.1", "index.expr.2")])] = 1;
sum(bgMat)/length(bgMat)
#[1] 0.0001435566
max(bgMat - t(bgMat)) ## The matrix is not symmetric.
# [1] 1
sum(bgMat==1 & t(bgMat)==1)/length(bgMat)
# [1] 1.1419e-05
## It's also not approximately symmetric: only less than 1/10 of the non-zero entries have a corresponding transpose
## non-zero.
bgMat.flat = as.numeric(bgMat)
ids = 1:length(inModule)
lst = list(`BioGRID interactions` = ids[as.logical(bgMat)])
bgCollection = collectionFromGeneLists(entrez = lst, organism = "mouse");
gc();
# Run enrichment analysis
moduleLevels = setdiff(sort(unique(refNet$module)), 0);
nModLevels = length(moduleLevels)
overlapSizes = numeric(nModLevels);
modSizes = numeric(nModLevels);
modNGenes = sapply(moduleLevels, function(m) sum(refNet$module==m))
for (m in 1:nModLevels)
{
printFlush(moduleLevels[m]);
overlapSizes[m] = sum(inModule== moduleLevels[m] & bgMat.flat==1);
modSizes[m] = modNGenes[m] * (modNGenes[m] - 1)
#if (m%%5==0) print(gc());
}
pValues = lpValues = numeric(nModLevels)
n.bg = sum(bgMat.flat==1);
nAll = nGenes * (nGenes-1)
for (m in 1:nModLevels)
{
pValues[m] = phyper(overlapSizes[m]-1, m = n.bg, n = nAll - n.bg, k = modSizes[m], lower.tail = FALSE, log.p = FALSE)
lpValues[m] = phyper(overlapSizes[m]-1, m = n.bg, n = nAll - n.bg, k = modSizes[m], lower.tail = FALSE, log.p = TRUE)
}
enrTab = data.frame.ncn(Module = moduleLevels, `Module size` = modNGenes,
`BioGRID ln P-value` = lpValues, `BioGRID p-value` = pValues,
#nPotentialInteractions = modSizes,
`BioGRID overlap size` = overlapSizes,
`BioGRID expected overlap size` = modSizes * n.bg/nAll,
`BioGRID enrichment ratio` = overlapSizes/(modSizes * n.bg/nAll))
enrTab.BG = enrTab;
write.csv.nr(signifNumeric(enrTab, 3), file = "Results/enrichmentOfMouseLiverModulesInBioGRIDInteractions.csv");
#=======================================================================================================================
#
# Run a similar analysis for STRING
#
#=======================================================================================================================
# Read STRING network
stringNodes = read.csv(gzfile("../../Data/Annotation/PPI/STRING-PPI-mouse-v11.0-nodes.csv.gz"))
stringEdges = read.csv(gzfile("../../Data/Annotation/PPI/STRING-PPI-mouse-v11.0-edges.csv.gz"));
dup = duplicated(spaste(stringEdges[[1]], "-", stringEdges[[2]]));
if (any(dup)) stringEdges = stringEdges[!dup, ];
sum(stringNodes$Entrez %in% refNet$Entrez)
stringNodes2 = stringNodes[stringNodes$Entrez %in% refNet$Entrez, ];
stringEdges2 = stringEdges[ stringEdges$Index.1 %in% stringNodes2$Index & stringEdges$Index.2 %in% stringNodes2$Index, ];
stringNodes2$Index.expr = match(stringNodes2$Entrez, refNet$Entrez)
stringEdges2$Index.expr.1 = translateUsingTable(stringEdges2$Index.1, stringNodes2[c("Index", "Index.expr")]);
stringEdges2$Index.expr.2 = translateUsingTable(stringEdges2$Index.2, stringNodes2[c("Index", "Index.expr")]);
stringMat = matrix(0, nGenes, nGenes);
stringMat[as.matrix(stringEdges2[c("Index.expr.1", "Index.expr.2")])] = stringEdges2$combined_score;
sum(stringMat>0)/length(stringMat)
# [1] 0.0251522
max(stringMat - t(stringMat)) ## The matrix is not symmetric.
# [1] 756
stringMat.flat = as.numeric(stringMat)
thresholds = c(1, 400, 600, 800);
ids = 1:length(inModule)
lst = lapply(thresholds, function(th) ids[stringMat.flat >= th])
names(lst) = spaste("STRING (confidence >= ", thresholds, ")");
stringCollection = collectionFromGeneLists(entrez = lst, organism = "mouse");
gc();
nThresholds = length(thresholds);
# Run enrichment analysis
moduleLevels = setdiff(sort(unique(refNet$module)), 0);
nModLevels = length(moduleLevels)
overlapSizes = listRep(numeric(nModLevels), nThresholds);
modSizes = numeric(nModLevels);
modNGenes = sapply(moduleLevels, function(m) sum(refNet$module==m))
n.bg = numeric(nThresholds);
for (th in 1:nThresholds)
{
n.bg[th] = sum(stringMat.flat>=thresholds[th]);
for (m in 1:nModLevels)
{
printFlush(moduleLevels[m]);
overlapSizes[[th]][m] = sum(inModule== moduleLevels[m] & stringMat.flat >= thresholds[th]);
modSizes[m] = modNGenes[m] * (modNGenes[m] - 1)
#if (m%%5==0) print(gc());
}
gc();
}
pValues = lpValues = matrix(NA, nModLevels, nThresholds)
nAll = nGenes * (nGenes-1)
for (th in 1:nThresholds) for (m in 1:nModLevels)
{
pValues[m, th] = phyper(overlapSizes[[th]][m]-1, m = n.bg[th], n = nAll - n.bg[th], k = modSizes[m],
lower.tail = FALSE, log.p = FALSE)
lpValues[m, th] = phyper(overlapSizes[[th]][m]-1, m = n.bg[th], n = nAll - n.bg[th], k = modSizes[m],
lower.tail = FALSE, log.p = TRUE)
}
colnames(pValues) = spaste("STRING p-value at threshold ", thresholds);
colnames(lpValues) = spaste("STRING log p-value at threshold ", thresholds);
overlapSizes.mat = do.call(cbind, overlapSizes);
colnames(overlapSizes.mat) = spaste("STRING overlap at threshold ", thresholds);
expected.mat = outer(modSizes, n.bg, `*`)/nAll
colnames(expected.mat) = spaste("STRING expected overlap at threshold ", thresholds);
enrichmentRatio.mat = overlapSizes.mat/expected.mat;
colnames(enrichmentRatio.mat) = spaste("STRING enrichment ratio at threshold ", thresholds);
enrTab = data.frame.ncn(Module = moduleLevels,
`Module size` = modNGenes,
interleave(list(lpValues, pValues, overlapSizes.mat, expected.mat, enrichmentRatio.mat), nameBase = rep("", 5), sep = ""));
dir.create("Results", recursive = TRUE);
write.csv.nr(signifNumeric(enrTab, 3), file = "Results/enrichmentOfMouseLiverModulesInSTRINGInteractions.csv");
|
# downsample.R
# Aug 6, 2015
# 11:32:47 AM
#
# Author: yaping
###############################################################################
covarInfo=NULL
sampleSize=NULL
for (e in commandArgs(TRUE)) {
ta = strsplit(e,"=",fixed=TRUE)
if(! is.na(ta[[1]][2])) {
if(ta[[1]][1] == "wd"){
wd<-ta[[1]][2]
}
if(ta[[1]][1] == "sampleSize"){
sampleSize<-as.numeric(ta[[1]][2])
}
if(ta[[1]][1] == "snpInfo"){
snpInfo<-ta[[1]][2]
}
if(ta[[1]][1] == "exprInfo"){
exprInfo<-ta[[1]][2]
}
if(ta[[1]][1] == "covarInfo"){
covarInfo<-ta[[1]][2]
}
}
}
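# Example invocation (hypothetical paths/file names):
#   Rscript downsample.R wd=/path/to/data snpInfo=snp.txt exprInfo=expr.txt covarInfo=cov.txt sampleSize=100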
setwd(wd)
snp<-read.table(snpInfo,sep="\t",header=T)
expr<-read.table(exprInfo,sep="\t",header=T)
if(!is.null(covarInfo)){
cov<-read.table(covarInfo,sep="\t",header=T)
}
if(is.null(sampleSize)){
sampleSize<-dim(snp)[2]-1
}
random_1_names<-sample(colnames(snp)[2:length(snp[1,])],sampleSize)
snp_1<-snp[,colnames(snp) %in% random_1_names]
expr_1<-expr[,colnames(expr) %in% random_1_names]
if(!is.null(covarInfo)){
cov_1<-cov[,colnames(cov) %in% random_1_names]
cov_1<-cbind(cov[,1],cov_1)
colnames(cov_1)[1]<-"id"
cov_random=paste(covarInfo,".SampleSize-",sampleSize,".txt",sep="")
write.table(cov_1,cov_random,sep="\t",quote =F, row.names=F,col.names =T)
}else{
cov_random=NULL
}
snp_1<-cbind(snp[,1],snp_1)
expr_1<-cbind(expr[,1],expr_1)
# NOTE: snp_loc and expr_loc (SNP/gene location tables) are assumed to be defined
# before this point; they are not created anywhere in this script.
snp_1<-snp_1[snp_1[,1] %in% snp_loc[,1],]
expr_1<-expr_1[expr_1[,1] %in% expr_loc[,1],]
colnames(snp_1)[1]<-"id"
colnames(expr_1)[1]<-"id"
snp_random=paste(snpInfo,".SampleSize-",sampleSize,".txt",sep="")
expr_random=paste(exprInfo,".SampleSize-",sampleSize,".txt",sep="")
write.table(snp_1,snp_random,sep="\t",quote =F, row.names=F,col.names =T)
write.table(expr_1,expr_random,sep="\t",quote =F, row.names=F,col.names =T)
|
/R/downsample.R
|
permissive
|
dnaase/QRF
|
R
| false | false | 1,747 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{supplystack}
\alias{supplystack}
\title{supplystack Constructor}
\usage{
supplystack(p, q, nms = NULL)
}
\arguments{
\item{p}{Numeric vector. Represents cost or price for each producer. Alternatively, a matrix in which column values belong to a single producer; rows represent different cost components.}
\item{q}{Numeric vector. Represents quantity for each producer.}
\item{nms}{Character vector (optional). Producer names for visualization.}
}
\description{
Create a supplystack object
}
\examples{
supplystack(p=c(100, 50, 75), q=rep(50, 3), nms=c('A','B','C'))
supplystack(p=matrix(c(5, 10, 15, 20), 2, 2, dimnames=list(c("Cost1", "Cost2"))), q=c(10, 15))
}
|
/man/supplystack.Rd
|
permissive
|
ccwoolfolk/supplystack
|
R
| false | true | 762 |
rd
|
#Linear Regression Project on grades data
library(readr)
grades<- read.csv("C:/Users/mohit/Documents/grades.csv")
dim(grades)
colnames(grades) # this gives us the names of the 22 variables
table(grades$gender) #Based on gender
table(grades$ethnicity) #Based on Ethnicity
table(grades$passfail) #Based on whether pass or fail
boxplot(grades$gpa,grades$quiz1,grades$quiz2,grades$quiz3,grades$quiz4,grades$quiz5,col=heat.colors(6))
boxplot(grades$total,col="red")
#install.packages("psych")
library(psych)
describe(grades$gpa)
hist(grades$gpa,xlab="GPA",ylab=" Frequency",main="Histogram of GPA",col="red")
boxplot(grades$gpa,col="red",main= "Boxplot of GPA")
describe(grades$quiz1)
hist(grades$quiz1,xlab="Quiz1",ylab=" Frequency",main="Histogram of Quiz1",col="red")
boxplot(grades$quiz1,col="red",main= "Boxplot of Quiz1")
describe(grades$quiz2)
hist(grades$gpa,xlab="Quiz2",ylab=" Frequency",main="Histogram of Quiz2",col="blue")
boxplot(grades$quiz2,col="blue",main= "Boxplot of Quiz2")
describe(grades$quiz3)
hist(grades$gpa,xlab="Quiz3",ylab=" Frequency",main="Histogram of Quiz3",col="green")
boxplot(grades$quiz3,col="green",main= "Boxplot of Quiz3")
describe(grades$quiz4)
hist(grades$gpa,xlab="Quiz4",ylab=" Frequency",main="Histogram of Qui4",col="yellow")
boxplot(grades$quiz4,col="yellow",main= "Boxplot of Quiz4")
describe(grades$quiz5)
hist(grades$gpa,xlab="Quiz5",ylab=" Frequency",main="Histogram of Quiz5",col="brown")
boxplot(grades$quiz5,col="brown",main= "Boxplot of Quiz5")
describe(grades$total)
hist(grades$gpa,xlab="Total",ylab=" Frequency",main="Histogram of total",col="orange")
boxplot(grades$total,col="orange",main= "Boxplot of total")
describe(grades$final)
hist(grades$final,xlab="Total",ylab=" Frequency",main="Histogram of final",col="green")
boxplot(grades$final,col="red",main= "Boxplot of Final")
#Scatter plots Predictor Vs Response Variable
plot(grades$gpa,grades$final,main= "gpa vs Final" , xlab = "gpa", ylab = "final",col="red",abline(lm(final~gpa,data=grades)))
plot(grades$quiz1,grades$final,main= "Quiz1 vs Final" , xlab = "quiz1", ylab = "final",col="red",abline(lm(final~quiz1,data=grades)))
plot(grades$quiz2,grades$final,main= "Quiz2 vs Final" , xlab = "Quiz2", ylab = "final",col="red",abline(lm(final~quiz2,data=grades)))
plot(grades$quiz2,grades$final,main= "Quiz3 vs Final" , xlab = "Quiz3", ylab = "final",col="red",abline(lm(final~quiz3,data=grades)))
plot(grades$quiz2,grades$final,main= "Quiz4 vs Final" , xlab = "Quiz4", ylab = "final",col="red",abline(lm(final~quiz4,data=grades)))
plot(grades$quiz2,grades$final,main= "Quiz5 vs Final" , xlab = "Quiz5", ylab = "final",col="red",abline(lm(final~quiz5,data=grades)))
plot(grades$total,grades$final,main= "total vs Final" , xlab = "total", ylab = "final",col="red",abline(lm(final~total,data=grades)))
plot(grades$percent,grades$final,main= " Percent vs Final" , xlab = "Quiz2", ylab = "final",col="red",abline(lm(final~percent,data=grades)))
#checking Correlation of each of these predictors with the response variable
cor(grades$gpa,grades$final) #correlation between gpa & final
cor(grades$quiz1,grades$final) #correlation between quiz1 & final
cor(grades$quiz2,grades$final) #correlation between quiz2 & final
cor(grades$quiz3,grades$final) #correlation between quiz3 & final
cor(grades$quiz4,grades$final) #correlation between quiz4 & final
cor(grades$quiz5,grades$final) #correlation between quiz5 & final
cor(grades$total,grades$final) #correlation between total & final
cor(grades$percent,grades$final) #correlation between percent & final
# Building Linear Regression Models for predicting final from other predictor variables
# final ~ gpa+quiz1+quiz2+quiz3+quiz4+quiz5
fg12345<-lm(final~gpa+quiz1+quiz2+quiz3+quiz4+quiz5,data=grades)
fg12345
summary(fg12345)
library(car)
vif(fg12345) # Variance inflation factor for our model fg12345
# final~gpa+quiz2+quiz3+quiz4+quiz5,data=grades
fg2345<-lm(final~gpa+quiz2+quiz3+quiz4+quiz5,data=grades)
summary(fg2345)
vif(fg2345) # Variance inflation factor for our model fg2345
# final~gpa+quiz3,data=grades
fg3<-lm(final~gpa+quiz3,data=grades)
fg3
summary(fg3)
vif(fg3) # Variance inflation factor for our model fg
#final~total+quiz3,data=grades
ft3<-lm(final~total+quiz3,data=grades)
ft3
summary(ft3)
vif(ft3) # Variance inflation factor for our model ft3
#final~quiz2+quiz3,data=grades
f23<-lm(final~quiz2+quiz3,data=grades)
summary(f23)
vif(f23) # Variance inflation factor for our model f23
# final3~quiz3
f3<-lm(final~quiz3,data=grades)
summary(f3)
# Finally we have selected 2 Linear Regression Models that predict "final" with good accuracy.
# The models selected are 'ft3' & 'f23'
# The equations for our models are as follows
#For Model ft3 the equation of the regression line is given by
#final = 6.67358 + (0.69502*total) - (1.89162*quiz3)
#For Model f23 the equation of the regression line is given by
# final = 39.7129 + (1.5407*quiz2) - (1.1862*quiz3)
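# Quick sanity check of the f23 equation on a hypothetical student (quiz2 = 8, quiz3 = 7):
# 39.7129 + 1.5407*8 - 1.1862*7                              # ~ 43.7
# predict(f23, newdata = data.frame(quiz2 = 8, quiz3 = 7))   # should give approximately the same value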
dwt(ft3) # For ft3 durbin watson Statistics comes out to be 2.115215
dwt(f23) #For f23 durbin watson Statistics comes out to be 2.233423
#Finding the predicted Value of final through both the models made & adding them in the grades dataset
grades$predft3<-predict(ft3) #For ft3 model
grades$predft3
grades$predf23<-predict(f23)
grades$predf23 #For f23 model
## error values of final through the models are below. These values are giving us the residuals of our model
grades$errft3<-residuals(ft3)
grades$errft3 # For Model ft3
grades$errf23<-residuals(f23)
grades$errf23 # For model f23
# Adding observation no.s against each row in the data set grades
grades$obsno<-c(1:105)
grades$obsno
View(grades)
#For model ft3,inserting the predicted values in the grades dataset by column creation
predft3<-predict(ft3)
grades$predft3<-predft3
grades$predft3
#For Model f23,inserting the predicted values in the grades dataset by column creation
predf23<-predict(f23)
grades$predf23<-predf23
grades$predf23
###Checking for Assumptions Test for ft3 & f23 Models
hist(grades$errft3,main = "Normality check for ft3 model", xlab="Residuals",col="orange")
hist(grades$errf23,main = "Normality check for f23 model", xlab="Residuals",col="yellow")
####2. Independent of observations
plot(grades$obsno,grades$errft3,col="red",main="Independence of error for ft3",xlab= " obsv no", ylab="residuals")
plot(grades$obsno,grades$errf23,col="brown",main="Independence of error for f23",xlab= " obsv no", ylab="residuals")
####3 Check of linear relationship
plot(grades$total,grades$final,main="Linear Rltnship for ft3",xlab="total",ylab="Final",col="red")
plot(grades$quiz3,grades$final,main="Linear Rltnship for ft3",xlab="quiz3",ylab="Final",col="red")
plot(grades$quiz2,grades$final,main="Linear Rltnship for f23",xlab="quiz2",ylab="Final",col="brown")
plot(grades$quiz3,grades$final,main="Linear Rltnship for f23",xlab="quiz3",ylab="Final",col="brown")
####4 Check of Constant Error Variance : Homoscedasticity
plot(grades$predft3,grades$errft3,col="red",main="Constant error variance ft3",xlab="Predicted",ylab="errors",abline(h=0))
plot(grades$predf23,grades$errf23,col="brown",main="Constant error variance f23",xlab="Predicted",ylab="errors",abline(h=0))
# for finding the confidence intervals & the predicted values for ft3 & f23 models
confint(ft3)
fitted(ft3)
fitted(f23)
confint(f23)
predict(f23, interval="confidence") #same as fitted command
#thanks a lot for reading
## happy learning
#Mohit Arora
|
/Project - R Script - Linear Regression project grades.R
|
no_license
|
mhtdsm/Project--Grades-Linear-Regression
|
R
| false | false | 7,695 |
r
|
# BuildMap file: write function that returns a map
# This function requires plotly
library(plotly)
# BuildMap function: fill this in with a function that returns a map:
# Try parameterize a few options, such as the title
# I suggest: https://plot.ly/r/bubble-maps/
BuildMap <- function(data) {
g <- list(
scope = 'usa',
projection = list(type = 'albers usa'),
showland = TRUE,
landcolor = toRGB("gray85"),
subunitwidth = 1,
countrywidth = 1,
subunitcolor = toRGB("white"),
countrycolor = toRGB("white")
)
  p <- plot_geo(data, locationmode = 'USA-states', sizes = c(1, 250)) %>%
    add_markers(
      x = ~lon, y = ~lat, size = ~pop, color = ~q, hoverinfo = "text",
      text = ~paste(data$name, "<br />", data$pop/1e6, " million")
    ) %>%
) %>%
layout(title = '2014 US city populations<br>(Click legend to toggle)', geo = g)
return (p)
}
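# Usage sketch (hypothetical data frame; any data with lon, lat, pop, q and name columns should work):
# cities <- data.frame(name = c("Seattle", "Spokane"), lon = c(-122.33, -117.43),
#                      lat = c(47.61, 47.66), pop = c(668342, 213272), q = c("a", "b"))
# BuildMap(cities)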
|
/exercise-1/scripts/buildMap.R
|
permissive
|
DaveL33/m14-shiny
|
R
| false | false | 889 |
r
|
\name{tune_wsvm}
\alias{tune_wsvm}
\alias{best.tune_wsvm}
\alias{print.tune_wsvm}
\alias{summary.tune_wsvm}
\alias{print.summary.tune_wsvm}
\title{Parameter Tuning of Functions Using Grid Search}
\description{
This generic function tunes hyperparameters of statistical methods
using a grid search over supplied parameter ranges.
}
\usage{
tune_wsvm(train.x, train.y = NULL, weight, use_zero_weight = FALSE,
pre.check = TRUE, data = list(), validation.x = NULL,
validation.y = NULL, validation.weight = NULL,
weigthed.error = TRUE, ranges = NULL, predict.func = predict,
tunecontrol = tune.control(), ...)
best.tune_wsvm(...)
}
\arguments{
\item{train.x}{either a formula or a '\emph{design} matrix' of predictors.}
\item{train.y}{the response variable if \code{train.x} is a predictor
matrix. Ignored if \code{train.x} is a formula.}
\item{weight}{the weight of each subject. It should be in the same length of \code{train.y}.}
\item{use_zero_weight}{if \code{FALSE}, any subjects in the training data and the validation data (if exist) with zero (or negative) weights will be removed.}
\item{pre.check}{if \code{TRUE}, we prefit the model with partitioned training data using the first set of parameters in \code{ranges}. If it fails (i.e., too many zero-weight subjects in the partitioned training data), we re-partition the data and re-try the model up to 10 times. This is useful when \code{use_zero_weight=TRUE} and there are many zero-weight subjects in the data. }
\item{data}{data, if a formula interface is used. Ignored, if
predictor matrix and response are supplied directly.}
\item{validation.x}{an optional validation set. Depending on whether a
formula interface is used or not, the response can be
included in \code{validation.x} or separately specified using
\code{validation.y}. Only used for bootstrap and fixed validation
set (see \code{\link{tune.control}})}
\item{validation.y}{if no formula interface is used, the response of
the (optional) validation set. Only used for bootstrap and fixed validation
set (see \code{\link{tune.control}})}
\item{validation.weight}{the weight of each subject in the validation set. Will be set to 1 if the user does not provide it.}
\item{weigthed.error}{if \code{TRUE}, the performance measure will be weighted.}
\item{ranges}{a named list of parameter vectors spanning the sampling
space. See \code{\link{wsvm}}. The vectors will usually be created by \code{seq}.}
\item{predict.func}{optional predict function, if the standard \code{predict}
behavior is inadequate.}
\item{tunecontrol}{object of class \code{"tune.control"}, as created by the
function \code{tune.control()}. In addition, \code{tune.control$error.fun} should be a
function that takes three arguments: (true y, predicted y, weight). If omitted, \code{tune.control()}
gives the defaults.}
\item{\dots}{Further parameters passed to the training functions.}
}
\value{
For \code{tune_wsvm}, an object of class \code{tune_wsvm}, including the components:
\item{best.parameters}{a 1 x k data frame, k number of parameters.}
\item{best.performance}{best achieved performance.}
\item{performances}{if requested, a data frame of all parameter
combinations along with the corresponding performance results.}
\item{train.ind}{list of index vectors used for splits into
training and validation sets.}
\item{best.model}{if requested, the model trained on the complete training data
using the best parameter combination.}
\code{best.tune_wsvm()} returns the best model detected by \code{tune_wsvm}.
}
\details{
As performance measure, the classification error is used
for classification, and the mean squared error for regression. It is
possible to specify only one parameter combination (i.e., vectors of
length 1) to obtain an error estimation of the specified type
(bootstrap, cross-classification, etc.) on the given data set.
Cross-validation randomizes the data set before building the splits
which---once created---remain constant during the training
process. The splits can be recovered through the \code{train.ind}
component of the returned object.
}
\author{
David Meyer \cr
Modified by Tianchen Xu \email{tx2155@columbia.edu}
}
\seealso{\code{\link{tune.control}}, \code{\link{plot.tune_wsvm}}}
\examples{
data(iris)
obj <- tune_wsvm(Species~., weight = c(rep(0.8, 50),rep(1,100)),
data = iris, ranges = list(gamma = 2^(-1:1), cost = 2^(2:4)),
tunecontrol = tune.control(sampling = "fix"))
\donttest{
set.seed(11)
obj <- tune_wsvm(Species~., weight = c(rep(1, 52),rep(0,98)),
data = iris, use_zero_weight = TRUE,
ranges = list(gamma = 2^(-1:1), cost = 2^(2:4)),
tunecontrol = tune.control(sampling = "bootstrap"))
}
summary(obj)
plot(obj, transform.x = log2, transform.y = log2)
plot(obj, type = "perspective", theta = 120, phi = 45)
best.tune_wsvm(Species~.,weight = c(rep(0.08, 50),rep(1,100)),
data = iris, ranges = list(gamma = 2^(-1:1), cost = 2^(2:4)),
tunecontrol = tune.control(sampling = "fix"))
}
\keyword{models}
|
/man/tune_wsvm.Rd
|
no_license
|
minghao2016/wsvm
|
R
| false | false | 5,182 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/param_threshold.R
\name{threshold}
\alias{threshold}
\title{General thresholding parameter}
\usage{
threshold(range = c(0, 1), trans = NULL)
}
\arguments{
\item{range}{A two-element vector holding the \emph{defaults} for the smallest and
largest possible values, respectively.}
\item{trans}{A \code{trans} object from the \code{scales} package, such as
\code{scales::log10_trans()} or \code{scales::reciprocal_trans()}. If not provided,
the default is used which matches the units used in \code{range}. If no
transformation, \code{NULL}.}
}
\description{
In a number of cases, there are arguments that are threshold values for
data falling between zero and one. For example, \code{recipes::step_other()} and
so on.
}
\examples{
threshold()
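# a narrower range than the default (sketch):
threshold(range = c(0, 0.5))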
}
|
/man/threshold.Rd
|
no_license
|
thecodemasterk/dials
|
R
| false | true | 821 |
rd
|
getwd()
setwd("C:/home/pranjali")
getwd()
library(datasets)
library(RoughSets)  # provides SF.asDecisionTable, D.discretization.RST, FS./IS./RI. functions used below
wine <- read.csv("WINE.csv")
#Shuffle the data with set.seed
set.seed(5)
dt.Shuffled <- wine[sample(nrow(wine)),]
# Split the data into training and testing
idx <- round(0.8 * nrow(dt.Shuffled))
wine.tra <-SF.asDecisionTable(dt.Shuffled[1:idx,],decision.attr = 12, indx.nominal = 12)
wine.tst <- SF.asDecisionTable(dt.Shuffled[(idx+1):nrow(dt.Shuffled), -ncol(dt.Shuffled)])
# DISCRETIZATION
cut.values <- D.discretization.RST(wine.tra,type.method = "global.discernibility")
d.tra <- SF.applyDecTable(wine.tra, cut.values)
d.tst <- SF.applyDecTable(wine.tst, cut.values)
data(RoughSetData)
decision.table <- RoughSetData$wine.dt
## generate single superreduct
res.2 <- FS.feature.subset.computation(decision.table,method = "quickreduct.frst")
## generate new decision table
new.decTable <- SF.applyDecTable(decision.table, res.2)
# INSTANCE SELECTION
indx <- IS.FRIS.FRST(new.decTable,control = list(threshold.tau = 0.2, alpha = 1))
wine.tra.is <- SF.applyDecTable(new.decTable, indx)
# RULE INDUCTION (Rule-based classifiers)
control.ri <- list(type.aggregation = c("t.tnorm", "lukasiewicz"),type.relation = c("tolerance", "eq.3"),t.implicator = "kleene_dienes")
decRules.hybrid <- RI.hybridFS.FRST(wine.tra.is,control.ri)
# predicting newdata
predValues.hybrid <- predict(decRules.hybrid,new.decTable)
X.laplace(decRules.hybrid)
|
/Papers/FRST_new.R
|
no_license
|
Sandy4321/A-fuzzy-rough-set-based-feature-selection
|
R
| false | false | 1,404 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{culture}
\alias{culture}
\title{Measuring Aggression}
\format{
A data frame with 12 rows and 3 variables:
\describe{
\item{Region}{Region}
\item{Condition}{Condition}
\item{Testosterone}{Testosterone}
}
}
\usage{
culture
}
\description{
A dataset examining a test of the effects of region and insult on aggression
}
\keyword{datasets}
|
/man/culture.Rd
|
no_license
|
anhnguyendepocen/IntroStatsTutorials
|
R
| false | true | 440 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TargetPositionGetDTO.r
\docType{data}
\name{TargetPositionGetDTO}
\alias{TargetPositionGetDTO}
\title{TargetPositionGetDTO Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
TargetPositionGetDTO
}
\description{
TargetPositionGetDTO Class
}
\section{Fields}{
\describe{
\item{\code{target}}{}
\item{\code{position}}{}
}}
\keyword{datasets}
|
/man/TargetPositionGetDTO.Rd
|
no_license
|
OpenSILEX/opensilexClientToolsR
|
R
| false | true | 450 |
rd
|
#' put all the power grids into a single network
#'
#' This is sugar to clean up the code and make a single powergrid with shared attributes from all the power grids
#'
#' @export
create_union_of_power_grids <- function(){
plot_list <- list.files(file.path(embeddings_path, "PL"))[c(2,3,5,1,4,6)] %>%
map(~{
target_graph <- .x # "IEEE_118_igraph"
plot_title <- .x %>% str_remove(., "_igraph") %>% str_replace_all(., "_", " ")
embeddings_data <- read_rds(file.path(embeddings_path, "PL",target_graph, "ec_5.rds"))
g <- read_rds(file.path(power_grid_graphs_path, paste0(target_graph, ".rds")))
nodes_df <- as_data_frame(g, what = "vertices") %>%
mutate(type = case_when(
net_generation>0 ~ "generator",
net_generation<0 ~"demand",
TRUE ~ "transfer"
) ) %>%
left_join(embeddings_data$node_embeddings %>% select(name = node, elevation), by = "name")
print(cor(nodes_df$net_generation, nodes_df$elevation))
edges_df <- as_data_frame(g) %>%
mutate(edge_name = paste(from, to, sep = "-")) %>%
left_join(embeddings_data$edge_embeddings %>% select(edge_name, tension, strain), by = "edge_name")
g <- graph_from_data_frame(edges_df, directed = FALSE, vertices = nodes_df)
g <- g %>%
set.vertex.attribute(., "name", value = (paste0(vcount(g), "_",get.vertex.attribute(g, "name")))) %>%
set.vertex.attribute(., "type", value = plot_title) %>%
set.vertex.attribute(., "Name", value = "A") #Name was causing problems and is irrelevant for this plot
igraph::as_data_frame(g, what = "both")
}
) %>%
#the list is now two elements long and made up of a edge and vertex part
transpose() %>%
#join the elements from each part of the list into dataframes
map(~bind_rows(.x)) %>%
{graph_from_data_frame(d = .$edges, directed = FALSE, vertices = .$vertices)}
kappa_modifier <- as_data_frame(plot_list, what = "vertices") %>%
group_by(type) %>%
mutate(elevation = kappa(elevation)) %>%
ungroup
g <- graph_from_data_frame(as_data_frame(plot_list), directed = FALSE, vertices = kappa_modifier )
return(g)
}
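# Usage sketch (assumes embeddings_path, power_grid_graphs_path and the helper kappa()
# exist in the calling environment, as elsewhere in this package):
# g_union <- create_union_of_power_grids()
# table(igraph::vertex_attr(g_union, "type"))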
|
/RobustnessInternal/R/create_union_of_power_grids.R
|
permissive
|
JonnoB/setse_and_network_robustness
|
R
| false | false | 2,285 |
r
|
library("devtools")
res <- revdep_check(threads = 4)
revdep_check_save_summary()
revdep_check_print_problems()
revdep_email(date = "April 20", only_problems = FALSE, draft = TRUE)
# pkgs <- list(
# list(your_package = "downscale", your_version = "1.2-4", email = "charliem2003@gmail.com"),
# list(your_package = "plotKML", your_version = "0.5-6", email = "tom.hengl@isric.org"),
# list(your_package = "speciesgeocodeR", your_version = "1.0-4", email = "alexander.zizka@bioenv.gu.se"),
# list(your_package = "rCAT", your_version = "0.1.5", email = "J.Moat@kew.org")
# )
# date = "April 18"
#
# str <- paste0(readLines("revdep/email.md"), collapse = "\n")
# lapply(pkgs, function(x)
# whisker::whisker.render(str, data = x)
# )
|
/revdep/check.R
|
permissive
|
HAdebisi1/gistr
|
R
| false | false | 738 |
r
|
library("devtools")
res <- revdep_check(threads = 4)
revdep_check_save_summary()
revdep_check_print_problems()
revdep_email(date = "April 20", only_problems = FALSE, draft = TRUE)
# pkgs <- list(
# list(your_package = "downscale", your_version = "1.2-4", email = "charliem2003@gmail.com"),
# list(your_package = "plotKML", your_version = "0.5-6", email = "tom.hengl@isric.org"),
# list(your_package = "speciesgeocodeR", your_version = "1.0-4", email = "alexander.zizka@bioenv.gu.se"),
# list(your_package = "rCAT", your_version = "0.1.5", email = "J.Moat@kew.org")
# )
# date = "April 18"
#
# str <- paste0(readLines("revdep/email.md"), collapse = "\n")
# lapply(pkgs, function(x)
# whisker::whisker.render(str, data = x)
# )
|
# compute.prc: recall/precision pairs for a predicted adjacency (q.adj) ranked against the true adjacency (t.adj)
compute.prc <- function(q.adj,t.adj)
{
q.nzi <- which(q.adj!=0)
q.nz <- q.adj[q.nzi]
t.nz <- t.adj[q.nzi]
q.rnk <- t.nz[sort.list(q.nz,decreasing=TRUE)]
n <- length(q.rnk)
pr <- matrix(0,ncol=2,nrow=1)
if (length(q.rnk)>0)
{
pr <- matrix(0,ncol=2,nrow=n)
tp <- 0
p <- sum(t.adj)
for (i in 1:n)
{
tp <- tp + q.rnk[i]
pr[i,2] <- tp/i
pr[i,1] <- tp/p
}
}
pr
}
# compute.auc.pr: area under the precision-recall curve, extrapolating over edges not ranked by the query
compute.auc.pr <- function(q.adj,t.adj)
{
auc <- 0
pr <- compute.prc(q.adj,t.adj)
lastr <- 0;
lastp <- 1;
for (i in 1:dim(pr)[1])
{
if (pr[i,1]>lastr)
{
auc <- auc + min(pr[i,2],lastp) * (pr[i,1] - lastr)
lastr <- pr[i,1]
}
lastp <- pr[i,2]
}
if (lastr < 1)
{
guess.remain <- length(t.adj) - dim(pr)[1]
t.remain <- sum(t.adj) * (1-lastr)
auc <- auc + (t.remain / guess.remain) * (1-lastr)
}
auc
}
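# Minimal usage sketch with toy adjacency vectors (hypothetical data):
# q.adj <- c(0.9, 0.0, 0.4, 0.7)   # predicted edge scores
# t.adj <- c(1,   0,   0,   1)     # true edges
# compute.prc(q.adj, t.adj)        # columns: recall, precision down the ranked list
# compute.auc.pr(q.adj, t.adj)     # area under the precision-recall curve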
Pause <- function () {
cat("Hit <enter> to continue...")
readline()
invisible()
}
|
/scripts/util.r
|
no_license
|
yiming-kang/TF_Network_Evaluation
|
R
| false | false | 915 |
r
|
file<-"C:/Users/natasa/Documents/projectAmadeus/data/citibike/processed/cbOct13rbppModelDur.rdata"
load(file=file)
file<-"C:/Users/natasa/Documents/projectAmadeus/data/fits/cbOct13rbppmodelDur.rdata"
load(file=file)
file<-"C:/Users/natasa/Documents/projectAmadeus/data/citibike/processed/t01Oct13cb2ModelDur.rdata"
load(file=file)
file<-"C:/Users/natasa/Documents/projectAmadeus/data/fits/t01Oct13cb2modelDur.rdata"
load(file)
cbDur<-cb_svmPred[,6]
tDur<-tcb_svmPred[,5]
r<-(tDur-cbDur)/tDur
cbfaster<-cbDur-tDur<0
dist<-cbOct13rbppModelDurTest$distance
hour<-cbOct13rbppModelDurTest$startHour
h<-floor(hour)
distf<-cut(dist, c(seq(0,3000,500),max(dist)))
df<-data.frame(cbDur=cbDur,tDur=tDur,r=r,cbfaster=cbfaster,
dist=dist,hour=hour, h=h,distf=distf)
plot(cbDur,tDur,col="black",type="p",pch='.',xlim=c(0,2000),ylim=c(0,2000))
abline(a=0,b=1,col="red")
plot(cbDur,tDur,col="black",type="p",pch='.',xlim=c(0,1000),ylim=c(0,1200))
abline(a=0,b=1,col="red")
plot(tDur,r,col="black",type="p",pch='.')
abline(h=0,col="red")
plot(cbDur,r,col="black",type="p",pch='.')
abline(h=0,col="red")
plot(dist,r,col="black",type="p",pch='.')
abline(h=0,col="red")
plot(hour,r,col="black",type="p",pch='.')
abline(h=0,col="red")
plot(h,r,col="black",type="p",pch='.')
abline(h=0,col="red")
distf
levels(distf)
levels(distf)<-c("1","2","3","4","5","6","7")
library(ggplot2)
qplot(hour, r, data = df, facets = distf ~ . , geom =
c("point", "smooth"))
ggplot(df, aes(hour,r)) + geom_point(size=0.001) + facet_wrap(~distf,ncol=4) +
geom_smooth(size=0.8,col="red") + geom_hline(yintercept=0)
labs(title="Taxi: Predicted vs. observed values") +
labs(x='Observed trip duration (secs)') +
labs(y='Predicted trip duration (secs)') +
coord_cartesian(xlim = c(0, 3000))
plot(hour,dist,col=r+3.5,type="p")
abline(h=0,col="red")
cbDur09<-cb_svmPred[cbOct13bModelDurTest$distance,6]
tDur09<-tcb_svmPred[,4]
library(lattice)
## Convert 'Month' to a factor variable
#airquality <- transform(airquality, Month = factor(Month))
xyplot(distance ~ startHour | cbfaster, layout = c(1, 2),pch=15,cex=0.1,alpha=0.3)
library(lattice)
xyplot(r ~ hour | distf, layout = c(3, 3),pch=15,cex=0.5)
library(kernlab)
xtrain<-as.matrix(data.frame(dist=dist, hour=hour))
outcome<-cbfaster
svmFitFin<-ksvm(x=xtrain, y=outcome,
kernel="rbfdot",kpar="automatic",C=1,epsilon=0.1)
# too many obs
# regression tree
library(rpart)
rpartTreeFit<-rpart(cbfaster ~ dist + hour)
rpartTreeFit2<-rpart(cbfaster ~ dist + hour, control = rpart.control(cp = 0.005,minsplit=100,maxdepth=30))
rpartTreeFit3<-rpart(cbfaster ~ dist + hour, method="class",
control = rpart.control(cp = 0.001,minsplit=20,maxdepth=30))
xtest<-data.frame(dist=dist, hour=hour)
rpartTreePred<-predict(rpartTreeFit,newdata=xtest)
rpartTreePred2<-predict(rpartTreeFit2,newdata=xtest)
rpartTreePred3<-predict(rpartTreeFit3,newdata=xtest)
# rpartTree<-rpart(trip_distance_m ~ .,data=taxiOct01modelDist2, control = rpart.control(cp = 0.005,minsplit=100,maxdepth=30))
# no good for this
table(cbfaster,rpartTreePred>0.55)/length(cbfaster)
table(cbfaster,rpartTreePred2<0.55)/length(cbfaster)
table(cbfaster,rpartTreePred3[,2]>=0.4)/length(cbfaster)
ind <- rpartTreePred2>0.55
hourdist<-NULL
for (v1 in seq(0,23.5,0.1)){
for (v2 in seq(300,3000,100)) hourdist<-rbind(hourdist,c(v1,v2))
}
disthour<-data.frame(dist=hourdist[,2],hour=hourdist[,1])
colnames(hourdist)<-c("hour","dist")
gridPred<-predict(rpartTreeFit2,newdata=disthour)
ind<-gridPred>0.55
indf<-as.factor(ind)
levels(indf)<-c("faster","slower")
faster<-hourdist[ind,]
slower<-hourdist[!ind,]
head(hourdist)
plot(faster,col='red')
plot(slower,col='blue')
distHourPred<-data.frame(distance=hourdist[,2],hour=hourdist[,1],citibike=indf)
head(distHourPred)
ggplot(distHourPred, aes(hour,distance)) +
geom_point(aes(color=citibike)) + theme_bw() +
labs(title="Which is faster: citibike or taxi?",size=15)
library(randomForest)
rfFit<-randomForest(as.factor(cbfaster) ~ dist + hour,ntree=500)
rfPred<-predict(rfFit,newdata=xtest)
table(cbfaster,rfPred)/length(cbfaster)
ggplot(df, aes(hour,dist)) +
geom_point(aes(color=cbfaster), size = 1, alpha = 1/4) + theme_bw() +
labs(title="Which is faster: citibike or taxi?")
rfGridPred<-predict(rfFit,newdata=disthour)
indf<-as.factor(rfGridPred)
levels(indf)<-c("citibike","taxi")
distHourPred<-data.frame(distance=hourdist[,2],hour=hourdist[,1],cbfaster=indf)
ggplot(distHourPred, aes(hour,distance)) +
geom_point(aes(color=cbfaster)) +
scale_colour_discrete(name ="Which is faster: citibike or taxi?") +
theme_bw() +
theme(legend.title = element_text(size=16, face="bold")) +
theme(legend.text = element_text(size = 16)) +
guides(shape=guide_legend(override.aes=list(size=10,alpha=1)))
|
/code/final/t_cb_compare_2.R
|
no_license
|
natasasdj/DataScienceTestAmadeus
|
R
| false | false | 4,923 |
r
|
file<-"C:/Users/natasa/Documents/projectAmadeus/data/citibike/processed/cbOct13rbppModelDur.rdata"
load(file=file)
file<-"C:/Users/natasa/Documents/projectAmadeus/data/fits/cbOct13rbppmodelDur.rdata"
load(file=file)
file<-"C:/Users/natasa/Documents/projectAmadeus/data/citibike/processed/t01Oct13cb2ModelDur.rdata"
load(file=file)
file<-"C:/Users/natasa/Documents/projectAmadeus/data/fits/t01Oct13cb2modelDur.rdata"
load(file)
cbDur<-cb_svmPred[,6]
tDur<-tcb_svmPred[,5]
r<-(tDur-cbDur)/tDur
cbfaster<-cbDur-tDur<0
dist<-cbOct13rbppModelDurTest$distance
hour<-cbOct13rbppModelDurTest$startHour
h<-floor(hour)
distf<-cut(dist, c(seq(0,3000,500),max(dist)))
df<-data.frame(cbDur=cbDur,tDur=tDur,r=r,cbfaster=cbfaster,
dist=dist,hour=hour, h=h,distf=distf)
plot(cbDur,tDur,col="black",type="p",pch='.',xlim=c(0,2000),ylim=c(0,2000))
abline(a=0,b=1,col="red")
plot(cbDur,tDur,col="black",type="p",pch='.',xlim=c(0,1000),ylim=c(0,1200))
abline(a=0,b=1,col="red")
plot(tDur,r,col="black",type="p",pch='.')
abline(h=0,col="red")
plot(cbDur,r,col="black",type="p",pch='.')
abline(h=0,col="red")
plot(dist,r,col="black",type="p",pch='.')
abline(h=0,col="red")
plot(hour,r,col="black",type="p",pch='.')
abline(h=0,col="red")
plot(h,r,col="black",type="p",pch='.')
abline(h=0,col="red")
distf
levels(distf)
levels(distf)<-c("1","2","3","4","5","6","7")
library(ggplot2)
qplot(hour, r, data = df, facets = distf ~ . , geom =
c("point", "smooth"))
ggplot(df, aes(hour,r)) + geom_point(size=0.001) + facet_wrap(~distf,ncol=4) +
geom_smooth(size=0.8,col="red") + geom_hline(yintercept=0)
labs(title="Taxi: Predicted vs. observed values") +
labs(x='Observed trip duration (secs)') +
labs(y='Predicted trip duration (secs)') +
coord_cartesian(xlim = c(0, 3000))
plot(hour,dist,col=r+3.5,type="p")
abline(h=0,col="red")
cbDur09<-cb_svmPred[cbOct13bModelDurTest$distance,6]
tDur09<-tcb_svmPred[,4]
library(lattice)
## Convert 'Month' to a factor variable
#airquality <- transform(airquality, Month = factor(Month))
xyplot(distance ~ startHour | cbfaster, layout = c(1, 2),pch=15,cex=0.1,alpha=0.3)
library(lattice)
xyplot(r ~ hour | distf, layout = c(3, 3),pch=15,cex=0.5)
library(kernlab)
xtrain<-as.matrix(data.frame(dist=dist, hour=hour))
outcome<-cbfaster
svmFitFin<-ksvm(x=xtrain, y=outcome,
kernel="rbfdot",kpar="automatic",C=1,epsilon=0.1)
# too many obs
# regression tree
library(rpart)
rpartTreeFit<-rpart(cbfaster ~ dist + hour)
rpartTreeFit2<-rpart(cbfaster ~ dist + hour, control = rpart.control(cp = 0.005,minsplit=100,maxdepth=30))
rpartTreeFit3<-rpart(cbfaster ~ dist + hour, method="class",
control = rpart.control(cp = 0.001,minsplit=20,maxdepth=30))
xtest<-data.frame(dist=dist, hour=hour)
rpartTreePred<-predict(rpartTreeFit,newdata=xtest)
rpartTreePred2<-predict(rpartTreeFit2,newdata=xtest)
rpartTreePred3<-predict(rpartTreeFit3,newdata=xtest)
# rpartTree<-rpart(trip_distance_m ~ .,data=taxiOct01modelDist2, control = rpart.control(cp = 0.005,minsplit=100,maxdepth=30))
# no good for this
table(cbfaster,rpartTreePred>0.55)/length(cbfaster)
table(cbfaster,rpartTreePred2<0.55)/length(cbfaster)
table(cbfaster,rpartTreePred3[,2]>=0.4)/length(cbfaster)
ind <- rpartTreePred2>0.55
hourdist<-NULL
for (v1 in seq(0,23.5,0.1)){
for (v2 in seq(300,3000,100)) hourdist<-rbind(hourdist,c(v1,v2))
}
disthour<-data.frame(dist=hourdist[,2],hour=hourdist[,1])
colnames(hourdist)<-c("hour","dist")
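# Vectorised alternative to the nested loop above (a sketch; the row order differs
# from the loop, which does not matter for the per-row grid predictions below):
# hourdist <- as.matrix(expand.grid(hour = seq(0, 23.5, 0.1), dist = seq(300, 3000, 100)))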
gridPred<-predict(rpartTreeFit2,newdata=disthour)
ind<-gridPred>0.55
indf<-as.factor(ind)
levels(indf)<-c("faster","slower")
faster<-hourdist[ind,]
slower<-hourdist[!ind,]
head(hourdist)
plot(faster,col='red')
plot(slower,col='blue')
distHourPred<-data.frame(distance=hourdist[,2],hour=hourdist[,1],citibike=indf)
head(distHourPred)
ggplot(distHourPred, aes(hour,distance)) +
geom_point(aes(color=citibike)) + theme_bw() +
labs(title="Which is faster: citibike or taxi?",size=15)
library(randomForest)
rfFit<-randomForest(as.factor(cbfaster) ~ dist + hour,ntree=500)
rfPred<-predict(rfFit,newdata=xtest)
table(cbfaster,rfPred)/length(cbfaster)
ggplot(df, aes(hour,dist)) +
geom_point(aes(color=cbfaster), size = 1, alpha = 1/4) + theme_bw() +
labs(title="Which is faster: citibike or taxi?")
rfGridPred<-predict(rfFit,newdata=disthour)
indf<-as.factor(rfGridPred)
levels(indf)<-c("citibike","taxi")
distHourPred<-data.frame(distance=hourdist[,2],hour=hourdist[,1],cbfaster=indf)
ggplot(distHourPred, aes(hour,distance)) +
geom_point(aes(color=cbfaster)) +
scale_colour_discrete(name ="Which is faster: citibike or taxi?") +
theme_bw() +
theme(legend.title = element_text(size=16, face="bold")) +
theme(legend.text = element_text(size = 16)) +
guides(shape=guide_legend(override.aes=list(size=10,alpha=1)))
|
library(magrittr)
dir_name <- 'iris_models/'
fpath <- paste0('output/', dir_name)
dat <- read.csv('data/edit_iris.csv')
if (!dir.exists(fpath)) dir.create(fpath, recursive = TRUE)
models <-
names(dat)[1:4] %>%
purrr::set_names() %>%
purrr::map(function(x) {
fo <- as.formula(paste(x, '~ .'))
lm(fo, data = dat)
})
purrr::iwalk(models, function(mod, nm) {
saveRDS(mod, paste0(fpath, nm, '.rds'))
})
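# Usage sketch (assumption: edit_iris.csv keeps the stock iris column names, so one
# of the files written above is 'Sepal.Length.rds'): reload a model and score the data.
mod_path <- paste0(fpath, 'Sepal.Length.rds')
if (file.exists(mod_path)) {
  reloaded <- readRDS(mod_path)
  print(head(predict(reloaded, newdata = dat)))
}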
|
/scripts/model.R
|
no_license
|
mir-cat/make_example_r
|
R
| false | false | 413 |
r
|
library(magrittr)
dir_name <- 'iris_models/'
fpath <- paste0('output/', dir_name)
dat <- read.csv('data/edit_iris.csv')
if (!dir.exists(fpath)) dir.create(fpath, recursive = TRUE)
models <-
names(dat)[1:4] %>%
purrr::set_names() %>%
purrr::map(function(x) {
fo <- as.formula(paste(x, '~ .'))
lm(fo, data = dat)
})
purrr::iwalk(models, function(mod, nm) {
saveRDS(mod, paste0(fpath, nm, '.rds'))
})
|
\name{pvs}
\alias{pvs}
\alias{pvs.default}
\alias{pvs.formula}
\alias{print.pvs}
\title{Pairwise variable selection for classification}
\description{
Pairwise variable selection for numerical data, allowing the use of different classifiers and different variable selection methods.
}
\usage{
pvs(x, ...)
\method{pvs}{default}(x, grouping, prior=NULL, method="lda",
vs.method=c("ks.test","stepclass","greedy.wilks"), niveau=0.05,
fold=10, impr=0.1, direct="backward", out=FALSE, ...)
\method{pvs}{formula}(formula, data = NULL, ...)
}
\arguments{
\item{x}{matrix or data frame containing the explanatory variables
(required, if \code{formula} is not given). x must consist of numerical data only. }
\item{formula}{A formula of the form \code{groups ~ x1 + x2 + ...}.
That is, the response is the grouping factor (the classes) and the right hand side
specifies the (numerical) discriminators.
Interaction terms are not supported.}
\item{data}{data matrix (rows=cases, columns=variables)}
\item{grouping}{class indicator vector (a factor)}
  \item{prior}{prior probabilities for the classes. If not specified, the prior probabilities are set according to the class proportions in \dQuote{grouping}. If specified, the order of the prior
    probabilities must be the same as in \dQuote{grouping}. }
\item{method}{character, name of classification function (e.g. \dQuote{\code{\link[MASS]{lda}}} (default)).}
\item{vs.method}{character, name of variable selection method. Must be one of \dQuote{\code{\link[stats]{ks.test}}} (default),
\dQuote{\code{\link[klaR]{stepclass}}} or \dQuote{\code{\link[klaR]{greedy.wilks}}}. }
\item{niveau}{used niveau for \dQuote{\code{\link[stats]{ks.test}}}}
  \item{fold}{parameter for cross-validation, if \dQuote{\code{\link[klaR]{stepclass}}} is chosen as \sQuote{\code{vs.method}}}
  \item{impr}{least improvement of the performance measure required to include or exclude any variable (<=1), if \dQuote{\code{\link[klaR]{stepclass}}} is chosen as \sQuote{\code{vs.method}} }
  \item{direct}{direction of variable selection, if \dQuote{\code{\link[klaR]{stepclass}}} is chosen as \sQuote{\code{vs.method}}.
    Must be one of \dQuote{\code{forward}}, \dQuote{\code{backward}} (default) or \dQuote{\code{both}}. }
  \item{out}{indicator (logical) for text output during computation (slows down computation!), if \dQuote{\code{\link[klaR]{stepclass}}} is chosen as \sQuote{\code{vs.method}} }
\item{...}{further parameters passed to classification function (\sQuote{\code{method}}) or variable selection method (\sQuote{\code{vs.method}}) }
}
\details{
  The classification \dQuote{method} (e.g. \sQuote{\code{\link[MASS]{lda}}}) must have its own
  \sQuote{\code{predict}} method (like \sQuote{\code{\link[MASS]{predict.lda}}} for \sQuote{\code{lda}})
  that returns a list with an element \sQuote{\code{posterior}} containing the posterior probabilities. It must be able to deal with matrices as in \code{method(x, grouping, ...)}.
Examples of such classification methods are \sQuote{\code{\link[MASS]{lda}}}, \sQuote{\code{\link[MASS]{qda}}}, \sQuote{\code{\link[klaR]{rda}}},
  \sQuote{\code{\link[klaR]{NaiveBayes}}} or \sQuote{\code{\link[klaR]{sknn}}}.
  For the classification methods \dQuote{\code{\link[e1071]{svm}}} and \dQuote{\code{\link[randomForest]{randomForest}}} special routines are implemented to make them work with the \sQuote{\code{pvs}} method even though their \sQuote{\code{predict}} methods do not provide the demanded posteriors. However, those two classifiers cannot be used together with the variable selection method \dQuote{\code{\link[klaR]{stepclass}}}.
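  For illustration, the required interface can be checked for a candidate method as follows (a sketch, assuming the \pkg{MASS} package is available):
  \preformatted{
    fit <- MASS::lda(iris[, 1:4], iris$Species)
    str(predict(fit, iris[, 1:4])$posterior)  # matrix of posterior probabilities
  }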
\sQuote{\code{pvs}} performs a variable selection using the selection method chosen in \sQuote{\code{vs.method}} for each pair of classes in \sQuote{\code{x}}.
Then for each pair of classes a submodel using \sQuote{\code{method}} is trained (using only the earlier selected variables for this class-pair).
  If \sQuote{\code{vs.method}} is \dQuote{\code{\link[stats]{ks.test}}}, then for each variable the empirical distribution functions of the cases of both classes are compared via \dQuote{\code{\link[stats]{ks.test}}}. Only variables with a p-value below \sQuote{\code{niveau}} are used for training the submodel for this pair of classes.
  If \sQuote{\code{vs.method}} is \dQuote{\code{\link[klaR]{stepclass}}} the variable selection is performed using the \dQuote{\code{\link[klaR]{stepclass}}} method.
  If \sQuote{\code{vs.method}} is \dQuote{\code{\link[klaR]{greedy.wilks}}} the variable selection is performed using Wilks' lambda criterion.
}
\value{
An object of class \sQuote{\code{pvs}} containing the following components:
\item{classes}{the classes in grouping}
\item{prior}{used prior probabilities}
\item{method}{name of used classification function}
\item{vs.method}{name of used function for variable selection}
\item{submodels}{containing a list of submodels. For each pair of classes there is a list element being another list of 3 containing the class-pair of this submodel, the selected variables
for the subspace of classes and the result of the trained classification function.}
\item{call}{the (matched) function call}
}
\author{Gero Szepannek, \email{szepannek@statistik.tu-dortmund.de}, Christian Neumann}
\references{
\itemize{
\item{}{Szepannek, G. and Weihs, C. (2006) Variable Selection for Classification of More than Two
  Classes Where the Data are Sparse. In \emph{From Data and Information Analysis to Knowledge Engineering.},
eds Spiliopolou, M., Kruse, R., Borgelt, C., Nuernberger, A. and Gaul, W. pp. 700-708. Springer, Heidelberg.}
\item{}{Szepannek, G. (2008): Different Subspace Classification - Datenanalyse, -interpretation, -visualisierung und
Vorhersage in hochdimensionalen Raeumen, ISBN 978-3-8364-6302-7, vdm, Saarbruecken.}
}
}
\seealso{
  \code{\link[klaR]{predict.pvs}} for predicting \sQuote{\code{pvs}} models and \code{\link[klaR]{locpvs}} for pairwise variable selection in local models of several subclasses
}
\examples{
## Example 1: learn an "lda" model on the waveform data using pairwise variable
## selection (pvs) using "ks.test" and compare it to using lda without pvs
library("mlbench")
trainset <- mlbench.waveform(300)
pvsmodel <- pvs(trainset$x, trainset$classes, niveau=0.05) # default: using method="lda"
## short summary, showing the class-pairs of the submodels and the selected variables
pvsmodel
testset <- mlbench.waveform(500)
## prediction of the test data set:
prediction <- predict(pvsmodel, testset$x)
## calculating the test error rate
1-sum(testset$classes==prediction$class)/length(testset$classes)
## Bayes error is 0.149
## comparison to performance of simple lda
ldamodel <- lda(trainset$x, trainset$classes)
LDAprediction <- predict(ldamodel, testset$x)
## test error rate
1-sum(testset$classes==LDAprediction$class)/length(testset$classes)
## Example 2: learn a "qda" model with pvs on half of the Satellite dataset,
## using "ks.test"
\donttest{
library("mlbench")
data("Satellite")
## takes few seconds as exact KS tests are calculated here:
model <- pvs(classes ~ ., Satellite[1:3218,], method="qda", vs.method="ks.test")
## short summary, showing the class-pairs of the submodels and the selected variables
model
## now predict on the rest of the data set:
## pred <- predict(model,Satellite[3219:6435,]) # takes some time
pred <- predict(model,Satellite[3219:6435,], quick=TRUE) # that's much quicker
## now you can look at the predicted classes:
pred$class
## or the posterior probabilities:
pred$posterior
}
}
\keyword{classif}
\keyword{multivariate}
\concept{Pairwise variable selection for classification}
|
/man/pvs.Rd
|
no_license
|
cran/klaR
|
R
| false | false | 7,873 |
rd
|
\name{pvs}
\alias{pvs}
\alias{pvs.default}
\alias{pvs.formula}
\alias{print.pvs}
\title{Pairwise variable selection for classification}
\description{
Pairwise variable selection for numerical data, allowing the use of different classifiers and different variable selection methods.
}
\usage{
pvs(x, ...)
\method{pvs}{default}(x, grouping, prior=NULL, method="lda",
vs.method=c("ks.test","stepclass","greedy.wilks"), niveau=0.05,
fold=10, impr=0.1, direct="backward", out=FALSE, ...)
\method{pvs}{formula}(formula, data = NULL, ...)
}
\arguments{
\item{x}{matrix or data frame containing the explanatory variables
(required, if \code{formula} is not given). x must consist of numerical data only. }
\item{formula}{A formula of the form \code{groups ~ x1 + x2 + ...}.
That is, the response is the grouping factor (the classes) and the right hand side
specifies the (numerical) discriminators.
Interaction terms are not supported.}
\item{data}{data matrix (rows=cases, columns=variables)}
\item{grouping}{class indicator vector (a factor)}
  \item{prior}{prior probabilities for the classes. If not specified, the prior probabilities are set according to the class proportions in \dQuote{grouping}. If specified, the order of the prior
    probabilities must be the same as in \dQuote{grouping}. }
\item{method}{character, name of classification function (e.g. \dQuote{\code{\link[MASS]{lda}}} (default)).}
\item{vs.method}{character, name of variable selection method. Must be one of \dQuote{\code{\link[stats]{ks.test}}} (default),
\dQuote{\code{\link[klaR]{stepclass}}} or \dQuote{\code{\link[klaR]{greedy.wilks}}}. }
\item{niveau}{used niveau for \dQuote{\code{\link[stats]{ks.test}}}}
  \item{fold}{parameter for cross-validation, if \dQuote{\code{\link[klaR]{stepclass}}} is chosen as \sQuote{\code{vs.method}}}
  \item{impr}{least improvement of the performance measure required to include or exclude any variable (<=1), if \dQuote{\code{\link[klaR]{stepclass}}} is chosen as \sQuote{\code{vs.method}} }
  \item{direct}{direction of variable selection, if \dQuote{\code{\link[klaR]{stepclass}}} is chosen as \sQuote{\code{vs.method}}.
    Must be one of \dQuote{\code{forward}}, \dQuote{\code{backward}} (default) or \dQuote{\code{both}}. }
  \item{out}{indicator (logical) for text output during computation (slows down computation!), if \dQuote{\code{\link[klaR]{stepclass}}} is chosen as \sQuote{\code{vs.method}} }
\item{...}{further parameters passed to classification function (\sQuote{\code{method}}) or variable selection method (\sQuote{\code{vs.method}}) }
}
\details{
  The classification \dQuote{method} (e.g. \sQuote{\code{\link[MASS]{lda}}}) must have its own
  \sQuote{\code{predict}} method (like \sQuote{\code{\link[MASS]{predict.lda}}} for \sQuote{\code{lda}})
  that returns a list with an element \sQuote{\code{posterior}} containing the posterior probabilities. It must be able to deal with matrices as in \code{method(x, grouping, ...)}.
Examples of such classification methods are \sQuote{\code{\link[MASS]{lda}}}, \sQuote{\code{\link[MASS]{qda}}}, \sQuote{\code{\link[klaR]{rda}}},
  \sQuote{\code{\link[klaR]{NaiveBayes}}} or \sQuote{\code{\link[klaR]{sknn}}}.
  For the classification methods \dQuote{\code{\link[e1071]{svm}}} and \dQuote{\code{\link[randomForest]{randomForest}}} special routines are implemented to make them work with the \sQuote{\code{pvs}} method even though their \sQuote{\code{predict}} methods do not provide the demanded posteriors. However, those two classifiers cannot be used together with the variable selection method \dQuote{\code{\link[klaR]{stepclass}}}.
\sQuote{\code{pvs}} performs a variable selection using the selection method chosen in \sQuote{\code{vs.method}} for each pair of classes in \sQuote{\code{x}}.
Then for each pair of classes a submodel using \sQuote{\code{method}} is trained (using only the earlier selected variables for this class-pair).
  If \sQuote{\code{vs.method}} is \dQuote{\code{\link[stats]{ks.test}}}, then for each variable the empirical distribution functions of the cases of both classes are compared via \dQuote{\code{\link[stats]{ks.test}}}. Only variables with a p-value below \sQuote{\code{niveau}} are used for training the submodel for this pair of classes.
  If \sQuote{\code{vs.method}} is \dQuote{\code{\link[klaR]{stepclass}}} the variable selection is performed using the \dQuote{\code{\link[klaR]{stepclass}}} method.
  If \sQuote{\code{vs.method}} is \dQuote{\code{\link[klaR]{greedy.wilks}}} the variable selection is performed using Wilks' lambda criterion.
}
\value{
An object of class \sQuote{\code{pvs}} containing the following components:
\item{classes}{the classes in grouping}
\item{prior}{used prior probabilities}
\item{method}{name of used classification function}
\item{vs.method}{name of used function for variable selection}
\item{submodels}{containing a list of submodels. For each pair of classes there is a list element being another list of 3 containing the class-pair of this submodel, the selected variables
for the subspace of classes and the result of the trained classification function.}
\item{call}{the (matched) function call}
}
\author{Gero Szepannek, \email{szepannek@statistik.tu-dortmund.de}, Christian Neumann}
\references{
\itemize{
\item{}{Szepannek, G. and Weihs, C. (2006) Variable Selection for Classification of More than Two
  Classes Where the Data are Sparse. In \emph{From Data and Information Analysis to Knowledge Engineering.},
eds Spiliopolou, M., Kruse, R., Borgelt, C., Nuernberger, A. and Gaul, W. pp. 700-708. Springer, Heidelberg.}
\item{}{Szepannek, G. (2008): Different Subspace Classification - Datenanalyse, -interpretation, -visualisierung und
Vorhersage in hochdimensionalen Raeumen, ISBN 978-3-8364-6302-7, vdm, Saarbruecken.}
}
}
\seealso{
  \code{\link[klaR]{predict.pvs}} for predicting \sQuote{\code{pvs}} models and \code{\link[klaR]{locpvs}} for pairwise variable selection in local models of several subclasses
}
\examples{
## Example 1: learn an "lda" model on the waveform data using pairwise variable
## selection (pvs) using "ks.test" and compare it to using lda without pvs
library("mlbench")
trainset <- mlbench.waveform(300)
pvsmodel <- pvs(trainset$x, trainset$classes, niveau=0.05) # default: using method="lda"
## short summary, showing the class-pairs of the submodels and the selected variables
pvsmodel
testset <- mlbench.waveform(500)
## prediction of the test data set:
prediction <- predict(pvsmodel, testset$x)
## calculating the test error rate
1-sum(testset$classes==prediction$class)/length(testset$classes)
## Bayes error is 0.149
## comparison to performance of simple lda
ldamodel <- lda(trainset$x, trainset$classes)
LDAprediction <- predict(ldamodel, testset$x)
## test error rate
1-sum(testset$classes==LDAprediction$class)/length(testset$classes)
## Example 2: learn a "qda" model with pvs on half of the Satellite dataset,
## using "ks.test"
\donttest{
library("mlbench")
data("Satellite")
## takes few seconds as exact KS tests are calculated here:
model <- pvs(classes ~ ., Satellite[1:3218,], method="qda", vs.method="ks.test")
## short summary, showing the class-pairs of the submodels and the selected variables
model
## now predict on the rest of the data set:
## pred <- predict(model,Satellite[3219:6435,]) # takes some time
pred <- predict(model,Satellite[3219:6435,], quick=TRUE) # that's much quicker
## now you can look at the predicted classes:
pred$class
## or the posterior probabilities:
pred$posterior
}
}
\keyword{classif}
\keyword{multivariate}
\concept{Pairwise variable selection for classification}
|
# The texreg package was written by Philip Leifeld.
# Please use the forum at http://r-forge.r-project.org/projects/texreg/
# for bug reports, help or feature requests.
# screenreg function
screenreg <- function(l, file = NA, single.row = FALSE,
stars = c(0.001, 0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.gof.names = NULL, custom.note = NULL,
digits = 2, leading.zero = TRUE, symbol = ".", override.coef = 0,
override.se = 0, override.pval = 0, omit.coef = NA, reorder.coef = NULL,
reorder.gof = NULL, return.string = FALSE, ci.force = FALSE,
ci.force.level = 0.95, ci.test = 0, column.spacing = 2, outer.rule = "=",
inner.rule = "-", ...) {
stars <- check.stars(stars)
models <- get.data(l, ...) #extract relevant coefficients, SEs, GOFs, etc.
#models <- override(models, override.coef, override.se, override.pval)
models <- tex.replace(models, type = "screen") #convert TeX code to text code
#models <- ciforce(models, ci.force = ci.force, ci.level = ci.force.level)
gof.names <- get.gof(models) #extract names of GOFs
# arrange coefficients and GOFs nicely in a matrix
gofs <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "gofs")
m <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "m")
decimal.matrix <- aggregate.matrix(models, gof.names, custom.gof.names,
digits, returnobject = "decimal.matrix")
m <- customnames(m, custom.coef.names) #rename coefficients
m <- rearrangeMatrix(m) #resort matrix and conflate duplicate entries
m <- as.data.frame(m)
m <- omitcoef(m, omit.coef) #remove coefficient rows matching regex
modnames <- modelnames(models, custom.model.names) #use (custom) model names
# reorder GOF and coef matrix
m <- reorder(m, reorder.coef)
gofs <- reorder(gofs, reorder.gof)
decimal.matrix <- reorder(decimal.matrix, reorder.gof)
# create output table with significance stars etc.
ci <- logical()
for (i in 1:length(models)) {
if (length(models[[i]]@se) == 0) {
ci[i] <- TRUE
} else {
ci[i] <- FALSE
}
}
output.matrix <- outputmatrix(m, single.row, neginfstring = "-Inf",
leading.zero, digits, se.prefix = " (", se.suffix = ")",
star.prefix = " ", star.suffix = "", star.char = "*", stars,
dcolumn = TRUE, symbol = symbol, bold = 0, bold.prefix = "",
bold.suffix = "", ci = ci, ci.test = ci.test)
#class(output.matrix)
output.matrix <- output.matrix[-seq(2, dim(output.matrix)[1], by=2),]
# create GOF matrix (the lower part of the final output matrix)
gof.matrix <- gofmatrix(gofs, decimal.matrix, dcolumn = TRUE, leading.zero,
digits)
# combine the coefficient and gof matrices vertically
output.matrix <- rbind(output.matrix, gof.matrix)
# reformat output matrix and add spaces
if (ncol(output.matrix) == 2) {
temp <- matrix(format.column(output.matrix[, -1], single.row = single.row,
digits = digits))
} else {
temp <- apply(output.matrix[, -1], 2, format.column,
single.row = single.row, digits = digits)
}
output.matrix <- cbind(output.matrix[, 1], temp)
output.matrix <- rbind(c("", modnames), output.matrix)
for (i in 1:ncol(output.matrix)) {
output.matrix[, i] <- fill.spaces(output.matrix[, i])
}
string <- "\n"
# horizontal rule above the table
table.width <- sum(nchar(output.matrix[1, ])) +
(ncol(output.matrix) - 1) * column.spacing
if (class(outer.rule) != "character") {
stop("outer.rule must be a character.")
} else if (nchar(outer.rule) > 1) {
stop("outer.rule must be a character of maximum length 1.")
} else if (outer.rule == "") {
o.rule <- ""
} else {
o.rule <- paste(rep(outer.rule, table.width), collapse = "")
string <- paste0(string, o.rule, "\n")
}
# specify model names
spacing <- paste(rep(" ", column.spacing), collapse = "")
string <- paste(string, output.matrix[1, 1], sep = "")
for (i in 2:ncol(output.matrix)) {
string <- paste0(string, spacing, output.matrix[1, i])
}
string <- paste0(string, "\n")
# mid rule 1
if (class(inner.rule) != "character") {
stop("inner.rule must be a character.")
} else if (nchar(inner.rule) > 1) {
stop("inner.rule must be a character of maximum length 1.")
} else if (inner.rule == "") {
i.rule <- ""
} else {
i.rule <- paste(rep(inner.rule, table.width), collapse = "")
string <- paste0(string, i.rule, "\n")
}
# write coefficients
for (i in 2:(length(output.matrix[, 1]) - length(gof.names))) {
for (j in 1:length(output.matrix[1, ])) {
string <- paste0(string, output.matrix[i,j])
if (j == length(output.matrix[1, ])) {
string <- paste0(string, "\n")
} else {
string <- paste0(string, spacing)
}
}
}
if (length(gof.names) > 0) {
# mid rule 2
if (inner.rule != "") {
string <- paste0(string, i.rule, "\n")
}
# write GOF part of the output matrix
for (i in (length(output.matrix[, 1]) - (length(gof.names) - 1)):
(length(output.matrix[, 1]))) {
for (j in 1:length(output.matrix[1, ])) {
string <- paste0(string, output.matrix[i, j])
if (j == length(output.matrix[1, ])) {
string <- paste0(string, "\n")
} else {
string <- paste0(string, spacing)
}
}
}
}
# write table footer
if (outer.rule != "") {
string <- paste0(string, o.rule, "\n")
}
# stars note
if (is.null(stars)) {
snote <- ""
} else if (any(ci == FALSE)) {
st <- sort(stars)
if (length(unique(st)) != length(st)) {
stop("Duplicate elements are not allowed in the stars argument.")
}
if (length(st) == 4) {
snote <- paste0("*** p < ", st[1], ", ** p < ", st[2], ", * p < ", st[3],
", ", symbol, " p < ", st[4])
} else if (length(st) == 3) {
snote <- paste0("*** p < ", st[1], ", ** p < ", st[2], ", * p < ", st[3])
} else if (length(st) == 2) {
snote <- paste0("** p < ", st[1], ", * p < ", st[2])
} else if (length(st) == 1) {
snote <- paste0("* p < ", st)
} else {
snote <- ""
}
if (is.numeric(ci.test) && !is.na(ci.test) && nchar(snote) > 0 && any(ci)) {
snote <- paste(snote, "(or", ci.test, "outside the confidence interval).")
} else if (is.numeric(ci.test) && !is.na(ci.test) && any(ci)) {
snote <- paste("*", ci.test, "outside the confidence interval")
}
} else if (is.numeric(ci.test) && !is.na(ci.test)) {
snote <- paste("*", ci.test, "outside the confidence interval")
} else {
snote <- ""
}
if (is.null(custom.note)) {
note <- paste0(snote, "\n\n")
} else if (custom.note == "") {
note <- "\n"
} else {
note <- paste0(custom.note, "\n\n")
note <- gsub("%stars", snote, note)
}
string <- paste0(string, note)
#write to file
if (is.na(file)) {
cat(string)
} else if (!is.character(file)) {
stop("The 'file' argument must be a character string.")
} else {
sink(file)
cat(string)
sink()
cat(paste0("The table was written to the file '", file, "'.\n"))
}
if (return.string == TRUE) {
return(string)
}
}
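# Usage sketch (the models below are placeholders, not defined in this file):
# m1 <- lm(mpg ~ wt, data = mtcars); m2 <- lm(mpg ~ wt + hp, data = mtcars)
# screenreg(list(m1, m2), single.row = TRUE, digits = 3)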
# texreg function
texreg <- function(l, file = NA, single.row = FALSE,
stars = c(0.001, 0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.gof.names = NULL, custom.note = NULL,
digits = 2, leading.zero = TRUE, symbol = "\\cdot", override.coef = 0,
override.se = 0, override.pval = 0, omit.coef = NA, reorder.coef = NULL,
reorder.gof = NULL, return.string = TRUE, ci.force = FALSE,
ci.force.level = 0.95, ci.test = 0, bold = 0.00, center = TRUE,
caption = "Statistical models", caption.above = TRUE,
label = "table:coefficients", booktabs = FALSE, dcolumn = FALSE,
sideways = FALSE, use.packages = TRUE, table = TRUE, no.margin = TRUE,
scriptsize = FALSE, float.pos = "", ...) {
stars <- check.stars(stars)
#check dcolumn vs. bold
if (dcolumn == TRUE && bold > 0) {
dcolumn <- FALSE
msg <- paste("The dcolumn package and the bold argument cannot be used at",
"the same time. Switching off dcolumn.")
    if (!is.null(stars) && length(stars) > 0) {
warning(paste(msg, "You should also consider setting stars = FALSE."))
} else {
warning(msg)
}
}
models <- get.data(l, ...) #extract relevant coefficients, SEs, GOFs, etc.
gof.names <- get.gof(models) #extract names of GOFs
models <- override(models, override.coef, override.se, override.pval)
models <- ciforce(models, ci.force = ci.force, ci.level = ci.force.level)
# arrange coefficients and GOFs nicely in a matrix
gofs <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "gofs")
m <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "m")
decimal.matrix <- aggregate.matrix(models, gof.names, custom.gof.names,
digits, returnobject = "decimal.matrix")
m <- customnames(m, custom.coef.names) #rename coefficients
m <- rearrangeMatrix(m) #resort matrix and conflate duplicate entries
m <- as.data.frame(m)
m <- omitcoef(m, omit.coef) #remove coefficient rows matching regex
modnames <- modelnames(models, custom.model.names) #use (custom) model names
# reorder GOF and coef matrix
m <- reorder(m, reorder.coef)
gofs <- reorder(gofs, reorder.gof)
decimal.matrix <- reorder(decimal.matrix, reorder.gof)
# what is the optimal length of the labels?
lab.list <- c(rownames(m), gof.names)
lab.length <- 0
for (i in 1:length(lab.list)) {
if (nchar(lab.list[i]) > lab.length) {
lab.length <- nchar(lab.list[i])
}
}
# create output table with significance stars etc.
ci <- logical()
for (i in 1:length(models)) {
if (length(models[[i]]@se) == 0) {
ci[i] <- TRUE
} else {
ci[i] <- FALSE
}
}
output.matrix <- outputmatrix(m, single.row,
neginfstring = "\\multicolumn{1}{c}{$-$Inf}", leading.zero, digits,
se.prefix = " \\; (", se.suffix = ")", star.prefix = "^{",
star.suffix = "}", star.char = "*", stars, dcolumn = dcolumn,
symbol, bold, bold.prefix = "\\textbf{", bold.suffix = "}", ci = ci,
semicolon = ";\\ ", ci.test = ci.test)
output.matrix <- output.matrix[-seq(2, dim(output.matrix)[1], by=2),]
# create GOF matrix (the lower part of the final output matrix)
gof.matrix <- gofmatrix(gofs, decimal.matrix, dcolumn = TRUE, leading.zero,
digits)
# combine the coefficient and gof matrices vertically
output.matrix <- rbind(output.matrix, gof.matrix)
string <- ""
# write table header
string <- paste0(string, "\n")
if (use.packages == TRUE) {
if (sideways == TRUE & table == TRUE) {
string <- paste0(string, "\\usepackage{rotating}\n")
}
if (booktabs == TRUE) {
string <- paste0(string, "\\usepackage{booktabs}\n")
}
if (dcolumn == TRUE) {
string <- paste0(string, "\\usepackage{dcolumn}\n")
}
if (dcolumn == TRUE || booktabs == TRUE || sideways == TRUE) {
cat("\n")
}
}
if (table == TRUE) {
if (sideways == TRUE) {
t <- "sideways"
} else {
t <- ""
}
if ( float.pos == "") {
string <- paste0(string, "\\begin{", t, "table}\n")
} else {
string <- paste0(string, "\\begin{", t, "table}[", float.pos, "]\n")
}
if (caption.above == TRUE) {
string <- paste0(string, "\\caption{", caption, "}\n")
}
if (center == TRUE) {
string <- paste0(string, "\\begin{center}\n")
}
if (scriptsize == TRUE) {
string <- paste0(string, "\\scriptsize\n")
}
}
string <- paste0(string, "\\begin{tabular}{l ")
#define columns of the table
if (no.margin == FALSE) {
margin.arg <- ""
} else {
margin.arg <- "@{}"
}
for (i in 2:ncol(output.matrix)) {
if (single.row == TRUE) {
if (ci[i - 1] == FALSE) {
separator <- ")"
} else {
separator <- "]"
}
} else {
separator <- "."
}
if (dcolumn == FALSE) {
string <- paste0(string, "c ")
} else {
if (single.row == TRUE) {
dl <- compute.width(output.matrix[, i], left = TRUE, single.row = TRUE,
bracket = separator)
dr <- compute.width(output.matrix[, i], left = FALSE, single.row = TRUE,
bracket = separator)
} else {
dl <- compute.width(output.matrix[, i], left = TRUE, single.row = FALSE,
bracket = separator)
dr <- compute.width(output.matrix[, i], left = FALSE,
single.row = FALSE, bracket = separator)
}
string <- paste0(string, "D{", separator, "}{", separator, "}{",
dl, separator, dr, "}", margin.arg, " ")
}
}
# horizontal rule above the table
if (booktabs == TRUE) {
string <- paste0(string, "}\n", "\\toprule\n")
} else {
string <- paste0(string, "}\n", "\\hline\n")
}
# specify model names
for (k in 1:lab.length) {
string <- paste0(string, " ")
}
if (dcolumn == TRUE) {
for (i in 1:length(models)) {
string <- paste0(string, " & \\multicolumn{1}{c}{", modnames[i], "}")
}
} else {
for (i in 1:length(models)) {
string <- paste0(string, " & ", modnames[i])
}
}
# horizontal rule between coefficients and goodness-of-fit block
if (booktabs == TRUE) {
string <- paste0(string, " \\\\\n", "\\midrule\n")
} else {
string <- paste0(string, " \\\\\n", "\\hline\n")
}
# fill with spaces
max.lengths <- numeric(length(output.matrix[1, ]))
for (i in 1:length(output.matrix[1, ])) {
max.length <- 0
for (j in 1:length(output.matrix[, 1])) {
if (nchar(output.matrix[j, i]) > max.length) {
max.length <- nchar(output.matrix[j, i])
}
}
max.lengths[i] <- max.length
}
for (i in 1:length(output.matrix[, 1])) {
for (j in 1:length(output.matrix[1, ])) {
nzero <- max.lengths[j] - nchar(output.matrix[i, j])
zeros <- rep(" ", nzero)
zeros <- paste(zeros, collapse = "")
output.matrix[i, j] <- paste0(output.matrix[i, j], zeros)
}
}
# write coefficients to string object
for (i in 1:(length(output.matrix[, 1]) - length(gof.names))) {
for (j in 1:length(output.matrix[1, ])) {
string <- paste0(string, output.matrix[i, j])
if (j == length(output.matrix[1, ])) {
string <- paste0(string, " \\\\\n")
} else {
string <- paste0(string, " & ")
}
}
}
if (length(gof.names) > 0) {
# lower mid rule
if (booktabs == TRUE) {
string <- paste0(string, "\\midrule\n")
} else {
string <- paste0(string, "\\hline\n")
}
# write GOF block
for (i in (length(output.matrix[, 1]) - (length(gof.names) - 1)):
(length(output.matrix[, 1]))) {
for (j in 1:length(output.matrix[1, ])) {
string <- paste0(string, output.matrix[i, j])
if (j == length(output.matrix[1, ])) {
string <- paste0(string, " \\\\\n")
} else {
string <- paste0(string, " & ")
}
}
}
}
# write table footer
if (booktabs == TRUE) {
string <- paste0(string, "\\bottomrule\n")
} else {
string <- paste0(string, "\\hline\n")
}
# stars note
if (is.null(stars)) {
snote <- ""
} else if (any(ci == FALSE)) {
st <- sort(stars)
if (length(unique(st)) != length(st)) {
stop("Duplicate elements are not allowed in the stars argument.")
}
if (length(st) == 4) {
snote <- paste0("\\textsuperscript{***}$p<", st[1],
"$, \n \\textsuperscript{**}$p<", st[2],
"$, \n \\textsuperscript{*}$p<", st[3],
"$, \n \\textsuperscript{$", symbol, "$}$p<", st[4], "$")
} else if (length(st) == 3) {
snote <- paste0("\\textsuperscript{***}$p<", st[1],
"$, \n \\textsuperscript{**}$p<", st[2],
"$, \n \\textsuperscript{*}$p<", st[3], "$")
} else if (length(st) == 2) {
snote <- paste0("\\textsuperscript{**}$p<", st[1],
"$, \n \\textsuperscript{*}$p<", st[2], "$")
} else if (length(st) == 1) {
snote <- paste0("\\textsuperscript{*}$p<", st[1], "$")
} else {
snote <- ""
}
if (is.numeric(ci.test) && !is.na(ci.test) && nchar(snote) > 0 && any(ci)) {
snote <- paste(snote, "(or", ci.test, "outside the confidence interval).")
} else if (is.numeric(ci.test) && !is.na(ci.test) && any(ci)) {
# snote <- paste("\\textsuperscript{*}", ci.test,
snote <- paste("$^*$", ci.test,
"outside the confidence interval")
}
} else if (is.numeric(ci.test) && !is.na(ci.test)) {
# snote <- paste("\\textsuperscript{*}", ci.test,
snote <- paste("$^*$", ci.test,
"outside the confidence interval")
} else {
snote <- ""
}
if (is.null(custom.note)) {
note <- paste0("\\multicolumn{", length(models) + 1,
"}{l}{\\scriptsize{", snote, "}}\n")
} else if (custom.note == "") {
note <- ""
} else {
note <- paste0("\\multicolumn{", length(models) + 1,
"}{l}{\\scriptsize{", custom.note, "}}\n")
note <- gsub("%stars", snote, note, perl = TRUE)
}
string <- paste0(string, note, "\\end{tabular}\n")
if (table == TRUE) {
if (scriptsize == TRUE) {
string <- paste0(string, "\\normalsize\n")
}
if (caption.above == FALSE) {
string <- paste0(string, "\\caption{", caption, "}\n")
}
string <- paste0(string, "\\label{", label, "}\n")
if (center == TRUE) {
string <- paste0(string, "\\end{center}\n")
}
if (sideways == TRUE) {
t <- "sideways"
} else {
t <- ""
}
string <- paste0(string, "\\end{", t, "table}\n\n")
}
if (is.na(file)) {
return(string)
} else if (!is.character(file)) {
stop("The 'file' argument must be a character string.")
} else {
sink(file)
cat(string)
sink()
cat(paste0("The table was written to the file '", file, "'.\n"))
}
if (return.string == TRUE) {
return(string)
}
}
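# Usage sketch (same placeholder models as above): write a LaTeX table with
# booktabs rules and dcolumn-aligned columns to a file.
# texreg(list(m1, m2), file = "models.tex", booktabs = TRUE, dcolumn = TRUE,
#        caption = "Example models", label = "tab:models")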
# htmlreg function
htmlreg <- function(l, file = NA, single.row = FALSE,
stars = c(0.001, 0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.gof.names = NULL, custom.note = NULL,
digits = 2, leading.zero = TRUE, symbol = "·", override.coef = 0,
override.se = 0, override.pval = 0, omit.coef = NA, reorder.coef = NULL,
reorder.gof = NULL, return.string = FALSE, ci.force = FALSE,
ci.force.level = 0.95, ci.test = 0, bold = 0.00, center = TRUE,
caption = "Statistical models", caption.above = FALSE, star.symbol = "*",
inline.css = TRUE, doctype = TRUE, html.tag = TRUE, head.tag = TRUE,
body.tag = FALSE, append = TRUE , ...) {
linit <- l
captioninit <- caption
stars <- check.stars(stars)
for(ind.table in 1:length(linit)){
l <- linit[[ind.table]]
models <- get.data(l, ...) #extract relevant coefficients, SEs, GOFs, etc.
caption <- captioninit[[ind.table]]
# inline CSS definitions
if (inline.css == TRUE) {
css.table <- " style=\"border: none;\""
css.th <- paste0(" style=\"text-align: left; border-top: 2px solid ",
"black; border-bottom: 1px solid black; padding-right: 12px;\"")
css.midrule <- " style=\"border-top: 1px solid black;\""
css.bottomrule <- " style=\"border-bottom: 2px solid black;\""
css.bottomrule.nogof <- paste(" style=\"padding-right: 12px;",
"border-bottom: 2px solid black;\"")
css.td <- " style=\"padding-right: 12px; border: none;\""
css.caption <- ""
css.sup <- "" #" style=\"vertical-align: 4px;\""
} else {
css.table <- ""
css.th <- ""
css.midrule <- ""
css.bottomrule <- ""
css.td <- ""
css.caption <- ""
css.sup <- ""
}
models <- override(models, override.coef, override.se, override.pval)
models <- tex.replace(models, type = "html", style = css.sup) # TeX --> HTML
models <- ciforce(models, ci.force = ci.force, ci.level = ci.force.level)
gof.names <- get.gof(models) # extract names of GOFs
# arrange coefficients and GOFs nicely in a matrix
gofs <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "gofs")
m <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "m")
decimal.matrix <- aggregate.matrix(models, gof.names, custom.gof.names,
digits, returnobject = "decimal.matrix")
m <- customnames(m, custom.coef.names) # rename coefficients
m <- rearrangeMatrix(m) # resort matrix and conflate duplicate entries
m <- as.data.frame(m)
m <- omitcoef(m, omit.coef) # remove coefficient rows matching regex
modnames <- modelnames(models, custom.model.names[[ind.table]]) # use (custom) model names
# reorder GOF and coef matrix
m <- reorder(m, reorder.coef)
gofs <- reorder(gofs, reorder.gof)
decimal.matrix <- reorder(decimal.matrix, reorder.gof)
# create output table with significance stars etc.
ci <- logical()
for (i in 1:length(models)) {
if (length(models[[i]]@se) == 0) {
ci[i] <- TRUE
} else {
ci[i] <- FALSE
}
}
output.matrix <- outputmatrix(m, single.row, neginfstring = "-Inf",
leading.zero, digits, se.prefix = " (", se.suffix = ")",
star.char = star.symbol, star.prefix = paste0("<sup", css.sup, ">"),
star.suffix = "</sup>", stars, dcolumn = TRUE, symbol, bold = bold,
bold.prefix = "<b>", bold.suffix = "</b>", ci = ci, ci.test = ci.test)
output.matrix <- output.matrix[-seq(2, dim(output.matrix)[1], by=2),]
# create GOF matrix (the lower part of the final output matrix)
gof.matrix <- gofmatrix(gofs, decimal.matrix, leading.zero,
digits)
# combine the coefficient and gof matrices vertically
output.matrix <- rbind(output.matrix, gof.matrix)
# write table header
if (single.row == TRUE) {
numcols <- 2 * length(models)
} else {
numcols <- length(models)
}
if (doctype == TRUE) {
doct <- paste0("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 ",
"Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">\n")
} else {
doct <- ""
}
# determine indentation for table
if (html.tag == TRUE) {
h.ind <- " "
} else {
h.ind <- ""
}
if (body.tag == TRUE) {
b.ind <- " "
} else {
b.ind <- ""
}
if (head.tag == TRUE) {
d.ind <- " "
} else {
d.ind <- ""
}
ind <- " "
# horizontal table alignment
if (center == FALSE) {
tabdef <- paste0(h.ind, b.ind, "<table cellspacing=\"3\"", css.table, ">\n")
} else {
tabdef <- paste0(h.ind, b.ind,
"<table cellspacing=\"3\" align=\"center\"", css.table, ">\n")
}
# set caption
if (is.null(caption) || !is.character(caption)) {
stop("The caption must be provided as a (possibly empty) character vector.")
} else if (caption != "" && caption.above == FALSE) {
cap <- paste0(h.ind, b.ind, ind,
"<caption align=\"bottom\" style=\"margin-top:0.5em;", css.caption,
"\">", "<b>", caption,"</b>", "</caption>\n")
} else if (caption != "" && caption.above == TRUE) {
cap <- paste0(h.ind, b.ind, ind,
"<caption align=\"top\" style=\"margin-bottom:0.3em;", css.caption,
"\">", "<b>", caption, "</b>", "</caption>\n")
} else {
cap <- ""
}
# HTML header with CSS definitions
if(ind.table==1)
string <- paste0("\n", doct)
else
string <- paste0(string, "</tr>\n <tr>\n </tr>\n <tr>\n")
if (html.tag == TRUE) {
string <- paste0(string, "<html>\n")
}
if (inline.css == TRUE) {
css.header <- ""
} else {
css.header <- paste0(
h.ind, d.ind, "<style type=\"text/css\">\n",
h.ind, d.ind, ind, "table {\n",
h.ind, d.ind, ind, ind, "border: none;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, "th {\n",
h.ind, d.ind, ind, ind, "text-align: left;\n",
h.ind, d.ind, ind, ind, "border-top: 2px solid black;\n",
h.ind, d.ind, ind, ind, "border-bottom: 1px solid black;\n",
h.ind, d.ind, ind, ind, "padding-right: 12px;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, ".midRule {\n",
h.ind, d.ind, ind, ind, "border-top: 1px solid black;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, ".bottomRule {\n",
h.ind, d.ind, ind, ind, "border-bottom: 2px solid black;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, "td {\n",
h.ind, d.ind, ind, ind, "padding-right: 12px;\n",
h.ind, d.ind, ind, ind, "border: none;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, "sup {\n",
h.ind, d.ind, ind, ind, "vertical-align: 4px;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, "</style>\n"
)
}
if (head.tag == TRUE) {
string <- paste0(string,
h.ind, "<head>\n",
h.ind, d.ind, "<title>", caption, "</title>\n",
css.header,
h.ind, "</head>\n\n")
}
if (body.tag == TRUE) {
string <- paste0(string, h.ind, "<body>\n")
}
string <- paste0(
string,
tabdef,
cap,
h.ind, b.ind, ind, "<tr>\n",
h.ind, b.ind, ind, ind, "<th", css.th, "></th>\n"
)
# specify model names (header row)
for (i in 1:length(models)) {
string <- paste0(string,
h.ind, b.ind, ind, ind, "<td", css.th, ">", modnames[i],
"</td>\n")
}
string <- paste0(string, h.ind, b.ind, ind, "</tr>\n")
# write coefficients to string object
coef.length <- length(output.matrix[, 1]) - length(gof.names)
for (i in 1:coef.length) {
string <- paste0(string, h.ind, b.ind, ind, "<tr>\n")
for (j in 1:length(output.matrix[1, ])) {
if (length(gof.names) == 0 && i == coef.length) { # no GOF block
if (inline.css == TRUE) {
br <- css.bottomrule.nogof
} else {
br <- " class=\"bottomRule\""
}
string <- paste0(string, h.ind, b.ind, ind, ind, "<td", br, ">",
output.matrix[i,j], "</td>\n")
} else { # GOF block present
string <- paste0(string, h.ind, b.ind, ind, ind, "<td", css.td, ">",
output.matrix[i,j], "</td>\n")
}
}
string <- paste0(string, h.ind, b.ind, ind, "</tr>\n")
}
if (length(gof.names) > 0) {
# write GOF block
for (i in (length(output.matrix[, 1]) - (length(gof.names) - 1)):
(length(output.matrix[, 1]))) {
string <- paste0(string, h.ind, b.ind, ind, "<tr>\n")
for (j in 1:length(output.matrix[1, ])) {
if (i == length(output.matrix[, 1]) - (length(gof.names) - 1)) {
if (inline.css == TRUE) {
mr <- css.midrule
} else {
mr <- " class=\"midRule\"" # add mid rule via style sheets
}
string <- paste0(string, h.ind, b.ind, ind, ind,
"<td", mr, ">", output.matrix[i,j], "</td>\n")
} else if (i == length(output.matrix[, 1])) {
if (inline.css == TRUE) {
br <- css.bottomrule
} else {
br <- " class=\"bottomRule\""
}
string <- paste0(string, h.ind, b.ind, ind, ind,
"<td", br, ">", output.matrix[i,j], "</td>\n")
} else {
string <- paste0(string, h.ind, b.ind, ind, ind, "<td", css.td, ">",
output.matrix[i,j], "</td>\n")
}
}
string <- paste0(string, h.ind, b.ind, ind, "</tr>\n")
}
}
# stars note
if (is.null(stars)) {
snote <- ""
} else if (any(ci == FALSE)) {
st <- sort(stars)
if (length(unique(st)) != length(st)) {
stop("Duplicate elements are not allowed in the stars argument.")
}
if (length(st) == 4) {
snote <- paste0("<sup", css.sup, ">", star.symbol, star.symbol,
star.symbol, "</sup>p < ", st[1], ", <sup", css.sup, ">",
star.symbol, star.symbol, "</sup", css.sup, ">p < ", st[2],
", <sup", css.sup, ">", star.symbol, "</sup>p < ",
st[3], ", <sup", css.sup, ">", symbol, "</sup>p < ", st[4])
} else if (length(st) == 3) {
snote <- paste0("<sup", css.sup, ">", star.symbol, star.symbol,
star.symbol, "</sup>p < ", st[1], ", <sup", css.sup, ">",
star.symbol, star.symbol, "</sup>p < ", st[2], ", <sup", css.sup,
">", star.symbol, "</sup>p < ", st[3])
} else if (length(st) == 2) {
snote <- paste0("<sup", css.sup, ">", star.symbol, star.symbol,
"</sup>p < ", st[1], ", <sup", css.sup, ">", star.symbol,
"</sup>p < ", st[2])
} else if (length(st) == 1) {
snote <- paste0("<sup", css.sup, ">", star.symbol, "</sup>p < ", st[1])
} else {
snote <- ""
}
if (is.numeric(ci.test) && !is.na(ci.test) && nchar(snote) > 0 && any(ci)) {
snote <- paste(snote, "(or", ci.test, "outside the confidence interval).")
} else if (is.numeric(ci.test) && !is.na(ci.test) && any(ci)) {
snote <- paste0("<sup>", star.symbol, "</sup> ", ci.test,
" outside the confidence interval")
}
} else if (is.numeric(ci.test) && !is.na(ci.test)) {
snote <- paste0("<sup>", star.symbol, "</sup> ", ci.test,
" outside the confidence interval")
} else {
snote <- ""
}
if (is.null(custom.note)) {
note <- snote
} else if (custom.note == "") {
note <- ""
} else {
note <- custom.note
note <- gsub("%stars", snote, note)
}
string <- paste0(string, h.ind, b.ind, ind, "<tr>\n", h.ind, b.ind, ind, ind,
"<td", css.td, " colspan=\"", (1 + length(models)),
"\"><span style=\"font-size:0.8em\">", note, "</span></td>\n", h.ind,
b.ind, ind, "</tr>\n")
# write table footer
string <- paste0(string, h.ind, b.ind, "</table>\n")
if (body.tag == TRUE) {
string <- paste0(string, h.ind, "</body>\n")
}
}
if (html.tag == TRUE) {
string <- paste0(string, "</html>\n\n")
} else {
string <- paste0(string, "\n")
}
#print(cat(string))
if (is.na(file)) {
return(cat(string))
} else if (!is.character(file)) {
stop("The 'file' argument must be a character string.")
} else {
#sink(file, append=FALSE)
#cat(string)
#sink()
sink(file, append=append)
cat("\n")
cat("\n")
cat("\n")
cat("\n")
cat(string)
sink()
cat(paste0("The results were written to the file '", file, "'.\n"))
}
if (return.string == TRUE) {
return(string)
}
}
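# Usage sketch (an assumption based on the loop over 'linit' above: this variant
# expects a list of model lists, with per-table captions and model names):
# htmlreg(list(list(m1, m2)), file = "models.html",
#         caption = list("Example models"),
#         custom.model.names = list(c("Model 1", "Model 2")))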
|
/R/texreg_SensMixed.R
|
no_license
|
alku86/SensMixed
|
R
| false | false | 32,981 |
r
|
# The texreg package was written by Philip Leifeld.
# Please use the forum at http://r-forge.r-project.org/projects/texreg/
# for bug reports, help or feature requests.
# screenreg function
screenreg <- function(l, file = NA, single.row = FALSE,
stars = c(0.001, 0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.gof.names = NULL, custom.note = NULL,
digits = 2, leading.zero = TRUE, symbol = ".", override.coef = 0,
override.se = 0, override.pval = 0, omit.coef = NA, reorder.coef = NULL,
reorder.gof = NULL, return.string = FALSE, ci.force = FALSE,
ci.force.level = 0.95, ci.test = 0, column.spacing = 2, outer.rule = "=",
inner.rule = "-", ...) {
stars <- check.stars(stars)
models <- get.data(l, ...) #extract relevant coefficients, SEs, GOFs, etc.
#models <- override(models, override.coef, override.se, override.pval)
models <- tex.replace(models, type = "screen") #convert TeX code to text code
#models <- ciforce(models, ci.force = ci.force, ci.level = ci.force.level)
gof.names <- get.gof(models) #extract names of GOFs
# arrange coefficients and GOFs nicely in a matrix
gofs <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "gofs")
m <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "m")
decimal.matrix <- aggregate.matrix(models, gof.names, custom.gof.names,
digits, returnobject = "decimal.matrix")
m <- customnames(m, custom.coef.names) #rename coefficients
m <- rearrangeMatrix(m) #resort matrix and conflate duplicate entries
m <- as.data.frame(m)
m <- omitcoef(m, omit.coef) #remove coefficient rows matching regex
modnames <- modelnames(models, custom.model.names) #use (custom) model names
# reorder GOF and coef matrix
m <- reorder(m, reorder.coef)
gofs <- reorder(gofs, reorder.gof)
decimal.matrix <- reorder(decimal.matrix, reorder.gof)
# create output table with significance stars etc.
ci <- logical()
for (i in 1:length(models)) {
if (length(models[[i]]@se) == 0) {
ci[i] <- TRUE
} else {
ci[i] <- FALSE
}
}
output.matrix <- outputmatrix(m, single.row, neginfstring = "-Inf",
leading.zero, digits, se.prefix = " (", se.suffix = ")",
star.prefix = " ", star.suffix = "", star.char = "*", stars,
dcolumn = TRUE, symbol = symbol, bold = 0, bold.prefix = "",
bold.suffix = "", ci = ci, ci.test = ci.test)
#class(output.matrix)
output.matrix <- output.matrix[-seq(2, dim(output.matrix)[1], by=2),]
# create GOF matrix (the lower part of the final output matrix)
gof.matrix <- gofmatrix(gofs, decimal.matrix, dcolumn = TRUE, leading.zero,
digits)
# combine the coefficient and gof matrices vertically
output.matrix <- rbind(output.matrix, gof.matrix)
# reformat output matrix and add spaces
if (ncol(output.matrix) == 2) {
temp <- matrix(format.column(output.matrix[, -1], single.row = single.row,
digits = digits))
} else {
temp <- apply(output.matrix[, -1], 2, format.column,
single.row = single.row, digits = digits)
}
output.matrix <- cbind(output.matrix[, 1], temp)
output.matrix <- rbind(c("", modnames), output.matrix)
for (i in 1:ncol(output.matrix)) {
output.matrix[, i] <- fill.spaces(output.matrix[, i])
}
string <- "\n"
# horizontal rule above the table
table.width <- sum(nchar(output.matrix[1, ])) +
(ncol(output.matrix) - 1) * column.spacing
if (class(outer.rule) != "character") {
stop("outer.rule must be a character.")
} else if (nchar(outer.rule) > 1) {
stop("outer.rule must be a character of maximum length 1.")
} else if (outer.rule == "") {
o.rule <- ""
} else {
o.rule <- paste(rep(outer.rule, table.width), collapse = "")
string <- paste0(string, o.rule, "\n")
}
# specify model names
spacing <- paste(rep(" ", column.spacing), collapse = "")
string <- paste(string, output.matrix[1, 1], sep = "")
for (i in 2:ncol(output.matrix)) {
string <- paste0(string, spacing, output.matrix[1, i])
}
string <- paste0(string, "\n")
# mid rule 1
if (class(inner.rule) != "character") {
stop("inner.rule must be a character.")
} else if (nchar(inner.rule) > 1) {
stop("inner.rule must be a character of maximum length 1.")
} else if (inner.rule == "") {
i.rule <- ""
} else {
i.rule <- paste(rep(inner.rule, table.width), collapse = "")
string <- paste0(string, i.rule, "\n")
}
# write coefficients
for (i in 2:(length(output.matrix[, 1]) - length(gof.names))) {
for (j in 1:length(output.matrix[1, ])) {
string <- paste0(string, output.matrix[i,j])
if (j == length(output.matrix[1, ])) {
string <- paste0(string, "\n")
} else {
string <- paste0(string, spacing)
}
}
}
if (length(gof.names) > 0) {
# mid rule 2
if (inner.rule != "") {
string <- paste0(string, i.rule, "\n")
}
# write GOF part of the output matrix
for (i in (length(output.matrix[, 1]) - (length(gof.names) - 1)):
(length(output.matrix[, 1]))) {
for (j in 1:length(output.matrix[1, ])) {
string <- paste0(string, output.matrix[i, j])
if (j == length(output.matrix[1, ])) {
string <- paste0(string, "\n")
} else {
string <- paste0(string, spacing)
}
}
}
}
# write table footer
if (outer.rule != "") {
string <- paste0(string, o.rule, "\n")
}
# stars note
if (is.null(stars)) {
snote <- ""
} else if (any(ci == FALSE)) {
st <- sort(stars)
if (length(unique(st)) != length(st)) {
stop("Duplicate elements are not allowed in the stars argument.")
}
if (length(st) == 4) {
snote <- paste0("*** p < ", st[1], ", ** p < ", st[2], ", * p < ", st[3],
", ", symbol, " p < ", st[4])
} else if (length(st) == 3) {
snote <- paste0("*** p < ", st[1], ", ** p < ", st[2], ", * p < ", st[3])
} else if (length(st) == 2) {
snote <- paste0("** p < ", st[1], ", * p < ", st[2])
} else if (length(st) == 1) {
snote <- paste0("* p < ", st)
} else {
snote <- ""
}
if (is.numeric(ci.test) && !is.na(ci.test) && nchar(snote) > 0 && any(ci)) {
snote <- paste(snote, "(or", ci.test, "outside the confidence interval).")
} else if (is.numeric(ci.test) && !is.na(ci.test) && any(ci)) {
snote <- paste("*", ci.test, "outside the confidence interval")
}
} else if (is.numeric(ci.test) && !is.na(ci.test)) {
snote <- paste("*", ci.test, "outside the confidence interval")
} else {
snote <- ""
}
if (is.null(custom.note)) {
note <- paste0(snote, "\n\n")
} else if (custom.note == "") {
note <- "\n"
} else {
note <- paste0(custom.note, "\n\n")
note <- gsub("%stars", snote, note)
}
string <- paste0(string, note)
#write to file
if (is.na(file)) {
cat(string)
} else if (!is.character(file)) {
stop("The 'file' argument must be a character string.")
} else {
sink(file)
cat(string)
sink()
cat(paste0("The table was written to the file '", file, "'.\n"))
}
if (return.string == TRUE) {
return(string)
}
}
# texreg function
texreg <- function(l, file = NA, single.row = FALSE,
stars = c(0.001, 0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.gof.names = NULL, custom.note = NULL,
digits = 2, leading.zero = TRUE, symbol = "\\cdot", override.coef = 0,
override.se = 0, override.pval = 0, omit.coef = NA, reorder.coef = NULL,
reorder.gof = NULL, return.string = TRUE, ci.force = FALSE,
ci.force.level = 0.95, ci.test = 0, bold = 0.00, center = TRUE,
caption = "Statistical models", caption.above = TRUE,
label = "table:coefficients", booktabs = FALSE, dcolumn = FALSE,
sideways = FALSE, use.packages = TRUE, table = TRUE, no.margin = TRUE,
scriptsize = FALSE, float.pos = "", ...) {
stars <- check.stars(stars)
#check dcolumn vs. bold
if (dcolumn == TRUE && bold > 0) {
dcolumn <- FALSE
msg <- paste("The dcolumn package and the bold argument cannot be used at",
"the same time. Switching off dcolumn.")
    if (!is.null(stars) && length(stars) > 0) {
warning(paste(msg, "You should also consider setting stars = FALSE."))
} else {
warning(msg)
}
}
models <- get.data(l, ...) #extract relevant coefficients, SEs, GOFs, etc.
gof.names <- get.gof(models) #extract names of GOFs
models <- override(models, override.coef, override.se, override.pval)
models <- ciforce(models, ci.force = ci.force, ci.level = ci.force.level)
# arrange coefficients and GOFs nicely in a matrix
gofs <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "gofs")
m <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "m")
decimal.matrix <- aggregate.matrix(models, gof.names, custom.gof.names,
digits, returnobject = "decimal.matrix")
m <- customnames(m, custom.coef.names) #rename coefficients
m <- rearrangeMatrix(m) #resort matrix and conflate duplicate entries
m <- as.data.frame(m)
m <- omitcoef(m, omit.coef) #remove coefficient rows matching regex
modnames <- modelnames(models, custom.model.names) #use (custom) model names
# reorder GOF and coef matrix
m <- reorder(m, reorder.coef)
gofs <- reorder(gofs, reorder.gof)
decimal.matrix <- reorder(decimal.matrix, reorder.gof)
# what is the optimal length of the labels?
lab.list <- c(rownames(m), gof.names)
lab.length <- 0
for (i in 1:length(lab.list)) {
if (nchar(lab.list[i]) > lab.length) {
lab.length <- nchar(lab.list[i])
}
}
# create output table with significance stars etc.
ci <- logical()
for (i in 1:length(models)) {
if (length(models[[i]]@se) == 0) {
ci[i] <- TRUE
} else {
ci[i] <- FALSE
}
}
output.matrix <- outputmatrix(m, single.row,
neginfstring = "\\multicolumn{1}{c}{$-$Inf}", leading.zero, digits,
se.prefix = " \\; (", se.suffix = ")", star.prefix = "^{",
star.suffix = "}", star.char = "*", stars, dcolumn = dcolumn,
symbol, bold, bold.prefix = "\\textbf{", bold.suffix = "}", ci = ci,
semicolon = ";\\ ", ci.test = ci.test)
output.matrix <- output.matrix[-seq(2, dim(output.matrix)[1], by=2),]
# create GOF matrix (the lower part of the final output matrix)
gof.matrix <- gofmatrix(gofs, decimal.matrix, dcolumn = TRUE, leading.zero,
digits)
# combine the coefficient and gof matrices vertically
output.matrix <- rbind(output.matrix, gof.matrix)
string <- ""
# write table header
string <- paste0(string, "\n")
if (use.packages == TRUE) {
if (sideways == TRUE & table == TRUE) {
string <- paste0(string, "\\usepackage{rotating}\n")
}
if (booktabs == TRUE) {
string <- paste0(string, "\\usepackage{booktabs}\n")
}
if (dcolumn == TRUE) {
string <- paste0(string, "\\usepackage{dcolumn}\n")
}
if (dcolumn == TRUE || booktabs == TRUE || sideways == TRUE) {
cat("\n")
}
}
if (table == TRUE) {
if (sideways == TRUE) {
t <- "sideways"
} else {
t <- ""
}
if ( float.pos == "") {
string <- paste0(string, "\\begin{", t, "table}\n")
} else {
string <- paste0(string, "\\begin{", t, "table}[", float.pos, "]\n")
}
if (caption.above == TRUE) {
string <- paste0(string, "\\caption{", caption, "}\n")
}
if (center == TRUE) {
string <- paste0(string, "\\begin{center}\n")
}
if (scriptsize == TRUE) {
string <- paste0(string, "\\scriptsize\n")
}
}
string <- paste0(string, "\\begin{tabular}{l ")
#define columns of the table
if (no.margin == FALSE) {
margin.arg <- ""
} else {
margin.arg <- "@{}"
}
for (i in 2:ncol(output.matrix)) {
if (single.row == TRUE) {
if (ci[i - 1] == FALSE) {
separator <- ")"
} else {
separator <- "]"
}
} else {
separator <- "."
}
if (dcolumn == FALSE) {
string <- paste0(string, "c ")
} else {
if (single.row == TRUE) {
dl <- compute.width(output.matrix[, i], left = TRUE, single.row = TRUE,
bracket = separator)
dr <- compute.width(output.matrix[, i], left = FALSE, single.row = TRUE,
bracket = separator)
} else {
dl <- compute.width(output.matrix[, i], left = TRUE, single.row = FALSE,
bracket = separator)
dr <- compute.width(output.matrix[, i], left = FALSE,
single.row = FALSE, bracket = separator)
}
string <- paste0(string, "D{", separator, "}{", separator, "}{",
dl, separator, dr, "}", margin.arg, " ")
}
}
# horizontal rule above the table
if (booktabs == TRUE) {
string <- paste0(string, "}\n", "\\toprule\n")
} else {
string <- paste0(string, "}\n", "\\hline\n")
}
# specify model names
for (k in 1:lab.length) {
string <- paste0(string, " ")
}
if (dcolumn == TRUE) {
for (i in 1:length(models)) {
string <- paste0(string, " & \\multicolumn{1}{c}{", modnames[i], "}")
}
} else {
for (i in 1:length(models)) {
string <- paste0(string, " & ", modnames[i])
}
}
  # horizontal rule between the model-names header row and the coefficient block
if (booktabs == TRUE) {
string <- paste0(string, " \\\\\n", "\\midrule\n")
} else {
string <- paste0(string, " \\\\\n", "\\hline\n")
}
# fill with spaces
max.lengths <- numeric(length(output.matrix[1, ]))
for (i in 1:length(output.matrix[1, ])) {
max.length <- 0
for (j in 1:length(output.matrix[, 1])) {
if (nchar(output.matrix[j, i]) > max.length) {
max.length <- nchar(output.matrix[j, i])
}
}
max.lengths[i] <- max.length
}
for (i in 1:length(output.matrix[, 1])) {
for (j in 1:length(output.matrix[1, ])) {
nzero <- max.lengths[j] - nchar(output.matrix[i, j])
zeros <- rep(" ", nzero)
zeros <- paste(zeros, collapse = "")
output.matrix[i, j] <- paste0(output.matrix[i, j], zeros)
}
}
# write coefficients to string object
for (i in 1:(length(output.matrix[, 1]) - length(gof.names))) {
for (j in 1:length(output.matrix[1, ])) {
string <- paste0(string, output.matrix[i, j])
if (j == length(output.matrix[1, ])) {
string <- paste0(string, " \\\\\n")
} else {
string <- paste0(string, " & ")
}
}
}
if (length(gof.names) > 0) {
# lower mid rule
if (booktabs == TRUE) {
string <- paste0(string, "\\midrule\n")
} else {
string <- paste0(string, "\\hline\n")
}
# write GOF block
for (i in (length(output.matrix[, 1]) - (length(gof.names) - 1)):
(length(output.matrix[, 1]))) {
for (j in 1:length(output.matrix[1, ])) {
string <- paste0(string, output.matrix[i, j])
if (j == length(output.matrix[1, ])) {
string <- paste0(string, " \\\\\n")
} else {
string <- paste0(string, " & ")
}
}
}
}
# write table footer
if (booktabs == TRUE) {
string <- paste0(string, "\\bottomrule\n")
} else {
string <- paste0(string, "\\hline\n")
}
# stars note
if (is.null(stars)) {
snote <- ""
} else if (any(ci == FALSE)) {
st <- sort(stars)
if (length(unique(st)) != length(st)) {
stop("Duplicate elements are not allowed in the stars argument.")
}
if (length(st) == 4) {
snote <- paste0("\\textsuperscript{***}$p<", st[1],
"$, \n \\textsuperscript{**}$p<", st[2],
"$, \n \\textsuperscript{*}$p<", st[3],
"$, \n \\textsuperscript{$", symbol, "$}$p<", st[4], "$")
} else if (length(st) == 3) {
snote <- paste0("\\textsuperscript{***}$p<", st[1],
"$, \n \\textsuperscript{**}$p<", st[2],
"$, \n \\textsuperscript{*}$p<", st[3], "$")
} else if (length(st) == 2) {
snote <- paste0("\\textsuperscript{**}$p<", st[1],
"$, \n \\textsuperscript{*}$p<", st[2], "$")
} else if (length(st) == 1) {
snote <- paste0("\\textsuperscript{*}$p<", st[1], "$")
} else {
snote <- ""
}
if (is.numeric(ci.test) && !is.na(ci.test) && nchar(snote) > 0 && any(ci)) {
snote <- paste(snote, "(or", ci.test, "outside the confidence interval).")
} else if (is.numeric(ci.test) && !is.na(ci.test) && any(ci)) {
# snote <- paste("\\textsuperscript{*}", ci.test,
snote <- paste("$^*$", ci.test,
"outside the confidence interval")
}
} else if (is.numeric(ci.test) && !is.na(ci.test)) {
# snote <- paste("\\textsuperscript{*}", ci.test,
snote <- paste("$^*$", ci.test,
"outside the confidence interval")
} else {
snote <- ""
}
if (is.null(custom.note)) {
note <- paste0("\\multicolumn{", length(models) + 1,
"}{l}{\\scriptsize{", snote, "}}\n")
} else if (custom.note == "") {
note <- ""
} else {
note <- paste0("\\multicolumn{", length(models) + 1,
"}{l}{\\scriptsize{", custom.note, "}}\n")
note <- gsub("%stars", snote, note, perl = TRUE)
}
string <- paste0(string, note, "\\end{tabular}\n")
if (table == TRUE) {
if (scriptsize == TRUE) {
string <- paste0(string, "\\normalsize\n")
}
if (caption.above == FALSE) {
string <- paste0(string, "\\caption{", caption, "}\n")
}
string <- paste0(string, "\\label{", label, "}\n")
if (center == TRUE) {
string <- paste0(string, "\\end{center}\n")
}
if (sideways == TRUE) {
t <- "sideways"
} else {
t <- ""
}
string <- paste0(string, "\\end{", t, "table}\n\n")
}
if (is.na(file)) {
return(string)
} else if (!is.character(file)) {
stop("The 'file' argument must be a character string.")
} else {
sink(file)
cat(string)
sink()
cat(paste0("The table was written to the file '", file, "'.\n"))
}
if (return.string == TRUE) {
return(string)
}
}
# htmlreg function
htmlreg <- function(l, file = NA, single.row = FALSE,
stars = c(0.001, 0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.gof.names = NULL, custom.note = NULL,
digits = 2, leading.zero = TRUE, symbol = "·", override.coef = 0,
override.se = 0, override.pval = 0, omit.coef = NA, reorder.coef = NULL,
reorder.gof = NULL, return.string = FALSE, ci.force = FALSE,
ci.force.level = 0.95, ci.test = 0, bold = 0.00, center = TRUE,
caption = "Statistical models", caption.above = FALSE, star.symbol = "*",
inline.css = TRUE, doctype = TRUE, html.tag = TRUE, head.tag = TRUE,
body.tag = FALSE, append = TRUE , ...) {
linit <- l
captioninit <- caption
stars <- check.stars(stars)
for(ind.table in 1:length(linit)){
l <- linit[[ind.table]]
models <- get.data(l, ...) #extract relevant coefficients, SEs, GOFs, etc.
caption <- captioninit[[ind.table]]
# inline CSS definitions
if (inline.css == TRUE) {
css.table <- " style=\"border: none;\""
css.th <- paste0(" style=\"text-align: left; border-top: 2px solid ",
"black; border-bottom: 1px solid black; padding-right: 12px;\"")
css.midrule <- " style=\"border-top: 1px solid black;\""
css.bottomrule <- " style=\"border-bottom: 2px solid black;\""
css.bottomrule.nogof <- paste(" style=\"padding-right: 12px;",
"border-bottom: 2px solid black;\"")
css.td <- " style=\"padding-right: 12px; border: none;\""
css.caption <- ""
css.sup <- "" #" style=\"vertical-align: 4px;\""
} else {
css.table <- ""
css.th <- ""
css.midrule <- ""
css.bottomrule <- ""
css.td <- ""
css.caption <- ""
css.sup <- ""
}
models <- override(models, override.coef, override.se, override.pval)
models <- tex.replace(models, type = "html", style = css.sup) # TeX --> HTML
models <- ciforce(models, ci.force = ci.force, ci.level = ci.force.level)
gof.names <- get.gof(models) # extract names of GOFs
# arrange coefficients and GOFs nicely in a matrix
gofs <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "gofs")
m <- aggregate.matrix(models, gof.names, custom.gof.names, digits,
returnobject = "m")
decimal.matrix <- aggregate.matrix(models, gof.names, custom.gof.names,
digits, returnobject = "decimal.matrix")
m <- customnames(m, custom.coef.names) # rename coefficients
m <- rearrangeMatrix(m) # resort matrix and conflate duplicate entries
m <- as.data.frame(m)
m <- omitcoef(m, omit.coef) # remove coefficient rows matching regex
modnames <- modelnames(models, custom.model.names[[ind.table]]) # use (custom) model names
# reorder GOF and coef matrix
m <- reorder(m, reorder.coef)
gofs <- reorder(gofs, reorder.gof)
decimal.matrix <- reorder(decimal.matrix, reorder.gof)
# create output table with significance stars etc.
ci <- logical()
for (i in 1:length(models)) {
if (length(models[[i]]@se) == 0) {
ci[i] <- TRUE
} else {
ci[i] <- FALSE
}
}
output.matrix <- outputmatrix(m, single.row, neginfstring = "-Inf",
leading.zero, digits, se.prefix = " (", se.suffix = ")",
star.char = star.symbol, star.prefix = paste0("<sup", css.sup, ">"),
star.suffix = "</sup>", stars, dcolumn = TRUE, symbol, bold = bold,
bold.prefix = "<b>", bold.suffix = "</b>", ci = ci, ci.test = ci.test)
output.matrix <- output.matrix[-seq(2, dim(output.matrix)[1], by=2),]
# create GOF matrix (the lower part of the final output matrix)
gof.matrix <- gofmatrix(gofs, decimal.matrix, leading.zero,
digits)
# combine the coefficient and gof matrices vertically
output.matrix <- rbind(output.matrix, gof.matrix)
# write table header
if (single.row == TRUE) {
numcols <- 2 * length(models)
} else {
numcols <- length(models)
}
if (doctype == TRUE) {
doct <- paste0("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 ",
"Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">\n")
} else {
doct <- ""
}
# determine indentation for table
if (html.tag == TRUE) {
h.ind <- " "
} else {
h.ind <- ""
}
if (body.tag == TRUE) {
b.ind <- " "
} else {
b.ind <- ""
}
if (head.tag == TRUE) {
d.ind <- " "
} else {
d.ind <- ""
}
ind <- " "
# horizontal table alignment
if (center == FALSE) {
tabdef <- paste0(h.ind, b.ind, "<table cellspacing=\"3\"", css.table, ">\n")
} else {
tabdef <- paste0(h.ind, b.ind,
"<table cellspacing=\"3\" align=\"center\"", css.table, ">\n")
}
# set caption
if (is.null(caption) || !is.character(caption)) {
stop("The caption must be provided as a (possibly empty) character vector.")
} else if (caption != "" && caption.above == FALSE) {
cap <- paste0(h.ind, b.ind, ind,
"<caption align=\"bottom\" style=\"margin-top:0.5em;", css.caption,
"\">", "<b>", caption,"</b>", "</caption>\n")
} else if (caption != "" && caption.above == TRUE) {
cap <- paste0(h.ind, b.ind, ind,
"<caption align=\"top\" style=\"margin-bottom:0.3em;", css.caption,
"\">", "<b>", caption, "</b>", "</caption>\n")
} else {
cap <- ""
}
# HTML header with CSS definitions
if(ind.table==1)
string <- paste0("\n", doct)
else
string <- paste0(string, "</tr>\n <tr>\n </tr>\n <tr>\n")
if (html.tag == TRUE) {
string <- paste0(string, "<html>\n")
}
if (inline.css == TRUE) {
css.header <- ""
} else {
css.header <- paste0(
h.ind, d.ind, "<style type=\"text/css\">\n",
h.ind, d.ind, ind, "table {\n",
h.ind, d.ind, ind, ind, "border: none;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, "th {\n",
h.ind, d.ind, ind, ind, "text-align: left;\n",
h.ind, d.ind, ind, ind, "border-top: 2px solid black;\n",
h.ind, d.ind, ind, ind, "border-bottom: 1px solid black;\n",
h.ind, d.ind, ind, ind, "padding-right: 12px;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, ".midRule {\n",
h.ind, d.ind, ind, ind, "border-top: 1px solid black;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, ".bottomRule {\n",
h.ind, d.ind, ind, ind, "border-bottom: 2px solid black;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, "td {\n",
h.ind, d.ind, ind, ind, "padding-right: 12px;\n",
h.ind, d.ind, ind, ind, "border: none;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, ind, "sup {\n",
h.ind, d.ind, ind, ind, "vertical-align: 4px;\n",
h.ind, d.ind, ind, "}\n",
h.ind, d.ind, "</style>\n"
)
}
if (head.tag == TRUE) {
string <- paste0(string,
h.ind, "<head>\n",
h.ind, d.ind, "<title>", caption, "</title>\n",
css.header,
h.ind, "</head>\n\n")
}
if (body.tag == TRUE) {
string <- paste0(string, h.ind, "<body>\n")
}
string <- paste0(
string,
tabdef,
cap,
h.ind, b.ind, ind, "<tr>\n",
h.ind, b.ind, ind, ind, "<th", css.th, "></th>\n"
)
# specify model names (header row)
for (i in 1:length(models)) {
string <- paste0(string,
h.ind, b.ind, ind, ind, "<td", css.th, ">", modnames[i],
"</td>\n")
}
string <- paste0(string, h.ind, b.ind, ind, "</tr>\n")
# write coefficients to string object
coef.length <- length(output.matrix[, 1]) - length(gof.names)
for (i in 1:coef.length) {
string <- paste0(string, h.ind, b.ind, ind, "<tr>\n")
for (j in 1:length(output.matrix[1, ])) {
if (length(gof.names) == 0 && i == coef.length) { # no GOF block
if (inline.css == TRUE) {
br <- css.bottomrule.nogof
} else {
br <- " class=\"bottomRule\""
}
string <- paste0(string, h.ind, b.ind, ind, ind, "<td", br, ">",
output.matrix[i,j], "</td>\n")
} else { # GOF block present
string <- paste0(string, h.ind, b.ind, ind, ind, "<td", css.td, ">",
output.matrix[i,j], "</td>\n")
}
}
string <- paste0(string, h.ind, b.ind, ind, "</tr>\n")
}
if (length(gof.names) > 0) {
# write GOF block
for (i in (length(output.matrix[, 1]) - (length(gof.names) - 1)):
(length(output.matrix[, 1]))) {
string <- paste0(string, h.ind, b.ind, ind, "<tr>\n")
for (j in 1:length(output.matrix[1, ])) {
if (i == length(output.matrix[, 1]) - (length(gof.names) - 1)) {
if (inline.css == TRUE) {
mr <- css.midrule
} else {
mr <- " class=\"midRule\"" # add mid rule via style sheets
}
string <- paste0(string, h.ind, b.ind, ind, ind,
"<td", mr, ">", output.matrix[i,j], "</td>\n")
} else if (i == length(output.matrix[, 1])) {
if (inline.css == TRUE) {
br <- css.bottomrule
} else {
br <- " class=\"bottomRule\""
}
string <- paste0(string, h.ind, b.ind, ind, ind,
"<td", br, ">", output.matrix[i,j], "</td>\n")
} else {
string <- paste0(string, h.ind, b.ind, ind, ind, "<td", css.td, ">",
output.matrix[i,j], "</td>\n")
}
}
string <- paste0(string, h.ind, b.ind, ind, "</tr>\n")
}
}
# stars note
if (is.null(stars)) {
snote <- ""
} else if (any(ci == FALSE)) {
st <- sort(stars)
if (length(unique(st)) != length(st)) {
stop("Duplicate elements are not allowed in the stars argument.")
}
if (length(st) == 4) {
snote <- paste0("<sup", css.sup, ">", star.symbol, star.symbol,
star.symbol, "</sup>p < ", st[1], ", <sup", css.sup, ">",
        star.symbol, star.symbol, "</sup>p < ", st[2],
", <sup", css.sup, ">", star.symbol, "</sup>p < ",
st[3], ", <sup", css.sup, ">", symbol, "</sup>p < ", st[4])
} else if (length(st) == 3) {
snote <- paste0("<sup", css.sup, ">", star.symbol, star.symbol,
star.symbol, "</sup>p < ", st[1], ", <sup", css.sup, ">",
star.symbol, star.symbol, "</sup>p < ", st[2], ", <sup", css.sup,
">", star.symbol, "</sup>p < ", st[3])
} else if (length(st) == 2) {
snote <- paste0("<sup", css.sup, ">", star.symbol, star.symbol,
"</sup>p < ", st[1], ", <sup", css.sup, ">", star.symbol,
"</sup>p < ", st[2])
} else if (length(st) == 1) {
snote <- paste0("<sup", css.sup, ">", star.symbol, "</sup>p < ", st[1])
} else {
snote <- ""
}
if (is.numeric(ci.test) && !is.na(ci.test) && nchar(snote) > 0 && any(ci)) {
snote <- paste(snote, "(or", ci.test, "outside the confidence interval).")
} else if (is.numeric(ci.test) && !is.na(ci.test) && any(ci)) {
snote <- paste0("<sup>", star.symbol, "</sup> ", ci.test,
" outside the confidence interval")
}
} else if (is.numeric(ci.test) && !is.na(ci.test)) {
snote <- paste0("<sup>", star.symbol, "</sup> ", ci.test,
" outside the confidence interval")
} else {
snote <- ""
}
if (is.null(custom.note)) {
note <- snote
} else if (custom.note == "") {
note <- ""
} else {
note <- custom.note
note <- gsub("%stars", snote, note)
}
string <- paste0(string, h.ind, b.ind, ind, "<tr>\n", h.ind, b.ind, ind, ind,
"<td", css.td, " colspan=\"", (1 + length(models)),
"\"><span style=\"font-size:0.8em\">", note, "</span></td>\n", h.ind,
b.ind, ind, "</tr>\n")
# write table footer
string <- paste0(string, h.ind, b.ind, "</table>\n")
if (body.tag == TRUE) {
string <- paste0(string, h.ind, "</body>\n")
}
}
if (html.tag == TRUE) {
string <- paste0(string, "</html>\n\n")
} else {
string <- paste0(string, "\n")
}
#print(cat(string))
if (is.na(file)) {
return(cat(string))
} else if (!is.character(file)) {
stop("The 'file' argument must be a character string.")
} else {
#sink(file, append=FALSE)
#cat(string)
#sink()
sink(file, append=append)
cat("\n")
cat("\n")
cat("\n")
cat("\n")
cat(string)
sink()
cat(paste0("The results were written to the file '", file, "'.\n"))
}
if (return.string == TRUE) {
return(string)
}
}
|
\name{coef}
\alias{coef<-}
\alias{coef.opm}
\alias{coef<-.opm}
\alias{coef.optimx}
\alias{coef<-.optimx}
\encoding{UTF-8}
\title{Summarize opm object}
\concept{minimization}
\concept{maximization}
\description{
Summarize an \code{"opm"} object.
}
\usage{
\method{coef}{opm}(object, ...)
\method{coef}{opm}(x) <- value
}
\arguments{
\item{object}{Object returned by \code{opm}.}
\item{\dots}{Further arguments to be passed to the function. Currently not
used.}
\item{x}{An \code{opm} object.}
\item{value}{Set parameters equal to this value.}
}
\value{
\code{coef.opm} returns the best parameters found by each method that
returned such parameters. The returned coefficients are in the form of a
matrix with the rows named by the relevant methods and the columns named
according to parameter names provided by the user in the vector of starting
values, or else by "p1", "p2", ..., if names are not provided.
}
\examples{
ans <- opm(fn = function(x) sum(x*x), par = 1:2, method="ALL", control=list(trace=0))
print(coef(ans))
ansx <- optimx(fn = function(x) sum(x*x), par = 1:2, control=list(all.methods=TRUE, trace=0))
print(coef(ansx))
\dontrun{
proj <- function(x) x/sum(x)
f <- function(x) -prod(proj(x))
ans <- opm(1:2, f)
print(ans)
coef(ans) <- apply(coef(ans), 1, proj)
print(ans)
}
}
\keyword{nonlinear}
\keyword{optimize}
|
/man/coef.opm.Rd
|
no_license
|
cran/optimx
|
R
| false | false | 1,359 |
rd
|
|
library(squash)
### Name: cmap
### Title: Apply a color map to numeric data
### Aliases: cmap
### Keywords: color
### ** Examples
x <- y <- 1:50
mat1 <- outer(x, y)
## several ways of visualizing the matrix mat1:
plot(col(mat1), row(mat1), col = cmap(mat1), pch = 16)
cimage(x, y, zcol = cmap(mat1))
colorgram(x = x, y = y, z = mat1)
## treatment of out-of-domain values
map <- makecmap(0:100, colFn = greyscale)
x <- y <- -10:10
mat2 <- outer(x, y, "*")
## Not run:
##D ## Values outside the domain of "map" generate an error...
##D plot(col(mat2), row(mat2), col = cmap(mat2, map), pch = 15, cex = 2)
##D
##D ## ... unless we specify "outlier", but this still generates a warning
##D plot(col(mat2), row(mat2), col = cmap(mat2, map, outlier = 'red'), pch = 15, cex = 2)
##D
## End(Not run)
|
/data/genthat_extracted_code/squash/examples/cmap.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 848 |
r
|
|
## grab args
args <- commandArgs(trailingOnly = TRUE)
DIR <- args[1]
# Sys.setenv(RSTUDIO_PANDOC="/Applications/RStudio.app/Contents/MacOS/pandoc")
setwd(DIR) # new
rmarkdown::render("scrna_jackstraw.Rmd", params = list(
seurat = args[2],
projectId = args[3],
projectDesc = args[4]
))
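## Example invocation from the shell (hypothetical paths and IDs, shown only to
## illustrate the four positional arguments read above: output directory, Seurat
## object, project ID, project description):
## Rscript scrna_jackstraw_call.R /path/to/outdir seurat_object.rds PROJ001 "Example project"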
|
/Results-template/Scripts/scrna_jackstraw_call.R
|
no_license
|
stevetsa/Pipeliner
|
R
| false | false | 301 |
r
|
|
# Load the data for the course assignment
library(tseries)
library(urca)
library(ggplot2)
library(forecast)
library(plotly)
Data<-read.csv2(file = "Grupo 13.csv")
attach(Data)
Data1<-ts(Data,class = "ts") # Create the time series from the data; this keeps the original column names
# Plot both time series.
# At first glance, series A looks stationary, but series B does not
par(mfrow = c(2,1))
plot(ts(Serie_A), main = "Serie A Grupo 13", col = "red")
plot(ts(Serie_B), main = "Serie B Grupo 13", col = "green")
# Install the "psych" package in order to use the describe function
library(psych)
# DESCRIPTIVE ANALYSIS #
Analisis.1<-describe(Data1,quant = c(0,0.25,0.5,0.75,1)) # Descriptive statistics
# skew is the skewness; both values are close to 0
# Variance
Varianza<-var(Data1)
colnames(Varianza)<-c("Var Serie A", "Var Serie B")
# Combine everything computed so far
Analisis.Total<-data.frame(Analisis.1,Varianza)
# Boxplots
par(mfrow = c(1,2))
boxplot(Serie_A,main = "Box Plot Serie A Grupo 13", col = "red")
boxplot(Serie_B,main = "Box Plot Serie B Grupo 13", col = "green")
# Density
par(mfrow = c(1,2))
plot(density(Serie_A),main="Densidad",xlab="N=100",col="red")
plot(density(Serie_B),main="Densidad",xlab="N=100",col="green")
par(mfrow = c(1,1))
plot(density(Serie_B),main="Densidad",xlab="N=100",col="green")
lines(density(Serie_A), col="red")
# Histograms
par(mfrow = c(1,2))
hist(Serie_A, breaks=20, main="Histograma Serie A Grupo 13", col="red")
hist(Serie_B,breaks=20, main="Histograma Serie B Grupo 13", col="green")
# FAS, FAC and FACP (autocovariance, ACF and PACF)
par(mfrow = c(4,2))
plot(ts(Serie_A), main = "Serie A Grupo 13", col = "red")
plot(ts(Serie_B), main = "Serie B Grupo 13", col = "green")
acf(Serie_A, type="covariance", main="FAS Serie A", col="red")
acf(Serie_B, type="covariance", main="FAS Serie B", col="green")
acf(Serie_A, main = "FAC Serie A", col = "red")
acf(Serie_B, main = "FAC Serie B", col = "green")
pacf(Serie_A, main = "FACP Serie A", col = "red")
pacf(Serie_B, main = "FACP Serie B", col = "green")
# Again, at first glance series B is not stationary
##### SERIE A #####
auto.arima(Serie_A, stepwise = FALSE, approximation = FALSE)
# According to auto.arima, the best model is an MA(3)
ndiffs(Serie_A)
# According to ndiffs, the series does not need differencing
# Dickey-Fuller test
# Test statistics
none.df<-ur.df(Serie_A,type="none",lags=5,selectlags=c("AIC"))
drift.df<-ur.df(Serie_A,type="drift",lags=5,selectlags=c("AIC"))
trend.df<-ur.df(Serie_A,type="trend",lags=5,selectlags=c("AIC"))
# Critical values
Detalle_none_df<-summary(none.df)
Detalle_drift_df<-summary(drift.df)
Detalle_trend_df<-summary(trend.df)
# Check whether the series is stationary with neither trend nor intercept
for (i in 1:length(Detalle_none_df@cval)) {
Resultado_none<-none.df@teststat<Detalle_none_df@cval[1,i]
print(Resultado_none)
if (Resultado_none == TRUE) {
print("Es estacionario")
}else{
print("No es estacionario")
}
}
# Check whether the series is stationary with no trend but with an intercept
for (i in 1:length(Detalle_drift_df@cval[1,])) {
Resultado_drift<-drift.df@teststat[1,1]<Detalle_drift_df@cval[1,i]
print(Resultado_drift)
if (Resultado_drift == TRUE) {
print("Es estacionario")
}else{
print("No es estacionario")
}
}
# Check whether the series is stationary with both trend and intercept
for (i in 1:length(Detalle_trend_df@cval[1,])) {
Resultado_trend<-trend.df@teststat[1,1]<Detalle_trend_df@cval[1,i]
print(Resultado_trend)
if (Resultado_trend == TRUE) {
print("Es estacionario")
}else{
print("No es estacionario")
}
}
# Loop to collect the AIC and BIC of several ARIMA models without having to fit them one by one
Tabla_AIC_BIC <- NULL
for (i in 0:5) {
  for (j in 0:5) {
    if (j != 0 || i != 0) {
      m <- tryCatch(arima(Serie_A, order = c(i, 0, j)), error = function(e) NULL)
      if (is.null(m)) next # skip orders for which the fit does not converge
      fila <- cbind(AIC(m), BIC(m))
      rownames(fila) <- paste("Modelo", i, j)
      colnames(fila) <- c("AIC", "BIC")
      Tabla_AIC_BIC <- rbind(Tabla_AIC_BIC, fila)
      print(fila)
    }
  }
} # The model with the lowest AIC and BIC is always the one selected
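# A short sketch of how to read the table above (note: Tabla_AIC_BIC is a name
# introduced by the completed loop, not part of the original script): pick the
# specification with the lowest AIC and the lowest BIC.
Tabla_AIC_BIC[which.min(Tabla_AIC_BIC[, "AIC"]), , drop = FALSE]
Tabla_AIC_BIC[which.min(Tabla_AIC_BIC[, "BIC"]), , drop = FALSE]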
# The MA(3) model fits best, and an MA model is always stationary
MA_3 <- arima(Serie_A, order = c(0, 0, 3)) # fit the selected MA(3) model (used below but never assigned in the original script)
# Look at the ACF and PACF of the residuals of the selected model
par(mfrow = c(2,2))
plot(acf(MA_3$residuals),main = "Autocorrelacion Modelo MA(3)")
plot(pacf(MA_3$residuals),main = "Autocorrelacion parcial Modelo MA(3)")
# Analysis of the selected model
# Test on the model coefficients
# H0: some theta_i equals 0
# H1: the theta_i are different from 0
Test_Coef_MA_3<-t.test(MA_3$coef)
Test_Coef_MA_3 # The test indicates that H1 holds, so the coefficients are different from 0
# Ljung-Box test
# The Ljung-Box Q statistic checks whether a series of observations over a given time period is random and independent.
# ...If the observations are not independent, an observation may be correlated with another observation k time units later,...
# ...a relationship called autocorrelation.
# H0: the autocorrelations are equal to 0, i.e. the data are independent
# H1: not all the data are independent
# If p-value > alpha, do not reject H0
Lyung_Box_MA_3<-Box.test(MA_3$residuals,type = "Ljung-Box",lag = 1)
Lyung_Box_MA_3 # The p-value is greater than 0.05, so H0 is not rejected and we can say there is no autocorrelation in the residuals
# Jarque-Bera test
# The Jarque-Bera test is a goodness-of-fit test that checks whether a data sample has the skewness and kurtosis of a normal distribution.
# H0: the errors follow a normal distribution
# H1: unspecified
# If p-value > alpha, do not reject H0
Jarque_Bera_MA_3<-jarque.bera.test(MA_3$residuals)
Jarque_Bera_MA_3
# H0: the errors follow a normal distribution
# H1: unspecified
# If p-value > alpha, do not reject H0
Test_Shapiro_MA_3<-shapiro.test(MA_3$residuals)
Test_Shapiro_MA_3
# Both the JB test and the Shapiro test reject the H0 that the residuals follow a normal distribution
par(mfrow = c(2,2)) # To compare the different forecasts side by side
# Forecast for a one-period horizon
Pre1<-forecast(MA_3, level = c(95,97.5,99), h = 1)
plot(Pre1, main = "Prediccion 1 periodo")
P1<-as.data.frame(Pre1)
P1<-data.frame(P1[,6],P1[,4],P1[,2],P1[,1],P1[,3],P1[,5],P1[,7])
colnames(P1)<-c("LI 99%","LI 97,5%","LI 95%","Predicción","LS 95%","LS 97,5%","LS 99%")
P1
# Forecast for a two-period horizon
Pre2<-forecast(MA_3, level = c(95,97.5,99), h = 2)
plot(Pre2,main = "Prediccion 2 periodos")
P2<-as.data.frame(Pre2)
P2<-data.frame(P2[,6],P2[,4],P2[,2],P2[,1],P2[,3],P2[,5],P2[,7])
colnames(P2)<-c("LI 99%","LI 97,5%","LI 95%","Predicción","LS 95%","LS 97,5%","LS 99%")
P2
# Forecast for a three-period horizon.
Pre3<-forecast(MA_3, level = c(95,97.5,99), h = 3)
plot(Pre3, main = "Prediccion 3 periodos")
P3<-as.data.frame(Pre3)
P3<-data.frame(P3[,6],P3[,4],P3[,2],P3[,1],P3[,3],P3[,5],P3[,7])
colnames(P3)<-c("LI 99%","LI 97,5%","LI 95%","Predicción","LS 95%","LS 97,5%","LS 99%")
P3
# Forecast for a 20-period horizon.
Pre20<-forecast(MA_3, level = c(95,97.5,99), h = 20)
plot(Pre20, main = "Prediccion 20 periodos")
P20<-as.data.frame(Pre20)
P20<-data.frame(P20[,6],P20[,4],P20[,2],P20[,1],P20[,3],P20[,5],P20[,7])
colnames(P20)<-c("LI 99%","LI 97,5%","LI 95%","Predicción","LS 95%","LS 97,5%","LS 99%")
rownames(P20)<-c(1:20)
P20
par(mfrow=c(2,2))
plot(MA_3$residuals,col="red",main="Residuos MA(3)")
hist(MA_3$residuals,main="Histograma de los residuos")
acf(MA_3$residuals,col="red",main="FAC de residuos")
pacf(MA_3$residuals,col="red",main="FACP de Residuos")
# Augmented Dickey-Fuller test for series B
Serie_B_Diff <- diff(Serie_B) # first difference of series B (assumed definition; it was used below but never created in the original script)
none.df<-ur.df(Serie_B_Diff,type="none",lags=5,selectlags=c("AIC"))
drift.df<-ur.df(Serie_B_Diff,type="drift",lags=5,selectlags=c("AIC"))
trend.df<-ur.df(Serie_B_Diff,type="trend",lags=5,selectlags=c("AIC"))
summary(none.df)
summary(drift.df)
summary(trend.df)
adf.test(Serie_B_Diff)
par(mfrow = c(2,1))
plot(ts(Serie_A), main = "Serie A Grupo 13", col = "red")
plot(ts(Serie_B_Diff), main = "Serie B Dif Grupo 13", col = "green") # It looks stationary for now
# Compare the boxplots again
par(mfrow = c(1,2))
boxplot(Serie_A,main = "Box Plot Serie A Grupo 13", col = "red")
boxplot(Serie_B_Diff,main = "Box Plot Serie B Dif Grupo 13", col = "green")
# Plot the FAS, FAC and FACP again
par(mfrow = c(3,2))
acf(Serie_A, main = "FAC Serie A", col = "red")
acf(Serie_B_Diff, main = "FAC Serie B Dif", col = "green")
pacf(Serie_A, main = "FACP Serie A", col = "red")
pacf(Serie_B_Diff, main = "FACP Serie B Dif", col = "green") # Now it does look stationary
# Modelling ---> first we have to test
# There is a simple way to do it, with the auto.arima function, which tells us the best model. But for comparison, the full search is carried out
auto.arima(Serie_A,stationary = TRUE) # stationary = TRUE so that only stationary models are suggested
# The model we get is an MA(3), and an MA model is always stationary.
M1_SerieA<-arima(Serie_A,order = c(1,0,0))
M1_SerieA
M2_SerieA<-arima(Serie_A,order = c(0,0,1))
M2_SerieA
M3_SerieA<-arima(Serie_A,order = c(1,0,1))
M3_SerieA
M3_SerieA<-arima(Serie_A,order = c(2,0,1))
M3_SerieA
M4_SerieA<-arima(Serie_A,order = c(1,0,2))
M4_SerieA
M5_SerieA<-arima(Serie_A,order = c(2,0,2))
M5_SerieA
M6_SerieA<-arima(Serie_A,order = c(3,0,0))
M6_SerieA
M7_SerieA<-arima(Serie_A,order = c(3,0,1))
M7_SerieA
M8_SerieA<-arima(Serie_A,order = c(0,0,3))
M8_SerieA
M9_SerieA<-arima(Serie_A,order = c(1,0,3))
M9_SerieA
M10_SerieA<-arima(Serie_A,order = c(3,0,3))
M10_SerieA
M11_SerieA<-arima(Serie_A,order = c(1,1,1))
M11_SerieA
M12_SerieA<-arima(Serie_A,order = c(2,1,1))
M12_SerieA
M13_SerieA<-arima(Serie_A,order = c(1,1,2))
M13_SerieA
M14_SerieA<-arima(Serie_A,order = c(2,1,2))
M14_SerieA
Serie_A_aic<-rbind(AIC(M1_SerieA,M2_SerieA,M3_SerieA,M4_SerieA,M5_SerieA,M6_SerieA,M7_SerieA,M8_SerieA,M9_SerieA,M10_SerieA),AIC(M11_SerieA,M12_SerieA,M13_SerieA,M14_SerieA))
Serie_A_bic<-rbind(BIC(M1_SerieA,M2_SerieA,M3_SerieA,M4_SerieA,M5_SerieA,M6_SerieA,M7_SerieA,M8_SerieA,M9_SerieA,M10_SerieA),BIC(M11_SerieA,M12_SerieA,M13_SerieA,M14_SerieA))
Modelo_Serie_A<-cbind(Serie_A_aic,Serie_A_bic)
Modelo_Serie_A
M8_SerieA
# Look at the ACF and PACF of the residuals of the selected model
par(mfrow = c(2,2))
plot(acf(M8_SerieA$residuals))
plot(pacf(M8_SerieA$residuals))
# Analysis of the selected model
# Test on the model coefficients
# H0: some theta_i equals 0
# H1: the theta_i are different from 0
Test_Coef_M8<-t.test(M8_SerieA$coef)
Test_Coef_M8 # The test indicates that H1 holds, so the coefficients are different from 0
# Ljung-Box test
# The Ljung-Box Q statistic checks whether a series of observations over a given time period is random and independent.
# ...If the observations are not independent, an observation may be correlated with another observation k time units later,...
# ...a relationship called autocorrelation.
# H0: the autocorrelations are equal to 0, i.e. the data are independent
# H1: not all the data are independent
# If p-value > alpha, do not reject H0
Lyung_Box_Serie_A<-Box.test(M8_SerieA$residuals,type = "Ljung-Box",lag = 1)
Lyung_Box_Serie_A # The p-value is greater than 0.05, so H0 is not rejected and we can say there is no autocorrelation in the residuals
|
/Tp script.R
|
no_license
|
celevicenzi/TP-EA
|
R
| false | false | 11,825 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_xref_counts}
\alias{plot_xref_counts}
\title{Plot Xref Counts}
\usage{
plot_xref_counts(DO_repo, out_dir = "graphics/website", w = 8, h = 5.6)
}
\arguments{
\item{DO_repo}{The local path to the HumanDiseaseOntology repo, as a string,
or a \link{DOrepo} object.}
\item{out_dir}{The directory where the plot \code{"DO_xref_count.png"}
should be saved, as a string. If \code{NULL} the plot is not saved to disk.}
\item{w}{The width of the plot in inches, as numeric.}
\item{h}{The height of the plot in inches, as numeric.}
}
\description{
Plots the count of cross-references by source in the Human Disease Ontology.
}
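% A minimal usage sketch; the local repository path below is a hypothetical
% example for illustration, not taken from the package documentation.
\examples{
\dontrun{
# pass the local path to a HumanDiseaseOntology clone (or a DOrepo object);
# out_dir = NULL returns the plot without writing "DO_xref_count.png"
plot_xref_counts("~/HumanDiseaseOntology", out_dir = NULL)
}
}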
|
/man/plot_xref_counts.Rd
|
permissive
|
DiseaseOntology/DO.utils
|
R
| false | true | 715 |
rd
|
|
#' @title Query GDC data
#' @description
#' Uses the GDC API to search for data; it searches both controlled and
#' open-access data.
#' For GDC data the arguments project, data.category, data.type and workflow.type should be used.
#' For legacy data the arguments project, data.category, platform and/or file.extension should be used.
#' Please see the vignette for a table with the possibilities.
#' @param project A list of valid projects (see list with TCGAbiolinks:::getGDCprojects()$project_id)
#' @param data.category A valid data category (see list with TCGAbiolinks:::getProjectSummary(project))
#' @param data.type A data type to filter the files to download
#' @param sample.type A sample type to filter the files to download
#' @param barcode A list of barcodes to filter the files to download
#' @param legacy Search in the legacy repository
#' @param file.type To be used in the legacy database for some platforms,
#' to define which file types to be used.
#' @param workflow.type GDC workflow type
#' @param experimental.strategy Filter by experimental strategy. Harmonized: WXS, RNA-Seq, miRNA-Seq, Genotyping Array.
#' Legacy: WXS, RNA-Seq, miRNA-Seq, Genotyping Array,
#' DNA-Seq, Methylation array, Protein expression array, WXS,CGH array, VALIDATION, Gene expression array,WGS,
#' MSI-Mono-Dinucleotide Assay, miRNA expression array, Mixed strategies, AMPLICON, Exon array,
#' Total RNA-Seq, Capillary sequencing, Bisulfite-Seq
#' @param access Filter by access type. Possible values: controlled, open
#' @param platform Example:
#' \tabular{ll}{
#'CGH- 1x1M_G4447A \tab IlluminaGA_RNASeqV2 \cr
#'AgilentG4502A_07 \tab IlluminaGA_mRNA_DGE \cr
#'Human1MDuo \tab HumanMethylation450 \cr
#'HG-CGH-415K_G4124A \tab IlluminaGA_miRNASeq \cr
#'HumanHap550 \tab IlluminaHiSeq_miRNASeq \cr
#'ABI \tab H-miRNA_8x15K \cr
#'HG-CGH-244A \tab SOLiD_DNASeq \cr
#'IlluminaDNAMethylation_OMA003_CPI \tab IlluminaGA_DNASeq_automated \cr
#'IlluminaDNAMethylation_OMA002_CPI \tab HG-U133_Plus_2 \cr
#'HuEx- 1_0-st-v2 \tab Mixed_DNASeq \cr
#'H-miRNA_8x15Kv2 \tab IlluminaGA_DNASeq_curated \cr
#'MDA_RPPA_Core \tab IlluminaHiSeq_TotalRNASeqV2 \cr
#'HT_HG-U133A \tab IlluminaHiSeq_DNASeq_automated \cr
#'diagnostic_images \tab microsat_i \cr
#'IlluminaHiSeq_RNASeq \tab SOLiD_DNASeq_curated \cr
#'IlluminaHiSeq_DNASeqC \tab Mixed_DNASeq_curated \cr
#'IlluminaGA_RNASeq \tab IlluminaGA_DNASeq_Cont_automated \cr
#'IlluminaGA_DNASeq \tab IlluminaHiSeq_WGBS \cr
#'pathology_reports \tab IlluminaHiSeq_DNASeq_Cont_automated\cr
#'Genome_Wide_SNP_6 \tab bio \cr
#'tissue_images \tab Mixed_DNASeq_automated \cr
#'HumanMethylation27 \tab Mixed_DNASeq_Cont_curated \cr
#'IlluminaHiSeq_RNASeqV2 \tab Mixed_DNASeq_Cont
#'}
#' @export
#' @examples
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy Number Variation",
#' data.type = "Copy Number Segment")
#' \dontrun{
#' query <- GDCquery(project = "TARGET-AML",
#' data.category = "Transcriptome Profiling",
#' data.type = "miRNA Expression Quantification",
#' workflow.type = "BCGSC miRNA Profiling",
#' barcode = c("TARGET-20-PARUDL-03A-01R","TARGET-20-PASRRB-03A-01R"))
#' query <- GDCquery(project = "TARGET-AML",
#' data.category = "Transcriptome Profiling",
#' data.type = "Gene Expression Quantification",
#' workflow.type = "HTSeq - Counts",
#' barcode = c("TARGET-20-PADZCG-04A-01R","TARGET-20-PARJCR-09A-01R"))
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy Number Variation",
#' data.type = "Masked Copy Number Segment",
#' sample.type = c("Primary solid Tumor"))
#' query.met <- GDCquery(project = c("TCGA-GBM","TCGA-LGG"),
#' legacy = TRUE,
#' data.category = "DNA methylation",
#' platform = "Illumina Human Methylation 450")
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy number variation",
#' legacy = TRUE,
#' file.type = "hg19.seg",
#' barcode = c("TCGA-OR-A5LR-01A-11D-A29H-01"))
#' }
#' @return A data frame with the results and the parameters used
#' @importFrom jsonlite fromJSON
#' @importFrom knitr kable
#' @importFrom httr timeout
GDCquery <- function(project,
data.category,
data.type,
workflow.type,
legacy = FALSE,
access,
platform,
file.type,
barcode,
experimental.strategy,
sample.type){
isServeOK()
suppressWarnings({
# prepare output
if(missing(sample.type)) {
sample.type <- NA
} else if(all(sample.type == FALSE)) {
sample.type <- NA
}
if(missing(data.type)) {
data.type <- NA
} else if(data.type == FALSE) {
data.type <- NA
}
if(missing(barcode)) {
barcode <- NA
} else if(length(barcode) == 1) {
if(barcode == FALSE) barcode <- NA
}
if(missing(platform)) {
platform <- NA
} else if(platform == FALSE) {
platform <- NA
}
if(missing(file.type)) {
file.type <- NA
} else if(file.type == FALSE) {
file.type <- NA
}
if(missing(workflow.type)) {
workflow.type <- NA
} else if(workflow.type == FALSE) {
workflow.type <- NA
}
if(missing(experimental.strategy)) {
experimental.strategy <- NA
} else if(experimental.strategy == FALSE) {
experimental.strategy <- NA
}
if(missing(access)) {
access <- NA
} else if(access == FALSE) {
access <- NA
}
})
print.header("GDCquery: Searching in GDC database","section")
message("Genome of reference: ",ifelse(legacy,"hg19","hg38"))
# Check arguments
checkProjectInput(project)
checkDataCategoriesInput(project, data.category, legacy)
if(!is.na(data.type)) checkDataTypeInput(legacy = legacy, data.type = data.type)
if(!any(is.na(sample.type))) checkBarcodeDefinition(sample.type)
results <- NULL
print.header("Accessing GDC. This might take a while...","subsection")
for(proj in project){
url <- getGDCquery(project = proj,
data.category = data.category,
data.type = data.type,
legacy = legacy,
workflow.type = workflow.type,
platform = platform,
file.type = file.type,
files.access = access,
sample.type = sample.type)
message("ooo Project: ", proj)
json <- tryCatch(
getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
error = function(e) {
message(paste("Error: ", e, sep = " "))
message("We will retry to access GDC!")
fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
}
)
if(json$data$pagination$count == 0) {
url <- getGDCquery(project = proj,
data.category = data.category,
data.type = data.type,
legacy = legacy,
workflow.type = NA,
platform = NA,
file.type = file.type,
files.access = access,
sample.type = sample.type)
json <- tryCatch(
getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
error = function(e) {
message(paste("Error: ", e, sep = " "))
message("We will retry to access GDC!")
fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
}
)
}
json$data$hits$acl <- NULL
json$data$hits$project <- proj
if("archive" %in% colnames(json$data$hits)){
if(is.data.frame(json$data$hits$archive)){
archive <- json$data$hits$archive
colnames(archive)[1:ncol(archive)] <- paste0("archive_", colnames(archive)[1:ncol(archive)])
json$data$hits$archive <- NULL
json$data$hits <- cbind(json$data$hits, archive)
}
}
if("analysis" %in% colnames(json$data$hits)){
if(is.data.frame(json$data$hits$analysis)){
analysis <- json$data$hits$analysis
colnames(analysis)[2:ncol(analysis)] <- paste0("analysis_", colnames(analysis)[2:ncol(analysis)])
json$data$hits$analysis <- NULL
json$data$hits <- cbind(json$data$hits, analysis)
}
}
if("center" %in% colnames(json$data$hits)){
if(is.data.frame(json$data$hits$center)){
center <- json$data$hits$center
colnames(center)[2:ncol(center)] <- paste0("center_", colnames(center)[2:ncol(center)])
json$data$hits$center <- NULL
json$data$hits <- cbind(json$data$hits, center)
}
}
results <- plyr::rbind.fill(as.data.frame(results),as.data.frame(json$data$hits))
}
if(ncol(results) == 1) {
        message("Sorry! There is no result for your query. Please check the data available in GDC and make sure there is no error in your query.")
return (NULL)
}
print.header("Filtering results","subsection")
if(!any(is.na(platform))){
if(!(all(platform %in% results$platform))){
stop("Please set a valid platform argument from the list below:\n => ", paste(unique(results$platform), collapse = "\n => "))
}
message("ooo By platform")
results <- results[tolower(results$platform) %in% tolower(platform),]
}
# Filter by access
if(!is.na(access)) {
message("ooo By access")
results <- results[grepl(access,results$access,ignore.case = TRUE),]
}
# Filter by experimental strategy
if(!is.na(experimental.strategy)) {
if(all(tolower(experimental.strategy) %in% tolower(results$experimental_strategy))) {
message("ooo By experimental.strategy")
results <- results[tolower(results$experimental_strategy) %in% tolower(experimental.strategy),]
} else {
message(paste0("The argument experimental_strategy does not match any of the results.\nPossible values:",
paste(unique(results$experimental_strategy),collapse = "\n=>")))
}
}
# Filter by data.type
if(!is.na(data.type)) {
if(!(tolower(data.type) %in% tolower(results$data_type))) {
stop("Please set a valid data.type argument from the list below:\n => ", paste(unique(results$data_type), collapse = "\n => "))
}
message("ooo By data.type")
results <- results[tolower(results$data_type) %in% tolower(data.type),]
}
# Filter by workflow.type
if(!is.na(workflow.type)) {
if(!(workflow.type %in% results$analysis_workflow_type)) {
stop("Please set a valid workflow.type argument from the list below:\n => ", paste(unique(results$analysis_workflow_type), collapse = "\n => "))
}
message("ooo By workflow.type")
results <- results[results$analysis_workflow_type %in% workflow.type,]
}
# Filter by file.type
if(!is.na(file.type)){
message("ooo By file.type")
pat <- file.type
invert <- FALSE
if(file.type == "normalized_results") pat <- "normalized_results"
if(file.type == "results") pat <- "[^normalized_]results"
if(file.type == "nocnv_hg18" | file.type == "nocnv_hg18.seg") pat <- "nocnv_hg18"
if(file.type == "cnv_hg18" | file.type == "hg18.seg") pat <- "[^nocnv_]hg18.seg"
if(file.type == "nocnv_hg19" | file.type == "nocnv_hg19.seg") pat <- "nocnv_hg19"
if(file.type == "cnv_hg19" | file.type == "hg19.seg") pat <- "[^nocnv_]hg19.seg"
if(file.type == "mirna") {
pat <- "hg19.*mirna"
invert <- TRUE
}
# if(file.type == "hg19.mirna") pat <- "hg19.mirna"
# if(file.type == "hg19.mirbase20.mirna") pat <- "hg19.mirbase20.mirna"
if(file.type == "hg19.isoform") pat <- "hg19.*isoform"
if(file.type == "isoform") {
pat <- "hg19.*isoform"
invert <- TRUE
}
idx <- grep(pat,results$file_name,invert = invert)
if(length(idx) == 0) {
print(knitr::kable(sort(results$file_name)[1:10],col.names = "Files"))
stop("We were not able to filter using this file type. Examples of available files are above. Please check the vignette for possible entries")
}
results <- results[idx,]
}
# get barcode of the samples
if(data.category %in% c("Clinical","Biospecimen")) {
pat <- paste("TCGA-[:alnum:]{2}-[:alnum:]{4}",
"TARGET-[:alnum:]{2}-[:alnum:]{6}",sep = "|")
} else {
pat <- paste("[:alnum:]{4}-[:alnum:]{2}-[:alnum:]{4}-[:alnum:]{3}-[:alnum:]{2,3}-[:alnum:]{4}-[:alnum:]{2}",
"[:alnum:]{6}-[:alnum:]{2}-[:alnum:]{6}-[:alnum:]{3}-[:alnum:]{3}",sep = "|")
}
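    # For illustration (barcode taken from the @examples above): the aliquot-level pattern
    # matches full barcodes such as "TCGA-OR-A5LR-01A-11D-A29H-01", while the
    # Clinical/Biospecimen pattern matches the patient-level prefix, e.g. "TCGA-OR-A5LR".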
if(!all(unique(results$data_type) == "Auxiliary test")) {
barcodes <- unlist(lapply(results$cases,function(x) {
str <- str_extract_all(unlist(x),pat) %>% unlist %>% na.omit %>% paste(collapse = ",")
ifelse(all(is.na(str)), NA,str[!is.na(str)])
}))
} else { # auxiliary fies case
pat <- paste("TCGA-[:alnum:]{2}-[:alnum:]{4}",
"TARGET-[:alnum:]{2}-[:alnum:]{6}",sep = "|")
barcodes <- unlist(lapply(results$file_name,function(x) {
str <- str_extract_all(unlist(x),pat) %>% unlist %>% na.omit %>% paste(collapse = ",")
ifelse(all(is.na(str)), NA,str[!is.na(str)])
}))
}
results$cases <- barcodes
results$tissue.definition <- expandBarcodeInfo(barcodes)$tissue.definition
# Filter by barcode
if(!any(is.na(barcode))) {
message("ooo By barcode")
idx <- unique(unlist(sapply(barcode, function(x) grep(x, results$cases,ignore.case = TRUE))))
if(length(idx) == 0) {
print(knitr::kable(results$cases,col.names = "Available barcodes"))
stop("None of the barcodes were matched. Available barcodes are above")
}
results <- results[idx,]
}
# Filter by sample.type
if(!any(is.na(sample.type))) {
if(!any(tolower(results$tissue.definition) %in% tolower(sample.type))) {
aux <- as.data.frame(table(results$tissue.definition))
aux <- aux[aux$Freq>0,]
print(kable(aux,row.names=FALSE,col.names = c("sample.type","Number of samples")))
stop("Please set a valid sample.type argument from the list above.")
}
message("ooo By sample.type")
results <- results[tolower(results$tissue.definition) %in% tolower(sample.type),]
}
# some how there are duplicated files in GDC we should remove them
# Example of problematic query
# query.exp <- GDCquery(project = "TCGA-BRCA",
# legacy = TRUE,
# data.category = "Gene expression",
# data.type = "Gene expression quantification",
# platform = "Illumina HiSeq",
# file.type = "results",
# experimental_strategy = "RNA-Seq",
# sample.type = c("Primary solid Tumor","Solid Tissue Normal"))
#
print.header("Checking data","subsection")
message("ooo Check if there are duplicated cases")
if(any(duplicated(results$cases))) {
message("Warning: There are more than one file for the same case. Please verify query results. You can use the command View(getResults(query)) in rstudio")
}
message("ooo Check if there results for the query")
if(nrow(results) == 0) stop("Sorry, no results were found for this query")
print.header("Preparing output","section")
ret <- data.frame(results=I(list(results)),
project = I(list(project)),
data.category = data.category,
data.type = data.type,
legacy = legacy,
access = I(list(access)),
experimental.strategy = I(list(experimental.strategy)),
file.type = file.type,
platform = I(list(platform)),
sample.type = I(list(sample.type)),
barcode = I(list(barcode)),
workflow.type = workflow.type)
return(ret)
}
getGDCquery <- function(project, data.category, data.type, legacy, workflow.type,platform,file.type,files.access,sample.type){
# Get manifest using the API
baseURL <- ifelse(legacy,"https://api.gdc.cancer.gov/legacy/files/?","https://api.gdc.cancer.gov/files/?")
options.pretty <- "pretty=true"
if(data.category == "Protein expression" & legacy) {
options.expand <- "fields=archive.revision,archive.file_name,md5sum,state,data_category,file_id,platform,file_name,file_size,md5sum,submitter_id,data_type&expand=cases.samples.portions,cases.project,center,analysis"
} else if(data.category %in% c("Clinical","Biospecimen")) {
options.expand <- "expand=cases,cases.project,center,analysis"
} else {
options.expand <- "expand=cases.samples.portions.analytes.aliquots,cases.project,center,analysis,cases.samples"
}
option.size <- paste0("size=",getNbFiles(project,data.category,legacy))
option.format <- paste0("format=JSON")
options.filter <- paste0("filters=",
URLencode('{"op":"and","content":['), # Start json request
URLencode('{"op":"in","content":{"field":"cases.project.project_id","value":["'),
project,
URLencode('"]}}'))
if(!is.na(data.category)) options.filter <- paste0(options.filter,addFilter("files.data_category", data.category))
if(!is.na(data.type)) options.filter <- paste0(options.filter,addFilter("files.data_type", data.type))
if(!is.na(workflow.type)) options.filter <- paste0(options.filter,addFilter("files.analysis.workflow_type", workflow.type))
if(!any(is.na(platform))) options.filter <- paste0(options.filter,addFilter("files.platform", platform))
if(!any(is.na(file.type))) {
if(file.type == "results" & legacy) options.filter <- paste0(options.filter,addFilter("files.tags", "unnormalized"))
if(file.type == "normalized_results" & legacy) options.filter <- paste0(options.filter,addFilter("files.tags", "normalized"))
if(file.type == "nocnv_hg19.seg" & legacy) options.filter <- paste0(options.filter,addFilter("files.tags", "nocnv"))
if(file.type == "hg19.isoform" & legacy) options.filter <- paste0(options.filter,addFilter("files.tags", "hg19"))
}
if(!any(is.na(files.access))) {
options.filter <- paste0(options.filter,addFilter("files.access", files.access))
}
if(!any(is.na(sample.type))) {
if("Primary solid Tumor" %in% sample.type) sample.type[sample.type == "Primary solid Tumor"] <- "Primary Tumor"
if("Recurrent Solid Tumor" %in% sample.type) sample.type[sample.type == "Recurrent Solid Tumor"] <- "Recurrent Tumor"
options.filter <- paste0(options.filter,addFilter("cases.samples.sample_type", sample.type))
}
# Close json request
options.filter <- paste0(options.filter, URLencode(']}'))
url <- paste0(baseURL,paste(options.pretty,
options.expand,
option.size,
options.filter,
option.format,
sep = "&"))
return(url)
}
addFilter <- function(field, values){
ret <- paste0(
URLencode(',{"op":"in","content":{"field":"'),
URLencode(field),
URLencode('","value":["'),
URLencode(paste0(values, collapse = '","')),
URLencode('"]}}')
)
return(ret)
}
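# Illustrative note on the helper above (hypothetical call shown for clarity):
# addFilter() appends one URL-encoded "in" clause to the GDC filter string.
# For example,
# addFilter("files.data_category", "Copy Number Variation")
# encodes a fragment that, before URL encoding, reads
# ,{"op":"in","content":{"field":"files.data_category","value":["Copy Number Variation"]}}
# getGDCquery() concatenates these clauses inside the outer
# {"op":"and","content":[ ... ]} filter before sending the request to the API.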
expandBarcodeInfo <- function(barcode){
if(any(grepl("TARGET",barcode))) {
ret <- DataFrame(barcode = barcode,
code = substr(barcode, 8, 9),
case.unique.id = substr(barcode, 11, 16),
tissue.code = substr(barcode, 18, 19),
nucleic.acid.code = substr(barcode, 24, 24))
ret <- merge(ret,getBarcodeDefinition(), by = "tissue.code", sort = FALSE, all.x = TRUE)
ret <- ret[match(barcode,ret$barcode),]
}
if(any(grepl("TCGA",barcode))) {
ret <- data.frame(barcode = barcode,
patient = substr(barcode, 1, 12),
sample = substr(barcode, 1, 16),
tissue.code = substr(barcode, 14, 15))
ret <- merge(ret,getBarcodeDefinition(), by = "tissue.code", sort = FALSE, all.x = TRUE)
ret <- ret[match(barcode,ret$barcode),]
}
return(ret)
}
getBarcodeDefinition <- function(type = "TCGA"){
if(type == "TCGA"){
tissue.code <- c('01','02','03','04','05','06','07','08','09','10','11',
'12','13','14','20','40','50','60','61')
shortLetterCode <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
"TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
"CELL","XP","XCL")
tissue.definition <- c("Primary solid Tumor",
"Recurrent Solid Tumor",
"Primary Blood Derived Cancer - Peripheral Blood",
"Recurrent Blood Derived Cancer - Bone Marrow",
"Additional - New Primary",
"Metastatic",
"Additional Metastatic",
"Human Tumor Original Cells",
"Primary Blood Derived Cancer - Bone Marrow",
"Blood Derived Normal",
"Solid Tissue Normal",
"Buccal Cell Normal",
"EBV Immortalized Normal",
"Bone Marrow Normal",
"Control Analyte",
"Recurrent Blood Derived Cancer - Peripheral Blood",
"Cell Lines",
"Primary Xenograft Tissue",
"Cell Line Derived Xenograft Tissue")
aux <- data.frame(tissue.code = tissue.code,shortLetterCode,tissue.definition)
} else {
tissue.code <- c('01','02','03','04','05','06','07','08','09','10','11',
'12','13','14','15','16','17','20','40','41','42','50','60','61','99')
tissue.definition <- c("Primary solid Tumor", # 01
"Recurrent Solid Tumor", # 02
"Primary Blood Derived Cancer - Peripheral Blood", # 03
"Recurrent Blood Derived Cancer - Bone Marrow", # 04
"Additional - New Primary", # 05
"Metastatic", # 06
"Additional Metastatic", # 07
"Tissue disease-specific post-adjuvant therapy", # 08
"Primary Blood Derived Cancer - Bone Marrow", # 09
"Blood Derived Normal", # 10
"Solid Tissue Normal", # 11
"Buccal Cell Normal", # 12
"EBV Immortalized Normal", # 13
"Bone Marrow Normal", # 14
"Fibroblasts from Bone Marrow Normal", # 15
"Mononuclear Cells from Bone Marrow Normal", # 16
"Lymphatic Tissue Normal (including centroblasts)", # 17
"Control Analyte", # 20
"Recurrent Blood Derived Cancer - Peripheral Blood", # 40
"Blood Derived Cancer- Bone Marrow, Post-treatment", # 41
"Blood Derived Cancer- Peripheral Blood, Post-treatment", # 42
"Cell line from patient tumor", # 50
"Xenograft from patient not grown as intermediate on plastic tissue culture dish", # 60
"Xenograft grown in mice from established cell lines", #61
"Granulocytes after a Ficoll separation") # 99
aux <- DataFrame(tissue.code = tissue.code,tissue.definition)
}
return(aux)
}
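# Illustrative example of how expandBarcodeInfo() and getBarcodeDefinition()
# work together (the barcode below is used only as an example): for the TCGA
# aliquot barcode "TCGA-OR-A5LR-01A-11D-A29H-01", expandBarcodeInfo() extracts
# patient = "TCGA-OR-A5LR" (characters 1-12)
# sample = "TCGA-OR-A5LR-01A" (characters 1-16)
# tissue.code = "01" (characters 14-15)
# and the merge with getBarcodeDefinition() maps tissue.code "01" to
# shortLetterCode "TP" and tissue.definition "Primary solid Tumor".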
#' @title Retrieve open access maf files from GDC server
#' @description
#' GDCquery_Maf uses the following guide to download maf files
#' https://gdc-docs.nci.nih.gov/Data/Release_Notes/Data_Release_Notes/
#' @param pipelines Four separate variant calling pipelines are implemented for GDC data harmonization.
#' Options: muse, varscan2, somaticsniper, mutect2. For more information:
#' https://gdc-docs.nci.nih.gov/Data/Bioinformatics_Pipelines/DNA_Seq_Variant_Calling_Pipeline/
#' @param tumor a valid TCGA tumor abbreviation (e.g. "ACC"); the corresponding "TCGA-" project is queried
#' @param save.csv Write maf file into a csv document
#' @param directory Directory/Folder where the data will downloaded. Default: GDCdata
#' @export
#' @importFrom data.table fread
#' @import readr stringr
#' @importFrom downloader download
#' @importFrom R.utils gunzip
#' @importFrom tools md5sum
#' @examples
#' \dontrun{
#' acc.muse.maf <- GDCquery_Maf("ACC", pipelines = "muse")
#' acc.varscan2.maf <- GDCquery_Maf("ACC", pipelines = "varscan2")
#' acc.somaticsniper.maf <- GDCquery_Maf("ACC", pipelines = "somaticsniper")
#' acc.mutect.maf <- GDCquery_Maf("ACC", pipelines = "mutect2")
#' }
#' @return A data frame with the maf file information
GDCquery_Maf <- function(tumor,
save.csv = FALSE,
directory = "GDCdata",
pipelines = NULL){
if(is.null(pipelines)) stop("Please select the pipeline argument (muse, varscan2, somaticsniper, mutect2)")
if(grepl("varscan",pipelines, ignore.case = TRUE)) {
workflow.type <- "VarScan2 Variant Aggregation and Masking"
} else if(pipelines == "muse") {
workflow.type <- "MuSE Variant Aggregation and Masking"
} else if(pipelines == "somaticsniper") {
workflow.type <- "SomaticSniper Variant Aggregation and Masking"
} else if(grepl("mutect",pipelines, ignore.case = TRUE)) {
workflow.type <- "MuTect2 Variant Aggregation and Masking"
} else {
stop("Please select the pipeline argument (muse, varscan2, somaticsniper, mutect2)")
}
# Info to user
message("============================================================================")
message(" For more information about MAF data please read the following GDC manual and web pages:")
message(" GDC manual: https://gdc-docs.nci.nih.gov/Data/PDF/Data_UG.pdf")
message(" https://gdc-docs.nci.nih.gov/Data/Bioinformatics_Pipelines/DNA_Seq_Variant_Calling_Pipeline/")
message(" https://gdc.cancer.gov/about-gdc/variant-calling-gdc")
message("============================================================================")
query <- GDCquery(paste0("TCGA-",tumor),
data.category = "Simple Nucleotide Variation",
data.type = "Masked Somatic Mutation",
workflow.type = workflow.type,
access = "open")
if(nrow(query$results[[1]]) == 0) stop("No MAF file found for this type of workflow")
maf <- tryCatch({
tryCatch({
GDCdownload(query, directory = directory, method = "api")
}, error = function(e) {
GDCdownload(query, directory = directory, method = "client")
})
maf <- GDCprepare(query, directory = directory)
maf
}, error = function(e) {
manifest <- getManifest(query)
GDCdownload.aux( "https://api.gdc.cancer.gov/data/", manifest, manifest$filename, ".")
maf <- readSimpleNucleotideVariationMaf(file.path(manifest$id,manifest$filename))
maf
})
if(save.csv) {
fout <- file.path(directory,gsub("\\.gz", "\\.csv",getResults(query)$file_name))
write_csv(maf, fout)
message(paste0("File created: ", fout))
}
return(maf)
}
#' @title Query gene counts of TCGA and GTEx data from the Recount2 project
#' @description
#' TCGAquery_recount2 queries and downloads data produced by the Recount2 project. The user can specify which project and which tissue to query.
#' @param project is a string denoting which project the user wants. Options are "tcga" and "gtex"
#' @param tissue a vector of tissue(s) to download. Options are "adipose tissue", "adrenal", "gland", "bladder","blood", "blood vessel", "bone marrow", "brain", "breast","cervix uteri", "colon", "esophagus", "fallopian tube","heart", "kidney", "liver", "lung", "muscle", "nerve", "ovary","pancreas", "pituitary", "prostate", "salivary", "gland", "skin", "small intestine", "spleen", "stomach", "testis", "thyroid", "uterus", "vagina"
#' @export
#' @examples
#' \dontrun{
#' brain.rec<-TCGAquery_recount2(project = "gtex", tissue = "brain")
#' }
#' @return A named list with one RangedSummarizedExperiment object (rse_gene) per requested project/tissue combination
TCGAquery_recount2<-function(project, tissue=c()){
tissues <- c(
"adipose tissue",
"adrenal",
"gland",
"bladder",
"blood",
"blood vessel",
"bone marrow",
"brain",
"breast",
"cervix uteri",
"colon",
"esophagus",
"fallopian tube",
"heart",
"kidney",
"liver",
"lung",
"muscle",
"nerve",
"ovary",
"pancreas",
"pituitary",
"prostate",
"salivary",
"gland",
"skin",
"small intestine",
"spleen",
"stomach",
"testis",
"thyroid",
"uterus",
"vagina"
)
tissue<-paste(unlist(strsplit(tissue, " ")), collapse="_")
Res<-list()
if(tolower(project)=="gtex"){
for(t_i in tissue){
if(tissue%in%tissues){
con<-"http://duffel.rail.bio/recount/SRP012682/rse_gene_"
con<-paste0(con,tissue,".Rdata")
message(paste0("downloading Range Summarized Experiment for: ", tissue))
load(url(con))
Res[[paste0(project,"_", t_i)]]<-rse_gene
}
else stop(paste0(tissue, " is not an available tissue on Recount2"))
}
return(Res)
}
else if(tolower(project)=="tcga"){
for(t_i in tissue){
if(tissue%in%tissues){
con<-"http://duffel.rail.bio/recount/TCGA/rse_gene_"
con<-paste0(con,tissue,".Rdata")
message(paste0("downloading Range Summarized Experiment for: ", tissue))
load(url(con))
Res[[paste0(project,"_", t_i)]]<-rse_gene
}
else stop(paste0(tissue, " is not an available tissue on Recount2"))
}
return(Res)
}
else stop(paste0(project, " is not a valid project"))
}
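# A minimal usage sketch (assumes the SummarizedExperiment package is available;
# the object names below are illustrative): each element of the list returned by
# TCGAquery_recount2() is the rse_gene RangedSummarizedExperiment loaded above,
# so counts and sample metadata can be pulled out with the standard accessors:
# rec <- TCGAquery_recount2(project = "tcga", tissue = "brain")
# counts <- SummarizedExperiment::assay(rec$tcga_brain)
# pheno <- SummarizedExperiment::colData(rec$tcga_brain)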
#' @title Retrieve open access ATAC-seq files from GDC server
#' @description
#' Retrieve open access ATAC-seq files from GDC server
#' https://gdc.cancer.gov/about-data/publications/ATACseq-AWG
#' Manifest available at: https://gdc.cancer.gov/files/public/file/ATACseq-AWG_Open_GDC-Manifest.txt
#' @param tumor a valid tumor abbreviation used to filter the file names (e.g. "BRCA")
#' @param file.type a string used to filter the files by matching it against the file names (e.g. "txt" or "bigWigs")
#' @export
#' @examples
#' \dontrun{
#' query <- GDCquery_ATAC_seq(file.type = "txt")
#' GDCdownload(query)
#' query <- GDCquery_ATAC_seq(file.type = "bigWigs")
#' GDCdownload(query)
#' }
#' @return A query data frame with the ATAC-seq file information
GDCquery_ATAC_seq <- function(tumor = NULL,
file.type = NULL) {
isServeOK()
results <- readr::read_tsv("https://gdc.cancer.gov/files/public/file/ATACseq-AWG_Open_GDC-Manifest.txt")
if(!is.null(tumor)) results <- results[grep(tumor,results$filename,ignore.case = T),]
if(!is.null(file.type)) results <- results[grep(file.type,results$filename,ignore.case = T),]
colnames(results) <- c("file_id", "file_name", "md5sum", "file_size")
results$state <- "released"
results$data_type <- "ATAC-seq"
results$data_category <- "ATAC-seq"
results$project <- "ATAC-seq"
ret <- data.frame(results=I(list(results)),
tumor = I(list(tumor)),
project = I(list("ATAC-seq")),
data.type = I(list("ATAC-seq")),
data.category = I(list("ATAC-seq")),
legacy = I(list(FALSE)))
return(ret)
}
#' @title Retrieve summary of files per sample in a project
#' @description
#' Retrieve the number of files under each
#' data_category + data_type + experimental_strategy + platform
#' Almost like https://portal.gdc.cancer.gov/exploration
#' @param project A GDC project
#' @param legacy Access the legacy database? Default: FALSE
#' @param files.access Filter by file access ("open" or "controlled").
#' Default: no filter
#' @export
#' @examples
#' summary <- getSampleFilesSummary("TCGA-LUAD")
#' \dontrun{
#' summary <- getSampleFilesSummary(c("TCGA-OV","TCGA-ACC"))
#' }
#' @return A data frame with the number of files per data_category + data_type + experimental_strategy + platform combination for each sample
#' @importFrom data.table dcast
#' @importFrom plyr ldply
getSampleFilesSummary <- function(project, legacy = FALSE, files.access = NA) {
out <- NULL
for(proj in project){
message("Accessing information for project: ", proj)
        url <- getSampleSummaryUrl(proj, legacy, files.access)
x <- getURL(url,fromJSON,simplifyDataFrame = TRUE)
y <- x$data$hits$files
names(y) <- x$data$hits$submitter_id
df <- ldply (y, data.frame)
df <- df %>% data.table::dcast(.id ~ data_category + data_type + experimental_strategy + platform)
colnames(df) <- gsub("_NA","",colnames(df))
df$project <- proj
out <- rbind.fill(out,df)
}
return(out)
}
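# Illustrative note on the reshaping above: dcast() yields one row per case
# (the ".id" column holds the submitter_id) and one column per observed
# combination of data_category, data_type, experimental_strategy and platform,
# for example a column such as
# "Transcriptome Profiling_Gene Expression Quantification_RNA-Seq"
# (the exact column names depend on the data available for the project); the
# cell values count how many files of that kind are available for the case,
# since dcast() falls back to a length() aggregate when several files share a
# combination.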
getSampleSummaryUrl <- function(project,legacy = FALSE, files.access = NA){
# Get manifest using the API
baseURL <- ifelse(legacy,"https://api.gdc.cancer.gov/legacy/cases/?","https://api.gdc.cancer.gov/cases/?")
options.pretty <- "pretty=true"
options.expand <- "expand=summary,summary.data_categories,files"
#option.size <- paste0("size=",getNbFiles(project,data.category,legacy))
option.size <- paste0("size=",1000)
option.format <- paste0("format=JSON")
options.filter <- paste0("filters=",
URLencode('{"op":"and","content":['), # Start json request
URLencode('{"op":"in","content":{"field":"cases.project.project_id","value":["'),
project,
URLencode('"]}}'))
if(!any(is.na(files.access))) {
options.filter <- paste0(options.filter,addFilter("files.access", files.access))
}
# Close json request
options.filter <- paste0(options.filter, URLencode(']}'))
url <- paste0(baseURL,paste(options.pretty,
options.expand,
option.size,
options.filter,
option.format,
sep = "&"))
return(url)
}
Source: /R/query.R (repository: wendashou/TCGAbiolinks, language: R, no license specified)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pl.3smoothtxtgg.R
\name{pl.3smoothtxtgg}
\alias{pl.3smoothtxtgg}
\title{Scatter smooth plot with text overlay using ggplot2}
\usage{
pl.3smoothtxtgg(x,y,txt,size,title,xlab,ylab)
}
\arguments{
\item{x}{:a numeric vector}
\item{y}{:a numeric vector}
\item{txt}{:a vector used as labels}
\item{size}{:text size; the default is 3}
\item{title}{:graph title}
\item{xlab}{:x-axis label}
\item{ylab}{:y-axis label}
}
\description{
Generate a scatter plot with text overlay, with a smooth curve fitted by loess.
}
\examples{
pl.3smoothtxtgg(mtcars[,1], mtcars[,3], row.names(mtcars), 3, "MPG v. DISP","mpg","disp")
}
Source: /man/pl.3smoothtxtgg.Rd (repository: cran/YRmisc, language: R, no license specified)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_node_attr.R
\name{set_node_attr}
\alias{set_node_attr}
\title{Set node attributes}
\usage{
set_node_attr(x, nodes = NULL, node_attr, values)
}
\arguments{
\item{x}{either a graph object of class \code{dgr_graph} that is created
using \code{create_graph}, or a node data frame.}
\item{nodes}{an optional vector of node IDs for filtering list of
nodes present in the graph.}
\item{node_attr}{the name of the attribute to set.}
\item{values}{the values to be set for the chosen attribute for the
chosen nodes.}
}
\value{
either a graph object of class \code{dgr_graph} or a node
data frame, depending on what type of object was supplied to \code{x}.
}
\description{
From a graph object of class \code{dgr_graph} or a node
data frame, set node attribute properties for one or more nodes.
}
\examples{
\dontrun{
# Create a simple graph
nodes <-
create_nodes(nodes = c("a", "b", "c", "d"),
type = "letter",
label = TRUE,
value = c(3.5, 2.6, 9.4, 2.7))
edges <-
create_edges(from = c("a", "b", "c"),
to = c("d", "c", "a"),
rel = "leading_to")
graph <-
create_graph(nodes_df = nodes,
edges_df = edges)
# Set attribute 'color = "green"' for nodes "a" and "c" using
# the graph object
graph <-
set_node_attr(x = graph, nodes = c("a", "c"),
node_attr = "color", values = "green")
# Set attribute 'color = "green"' for nodes "a" and "c" using
# the node data frame
nodes <-
set_node_attr(x = nodes, nodes = c("a", "c"),
node_attr = "color", values = "green")
# Set attribute 'color = "blue"' for all nodes using
# the node data frame
nodes <-
set_node_attr(x = nodes, node_attr = "color", values = "blue")
}
}
Source: /man/set_node_attr.Rd (repository: UweBlock/DiagrammeR, language: R, no license specified)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_activity_presence.R
\name{filter_activity_presence}
\alias{filter_activity_presence}
\alias{filter_activity_presence.eventlog}
\alias{filter_activity_presence.grouped_eventlog}
\alias{ifilter_activity_presence}
\title{Filter: Activity Presence}
\usage{
filter_activity_presence(eventlog, activities, method, reverse)
\method{filter_activity_presence}{eventlog}(eventlog, activities = NULL,
method = c("all", "one_of", "none"), reverse = FALSE)
\method{filter_activity_presence}{grouped_eventlog}(eventlog,
activities = NULL, method = c("all", "one_of", "none"), reverse = FALSE)
ifilter_activity_presence(eventlog)
}
\arguments{
\item{eventlog}{The dataset to be used. Should be a (grouped) eventlog object.}
\item{activities}{Character vector containing one or more activity identifiers.}
\item{method}{Filter method. If "all", each of the activities should be present. If "one_of", at least one of them should be present. If "none", none of the activities are allowed to occur in the filtered traces.}
\item{reverse}{Logical, indicating whether the selection should be reversed.}
}
\value{
When given an eventlog, it will return a filtered eventlog. When given a grouped eventlog, the filter will be applied
in a stratified way (i.e. separately within each group). The returned eventlog will be grouped on the same variables as
the original event log.
}
\description{
Filters cases based on the presence (or absence) of activities
}
\details{
This function allows filtering cases that contain certain activities.
It requires as input a vector containing one or more activity labels and it has a method
argument. The latter can have the values all, none or one_of.
\itemize{
\item When set to `all`, it means
that all the specified activity labels must be present for a case to be selected
\item `none` means
that they are not allowed to be present.
\item `one_of` means that at least one of them must be
present.
}
When only one activity label is supplied, note that methods all and one_of will be identical.
}
\section{Methods (by class)}{
\itemize{
\item \code{eventlog}: Filter event log on presence of activities.
\item \code{grouped_eventlog}: Filter grouped event log on presence of activities.
}}
\seealso{
\code{vignette("filters", "edeaR")}
}
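A hedged usage sketch (not taken from the package's own documentation; the
"patients" event log from the eventdataR package and the activity labels used
below are assumptions):
library(edeaR)
library(eventdataR)
# keep only the cases in which an X-Ray was performed
xray_cases <- filter_activity_presence(patients, activities = "X-Ray", method = "all")
# keep only the cases in which neither an X-Ray nor a Blood test occurs
no_tests <- filter_activity_presence(patients,
                                     activities = c("X-Ray", "Blood test"),
                                     method = "none")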
Source: /man/filter_activity_presence.Rd (repository: hectorperez8/edeaR, language: R, no license specified)
library(PhyloMeasures)
### Name: pd.pvalues
### Title: Computes the p-values of the unrooted Phylogenetic Diversity
### measure
### Aliases: pd.pvalues
### ** Examples
#Load phylogenetic tree of bird families from package "ape"
data(bird.families, package = "ape")
#Create 100 random communities with 50 families each
comm = matrix(0,nrow = 100,ncol = length(bird.families$tip.label))
for(i in 1:nrow(comm)) {comm[i,sample(1:ncol(comm),50)] = 1}
colnames(comm) = bird.families$tip.label
#Calculate p-values under the uniform model
pd.pvalues(bird.families,comm, reps=1000)
# Create random abundance weights
weights = runif(length(bird.families$tip.label))
names(weights) = bird.families$tip.label
#Calculate p-values under the sequential model, using the random abundance weights
pd.pvalues(bird.families,comm,null.model="sequential",
abundance.weights=weights, reps=1000)
Source: /data/genthat_extracted_code/PhyloMeasures/examples/pd.pvalues.Rd.R (repository: surayaaramli/typeRrh, language: R, no license specified)
library(geoSpectral)
### Name: names,Spectra-method
### Title: The Names of a 'Spectra' object
### Aliases: names,Spectra-method
### ** Examples
x <- spc.example_spectra()
names(x)
/data/genthat_extracted_code/geoSpectral/examples/names-Spectra-method.Rd.R
### Tests of the functions in the tab "Chart types"
### https://echarts4r.john-coene.com/articles/chart_types.html
df <- data.frame(
x = seq(3),
y = c(1, 3, 9),
z = c(2, 5, 4),
w = c(3, 4, 3)
)
test_that("e_line plot has the good data structure and type", {
plot <- df |>
e_charts(x) |>
e_line(z)
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(1, 2)), list(value = c(2, 5)), list(value = c(3, 4)))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"line"
)
})
test_that("e_area plot has the good data structure and type", {
plot <- df |>
e_charts(x) |>
e_area(w)
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(1, 3)), list(value = c(2, 4)), list(value = c(3, 3)))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"line"
)
})
test_that("e_bar plot has the good data structure and type", {
plot <- df |>
e_charts(x) |>
e_bar(y, name = "Serie 1")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(1, 1)), list(value = c(2, 3)), list(value = c(3, 9)))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"bar"
)
})
test_that("e_step plot has the good data structure and type", {
plot <- df |>
e_charts(x) |>
e_step(z, name = "Serie 2")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(1, 2)), list(value = c(2, 5)), list(value = c(3, 4)))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"line"
)
})
test_that("e_scatter plot has the good data structure and type", {
plot <- df |>
e_charts(x) |>
e_scatter(y)
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(1, 1)), list(value = c(2, 3)), list(value = c(3, 9)))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"scatter"
)
plot2 <- df |>
e_charts(x) |>
e_scatter(y) |>
e_visual_map(y, scale = e_scale)
expect_s3_class(plot2, "echarts4r")
expect_s3_class(plot2, "htmlwidget")
expect_equal(
plot2$x$opts$series[[1]]$data,
list(list(value = c(1, 1)), list(value = c(2, 3)), list(value = c(3, 9)))
)
expect_equal(
plot2$x$opts$series[[1]]$type,
"scatter"
)
})
test_that("e_effect_scatter plot has the good data structure and type", {
plot <- df |>
e_charts(x) |>
e_effect_scatter(y)
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(1, 1)), list(value = c(2, 3)), list(value = c(3, 9)))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"effectScatter"
)
plot2 <- df |>
e_charts(x) |>
    e_effect_scatter(y) |>
e_visual_map(z, scale = e_scale)
expect_s3_class(plot2, "echarts4r")
expect_s3_class(plot2, "htmlwidget")
expect_equal(
plot2$x$opts$series[[1]]$data,
list(list(value = c(1, 1)), list(value = c(2, 3)), list(value = c(3, 9)))
)
expect_equal(
plot2$x$opts$series[[1]]$type,
"effectScatter"
)
})
test_that("e_polar plot has the good data structure and type", {
plot <- df |>
e_charts(x) |>
e_polar() |>
e_angle_axis(x) |> # angle = x
e_radius_axis() |>
e_bar(y, coord_system = "polar")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
c(list(c(1)), list(c(3)), list(c(9)))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"bar"
)
})
# test_that("e_radius plot has the good data structure and type", {
# plot <- df |>
# head(10) |>
# e_charts(x) |>
# e_radius_axis(x)
#
# plot2 <- df |>
# head(10) |>
# e_charts(x) |>
# e_radius_axis(x) |>
# e_bar(y, coord_system = "polar") |>
# e_scatter(z, coord_system = "polar")
#
# expect_s3_class(plot, "echarts4r")
# expect_s3_class(plot, "htmlwidget")
#
# expect_s3_class(plot2, "echarts4r")
# expect_s3_class(plot2, "htmlwidget")
# })
test_that("e_candle plot has the good data structure and type", {
date <- c(
"2017-01-01",
"2017-01-02",
"2017-01-03",
"2017-01-04",
"2017-03-05",
"2017-01-06",
"2017-01-07"
)
stock <- data.frame(
date = date,
opening = c(200.60, 200.22, 198.43, 199.05, 203.54, 203.40, 208.34),
closing = c(200.72, 198.85, 199.05, 203.73, 204.08, 208.11, 211.88),
low = c(197.82, 198.07, 197.90, 198.10, 202.00, 201.50, 207.60),
high = c(203.32, 200.67, 200.00, 203.95, 204.90, 208.44, 213.17)
)
plot <- stock |>
e_charts(date) |>
e_candle(opening, closing, low, high) |>
e_y_axis(min = 190, max = 220)
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(
list(value = c(200.72, 200.60, 197.82, 203.32)),
list(value = c(198.85, 200.22, 198.07, 200.67)),
list(value = c(199.05, 198.43, 197.90, 200.00)),
list(value = c(203.73, 199.05, 198.10, 203.95)),
list(value = c(204.08, 203.54, 202.00, 204.90)),
list(value = c(208.11, 203.40, 201.50, 208.44)),
list(value = c(211.88, 208.34, 207.60, 213.17)))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"candlestick"
)
})
test_that("e_funnel plot has the good data structure and type", {
funnel <- data.frame(stage = c("View", "Click", "Purchase"), value = c(80, 30, 20))
plot <- funnel |>
e_charts() |>
e_funnel(value, stage) |>
e_title("Funnel")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(80), name = "View"), list(value = c(30), name = "Click"), list(value = c(20), name = "Purchase"))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"funnel"
)
})
test_that("e_sankey plot has the good data structure and type", {
sankey <- data.frame(
source = c("a", "b", "c", "d", "c"),
target = c("b", "c", "d", "e", "e"),
value = ceiling(rnorm(5, 10, 1)),
stringsAsFactors = FALSE
)
plot <- sankey |>
e_charts() |>
e_sankey(source, target, value) |>
e_title("Sankey chart")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(name = c("a")), list(name = c("b")), list(name = c("c")), list(name = c("d")), list(name = c("e")))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"sankey"
)
})
### Note: the heatmap data is deliberately kept small to limit what has to be written in expect_equal()
test_that("e_heatmap plot has the good data structure and type", {
set.seed(1)
v <- LETTERS[1:5]
matrix <- data.frame(
x = sample(v, 5, replace = TRUE),
y = sample(v, 5, replace = TRUE),
z = rnorm(5, 10, 1),
stringsAsFactors = FALSE
) |>
dplyr::group_by(x, y) |>
dplyr::summarise(z = sum(z)) |>
dplyr::ungroup()
plot <- matrix |>
e_charts(x) |>
e_heatmap(y, z) |>
e_visual_map(z) |>
e_title("Heatmap")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c("A", "C", "22.109933")), list(value = c("B", "C", "10.763593")), list(value = c("D", "B", " 9.994233")), list(value = c("E", "A", " 9.200991")))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"heatmap"
)
})
test_that("e_parallel plot has the good data structure and type", {
df <- data.frame(
price = c(2, 4, 3),
amount = c(12, 3, 1),
letter = LETTERS[1:3]
)
plot <- df |>
e_charts() |>
e_parallel(price, amount, letter) |>
e_title("Parallel chart")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series$data,
list(list(c("2"), c("12"), c("A")), list(c("4"), c("3"), c("B")), list(c("3"), c("1"), c("C")))
)
expect_equal(
plot$x$opts$series$type,
"parallel"
)
})
test_that("e_pie plot has the good data structure and type", {
plot <- mtcars |>
head(5) |>
tibble::rownames_to_column("model") |>
e_charts(model) |>
e_pie(carb) |>
e_title("Pie chart")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(4), name = c("Mazda RX4")), list(value = c(4), name = c("Mazda RX4 Wag")), list(value = c(1), name = c("Datsun 710")), list(value = c(1), name = c("Hornet 4 Drive")), list(value = c(2), name = c("Hornet Sportabout")))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"pie"
)
})
test_that("e_donut plot has the good data structure and type", {
plot <- mtcars |>
head(5) |>
tibble::rownames_to_column("model") |>
e_charts(model) |>
e_pie(carb, radius = c("50%", "70%")) |>
e_title("Donut chart")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(4), name = c("Mazda RX4")), list(value = c(4), name = c("Mazda RX4 Wag")), list(value = c(1), name = c("Datsun 710")), list(value = c(1), name = c("Hornet 4 Drive")), list(value = c(2), name = c("Hornet Sportabout")))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"pie"
)
})
test_that("e_rosetype plot has the good data structure and type", {
plot <- mtcars |>
head(5) |>
tibble::rownames_to_column("model") |>
e_charts(model) |>
e_pie(hp, roseType = "radius")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c(110), name = c("Mazda RX4")), list(value = c(110), name = c("Mazda RX4 Wag")), list(value = c(93), name = c("Datsun 710")), list(value = c(110), name = c("Hornet 4 Drive")), list(value = c(175), name = c("Hornet Sportabout")))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"pie"
)
})
### TODO: build the equivalent of plot$x$opts$series[[1]]$data for expect_equal() in the commented-out tests below
# test_that("e_sunburst plot has the good data structure and type", {
# df <- dplyr::tibble(
# name = c("earth", "mars", "venus"), value = c(30, 40, 30), # 1st level
# itemStyle = dplyr::tibble(color = c(NA, 'red', 'blue')),
# children = list(
# dplyr::tibble(name = c("land", "ocean"), value = c(10,20), # 2nd level
# children = list(
# dplyr::tibble(name = c("forest", "river"), value = c(3,7)), # 3rd level
# dplyr::tibble(name = c("fish", "kelp"), value = c(10,5),
# children = list(
# dplyr::tibble(name = c("shark", "tuna"), value = c(2,6)), # 4th level
# NULL # kelp
# ))
# )),
# dplyr::tibble(name = c("crater", "valley"), value = c(20,20)),
# NULL # venus
# )
# )
#
# plot <- df |>
# e_charts() |>
# e_sunburst() |>
# e_title("Sunburst")
#
# expect_s3_class(plot, "echarts4r")
# expect_s3_class(plot, "htmlwidget")
#
# expect_equal(
# plot$x$opts$series[[1]]$data,
# list(list(value = c(110), name = c("Mazda RX4")), list(value = c(110), name = c("Mazda RX4 Wag")), list(value = c(93), name = c("Datsun 710")), list(value = c(110), name = c("Hornet 4 Drive")), list(value = c(175), name = c("Hornet Sportabout")))
# )
# expect_equal(
# plot$x$opts$series[[1]]$type,
# "sunburst"
# )
# })
#
#
# test_that("e_tree plot has the good data structure and type", {
# tree <- dplyr::tibble(
# name = "earth", # 1st level
# children = list(
# dplyr::tibble(name = c("land", "ocean"), # 2nd level
# children = list(
# dplyr::tibble(name = c("forest", "river")), # 3rd level
# dplyr::tibble(name = c("fish", "kelp"),
# children = list(
# dplyr::tibble(name = c("shark", "tuna"), # 4th level
# NULL # kelp
# ))
# )
# ))
# )
# )
#
# plot <- tree |>
# e_charts() |>
# e_tree() |>
# e_title("Tree graph")
#
# expect_s3_class(plot, "echarts4r")
# expect_s3_class(plot, "htmlwidget")
# })
#
# test_that("e_treemap plot has the good data structure and type", {
# df <- dplyr::tibble(
# name = c("earth", "mars", "venus"), value = c(30, 40, 30), # 1st level
# itemStyle = dplyr::tibble(color = c(NA, 'red', 'blue')),
# children = list(
# dplyr::tibble(name = c("land", "ocean"), value = c(10,20), # 2nd level
# children = list(
# dplyr::tibble(name = c("forest", "river"), value = c(3,7)), # 3rd level
# dplyr::tibble(name = c("fish", "kelp"), value = c(10,5),
# children = list(
# dplyr::tibble(name = c("shark", "tuna"), value = c(2,6)), # 4th level
# NULL # kelp
# ))
# )),
# dplyr::tibble(name = c("crater", "valley"), value = c(20,20)),
# NULL # venus
# )
# )
#
# plot <- df |>
# e_charts() |>
# e_treemap() |>
# e_title("Treemap chart")
#
# expect_s3_class(plot, "echarts4r")
# expect_s3_class(plot, "htmlwidget")
# })
test_that("e_river plot has the good data structure and type", {
set.seed(1)
dates <- c("2020-10-08", "2020-10-09", "2020-10-10")
river <- data.frame(
dates = dates,
apples = runif(length(dates)),
bananas = runif(length(dates)),
pears = runif(length(dates))
)
plot <- river |>
e_charts(dates) |>
e_river(apples) |>
e_river(bananas) |>
e_tooltip(trigger = "axis") |>
e_title("River charts", "(Streamgraphs)")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(
list(c("2020-10-08"), c("0.2655087"), c("apples")),
list(c("2020-10-09"), c("0.3721239"), c("apples")),
list(c("2020-10-10"), c("0.5728534"), c("apples")),
list(c("2020-10-08"), c("0.9082078"), c("bananas")),
list(c("2020-10-09"), c("0.2016819"), c("bananas")),
list(c("2020-10-10"), c("0.8983897"), c("bananas"))
)
)
expect_equal(
plot$x$opts$series[[1]]$type,
"themeRiver"
)
})
test_that("e_calendar plot has the good data structure and type", {
set.seed(1)
dates <- seq.Date(as.Date("2017-01-01"), as.Date("2017-01-05"), by = "day")
values <- rnorm(length(dates), 20, 6)
year <- data.frame(date = dates, values = values)
plot <- year |>
e_charts(date) |>
e_calendar(range = "2017") |>
e_heatmap(values, coord_system = "calendar") |>
e_visual_map(max = 30) |>
e_title("Calendar", "Heatmap")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = c("2017-01-01", "16.24128")), list(value = c("2017-01-02", "21.10186")), list(value = c("2017-01-03", "14.98623")), list(value = c("2017-01-04", "29.57168")), list(value = c("2017-01-05", "21.97705")))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"heatmap"
)
})
test_that("e_gauge plot has the good data structure and type", {
plot <- e_charts() |>
e_gauge(41, "PERCENT") |>
e_title("Gauge")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(list(value = 41, name = "PERCENT"))
)
expect_equal(
plot$x$opts$series[[1]]$type,
"gauge"
)
})
test_that("e_radar plot has the good data structure and type", {
set.seed(1)
df <- data.frame(
x = LETTERS[1:5],
y = round(runif(5, 1, 5), 6),
z = round(runif(5, 3, 7), 6)
)
plot <- df |>
e_charts(x) |>
e_radar(y, max = 7, name = "radar") |>
e_radar(z, max = 7, name = "chart") |>
e_tooltip(trigger = "item")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(
list(value = c(2.062035, 2.488496, 3.291413, 4.632831, 1.806728), name = "radar"),
list(value = c(6.593559, 6.778701, 5.643191, 5.516456, 3.247145), name = "chart")
)
)
expect_equal(
plot$x$opts$series[[1]]$type,
"radar"
)
})
test_that("e_cloud plot has the good data structure and type", {
words <- function(n = 5000) {
set.seed(1)
a <- do.call(paste0, replicate(5, sample(LETTERS, n, TRUE), FALSE))
paste0(a, sprintf("%04d", sample(9999, n, TRUE)), sample(LETTERS, n, TRUE))
}
tf <- data.frame(terms = words(5))
set.seed(1)
tf$freq <- round(rnorm(5, 55, 10), 5)
tf <- tf |>
dplyr::arrange(-freq)
plot <- tf |>
e_color_range(freq, color) |>
e_charts() |>
e_cloud(terms, freq, color, shape = "circle", sizeRange = c(3, 15)) |>
e_title("Wordcloud", "Random strings")
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
list(
list(value = 70.95281, name = "ARJIY6526F", textStyle = list(color = "#F6EFA6")),
list(value = 58.29508, name = "BSVON5071J", textStyle = list(color = "#D78071")),
list(value = 56.83643, name = "DKUJE7845T", textStyle = list(color = "#D4796C")),
list(value = 48.73546, name = "YWANU8677A", textStyle = list(color = "#C45052")),
list(value = 46.64371, name = "GNUGI5922C", textStyle = list(color = "#BF444C"))
)
)
expect_equal(
plot$x$opts$series[[1]]$type,
"wordCloud"
)
})
test_that("e_liquid plot has the good data structure and type", {
liquid <- data.frame(val = c(0.6, 0.5, 0.4))
plot <- liquid |>
e_charts() |>
e_liquid(val)
expect_s3_class(plot, "echarts4r")
expect_s3_class(plot, "htmlwidget")
expect_equal(
plot$x$opts$series[[1]]$data,
c(0.6, 0.5, 0.4)
)
expect_equal(
plot$x$opts$series[[1]]$type,
"liquidFill"
)
})
test_that("e_mark_p has good data structure", {
library(dplyr)
data(EuStockMarkets)
dd <- as.data.frame(EuStockMarkets) |>
slice_head(n = 50) |>
mutate(day = 1:n())
plot <- dd |>
e_charts(day) |>
e_line(SMI, symbol = "none") |>
e_mark_p(
type = "line",
serie_index = 1,
data = list(
list(xAxis = dd$day[10], yAxis = dd$SMI[10]),
list(xAxis = dd$day[37], yAxis = dd$SMI[37])
)
)
expect_equal(
plot$x$opts$series[[1]]$markLine$data[[1]][[1]]$xAxis,
10
)
expect_equal(
plot$x$opts$series[[1]]$markLine$data[[1]][[1]]$yAxis,
1716.3
)
})
/tests/testthat/test-chart_types.R
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
ui <- fluidPage(
plotOutput("plot1")
)
# The server function outputs a plot called plot1. To do this the image is rendered with renderImage(), which returns a list.
# Within that list I specified the filename and dimensions.
# I added deleteFile = FALSE so the file is not deleted after each render, which allowed me to experiment with various dimensions.
server <- function(input, output, session) {
output$plot1 <- renderImage({
list(src = "ore_pct_rural.png",
width="850",
height="850")
}, deleteFile = FALSE)
}
# Running the application
shinyApp(ui = ui, server = server)
/milestone-6/app.R
.onLoad <- function(libname, pkgname) {
## set options
options(mxLSTM.debug = FALSE)
}
/R/zzz.R
library(tidyverse)
# 1. Combine the crawled data
batter = read_csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\batter_predict.csv")
colnames(batter)
july_batter = read_csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\kbo_record_hitter_july.csv")
july_batter = july_batter %>% mutate(year = 2020, month = 7) %>% select(-c(X1, 순위))
aug_batter = read_csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\kbo_record_hitter_august.csv")
aug_batter = aug_batter %>% mutate(year = 2020, month = 8) %>% select(-c(X1, 순위))
sep_batter = read_csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\kbo_record_hitter_septemberplus.csv")
sep_batter = sep_batter %>% mutate(year = 2020, month = 9) %>% select(-c(X1, 순위))
batter_left = rbind(july_batter, aug_batter, sep_batter)
colnames(batter_left) = c('P_ID', 'T_ID', 'AVG', 'AB', 'HIT', 'H2', 'H3', 'HR', 'RBI', 'BB', 'HP', 'KK', 'GD', 'year', 'month')
batter_left['AVG'] = round(as.numeric(batter_left$AVG), 3)
batter_left$AVG[is.na(batter_left$AVG) == T] = 0
batter_left = batter_left %>% mutate(PA = AB + BB + HP)
batter_left = batter_left %>% mutate(T_ID = case_when(
T_ID == '한화' ~ 'HH',
T_ID == 'KIA' ~ 'HT',
T_ID == '삼성' ~ 'SS',
T_ID == '두산' ~ 'OB',
T_ID == '키움' ~ 'WO',
T_ID == '롯데' ~ 'LT',
TRUE ~ T_ID
))
# 2. Convert P_ID and T_ID to player/team codes
player = read.csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\선수\\2020빅콘테스트_스포츠투아이_제공데이터_선수_2020.csv")
player_2016 = read.csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\선수\\2020빅콘테스트_스포츠투아이_제공데이터_선수_2016.csv")
player_2017 = read.csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\선수\\2020빅콘테스트_스포츠투아이_제공데이터_선수_2017.csv")
player_2019 = read.csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\선수\\2020빅콘테스트_스포츠투아이_제공데이터_선수_2019.csv")
player_2016 = player_2016 %>% select(PCODE, NAME, T_ID)
player_2017 = player_2017 %>% select(PCODE, NAME, T_ID)
player_2019 = player_2019 %>% select(PCODE, NAME, T_ID)
player = player %>% select(NAME, PCODE, T_ID)
library(sqldf)
batter_left = sqldf('SELECT year, month, P_ID, PCODE, batter_left.T_ID, PA, AB, HIT, H2, H3, HR, RBI, BB, HP, KK, GD, AVG FROM batter_left LEFT OUTER JOIN player ON batter_left.P_ID = player.NAME AND batter_left.T_ID = player.T_ID' )
colnames(player_2017) = c( 'PCODE' , 'P_ID', 'T_ID')
colnames(player_2019) = c( 'PCODE' , 'P_ID', 'T_ID')
colnames(player_2016) = c( 'PCODE' , 'P_ID', 'T_ID')
player_before = rbind(player_2017, player_2019, player_2016) %>% unique()
player_before = player_before %>% filter(P_ID != '김재현' & T_ID != 'SK' & T_ID != 'SS')
player_before = player_before %>% select(-T_ID) %>% unique()
batter_left[is.na(batter_left$PCODE) == T, 'PCODE'] = batter_left[is.na(batter_left$PCODE) == T, ] %>% left_join(player_before, by = c('P_ID')) %>% select(PCODE.y)
new_player = batter_left[is.na(batter_left$PCODE) == T,] %>% select(P_ID, PCODE, T_ID) %>% unique()
new_PID = c(50350,64209,65462, 50469,64896, 50802, 65522, 67893, 50203, 62332,69104, 67063, 67449)
new_player['PCODE'] = new_PID
batter_left = batter_left %>% left_join(new_player, by = c('P_ID', 'T_ID')) %>% mutate(P_ID = case_when(
is.na(PCODE.x) == T ~ as.numeric(PCODE.y),
TRUE ~ as.numeric(PCODE.x)
)) %>% select(-c(PCODE.x, PCODE.y))
# Add number of games between teams and batting order
schedule = read.csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\schedule_left.csv")
batter_left = batter_left %>% left_join(schedule, by = c('year', 'month', 'T_ID'))
order = read.csv("C:\\Users\\dhxog\\Desktop\\ESC_summer\\Baseball_ChilliShrimp\\data\\batter_tidy.csv")
order = order %>% filter(year == 2020, month == 7) %>% select(P_ID, BAT_ORDER) %>% unique()
batter_left = batter_left %>% left_join(order, by = 'P_ID')
batter_left %>% summary
write.csv(batter_left, "batter_left.csv", row.names = F)
/code/preprocessing/batter_left.R
library(gtrendsR)
data = gtrends(c("puasa","ramadhan","tarawih","sahur","mudik"),geo = "ID")
str(data)
data_int = data$interest_over_time
data_city = data$interest_by_city
data_query = data$related_queries
save(data_int,data_city,data_query,file = "bahan puasa.rda")
/_posts/gtrends/puasa 2021/skrip.R
#' Efficacy, toxicity and futility stopping bounds
#'
#' This function generates efficacy, futility and toxicity stopping bounds for a given set of design parameters.
#'
#' @param p0 null response rate
#' @param p1 target response rate
#' @param N maximum sample size
#' @param n1 the sample size to start interim monitoring
#' @param ptox_l a toxicity rate considered safe; we want to avoid stopping for safety when the true toxicity rate is at or below ptox_l
#' @param ptox_u a toxicity rate above ptox_u is overly toxic, requiring an immediate stop
#' @param lambda eff-tox correlation
#' @param gam probability of mistakenly stopping for toxicity when ptox<=ptox_l
#' @param w weights used to combine the expected sample sizes across scenarios into a single criterion
#' @param alpha type I error rate
#' @param beta type II error rate (power = 1 - beta)
#' @importFrom foreach foreach %do%
#' @export
#' @examples three.tail(N=44, n1=1, p0=0.6, p1=0.8, ptox_l=0.05, ptox_u=0.35,w=0.5,alpha=0.05, beta=0.20,gam=0.1)
three.tail <- function(N, n1, p0, p1,
ptox_l, ptox_u, lambda=1,
w,alpha, beta, gam)
{
##remove tails that generate the same bound
t_tail <- rm.tails(N, ptox_u, seq(0.5, 0.999, 0.001))
## generate candidate tails
out <- two.tail(N, n1, p0, p1, alpha, beta)
i <- NULL
tails <- foreach(i=1:length(t_tail),.combine="rbind")%do%{
cbind(out[c("e_tail","f_tail")], rep(t_tail[i], nrow(out)))
}
##safe,nonefficacious
ppair00 <- prob4(tox=ptox_l, resp=p0, lambda)
##safe,efficacious
ppair01 <- prob4(tox=ptox_l, resp=p1, lambda)
##toxic,nonefficacious
ppair10 <- prob4(tox=ptox_u, resp=p0, lambda)
##toxic,efficacious
ppair11 <- prob4(tox=ptox_u, resp=p1, lambda)
##design operating characteristics
design_opers <- apply(as.matrix(tails), 1, function(x){
e <- find.bound(tail=x[1], N=N, p=p0)
f <- find.bound(tail=x[2], N=N, p=p1)
last <- min(N, max(which(diff(e) == 0)) + 1) # last chance to cross e
f <- pmax(f, e[last] - last + (1 : N))
b <- find.bound(tail=x[3], N=N, p=ptox_u)
fun00 <- eftcon(p=ppair00, e, f, b, n1=n1)
##expected sample size under h00
es00 <- ess(1-fun00$accum)
##type 1 error rate under h00
err1 <- fun00$effstop[N]
if(err1<alpha){
fun01 <- eftcon(p=ppair01, e, f, b, n1=n1)
es01 <- ess(1-fun01$accum)
err2 <- 1-fun01$effstop[N]
err3 <- max(fun00$toxstop[N],fun01$toxstop[N])
}else{
err2 <- err3 <- es01 <- es10 <- es11 <- NA
}
if(err1<alpha & err2<beta & err3<gam){
fun10 <- eftcon(p=ppair10, e, f, b, n1=n1)
es10 <- ess(1-fun10$accum)
fun11 <- eftcon(ppair11, e, f, b, n1=n1)
es11 <- ess(1-fun11$accum)
}else{
es10 <- es11 <- NA
}
en <- t(w)%*%c(es00, es10, es11)
c(err1, err2, err3, en, x, es00, es01, es10, es11)
})
alpha_vec <- design_opers[1,]
pwr_vec <- 1-design_opers[2,]
gam_vec <- design_opers[3,]
es_vec <- design_opers[4,]
out <- data.frame("alpha"=alpha_vec,
"power"=pwr_vec,
"gam"=gam_vec,
"EN"=es_vec,
"e_tail"=design_opers[5,],
"f_tail"=design_opers[6,],
"t_tail"=design_opers[7,],
"es00"=design_opers[8,],
"es01"=design_opers[9,],
"es10"=design_opers[10,],
"es11"=design_opers[11,])
out <- out[which(pwr_vec>1-beta & gam_vec<gam), ]
  if(nrow(out) == 0) warning('no proper design')
out
}
#out <- three.tail(N=41, n1=1, alpha=0.05, beta=0.20,
#gam=0.10,p0=0.2, p1=0.4, ptox_l=0.05, ptox_u=0.35)
#out[which.min(out$EN),]
/R/efficacy_futility_toxicity.R
reshapeData <- function(d)
{
d$Trial1 = paste(d$Utterance0,d$TargetColor0,d$OtherColor0,d$Slider00,d$Slider10, d$Slider20,d$Slider30,d$Slider40,d$Slider50,d$Slider60,d$Slider70, d$Slider80,d$Slider90,d$Slider100)
d$Trial2 = paste(d$Utterance1,d$TargetColor1,d$OtherColor1,d$Slider01,d$Slider11, d$Slider21,d$Slider31,d$Slider41,d$Slider51,d$Slider61,d$Slider71, d$Slider81,d$Slider91,d$Slider101)
d$Trial3 = paste(d$Utterance2,d$TargetColor2,d$OtherColor2,d$Slider02,d$Slider12, d$Slider22,d$Slider32,d$Slider42,d$Slider52,d$Slider62,d$Slider72, d$Slider82,d$Slider92,d$Slider102)
d$Trial4 = paste(d$Utterance3,d$TargetColor3,d$OtherColor3,d$Slider03,d$Slider13, d$Slider23,d$Slider33,d$Slider43,d$Slider53,d$Slider63,d$Slider73, d$Slider83,d$Slider93,d$Slider103)
d$Trial5 = paste(d$Utterance4,d$TargetColor4,d$OtherColor4,d$Slider04,d$Slider14, d$Slider24,d$Slider34,d$Slider44,d$Slider54,d$Slider64,d$Slider74, d$Slider84,d$Slider94,d$Slider104)
d$Trial6 = paste(d$Utterance5,d$TargetColor5,d$OtherColor5,d$Slider05,d$Slider15, d$Slider25,d$Slider35,d$Slider45,d$Slider55,d$Slider65,d$Slider75, d$Slider85,d$Slider95,d$Slider105)
d$Trial7 = paste(d$Utterance6,d$TargetColor6,d$OtherColor6,d$Slider06,d$Slider16, d$Slider26,d$Slider36,d$Slider46,d$Slider56,d$Slider66,d$Slider76, d$Slider86,d$Slider96,d$Slider106)
d$Trial8 = paste(d$Utterance7,d$TargetColor7,d$OtherColor7,d$Slider07,d$Slider17, d$Slider27,d$Slider37,d$Slider47,d$Slider57,d$Slider67,d$Slider77, d$Slider87,d$Slider97,d$Slider107)
d$Trial9 = paste(d$Utterance8,d$TargetColor8,d$OtherColor8,d$Slider08,d$Slider18, d$Slider28,d$Slider38,d$Slider48,d$Slider58,d$Slider68,d$Slider78, d$Slider88,d$Slider98,d$Slider108)
d$Trial10 = paste(d$Utterance9,d$TargetColor9,d$OtherColor9,d$Slider09,d$Slider19, d$Slider29,d$Slider39,d$Slider49,d$Slider59,d$Slider69,d$Slider79, d$Slider89,d$Slider99,d$Slider109)
d$Trial11 = paste(d$Utterance10,d$TargetColor10,d$OtherColor10,d$Slider010,d$Slider110, d$Slider210,d$Slider310,d$Slider410,d$Slider510,d$Slider610,d$Slider710, d$Slider810,d$Slider910,d$Slider1010)
return(d)
}
getGender <- function(dd) {
genders = data.frame(Name = c("Alex", "Ben", "Calvin", "Dan", "Ted", "Max","Ann", "Liz", "Diane","Amy", "Marie", "Jane"), Gender = c(rep("male",6),rep("female",6)))
row.names(genders) = genders$Name
for (i in seq(0, 23)) {
dd[,paste("Gender",i,sep="")] = genders[as.character(dd[,paste("Speaker",i,sep="")]),]$Gender
}
return(dd)
}
getQUD <- function(qud) {
#print(qud)
if (length(grep("How many", qud)) > 0) {
return("HowMany?")
} else {
if (length(grep("all", qud)) > 0) {
return("All?")
} else {
if (length(grep("Are any", qud)) > 0) {
return("Any?")
} else {
return("ERROR!")
}
}
}
}
myCenter <- function(x) {
if (is.numeric(x)) { return(x - mean(x)) }
if (is.factor(x)) {
x <- as.numeric(x)
return(x - mean(x))
}
if (is.data.frame(x) || is.matrix(x)) {
m <- matrix(nrow=nrow(x), ncol=ncol(x))
colnames(m) <- paste("c", colnames(x), sep="")
for (i in 1:ncol(x)) {
if (is.factor(x[,i])) {
y <- as.numeric(x[,i])
m[,i] <- y - mean(y, na.rm=T)
}
if (is.numeric(x[,i])) {
m[,i] <- x[,i] - mean(x[,i], na.rm=T)
}
}
return(as.data.frame(m))
}
}
se <- function(x)
{
y <- x[!is.na(x)] # remove the missing values, if any
sqrt(var(as.vector(y))/length(y))
}
zscore <- function(x){
## Returns z-scored values
x.mean <- mean(x)
x.sd <- sd(x)
x.z <- (x-x.mean)/x.sd
return(x.z)
}
zscoreByGroup <- function(x, groups){
#Compute zscores within groups
out <- rep(NA, length(x))
for(i in unique(groups)){
out[groups == i] <- zscore(x[groups == i])
}
return(out)
}
## for bootstrapping 95% confidence intervals
library(bootstrap)
theta <- function(x,xdata,na.rm=T) {mean(xdata[x],na.rm=na.rm)}
ci.low <- function(x,na.rm=T) {
mean(x,na.rm=na.rm) - quantile(bootstrap(1:length(x),1000,theta,x,na.rm=na.rm)$thetastar,.025,na.rm=na.rm)}
ci.high <- function(x,na.rm=T) {
quantile(bootstrap(1:length(x),1000,theta,x,na.rm=na.rm)$thetastar,.975,na.rm=na.rm) - mean(x,na.rm=na.rm)}
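
## Illustrative usage sketch (added here; not part of the original analysis code).
## Made-up data: ci.low()/ci.high() return the distance from the mean to the
## bootstrapped 2.5% / 97.5% quantiles, so the 95% CI runs from mean(x) - ci.low(x)
## to mean(x) + ci.high(x); zscoreByGroup() standardizes within each group.
# x <- rnorm(100, mean = 5)
# c(lower = mean(x) - ci.low(x), upper = mean(x) + ci.high(x))
# g <- rep(c("a", "b"), each = 50)
# head(zscoreByGroup(x, g))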
/corpus_results/rscripts/helpers.R
rm(list=ls())
library(dplyr)
library(tidyr)
base_wd <- 'C:\\Users\\Edwin\\Desktop\\Edwin(10-08-2012)\\Assignments and Modules\\Coursera\\Data Science - John Hopkins\\Capstone\\'
# load(file = paste(base_wd, 'tokens_df.RData', sep =""))
cleaningQuote <- function(my_df, col.names){
# Replace the "'" characters except the contractions
# contractions: "n't" , "'s", "i'm", "'ve", "'d", "'ll", "o'"
for(i in 1:length(col.names)){
dummy_str <- as.vector(my_df[[col.names[i]]])
dummy_str <- gsub("(^')|('$)", "", x = dummy_str)
contraction_flag1 <- grepl("^.*(n\\'t)$", dummy_str)
contraction_flag2 <- grepl("^.*(\\'s)$", dummy_str)
contraction_flag3 <- grepl("^i\\'m$", dummy_str)
contraction_flag4 <- grepl("^.*(\\'ve)$", dummy_str)
contraction_flag5 <- grepl("^.*(\\'d)$", dummy_str)
contraction_flag6 <- grepl("^.*(\\'ll)$", dummy_str)
contraction_flag7 <- grepl("^(o\\').*$", dummy_str)
contraction_flags <- contraction_flag1 | contraction_flag2 | contraction_flag3 |
contraction_flag4 | contraction_flag5 | contraction_flag6 | contraction_flag7
dummy_str[!contraction_flags] <- gsub("'", "", x = dummy_str[!contraction_flags])
    #Replace <s> by s1 and <e> by e1 because of the html problem etc.
dummy_str <- gsub('<s>', 's1', dummy_str)
dummy_str <- gsub('<e>', 'e1', dummy_str)
dummy_str <- gsub('<|>', '', dummy_str)
my_df[, names(my_df) == col.names[i]] <- dummy_str
}
return(my_df)
}
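# Illustrative sketch (toy input, not from the original data): cleaningQuote() keeps the
# listed contractions, strips other apostrophes, and rewrites the <s>/<e> markers, e.g.
# toy <- data.frame(Token1 = c("don't", "'hello'", "<s>"), stringsAsFactors = FALSE)
# cleaningQuote(toy, col.names = "Token1")$Token1   # expected: "don't" "hello" "s1"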
# test_str <- c("we'll", "we've", "it's", "'i've", "it's'", "o'clock", "ep[rtl[eprtl", "i'd")
#
# grepl("^(?!it)$", test_str, perl = T)
# grepl("((we)|(i)|(you)|(they))'ve", test_str)
# gsub("(?!(((we)|(i)|(you)|(they))'ve))", "", test_str, perl = T)
# gsub("(^')|('$)", "", x = test_str)
##Construct Data Frame for Training
train_trigrams_df <- read.table(paste(base_wd, 'tri_train_py.txt', sep =""),
sep = "\t", header = F, quote = "\"", stringsAsFactors = FALSE)
names(train_trigrams_df) <- c('Token1', 'Token2', 'Token3', 'cnt')
train_trigrams_df <- cleaningQuote(train_trigrams_df, col.names = c('Token1', 'Token2', 'Token3'))
train_trigrams_df <- train_trigrams_df %>% group_by(Token1, Token2, Token3) %>% summarise(cnt = sum(cnt)) %>% ungroup()
train_trigrams_df <- train_trigrams_df %>% filter(cnt > 2)
train_trigrams_df <- train_trigrams_df %>% arrange(desc(cnt))
train_bigrams_df <- read.table(paste(base_wd, 'bi_train_py.txt', sep =""),
sep = "\t", header = F, quote = "\"", stringsAsFactors = FALSE)
names(train_bigrams_df) <- c('Token1', 'Token2', 'cnt')
train_bigrams_df <- cleaningQuote(train_bigrams_df, col.names = c('Token1', 'Token2'))
train_bigrams_df <- train_bigrams_df %>% group_by(Token1, Token2) %>% summarise(cnt = sum(cnt)) %>% ungroup()
train_bigrams_df <- train_bigrams_df %>% filter(cnt > 2)
train_bigrams_df <- train_bigrams_df %>% arrange(desc(cnt))
train_unigrams_df <- read.table(paste(base_wd, 'uni_train_py.txt', sep =""),
sep = "\t", header = F, quote = "\"", stringsAsFactors = FALSE)
names(train_unigrams_df) <- c('Token', 'cnt')
train_unigrams_df <- cleaningQuote(train_unigrams_df, col.names = c('Token'))
train_unigrams_df <- train_unigrams_df %>% group_by(Token) %>% summarise(cnt = sum(cnt)) %>% ungroup()
train_unigrams_df <- train_unigrams_df %>% filter(cnt > 2)
train_unigrams_df <- train_unigrams_df %>% arrange(desc(cnt))
##Construct Data Frame for Validation
valid_trigrams_df <- read.table(paste(base_wd, 'tri_valid_py.txt', sep =""),
sep = "\t", header = F, quote = "\"", stringsAsFactors = FALSE)
names(valid_trigrams_df) <- c('Token1', 'Token2', 'Token3', 'cnt')
valid_trigrams_df <- cleaningQuote(valid_trigrams_df, col.names = c('Token1', 'Token2', 'Token3'))
valid_trigrams_df <- valid_trigrams_df %>% group_by(Token1, Token2, Token3) %>% summarise(cnt = sum(cnt)) %>% ungroup()
valid_trigrams_df <- valid_trigrams_df %>% filter(cnt > 2)
valid_trigrams_df <- valid_trigrams_df %>% arrange(desc(cnt))
##Construct Data Frame for Testing
test_trigrams_df <- read.table(paste(base_wd, 'tri_test_py.txt', sep =""),
sep = "\t", header = F, quote = "\"", stringsAsFactors = FALSE)
names(test_trigrams_df) <- c('Token1', 'Token2', 'Token3', 'cnt')
test_trigrams_df <- cleaningQuote(test_trigrams_df, col.names = c('Token1', 'Token2', 'Token3'))
test_trigrams_df <- test_trigrams_df %>% group_by(Token1, Token2, Token3) %>% summarise(cnt = sum(cnt)) %>% ungroup()
test_trigrams_df <- test_trigrams_df %>% filter(cnt > 2)
test_trigrams_df <- test_trigrams_df %>% arrange(desc(cnt))
#Save as .RData
save(train_trigrams_df,
train_bigrams_df,
train_unigrams_df,
file = paste(base_wd, 'train_df.RData', sep =""))
save(valid_trigrams_df, file = paste(base_wd, 'valid_df.RData', sep =""))
save(test_trigrams_df, file = paste(base_wd, 'test_df.RData', sep =""))
load(paste(base_wd, file = 'train_df.RData', sep =""))
load(paste(base_wd, file = 'valid_df.RData', sep =""))
load(paste(base_wd, file = 'test_df.RData', sep =""))
cleaningMeaningLess <- function(my_df, col.names){
for(i in 1:length(col.names)){
dummy_str <- as.vector(my_df[[col.names[i]]])
dummy_str <- gsub("(^')|('$)", "", x = dummy_str)
bad_word_flag <- grepl("^[b-hj-z]$", dummy_str)
my_df <- my_df[!bad_word_flag, ]
}
return(my_df)
}
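# Illustrative sketch (toy input): cleaningMeaningLess() drops rows whose token is a single
# letter other than "a" or "i" (regex ^[b-hj-z]$), e.g.
# toy <- data.frame(Token1 = c("a", "q", "the"), cnt = 1:3, stringsAsFactors = FALSE)
# cleaningMeaningLess(toy, col.names = "Token1")   # expected to keep the "a" and "the" rows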
train_trigrams_df <- cleaningMeaningLess(train_trigrams_df, col.names = c('Token1', 'Token2', 'Token3'))
train_bigrams_df <- cleaningMeaningLess(train_bigrams_df, col.names = c('Token1', 'Token2'))
train_unigrams_df <- cleaningMeaningLess(train_unigrams_df, col.names = c('Token'))
valid_trigrams_df <- cleaningMeaningLess(valid_trigrams_df, col.names = c('Token1', 'Token2', 'Token3'))
test_trigrams_df <- cleaningMeaningLess(test_trigrams_df, col.names = c('Token1', 'Token2', 'Token3'))
save(train_trigrams_df,
train_bigrams_df,
train_unigrams_df,
file = paste(base_wd, 'train_df.RData', sep =""))
save(valid_trigrams_df, file = paste(base_wd, 'valid_df.RData', sep =""))
save(test_trigrams_df, file = paste(base_wd, 'test_df.RData', sep =""))
load(paste(base_wd, file = 'train_df.RData', sep =""))
load(paste(base_wd, file = 'valid_df.RData', sep =""))
load(paste(base_wd, file = 'test_df.RData', sep =""))
# predictBigram <- function(word){
#
# model_df <- rbind(news_bigrams_df, twitter_bigrams_df, blogs_bigrams_df)
# word = tolower(word)
#
# word_df <- model_df %>% filter(Token1 == word)
# if(nrow(word_df) > 1){
#
# word_p_df <- data.frame(p = word_df$cnt/sum(word_df$cnt),
# Input = word, Word_predicted = word_df$Token2)
#
# return(word_p_df)
# }else{
#
# return(NULL)
#
# }
# }
#
# predictTrigram <- function(words, model_df, discountSW = 0){
#
# words = tolower(words)
#
# word_df <- model_df %>% filter(Token1 == words[1] & Token2 == words[2])
# if(nrow(word_df) > 1){
#
# word_df <- word_df %>% group_by(Token1, Token2, Token3) %>% summarise(cnt = sum(cnt)) %>% ungroup()
#
# word_p_df <- data.frame(p = word_df$cnt/sum(word_df$cnt),
# Input = paste(words, collapse = ' '), Word_predicted = word_df$Token3)
# word_p_df <- word_p_df %>% arrange(desc(p))
# return(word_p_df)
#
# }else{
#
# return(NULL)
#
# }
# }
#Let's Validate the model
head(valid_trigrams_df)
validation_result <- valid_trigrams_df %>%
filter(!Token1 %in% c('s1', 's2') & !Token2 %in% c('s1', 's2', 'e1' ,'e2') & !Token3 %in% c('e1', 'e2')) %>%
rename(
Input_tk1 = Token1,
Input_tk2 = Token2,
Input_tk3 = Token3,
Input_cnt = cnt)
# validation_result$prob <- NA
head(validation_result)
freqThreshold_trigram <- 5
freqThreshold_bigram <- 50
freqThreshold_unigram <- 1000
train_trigrams_df2 <- train_trigrams_df %>%
filter(!Token1 %in% c('s1', 's2') & !Token2 %in% c('s1', 's2', 'e1' ,'e2') & !Token3 %in% c('e1', 'e2')) %>%
filter(cnt >= freqThreshold_trigram) %>%
rename(Tri_tk1 = Token1,
Tri_tk2 = Token2,
Tri_tk3 = Token3)
train_bigrams_df2 <- train_bigrams_df %>%
filter(!Token1 %in% c('s1', 's2') & !Token2 %in% c('e1', 'e2')) %>%
filter(cnt >= freqThreshold_bigram) %>%
rename(Bi_tk1 = Token1,
Bi_tk2 = Token2)
train_unigrams_df2 <- train_unigrams_df %>%
  filter(!Token %in% c('s1', 's2', 'e1', 'e2')) %>%
filter(cnt >= freqThreshold_unigram) %>%
rename(Uni_tk1 = Token)
rm(train_trigrams_df, train_bigrams_df, train_unigrams_df)
# validation_result_test <- validation_result[1:10, ]
###############################################################################################################
#
# Ranking Approach
#
###############################################################################################################
# trigram_result <- validation_result %>%
# left_join(train_trigrams_df2, by = c('Input_tk1' = 'Tri_tk1', 'Input_tk2' = 'Tri_tk2')) %>%
# group_by(Input_tk1, Input_tk2, Input_tk3) %>% mutate(cnt_rk = rank(desc(cnt))) %>% filter(cnt_rk <= 5) %>% ungroup() %>%
# mutate(pred_correct = ifelse(Input_tk3 == Tri_Tk3, 1, 0)) %>% group_by(Input_tk1, Input_tk2, Input_tk3) %>%
# summarise(pred_correct = sum(pred_correct)) %>% ungroup()
#
# trigram_result <- trigram_result %>% inner_join(validation_result,
# by = c('Input_tk1', 'Input_tk2', 'Input_tk3'))
#
# trigram_result <- trigram_result %>% arrange(desc(Input_cnt))
#
# validation_unseen <- validation_result2 %>% filter(is.na(validation_result2$pred_correct))
#
# ## Prediction Accuracy on Trigram
# sum(validation_result2$pred_correct * validation_result2$Input_cnt, na.rm = T)/sum(validation_result2$Input_cnt, na.rm = T)
#
#
# #Use Bigram for unseen
# validation_unseen_bigram <- validation_unseen %>% left_join(train_bigrams_df2, by = c('Input_tk2' = 'Bi_tk1')) %>%
# group_by(Input_tk1, Input_tk2, Input_tk3) %>% mutate(cnt_rk = rank(desc(cnt))) %>% filter(cnt_rk <= 5) %>% ungroup() %>%
# mutate(pred_correct = ifelse(Input_tk3 == Bi_tk2, 1, 0)) %>% group_by(Input_tk1, Input_tk2, Input_tk3) %>%
# summarise(pred_correct = sum(pred_correct)) %>% ungroup()
#
# validation_unseen_bigram <- validation_unseen_bigram %>% inner_join(validation_result,
# by = c('Input_tk1', 'Input_tk2', 'Input_tk3'))
#
# validation_unseen_bigram$pred_correct[is.na(validation_unseen_bigram$pred_correct)] <- 0
# sum(validation_unseen_bigram$pred_correct * validation_unseen_bigram$Input_cnt, na.rm = T)/
# sum(validation_unseen_bigram$Input_cnt, na.rm = T)
###############################################################################################################
#
# Probability Approach
#
###############################################################################################################
# validation_result_test <- validation_result[1:10, ]
#Trigram
trigram_result <- validation_result %>%
left_join(train_trigrams_df2, by = c('Input_tk1' = 'Tri_tk1', 'Input_tk2' = 'Tri_tk2')) %>%
  mutate(pred_cnt = ifelse(Input_tk3 == Tri_tk3, cnt, 0)) %>% group_by(Input_tk1, Input_tk2, Input_tk3) %>%
summarise(pred_prob_tri = sum(pred_cnt)/sum(cnt)) %>% ungroup()
trigram_result <- trigram_result %>% inner_join(validation_result,
by = c('Input_tk1', 'Input_tk2', 'Input_tk3'))
trigram_result <- trigram_result %>% arrange(desc(Input_cnt))
#Bigram
validation_result_bi <- validation_result %>% group_by(Input_tk2, Input_tk3) %>%
summarise(Input_cnt = sum(Input_cnt)) %>% ungroup()
# x <- validation_result_bi %>% filter(Input_tk2 == 'a')
# validation_result_test <- validation_result_bi[1:10, ]
bigram_result <- validation_result_bi %>%
inner_join(train_bigrams_df2, by = c('Input_tk2' = 'Bi_tk1')) %>%
mutate(pred_cnt = ifelse(Input_tk3 == Bi_tk2, cnt, 0)) %>% group_by(Input_tk2, Input_tk3) %>%
summarise(pred_prob_bi = sum(pred_cnt)/sum(cnt)) %>% ungroup()
unigram_result <- train_unigrams_df2 %>% mutate(pred_prob_uni = cnt/sum(cnt)) %>% dplyr::select(-cnt)
trigram_result$tri_no_result <- 0
trigram_result$tri_no_result[is.na(trigram_result$pred_prob_tri)] <- 1
trigram_result$pred_prob_tri[is.na(trigram_result$pred_prob_tri)] <- 0
bigram_result$bi_no_result <- 0
bigram_result$bi_no_result[is.na(bigram_result$pred_prob_bi)] <- 1
bigram_result$pred_prob_bi[is.na(bigram_result$pred_prob_bi)] <- 0
analysis_result <- trigram_result %>% left_join(bigram_result, by = c('Input_tk2', 'Input_tk3')) %>%
left_join(unigram_result, by = c('Input_tk3' = 'Uni_tk1'))
analysis_result$pred_prob_uni[is.na(analysis_result$pred_prob_uni)] <- 0
analysis_result$pred_prob_bi[is.na(analysis_result$pred_prob_bi)] <- 0
analysis_result$bi_no_result[is.na(analysis_result$bi_no_result)] <- 1
save(analysis_result, file = paste(base_wd, 'analysis_result.RData', sep =""))
load(file = paste(base_wd, 'analysis_result.RData', sep =""))
analysis_result$dummy <- 1
# library(glmnet)
# x <- model.matrix(dummy ~ 0 + pred_prob_tri + pred_prob_bi + pred_prob_uni +
# I(pred_prob_bi * tri_no_result) + I(pred_prob_uni * tri_no_result) +
# I(bi_no_result * pred_prob_uni) + I(bi_no_result * tri_no_result * pred_prob_uni),
# data = analysis_result)
#
# y <- analysis_result$dummy
glm_fit <- glm(dummy ~ 0 + pred_prob_tri +
pred_prob_bi +
# pred_prob_uni +
I(pred_prob_bi * tri_no_result),
# I(pred_prob_uni * tri_no_result) +
# I(bi_no_result * pred_prob_uni) + I(bi_no_result * tri_no_result * pred_prob_uni),
data = analysis_result,
weights = Input_cnt^0.3)
summary(glm_fit)
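# Sketch of how the fitted weights combine the n-gram probabilities (hypothetical numbers):
# for a candidate word with trigram probability 0.30 and bigram probability 0.10, the blended
# score used in predictNextWord() below is roughly coefs[1] * 0.30 + coefs[2] * 0.10; the
# interaction term contributes only when the trigram context was unseen (tri_no_result == 1).
# coefs <- coef(glm_fit)
# coefs[1] * 0.30 + coefs[2] * 0.10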
# cv_fit <- cv.glmnet(x = x, y = y, weights = analysis_result$Input_cnt, alpha = 0.5, intercept = FALSE)
# lambda_min <- glmnet_fit$lambda.min
# glmnet_fit <- glmnet(x = x, y = y, weights = analysis_result$Input_cnt, alpha = 0.5, intercept = FALSE,
# lambda = lambda_min)
#
# coef(glmnet_fit)
coef_fit <- coef(glm_fit)
save(glm_fit, file = paste(base_wd, 'glm_fit.RData', sep =""))
save(coef_fit, file = paste(base_wd, 'coef_fit.RData', sep =""))
#Average Predictability...
sum(glm_fit$fitted.values * analysis_result$Input_cnt)/sum(analysis_result$Input_cnt)
load(file = paste(base_wd, 'glm_fit.RData', sep =""))
##########################################################################
#
# Write a Function to produce predicted words and probability...
#
##########################################################################
##Use Test Set...
freqThreshold_trigram <- 5
freqThreshold_bigram <- 50
freqThreshold_unigram <- 1000
# train_trigrams_df2 <- train_trigrams_df %>%
# filter(!Token1 %in% c('s1', 's2') & !Token2 %in% c('s1', 's2', 'e1' ,'e2') & !Token3 %in% c('e1', 'e2')) %>%
# filter(cnt >= freqThreshold_trigram) %>%
# rename(Tri_tk1 = Token1,
# Tri_tk2 = Token2,
# Tri_Tk3 = Token3)
#
# train_bigrams_df2 <- train_bigrams_df %>%
# filter(!Token1 %in% c('s1', 's2') & !Token2 %in% c('e1', 'e2')) %>%
# filter(cnt >= freqThreshold_bigram) %>%
# rename(Bi_tk1 = Token1,
# Bi_tk2 = Token2)
#
# train_unigrams_df2 <- train_unigrams_df %>%
# filter(!Token %in% c('<s>', '<e>')) %>%
# filter(cnt >= freqThreshold_unigram) %>%
# rename(Uni_tk1 = Token)
#Trigram
test_trigrams_df <- test_trigrams_df %>%
filter(!Token1 %in% c('s1', 's2') & !Token2 %in% c('s1', 's2', 'e1' ,'e2') & !Token3 %in% c('e1', 'e2')) %>%
rename(
Input_tk1 = Token1,
Input_tk2 = Token2,
Input_tk3 = Token3,
Input_cnt = cnt)
# load(paste(base_wd, file = 'train_df.RData', sep =""))
# load(paste(base_wd, file = 'valid_df.RData', sep =""))
# load(paste(base_wd, file = 'test_df.RData', sep =""))
# tri_df / bi_df are the filtered + renamed n-gram tables expected by predictNextWord() below
tri_df <- train_trigrams_df2
bi_df <- train_bigrams_df2
save(tri_df, bi_df,
     file = paste(base_wd, 'simple_train_df.RData', sep =""))
# Example lookup (assumes predictNextWord(), defined below, has already been sourced)
predictNextWord(test_trigrams_df$Input_tk1[15], test_trigrams_df$Input_tk2[15],
                tri_df, bi_df, coef_fit = glm_fit)
predictNextWord <- function(
Input_tk1,
Input_tk2,
tri_df, bi_df,
freqThreshold_trigram = 5, freqThreshold_bigram = 50, coef_fit){
# Input_tk1 <- test_trigrams_df$Input_tk1[1]
# Input_tk2 <- test_trigrams_df$Input_tk2[1]
# tri_df <- train_trigrams_df
# bi_df <- train_bigrams_df
# coef_fit <- glm_fit
# tri_df <- tri_df %>%
# filter(!Token1 %in% c('s1', 's2') & !Token2 %in% c('s1', 's2', 'e1' ,'e2') & !Token3 %in% c('e1', 'e2')) %>%
# filter(cnt >= freqThreshold_trigram) %>%
# rename(Tri_tk1 = Token1,
# Tri_tk2 = Token2,
# Tri_tk3 = Token3)
#
# bi_df <- bi_df %>%
# filter(!Token1 %in% c('s1', 's2') & !Token2 %in% c('e1', 'e2')) %>%
# filter(cnt >= freqThreshold_bigram) %>%
# rename(Bi_tk1 = Token1,
# Bi_tk2 = Token2)
coefs <- coef(coef_fit)
trigram_result <- tri_df %>% filter(Tri_tk1 == Input_tk1 & Tri_tk2 == Input_tk2)
bigram_result <- bi_df %>% filter(Bi_tk1 == Input_tk2)
if(nrow(trigram_result) > 0){
trigram_result <- trigram_result %>% mutate(tri_p = cnt/sum(cnt)) %>% dplyr::select(-cnt)
if(nrow(bigram_result)){
bigram_result <- bigram_result %>% mutate(bi_p = cnt/sum(cnt)) %>% dplyr::select(-cnt)
combined_result <- trigram_result %>% full_join(bigram_result, by = c('Tri_tk2' = 'Bi_tk1', 'Tri_tk3' = 'Bi_tk2'))
combined_result$tri_p[is.na(combined_result$tri_p)] <- 0
combined_result$bi_p[is.na(combined_result$bi_p)] <- 0
combined_result$p <- combined_result$tri_p * coefs[1] + combined_result$bi_p * coefs[2]
output <- combined_result %>% mutate(rk = rank(desc(p))) %>% filter(rk <= 5)
output <- output %>% arrange(rk)
return(output)
}else{
trigram_result$p <- trigram_result$tri_p
trigram_result <- trigram_result %>% arrange(desc(p))
output <- trigram_result %>% mutate(rk = rank(desc(p))) %>% filter(rk <= 5)
output <- output %>% arrange(rk)
return(output)
}
}else{
if(nrow(bigram_result) > 0){
bigram_result <- bigram_result %>% mutate(bi_p = cnt/sum(cnt)) %>% dplyr::select(-cnt)
bigram_result$p <- bigram_result$bi_p
output <- bigram_result %>% mutate(rk = rank(desc(p))) %>% filter(rk <= 5)
output <- output %>% arrange(rk)
return(output)
}else{
return(NULL)
}
}
}
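# Back-off behaviour in words (hypothetical contexts): when the two-word context is found in the
# trigram table, the trigram and bigram probabilities are blended with the glm coefficients; if
# only the last word is known, the bigram table alone is used; a context absent from both tables
# returns NULL, e.g.
# predictNextWord("completely", "unseen", tri_df, bi_df, coef_fit = glm_fit)   # NULL if absent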
|
/ngram_Model_20161208.R
|
no_license
|
towinazure/DS-Capstone
|
R
| false | false | 18,180 |
r
|
library(shinyEventLogger)
### Name: run_demo
### Title: Run demo shiny app
### Aliases: run_demo run_demo_dashboard
### ** Examples
## No test:
if (interactive()) {
run_demo(in_background = TRUE)
run_demo_dashboard()
}
## End(No test)
|
/data/genthat_extracted_code/shinyEventLogger/examples/run_demo.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 247 |
r
|
#' Propagate Landsat attributes through transformations
#'
#' This function applies a raster transformation function (a function that takes
#' a RasterStack/RasterBrick and returns a RasterStack/RasterBrick) to a
#' \link{landsat_scene} or landsat_scene_df (from \link{landsat_load_scenes}).
#'
#' @param x A \link{landsat_scene} or landsat_scene_df (from \link{landsat_load_scenes})
#' @param fun A function that takes a RasterStack/RasterBrick and returns a
#' RasterStack/RasterBrick
#' @param ... Arguments to be passed to \code{fun}
#'
#' @return A \link{landsat_scene}, RasterStack, RasterBrick, or landsat_scene_df
#' @export
#'
landsat_transform <- function(x, ...) UseMethod("landsat_transform")
#' @rdname landsat_transform
#' @export
landsat_transform.default <- function(x, fun, ...) {
  # this is a base method that applies a function to a landsat_scene and propagates
# the landsat_attrs
# make sure input is a landsat scene
if(!is.landsat_scene(x)) stop("Use landsat_scene() to create a landsat scene")
# apply method
transformed <- fun(x, ...)
  # propagate attributes
attr(transformed, "landsat_attrs") <- attr(x, "landsat_attrs")
# return transformed version
transformed
}
#' @rdname landsat_transform
#' @export
landsat_transform.landsat_scene_list <- function(x, fun, ...) {
structure(
lapply(x, function(scene) landsat_transform.default(scene, fun, ...)),
class = "landsat_scene_list"
)
}
#' @rdname landsat_transform
#' @export
landsat_transform.landsat_scene_df <- function(x, fun, ...) {
x$scene <- landsat_transform(x$scene, fun, ...)
x
}
#' @rdname landsat_transform
#' @export
landsat_transform.data.frame <- function(x, fun, ...) {
if("scene" %in% colnames(x)) {
x$scene <- landsat_transform(x$scene, fun, ...)
class(x) <- c("landsat_scene_df", class(x))
x
} else {
stop("x must have a scene column to be passed to landsat_transform")
}
}
#' Crop Landsat objects to a boundary
#'
#' This function crops a \link{landsat_scene} or landsat_scene_df
#' (as generated by \link{landsat_load_scenes}) to a spatial object
#' describing a boundary. This is usually an object of type
#' \link[sp]{SpatialPolygons-class}, but could also be from the
#' sf package, a \link[sp]{bbox}, or an \link[raster]{extent}.
#'
#' @param x A \link{landsat_scene} or landsat_scene_df (from \link{landsat_load_scenes})
#' @param boundary Usually an object of type
#' \link[sp]{SpatialPolygons-class}, but could also be from the
#' sf package, a \link[sp]{bbox}, or an \link[raster]{extent}.
#'
#' @return A landsat_scene or landsat_scene_df
#' @export
#'
landsat_crop <- function(x, boundary) {
# sanitize boundary
boundary_proj <- sanitize_boundary(x, boundary)
  # use landsat_transform to crop and propagate attributes
landsat_transform(x, raster::crop, boundary_proj)
}
#' @rdname landsat_crop
#' @export
landsat_mask <- function(x, boundary) {
# sanitize boundary
boundary_proj <- sanitize_boundary(x, boundary)
  # use landsat_transform to mask and propagate attributes
landsat_transform(x, raster::mask, boundary_proj)
}
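# Illustrative usage (hypothetical objects, not examples shipped with this file): `scene` is a
# landsat_scene and `aoi` a SpatialPolygons/sf polygon; the boundary is reprojected to the scene
# CRS internally before cropping or masking:
# cropped <- landsat_crop(scene, aoi)
# masked <- landsat_mask(cropped, aoi)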
#' Project Landsat objects
#'
#' This function projects a \link{landsat_scene} or landsat_scene_df
#' (as generated by \link{landsat_load_scenes}) using an object
#' describing a CRS. This is usually an object of type
#' \link[sp]{CRS}, but could also be from the
#' sf package (\link[sf]{st_crs}), an integer describing an EPSG code,
#' or a character string describing proj4 arguments.
#'
#' @param x A \link{landsat_scene} or landsat_scene_df (from \link{landsat_load_scenes})
#' @param crs_obj Usually an object of type
#' \link[sp]{CRS}, but could also be from the
#' sf package (\link[sf]{st_bbox}), an integer describing an EPSG code,
#' or a character string describing proj4 arguments.
#'
#' @return A landsat_scene or landsat_scene_df
#' @export
#'
landsat_project <- function(x, crs_obj) {
  # use landsat_transform to call projectRaster and propagate attributes
landsat_transform(x, raster::projectRaster, sanitize_CRS(crs_obj))
}
#' Overlay a function on a Landsat object
#'
#' This function makes it easy to calculate indices such as NDWI, NDVI,
#' etc. on \link{landsat_scene} objects.
#'
#' @param x A \link{landsat_scene} or landsat_scene_df (from \link{landsat_load_scenes})
#' @param fun Function with formal arguments B1, B2, B3, etc.
#' @param ... Passed to raster::\link[raster]{overlay}
#'
#' @return A landsat_scene or landsat_scene_df
#' @export
#'
landsat_overlay <- function(x, fun, ...) {
  # use landsat_transform to handle multiple classes and to propagate the call to landsat_overlay_base
transformed <- landsat_transform(x, landsat_overlay_base, fun, ...)
# custom landsat_scene classes aren't really applicable anymore
attr(transformed, "landsat_attrs") <- NULL
class(transformed) <- class(transformed)[!grepl("landsat", class(transformed))]
transformed
}
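# Illustrative sketch (hypothetical scene and band names): assuming `scene` is a landsat_scene
# whose bands include B4 (red) and B5 (near infrared), NDVI could be computed as
# ndvi <- landsat_overlay(scene, fun = function(B4, B5) (B5 - B4) / (B5 + B4))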
landsat_overlay_base <- function(x, fun, ...) {
# ensure scene is a landsat_scene
if(!is.landsat_scene(x)) stop("Cannot use landsat_overlay without a landsat_scene")
# ensure fun is a function
fun <- match.fun(fun)
# extract arguments of the function
fun_args <- names(formals(fun))
# extract bands available from landsat_attrs
band_names <- attr(x, "landsat_attrs")$.band_names
  # check that all of fun's arguments are among the available band names
missing_args <- fun_args[!(fun_args %in% band_names)]
  if(length(missing_args) > 0) stop("The following bands are missing in scene that are required by fun: ",
paste(missing_args, collapse = ", "))
  # modify function arguments to include all the band names so that raster::overlay can be used
formals(fun) <- stats::setNames(rep(list(rlang::missing_arg()), length(band_names)), band_names)
# return result of raster::overlay
raster::overlay(x, fun = fun, ...)
}
# private method to sanitize various objects that get passed as the boundary
sanitize_boundary <- function(x, boundary) {
# make sure boundary is an sp object
if(inherits(boundary, "sf") || inherits(boundary, "sfc")) {
# sf objects can be coerced to sp objects
boundary <- methods::as(boundary, "Spatial")
} else if(inherits(boundary, "matrix") &&
(nrow(boundary) == 2) && (ncol(boundary) == 2)) {
# bbox objects get turned into polygons with same CRS as x
boundary <- bbox_as_spatial(
      xmin = boundary[1, 1],
      xmax = boundary[1, 2],
      ymin = boundary[2, 1],
      ymax = boundary[2, 2],
n = 10,
crs = x@crs
)
} else if(inherits(boundary, "bbox")) {
boundary <- bbox_as_spatial(
xmin = boundary["xmin"],
xmax = boundary["xmax"],
ymin = boundary["ymin"],
ymax = boundary["ymax"],
n = 10,
crs = x@crs
)
} else if(methods::is(boundary, "Extent")) {
boundary <- bbox_as_spatial(
xmin = boundary@xmin,
xmax = boundary@xmax,
ymin = boundary@ymin,
ymax = boundary@ymax,
n = 10,
crs = x@crs
)
}
# make sure boundary is in the same crs as the scene object
boundary_proj <- sp::spTransform(boundary, x@crs)
# return projected boundary
boundary_proj
}
# private method to turn bounding boxes into spatial objects
bbox_as_spatial <- function(xmin, xmax, ymin, ymax, n = 10, crs = sp::CRS(NA_character_)) {
# generate a polygon with the corners, and the dots filled in
coords <- unique(rbind(
data.frame(x = xmin, y = seq(ymin, ymax, length.out = n)),
data.frame(x = seq(xmin, xmax, length.out = n), y = ymax),
data.frame(x = xmax, y = seq(ymax, ymin, length.out = n)),
data.frame(x = seq(xmax, xmin, length.out = n), y = ymin)
))
# convert to spatial polygons object
sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(coords)), 1)), proj4string = crs)
}
# private method to sanitize CRS objects
sanitize_CRS <- function(x) UseMethod("sanitize_CRS")
sanitize_CRS.CRS <- function(x) x
sanitize_CRS.numeric <- function(x) sp::CRS(sprintf("+init=epsg:%d", x))
sanitize_CRS.character <- function(x) sp::CRS(x)
sanitize_CRS.crs <- function(x) sp::CRS(x$proj4string)
|
/R/landsat_transform.R
|
no_license
|
paleolimbot/landsatutils
|
R
| false | false | 8,049 |
r
|
test_that("use_testthat_setup()", {
withr::local_options(usethis.quiet = TRUE)
local_temp_dir()
local_proj_set()
expect_error(use_testthat_setup())
fs::dir_create(path("tests", "testthat"))
use_testthat_setup(open = FALSE)
expect_snapshot_file(path("tests", "testthat", "setup.R"))
})
|
/tests/testthat/test-use_testthat_setup.R
|
permissive
|
emptyfield-ds/opensky
|
R
| false | false | 302 |
r
|
/img_mas_ejemplos/ejemplo_psr_01.R
|
no_license
|
EncisoAlvaJC/TESIS
|
R
| false | false | 8,472 |
r
| ||
#' Get the Constant for Consistency for the M-Scale Using the Bisquare Rho Function
#' @param delta desired breakdown point (between 0 and 0.5)
#'
#' @return consistency constant
#' @keywords internal
#' @importFrom stats pnorm uniroot
#' @importFrom rlang abort
.bisquare_consistency_const <- function (delta) {
##
## Pre-computed values for some delta values
##
eps <- sqrt(.Machine$double.eps)
if (!isTRUE(delta < 0.5 + eps && delta > -eps)) {
abort("`delta` is outside valid bounds")
}
if (abs(delta - 0.5) < eps) {
return(1.5476450)
} else if (abs(delta - 0.25) < eps) {
return(2.937015)
} else if (abs(delta - 0.1) < eps) {
return(5.182361)
} else if (delta < 0.005) {
return(50) # ~.1% bdp for bisquare
}
integral_interval <- if (delta > 0.1) {
c(1.5, 5.5)
} else {
c(5, 25)
}
# For bisquare we have the closed form solution to the expectation
expectation <- function(cc, delta) {
pnorm.mcc <- 2 * pnorm(-cc)
1/cc^6 * exp(-(cc^2/2)) * (
-cc * (15 - 4 * cc^2 + cc^4) * sqrt(2 / pi) +
3 * (5 - 3 * cc^2 + cc^4) * exp(cc^2/2) * (1 - pnorm.mcc) +
cc^6 * exp(cc^2/2) * pnorm.mcc
) - delta
}
uniroot(expectation, interval = integral_interval, delta)$root
}
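# Rough sanity check (illustrative only): for the pre-computed cases the function returns the
# tabulated constants, e.g. .bisquare_consistency_const(0.5) is 1.5476450; other breakdown
# points fall back to the uniroot() search over the closed-form expectation above.
# .bisquare_consistency_const(0.5)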
#' Determine a breakdown point with stable numerical properties of the M-scale
#' with Tukey's bisquare rho function.
#'
#' The M-scale objective (and hence the S-loss) can have unbounded or very high
#' 1st derivative. This can lead to numerical instability of the algorithms and
#' in turn excessive computation time.
#' This function chooses the breakdown point with lowest upper bound of the 1st
#' derivative from a range of bdp's in the vicinity of the desired bdp.
#'
#' @param n number of observations in the sample
#' @param desired_bdp the desired breakdown point (between 0.05 and 0.5)
#' @param tolerance how far can the chosen bdp be away from the desired bdp.
#' The chosen bdp is guaranteed to be in the range given by `interval`.
#' @param interval restrict the chosen bdp to this interval.
#' @param precision granularity of the grid of considered bdp's.
#' @importFrom rlang warn
#' @keywords internal
.find_stable_bdb_bisquare <- function (n, desired_bdp, tolerance = 0.01, precision = 1e-4,
interval = c(0.05, 0.5)) {
if (isTRUE(attr(desired_bdp, 'fixed', TRUE))) {
return(desired_bdp)
}
numeric_tol <- sqrt(.Machine$double.eps)
from <- min(max(desired_bdp - tolerance, interval[[1L]]),
interval[[2L]])
to <- max(min(desired_bdp + tolerance, interval[[2L]]),
interval[[1L]])
bdp_range <- seq(from, to, by = precision)
# Filter bdp's where the 1st derivative is unbounded
bdp_range <- bdp_range[abs(bdp_range * n - floor(bdp_range * n)) > numeric_tol]
# Determine an upper bound for the 1st derivative of the M-scale objective function
first_deriv_bound <- vapply(bdp_range, FUN.VALUE = numeric(1L), FUN = function (bdp) {
thresh <- tryCatch(uniroot(f = function (t) {
up <- n * (1 - bdp) / (1 - t)
up - floor(up) - n * t / (1 - t)
}, interval = c(0, 0.5), extendInt = 'downX', tol = numeric_tol)$root,
error = function (e) {
return(NA_real_)
})
1 / sqrt(1 - (1 - thresh)^(1/3))
})
good_bounds <- which(is.finite(first_deriv_bound))
if (length(good_bounds) == 0L) {
warn(paste("The chosen breakdown point may lead to numerical instability and",
"excessive computation time.",
"Consider changing the breakdown point via argument `bdp`."))
return(desired_bdp)
}
bdp_range[[which.min(first_deriv_bound)]]
}
#' Approximate Value Matching
#'
#' @param x,table see [base::match] for details.
#' @param eps numerical tolerance for matching.
#' @return a vector the same length as `x` with integers giving the position in
#' `table` of the first match if there is a match, or `NA_integer_`
#' otherwise.
#' @keywords internal
.approx_match <- function(x, table, eps) {
if (missing(eps)) {
eps <- max(.Machine$double.eps, min(sqrt(.Machine$double.eps), 0.5 * min(x, table)))
}
.Call(C_approx_match, as.numeric(x), as.numeric(table), as.numeric(eps[[1L]]))
}
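## Illustrative usage (a sketch; requires the compiled pense internals for
## `C_approx_match`, and the values below are made up).
if (FALSE) {
  .approx_match(c(0.1 + 1e-12, 0.35), table = c(0.1, 0.2), eps = 1e-8)
  # first element matches table[1] within `eps`; the second has no match (NA)
}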
## Extract the given metric from all matching nodes (by name).
extract_metric <- function (metrics, attr, node) {
matches <- c()
if (!is.null(metrics[[attr]]) && isTRUE(metrics$name == node)) {
matches <- c(matches, metrics[[attr]])
}
if (!is.null(metrics$sub_metrics)) {
matches <- c(matches, unlist(lapply(metrics$sub_metrics, extract_metric,
attr, node),
use.names = FALSE, recursive = FALSE))
}
return (matches)
}
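## Illustrative sketch of the expected `metrics` structure (field names other
## than `name` and `sub_metrics` are assumptions):
if (FALSE) {
  m <- list(name = 'outer', iterations = 10,
            sub_metrics = list(list(name = 'inner', iterations = 25)))
  extract_metric(m, 'iterations', 'inner')  # 25
  extract_metric(m, 'iterations', 'outer')  # 10
}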
.recurisve_metrics_class <- function (metrics) {
class(metrics) <- 'nsoptim_metrics'
if (!is.null(metrics$sub_metrics)) {
metrics$sub_metrics <- lapply(metrics$sub_metrics, .recurisve_metrics_class)
}
return(metrics)
}
.metrics_attrib <- function (estimates, metrics) {
if (!is.null(metrics) && isTRUE(metrics$name != '')) {
attr(estimates, 'metrics') <- .recurisve_metrics_class(metrics)
}
return(estimates)
}
#' Run replicated K-fold CV with random splits
#'
#' @param std_data standardized full data set
#' (standardized by `.standardize_data`)
#' @param cv_k number of folds per CV split
#' @param cv_repl number of CV replications.
#' @param cv_est_fun function taking the standardized training set, the indices
#'   of the left-out observations, and additional handler arguments, and
#'   returning a list of estimates.
#'   The function always needs to return the same number of estimates!
#' @param metric function taking a vector of prediction errors and
#' returning the scale of the prediction error.
#' @param par_cluster parallel cluster to parallelize computations.
#' @param handler_args additional arguments to the handler function.
#' @importFrom Matrix drop
#' @importFrom rlang abort
#' @keywords internal
.run_replicated_cv <- function (std_data, cv_k, cv_repl, cv_est_fun, metric,
par_cluster = NULL,
handler_args = list()) {
est_fun <- match.fun(cv_est_fun)
call_with_errors <- isTRUE(length(formals(metric)) == 1L)
if (length(std_data$y) / cv_k < 2) {
abort("`cv_k` must be chosen to have at least 2 observations in each fold.")
}
test_segments_list <- lapply(integer(cv_repl), function (repl_id) {
split(seq_along(std_data$y),
sample(rep_len(seq_len(cv_k), length(std_data$y))))
})
test_segments <- unlist(test_segments_list, recursive = FALSE,
use.names = FALSE)
cl_handler <- .make_cluster_handler(par_cluster)
predictions_all <- cl_handler(
test_segments,
function (test_ind, est_fun, handler_args) {
train_x <- std_data$x[-test_ind, , drop = FALSE]
train_y <- std_data$y[-test_ind]
test_x <- std_data$x[test_ind, , drop = FALSE]
train_std <- std_data$cv_standardize(train_x, train_y)
cv_ests <- est_fun(train_std, test_ind, handler_args)
matrix(unlist(lapply(cv_ests, function (est) {
unstd_est <- train_std$unstandardize_coef(est)
drop(test_x %*% unstd_est$beta) - unstd_est$intercept
}), use.names = FALSE, recursive = FALSE), ncol = length(cv_ests))
}, est_fun = est_fun, handler_args = handler_args)
predictions_all <- split(predictions_all, rep(seq_len(cv_repl), each = cv_k))
prediction_metrics <- mapply(
predictions_all, test_segments_list,
FUN = function (predictions, test_inds) {
obs_order <- sort.list(unlist(test_inds, recursive = FALSE, use.names = FALSE))
ordered_predictions <- do.call(rbind, predictions)[obs_order, ]
if (call_with_errors) {
apply(ordered_predictions - std_data$y, 2, metric)
} else {
apply(ordered_predictions, 2, metric, std_data$y)
}
})
matrix(unlist(prediction_metrics, recursive = FALSE, use.names = FALSE), ncol = cv_repl)
}
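## Illustrative sketch of how this ties in with `.standardize_data()`. The
## estimator below is a dummy returning two fixed candidate estimates; `x`, `y`
## and all settings are assumptions for illustration only.
if (FALSE) {
  std_data <- .standardize_data(x, y, intercept = TRUE, standardize = TRUE,
                                robust = FALSE, sparse = FALSE,
                                mscale_opts = list())
  dummy_est_fun <- function (train_std, test_ind, handler_args) {
    list(list(intercept = 0, beta = numeric(ncol(train_std$x))),
         list(intercept = 1, beta = numeric(ncol(train_std$x))))
  }
  .run_replicated_cv(std_data, cv_k = 5, cv_repl = 3,
                     cv_est_fun = dummy_est_fun, metric = .cv_rmspe)
  # a 2 x 3 matrix: one row per candidate estimate, one column per replication
}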
#' Standardize data
#'
#' @param x predictor matrix. Can also be a list with components `x` and `y`,
#' in which case `y` is ignored.
#' @param y response vector.
#' @param intercept is an intercept included (i.e., should `y` be centered?)
#' @param standardize standardize the predictors or not. May also be the string
#'   `'cv_only'` to compute the scales but apply them only inside
#'   `cv_standardize()`.
#' @param robust use robust standardization.
#' @param sparse use sparse coefficient vectors (class `dsparseVector`).
#' @param mscale_opts options for the M-estimating equations of scale and
#'   location.
#' @param location_rho rho function for location estimate
#' @param cc cutoff value for the rho functions used in scale and location
#'   estimates.
#' @param target_scale_x if not `NULL`, scale the predictors to these scales
#'   instead of to unit scale.
#' @param ... passed on to `mlocscale()`.
#' @return a list with the constants `scale_x`, `mux`, and `muy`, the centered
#'   (and possibly scaled) `x` and `y`, and the functions `cv_standardize()`,
#'   `standardize_coefs()`, and `unstandardize_coefs()`.
#' @importFrom Matrix drop
#' @importFrom methods is
#' @importFrom rlang abort
#' @importFrom stats sd
#' @keywords internal
.standardize_data <- function (x, y, intercept, standardize, robust, sparse,
mscale_opts, location_rho = 'bisquare', cc,
target_scale_x = NULL, ...) {
if (is.list(x) && !is.null(x$x) && !is.null(x$y)) {
y <- x$y
x <- x$x
}
ret_list <- list(scale_x = rep.int(1, ncol(x)), mux = numeric(ncol(x)),
muy = 0, x = x, y = y)
## Center data for numerical convenience
if (isTRUE(intercept)) {
if (!isTRUE(robust)) {
ret_list$mux <- colMeans(x)
ret_list$muy <- mean(y)
} else {
ret_list$mux <- apply(x, 2, function (xj) {
mloc(xj, rho = location_rho, cc = cc, opts = mscale_opts)
})
# Center the response using the S-estimate of regression for the
# 0-slope.
y_locscale <- mlocscale(y, location_rho = location_rho, location_cc = cc,
scale_cc = cc, opts = mscale_opts, ...)
if (!isTRUE(y_locscale[['scale']] > .Machine$double.eps)) {
abort("M-scale of response is 0.")
}
ret_list$muy <- y_locscale[['location']]
if (!is.finite(ret_list$muy)) {
# In case the response has more than 50% equal values.
ret_list$muy <- 0
}
}
ret_list$x <- sweep(x, 2L, ret_list$mux, FUN = `-`, check.margin = FALSE)
ret_list$y <- y - ret_list$muy
}
## Scale predictors
if (isTRUE(standardize) || isTRUE(standardize == 'cv_only')) {
ret_list$scale_x <- if (!isTRUE(robust)) {
apply(ret_list$x, 2, sd)
} else {
locscale <- apply(ret_list$x, 2, function (xj) {
mlocscale(xj, location_rho = location_rho, location_cc = cc,
scale_cc = cc, opts = mscale_opts, ...)
})
if (isTRUE(intercept)) {
# Re-center the predictors with the updated centers
ret_list$mux <- ret_list$mux + locscale[1L, ]
ret_list$x <- sweep(x, 2L, ret_list$mux, FUN = `-`,
check.margin = FALSE)
}
locscale[2L, ]
}
if (!isTRUE(all(ret_list$scale_x > 0))) {
abort(paste("Standardization failed. One or more variables in `x`",
"have a scale of 0."))
}
if (isTRUE(standardize)) {
ret_list$x <- if (!is.null(target_scale_x)) {
sweep(ret_list$x, 2L, target_scale_x / ret_list$scale_x, FUN = `*`,
check.margin = FALSE)
} else {
sweep(ret_list$x, 2L, ret_list$scale_x, FUN = `/`, check.margin = FALSE)
}
}
}
# Set the target scale to 1, so that standardizing and unstandardizing works.
if (is.null(target_scale_x)) {
target_scale_x <- 1
}
ret_list$cv_standardize <- function(x, y) {
if (is.list(x) && !is.null(x$x) && !is.null(x$y)) {
y <- x$y
x <- x$x
}
if (isTRUE(standardize == 'cv_only')) {
# In case of "CV only" standardization, match the original scaling
.standardize_data(x, y,
intercept = intercept,
standardize = TRUE,
robust = robust,
sparse = sparse,
location_rho = location_rho,
cc = cc,
mscale_opts = mscale_opts,
target_scale_x = ret_list$scale_x,
... = ...)
} else {
.standardize_data(x, y,
intercept = intercept,
standardize = standardize,
robust = robust,
sparse = sparse,
location_rho = location_rho,
cc = cc,
mscale_opts = mscale_opts,
... = ...)
}
}
ret_list$standardize_coefs <- function(coef_obj) {
if (is.null(coef_obj)) {
return(NULL)
}
if (is.null(coef_obj$intercept)) {
coef_obj$intercept <- 0
}
if (isTRUE(intercept)) {
# Adjust intercept
coef_obj$intercept <- coef_obj$intercept - ret_list$muy +
sum(ret_list$mux * coef_obj$beta)
}
if (isTRUE(standardize)) {
coef_obj$beta <- coef_obj$beta * (ret_list$scale_x / target_scale_x)
}
return(coef_obj)
}
ret_list$unstandardize_coefs <- if (isTRUE(sparse)) {
function(coef_obj) {
if (is.null(coef_obj)) {
return(coef_obj)
}
if (is.null(coef_obj$intercept)) {
coef_obj$intercept <- 0
}
coef_obj$std_beta <- coef_obj$beta
coef_obj$std_intercept <- coef_obj$intercept
if (isTRUE(standardize)) {
coef_obj$beta@x <- coef_obj$beta@x * target_scale_x /
ret_list$scale_x[coef_obj$beta@i]
}
if (isTRUE(intercept)) {
# Recreate intercept
coef_obj$intercept <- coef_obj$intercept + ret_list$muy -
sum(ret_list$mux[coef_obj$beta@i] * coef_obj$beta@x)
}
return(coef_obj)
}
} else {
function(coef_obj) {
if (is.null(coef_obj)) {
return(coef_obj)
}
if (is.null(coef_obj$intercept)) {
coef_obj$intercept <- 0
}
coef_obj$std_beta <- coef_obj$beta
coef_obj$std_intercept <- coef_obj$intercept
if (isTRUE(standardize)) {
coef_obj$beta <- coef_obj$beta * target_scale_x / ret_list$scale_x
}
if (isTRUE(intercept)) {
# Recreate intercept
coef_obj$intercept <- coef_obj$intercept + ret_list$muy -
sum(ret_list$mux * coef_obj$beta)
}
return(coef_obj)
}
}
return(ret_list)
}
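## Illustrative sketch: standardizing and then unstandardizing a coefficient
## object recovers the original intercept and slopes (`x`, `y` and the
## coefficients are assumptions for illustration).
if (FALSE) {
  std <- .standardize_data(x, y, intercept = TRUE, standardize = TRUE,
                           robust = FALSE, sparse = FALSE, mscale_opts = list())
  est <- list(intercept = 1, beta = rnorm(ncol(x)))
  back <- std$unstandardize_coefs(std$standardize_coefs(est))
  all.equal(back$beta, est$beta)            # TRUE
  all.equal(back$intercept, est$intercept)  # TRUE
}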
#' @importFrom stats median
.cv_mape <- function (r) {
median(abs(r))
}
.cv_rmspe <- function (r) {
sqrt(mean(r^2))
}
## Rank-based (Mann-Whitney) statistic for "negatives" having value 0 and "positives" having
## value 1: the estimated probability that a negative is ranked above a positive, i.e., 1 - AUROC.
.cv_auroc <- function (pred, truth) {
n_neg <- sum(truth <= 0)
n_pos <- sum(truth > 0)
mww <- sum(rank(pred)[truth <= 0]) - n_neg * (n_neg + 1) / 2
return(mww / (n_neg * n_pos))
}
.cv_se_selection <- function (cvm, cvsd, se_fact) {
type <- rep.int(factor('none', levels = c('none', 'min', 'se_fact')), length(cvm))
best <- which.min(cvm)
candidates <- which(cvm <= cvm[[best]] + se_fact * cvsd[[best]])
candidates <- candidates[candidates <= best] # only consider sparser solutions
# "ignore" solutions after which the prediction performance comes back down
best_1se <- if (any(diff(candidates) > 1)) {
min(candidates[-seq_len(max(which(diff(candidates) > 1)))])
} else {
min(candidates)
}
type[[best]] <- 'min'
type[[best_1se]] <- 'se_fact'
return(type)
}
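## Illustrative sketch (made-up values): the minimum is at the 4th entry and the
## sparsest candidate within `se_fact` standard errors of it is the 2nd entry.
if (FALSE) {
  .cv_se_selection(cvm = c(3.0, 2.4, 2.3, 2.0), cvsd = rep(0.5, 4), se_fact = 1)
  # none se_fact none min
}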
## Validate the response type and return a list with elements `values` and `binary`.
#' @importFrom rlang warn abort
.validate_response <- function (y) {
nlevels <- length(unique(y))
if (is.factor(y)) {
if (nlevels == 2L) {
return(list(values = as.numeric(y) - 1, binary = TRUE))
} else if (nlevels > 2L) {
warn("`y` with more than 2 factor levels is implicitly treated as numeric.")
return(list(values = as.numeric(y), binary = FALSE))
}
} else {
y <- .as(y, 'numeric')
if (nlevels == 2L) {
warn(paste("`y` is interpreted as continuous response but has only 2 distinct values.",
"If binary classification is desired, coerce `y` to factor with `as.factor()`."))
}
if (nlevels > 1L) {
return(list(values = y, binary = FALSE))
}
}
abort("`y` must have at least 2 distinct values.")
}
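## Illustrative sketch:
if (FALSE) {
  .validate_response(factor(c('a', 'b', 'a')))  # values 0, 1, 0; binary = TRUE
  .validate_response(c(1.2, 3.4, 2.2))          # continuous; binary = FALSE
}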
## A wrapper around `methods::as` which raises an error if the conversion results in NA.
## @param ... passed on to [methods::as].
##
#' @importFrom methods as
#' @importFrom rlang abort
.as <- function (object, class, ...) {
object_var <- deparse(substitute(object))
tryCatch(methods::as(object, class, ...), warning = function (w) {
abort(sprintf('`%s` is not of type `%s`', object_var, class))
})
}
## Create a function which restores the original length of the coefficient vector
.restore_coef_length_fun <- function (positions, length) {
if (length(positions) > 0L) {
function (coef) {
if (is(coef$beta, 'dsparseVector')) {
coef$beta <- sparseVector(coef$beta@x, positions[coef$beta@i], length)
} else {
beta <- numeric(length)
beta[positions] <- coef$beta
coef$beta <- beta
}
return(coef)
}
} else {
function (coef) {
if (is(coef$beta, 'dsparseVector')) {
coef$beta <- sparseVector(numeric(0L), integer(0L), length)
} else {
coef$beta <- numeric(length)
}
return(coef)
}
}
}
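## Illustrative sketch (dense case): re-insert two non-zero coefficients at
## their original positions in a length-5 vector.
if (FALSE) {
  restore <- .restore_coef_length_fun(positions = c(2L, 4L), length = 5L)
  restore(list(beta = c(1, -1)))$beta  # 0 1 0 -1 0
}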
.prepare_penalty_loadings <- function (penalty_loadings, x, alpha, sparse,
stop_all_infinite = FALSE) {
orig_p <- ncol(x)
restore_coef_length <- function (coef) coef
  if (any(alpha < .Machine$double.eps)) {
abort("Non-empty `penalty_loadings` only supported for `alpha` > 0.")
} else if (length(penalty_loadings) != orig_p) {
abort("`penalty_loadings` has different number of elements than `x` columns.")
}
penalty_loadings <- .as(penalty_loadings, 'numeric')
if (any(penalty_loadings < 0)) {
abort("`penalty_loadings` must be positive.")
}
# Determine finite penalty loadings
good_pl <- which(is.finite(penalty_loadings))
if (length(good_pl) < orig_p) {
# Some penalty loadings are infinite! Remove the corresponding predictors from `x`.
x <- x[ , good_pl, drop = FALSE]
penalty_loadings <- penalty_loadings[good_pl]
restore_coef_length <- if (length(good_pl) > 0L) {
if (isTRUE(sparse)) {
function (coef) {
if (!is.null(coef$std_beta)) {
coef$std_beta <- sparseVector(coef$std_beta@x, good_pl[coef$std_beta@i], orig_p)
}
if (!is.null(coef$beta)) {
coef$beta <- sparseVector(coef$beta@x, good_pl[coef$beta@i], orig_p)
}
return(coef)
}
} else {
function (coef) {
coef_vector <- numeric(orig_p)
if (!is.null(coef$std_beta)) {
coef_vector[good_pl] <- coef$std_beta
coef$std_beta <- coef_vector
}
if (!is.null(coef$beta)) {
coef_vector[good_pl] <- coef$beta
coef$beta <- coef_vector
}
return(coef)
}
}
} else {
if (stop_all_infinite) {
abort("At least one value in `penalty_loadings` must be finite.")
}
if (isTRUE(sparse)) {
function (coef) {
coef$std_beta <- sparseVector(numeric(0L), integer(0L), orig_p)
coef$beta <- sparseVector(numeric(0L), integer(0L), orig_p)
return(coef)
}
} else {
function (coef) {
coef$std_beta <- numeric(orig_p)
coef$beta <- numeric(orig_p)
return(coef)
}
}
}
}
return(list(loadings = penalty_loadings, trimmed_x = x, restore_fun = restore_coef_length))
}
#' @importFrom methods is
.sparsify_other_starts <- function (other_starts, sparse) {
lapply(other_starts, function (est) {
if (isTRUE(sparse) && !is(est$beta, 'dsparseVector')) {
est$beta <- sparseVector(as.numeric(est$beta), seq_along(est$beta), length(est$beta))
} else if (!isTRUE(sparse) && !is.numeric(est$beta)) {
est$beta <- .as(est$beta, 'numeric')
}
class(est) <- NULL
return(est)
})
}
#' @importFrom parallel clusterEvalQ clusterCall clusterApplyLB
#' @importFrom rlang abort
.make_cluster_handler <- function (par_cluster) {
if (is.null(par_cluster)) {
return(function (X, FUN, ..., x, fun) {
lapply(X, FUN, ...)
})
} else {
tryCatch({
clusterEvalQ(par_cluster, {
library(pense)
})
}, error = function (e) {
abort(paste("`parallel` cluster cannot be used:", e))
})
return(function (X, FUN, ..., x, fun) {
clusterApplyLB(par_cluster, x = X, fun = function (X, FUN, ...) {
FUN(X, ...)
}, FUN = FUN, ... = ...)
})
}
}
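## Illustrative sketch: without a cluster the handler falls back to `lapply()`;
## with a cluster it dispatches via `clusterApplyLB()`. Cluster creation below
## is an assumption for illustration.
if (FALSE) {
  handler <- .make_cluster_handler(NULL)
  handler(1:3, function (i) i^2)  # list(1, 4, 9)
  cl <- parallel::makeCluster(2L)
  par_handler <- .make_cluster_handler(cl)
  par_handler(1:3, function (i) i^2)
  parallel::stopCluster(cl)
}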
## Parse strings of the form *min*, *se*, or *{m}-se*.
#' @importFrom rlang abort
.parse_se_string <- function (x, only_fact = FALSE) {
x <- .as(x[[1L]], 'character')
xlen <- nchar(x)
se_fact <- 1
se_str <- if (identical('-se', substr(x, xlen - 2L, xlen))) {
se_fact <- as.numeric(substr(x, 0L, xlen - 3L))
if (anyNA(se_fact)) {
abort(sprintf("Cannot parse standard error string '%s'.", x))
}
'se'
} else {
match.arg(x, c('min', 'se'))
}
if (identical(se_str, 'min')) {
se_fact <- 0
}
if (isTRUE(only_fact)) {
se_fact
} else {
list(se_type = se_str, se_fact = se_fact)
}
}
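## Illustrative sketch:
if (FALSE) {
  .parse_se_string('min')     # list(se_type = 'min', se_fact = 0)
  .parse_se_string('1.5-se')  # list(se_type = 'se', se_fact = 1.5)
}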
## Filter a list to only include items with matching values.
.filter_list <- function (x, what, value, eps = sqrt(.Machine$double.eps),
comp_fun, ...) {
comp_fun <- if (missing(comp_fun)) {
function (v) { abs(v - value) < eps }
} else {
match.fun(comp_fun)
}
matches <- vapply(x, FUN.VALUE = logical(1L), FUN = function (el) {
comp_fun(el[[what]], ...)
})
x[matches]
}
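## Illustrative sketch: keep only the list elements whose `lambda` entry matches
## the requested value up to the numerical tolerance.
if (FALSE) {
  ests <- list(list(lambda = 0.1), list(lambda = 0.2))
  .filter_list(ests, what = 'lambda', value = 0.2)  # keeps only the 2nd element
}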
|
/R/utilities-internal.R
|
no_license
|
cran/pense
|
R
| false | false | 21,718 |
r
|
\name{siarmcmcdirichletv4}
\alias{siarmcmcdirichletv4}
\title{ MCMC for stable isotope data }
\description{
Runs a Markov chain Monte Carlo (MCMC) analysis on stable isotope data from target organisms (consumers) to estimate the proportions of the different food sources in their diet.
}
\usage{
siarmcmcdirichletv4(data, sources, corrections = 0, concdep = 0, iterations=200000, burnin=50000, howmany=10000, thinby=15, prior = rep(1, nrow(sources)), siardata=list(SHOULDRUN=FALSE))
}
\arguments{
\item{data}{ A matrix of the target organisms' isotopic values, with each individual as a separate row and each isotope as a separate column.}
\item{sources}{ A matrix of the food sources' isotopic values, giving the mean and standard deviation
of each isotope for every source (one row per food source). }
\item{corrections}{ A matrix containing the means and standard deviations of the fractionation correction
values for each of the isotopes. Also allows corrections = 0 for pre-corrected data.}
\item{concdep}{ A matrix containing the mean and standard deviations of the concentration dependence
values for each of the isotopes. Also allows concdep = 0 for data with no required concentration dependence. Note that
version 4.0 does not use the standard deviations.}
\item{iterations}{ The number of iterations to run. }
\item{burnin}{ The size of the burnin }
\item{howmany}{ How often to report the number of iterations. }
\item{thinby}{ The amount of thinning of the iterations. }
\item{prior}{ The Dirichlet distribution prior parameters; the default is rep(1,numsources). New parameters
can be estimated via the function \code{\link{siarelicit}}.}
\item{siardata}{ A list containing some or all of the following parts: targets, sources, corrections,
PATH, TITLE, numgroups, numdata, numsources, numiso, SHOULDRUN, GRAPHSONLY, EXIT, and output. For more details
of these inputs see the \code{\link{siarmenu}} function. }
}
\details{
The model assumes that each target value comes from a Gaussian distribution with an unknown mean and standard
deviation. The structure of the mean is a weighted combination of the food sources' isotopic
values. The weights are made up of the dietary proportions (which are given a Dirichlet prior distribution) and the
concentration dependencies given for the different food sources. The
standard deviation is divided up between the uncertainty around the fractionation corrections (if
corrections are given) and the natural variability between target individuals within a defined group
(or between all individuals if no grouping structure is specified). The default iteration numbers work well
for the demo data sets, but advanced users will want to adjust them to suit their analysis.
}
\value{
A parameter matrix consisting of (iterations-burnin)/thinby rows with numgroups*(numsources+numiso)
columns, where numsources is the number of food sources, numiso is the number of isotopes, and numgroups
is the number of groups. The parameter matrix is structured so that, for each group, the first columns
are those of the proportions of each food source eaten, the next columns are the standard deviations for
each isotope. This format repeats across rows to each group. The parameters may then subsequently be
used for plotting, convergence checks, summaries, and so on.
}
\author{ Andrew Parnell }
\seealso{ \code{\link{siarmenu}}, \code{\link{siarelicit}} }
\examples{
# Should take around 10 seconds to run
#out <- siarmcmcdirichletv4(geese1demo,sourcesdemo,correctionsdemo,concdepdemo)
}
\keyword{ programming }
\keyword{ models }
\keyword{ multivariate }
|
/siar/man/siarmcmcdirichletv4.Rd
|
no_license
|
albrizre/spatstat.revdep
|
R
| false | false | 3,565 |
rd
|
# nym.2002 (New York City Marathon 2002 finishing times) is assumed to come from
# the UsingR package; dplyr provides the pipe operator `%>%`.
library(dplyr)
data(nym.2002, package = "UsingR")
time = sort(nym.2002$time) %>% unlist
# What is the fastest time divided by the median time?
min(time)/median(time)
# What is the slowest time divided by the median time?
max(time)/median(time)
plot(time/median(time), ylim=c(1/4,4))
abline(h=c(1/2,1,2))
plot(log2(time/median(time)),ylim=c(-2,2))
abline(h=-1:1)
|
/statistics with r/week_04/Symmetry of Log Ratios.R
|
no_license
|
nezlicodes/Data_Science_for_life_sciences_edx
|
R
| false | false | 319 |
r
|
testlist <- list(holes = integer(0), numholes = integer(0), x = c(1.25986739689518e-321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(decido:::earcut_cpp,testlist)
str(result)
|
/decido/inst/testfiles/earcut_cpp/libFuzzer_earcut_cpp/earcut_cpp_valgrind_files/1609874429-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 391 |
r
|