content (large_string, 0-6.46M) | path (large_string, 3-331) | license_type (large_string, 2 classes) | repo_name (large_string, 5-125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.46M) | extension (large_string, 75 classes) | text (string, 0-6.46M)
---|---|---|---|---|---|---|---|---|---|
\name{domain}
\alias{domain}
\alias{domain.ppp}
\alias{domain.psp}
\alias{domain.im}
\alias{domain.ppx}
\alias{domain.pp3}
\alias{domain.lpp}
\alias{domain.ppm}
\alias{domain.kppm}
\alias{domain.lppm}
\alias{domain.msr}
\alias{domain.quad}
\alias{domain.quadratcount}
\alias{domain.quadrattest}
\alias{domain.tess}
\alias{domain.layered}
\alias{domain.distfun}
\alias{domain.nnfun}
\alias{domain.funxy}
\alias{domain.rmhmodel}
\alias{domain.leverage.ppm}
\alias{domain.influence.ppm}
\title{
Extract the Domain of any Spatial Object
}
\description{
Given a spatial object such as a point pattern, in any number of dimensions,
this function extracts the spatial domain in which the object is defined.
}
\usage{
domain(X, \dots)
\method{domain}{ppp}(X, \dots)
\method{domain}{psp}(X, \dots)
\method{domain}{im}(X, \dots)
\method{domain}{ppx}(X, \dots)
\method{domain}{pp3}(X, \dots)
\method{domain}{lpp}(X, \dots)
\method{domain}{ppm}(X, \dots, from=c("points", "covariates"))
\method{domain}{kppm}(X, \dots, from=c("points", "covariates"))
\method{domain}{lppm}(X, \dots)
\method{domain}{msr}(X, \dots)
\method{domain}{quad}(X, \dots)
\method{domain}{quadratcount}(X, \dots)
\method{domain}{quadrattest}(X, \dots)
\method{domain}{tess}(X, \dots)
\method{domain}{layered}(X, \dots)
\method{domain}{distfun}(X, \dots)
\method{domain}{nnfun}(X, \dots)
\method{domain}{funxy}(X, \dots)
\method{domain}{rmhmodel}(X, \dots)
\method{domain}{leverage.ppm}(X, \dots)
\method{domain}{influence.ppm}(X, \dots)
}
\arguments{
\item{X}{
A spatial object such as a point pattern (in any number
of dimensions), line segment pattern or pixel image.
}
\item{\dots}{
Extra arguments. They are ignored by all the methods listed here.
}
\item{from}{Character string. See Details.}
}
\details{
The function \code{domain} is generic.

For a spatial object \code{X} in any number of dimensions,
\code{domain(X)} extracts the spatial domain in which \code{X} is
defined.

For a two-dimensional object \code{X}, typically \code{domain(X)}
is the same as \code{Window(X)}.
The exception is that, if \code{X} is a point pattern on a linear network
(class \code{"lpp"}) or a point process model on a linear network
(class \code{"lppm"}), then \code{domain(X)} is the linear network
on which the points lie, while \code{Window(X)} is the two-dimensional
window containing the linear network.

The argument \code{from} applies when \code{X} is a fitted
point process model
(object of class \code{"ppm"} or \code{"kppm"}).
If \code{from="points"} (the default),
\code{domain} extracts the window of the original point
pattern data to which the model was fitted.
If \code{from="covariates"} then \code{domain} returns the
window in which the spatial covariates of the model were provided.
}
\value{
A spatial object representing the domain of \code{X}.
Typically a window (object of class \code{"owin"}),
a three-dimensional box (\code{"box3"}), a multidimensional
box (\code{"boxx"}) or a linear network (\code{"linnet"}).
}
\author{Adrian Baddeley
\email{Adrian.Baddeley@uwa.edu.au}
\url{http://www.maths.uwa.edu.au/~adrian/}
Rolf Turner
\email{r.turner@auckland.ac.nz}
and Ege Rubak
\email{rubak@math.aau.dk}
}
\seealso{
\code{\link{Window}}
}
\examples{
domain(cells)
domain(bei.extra$elev)
domain(chicago)
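# for a point pattern on a linear network (an "lpp" object such as chicago),
# domain() returns the network itself, while Window() returns the enclosing
# two-dimensional window, as described in the Details section
Window(chicago)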
}
\keyword{spatial}
\keyword{manip}
|
/man/domain.Rd
|
no_license
|
keithschulze/spatstat
|
R
| false | false | 3,567 |
rd
|
|
library(tidyverse)
source("mymain.R")
# read in train / test dataframes
train <- readr::read_csv('train.csv')
test <- readr::read_csv('test.csv', col_types = list(
Weekly_Pred1 = col_double(),
Weekly_Pred2 = col_double(),
Weekly_Pred3 = col_double()
))
# container for the per-fold weighted mean absolute error (WMAE)
num_folds <- 10
wae <- tibble(
model_one = rep(0, num_folds),
model_two = rep(0, num_folds),
model_three = rep(0, num_folds)
)
# time-series CV
for (t in 1:num_folds) {
# *** THIS IS YOUR PREDICTION FUNCTION ***
mypredict()
# Load fold file
# You should add this to your training data in the next call
# to mypredict()
fold_file <- paste0('fold_', t, '.csv')
new_test <- readr::read_csv(fold_file)
# extract predictions matching up to the current fold
scoring_tbl <- new_test %>%
left_join(test, by = c('Date', 'Store', 'Dept'))
# compute WMAE
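# WMAE = sum(w * |actual - predicted|) / sum(w), with w = 5 for holiday weeks
# and w = 1 otherwise; colSums below yields one value per prediction column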
actuals <- scoring_tbl$Weekly_Sales
preds <- select(scoring_tbl, contains('Weekly_Pred'))
weights <- if_else(scoring_tbl$IsHoliday.x, 5, 1)
wae[t, ] <- colSums(weights * abs(actuals - preds)) / sum(weights)
}
# save results to a file for grading
readr::write_csv(wae, 'Error.csv')
|
/project/stat542_project2/evaluationCode.R
|
no_license
|
wutianqidx/STAT542-Statistical-Learning
|
R
| false | false | 1,182 |
r
|
|
# ======================================================================
# ======================================================================
library(here)
library(tidyverse)
library(patchwork)
library(lubridate)
library(deSolve)
library(pracma)
rm(list = ls())
source(here("plot_theme.R"))
myseed = 525014
# import data
zhejiang_data <- readRDS(here("Zjchn_data"))
case_data <- readRDS(here("zhejiang_cases"))
all_pars <- readRDS(file = here("all_pars.rds"))
num_split = 5
# ======================================================================
# combine all fit objects
combine_zj_fit <- list()
pars_x0_list <- list()
pars_xfit_list <- list()
# read fitted models
for (k in 1:num_split) {
savepath <- paste0("mod_fit_",k,".rds")
lista <- readRDS(file = here(savepath))
if(k==1){combine_zj_fit <- lista}
if(k> 1){combine_zj_fit <- append(combine_zj_fit, lista)}
gc()
}
combine_zj1kfit <- combine_zj_fit[-791]
combine_zjfxfit <- combine_zj_fit[791]
saveRDS(combine_zj1kfit,here("combine_zj1kfit.rds"))
saveRDS(combine_zjfxfit,here("combine_zjfxfit.rds"))
for (k in 1:length(combine_zj1kfit)) {
pars_x0_list[[k]] <- as.data.frame(t(combine_zj1kfit[[k]]$pars))
pars_xfit_list[[k]] <- as.data.frame(t(combine_zj1kfit[[k]]$mod$nloptr$solution)) %>%
mutate(ttcase = combine_zj1kfit[[k]]$mod$fit_ts$cumulative_cases_sum)
# cat("\nFinished",k," run, ", k*100/length(combine_zj1kfit), "%")
}
pars_x0 <- bind_rows(pars_x0_list)
pars_xfit <- bind_rows(pars_xfit_list)
colnames(pars_xfit)[1:9] <- colnames(pars_x0)
cip <- rethinking::PI(pars_xfit$fp, prob = 0.95)
cie <- rethinking::PI(pars_xfit$par_epsilon, prob = 0.95)
cic0 <- rethinking::PI(pars_xfit$C0, prob = 0.95)
cic1 <- rethinking::PI(pars_xfit$C1, prob = 0.95)
pars_ci <- data.frame(pars = c("fp","par_epsilon", "C0scale", "C1scale"),
mean = c(mean(pars_xfit$fp),mean(pars_xfit$par_epsilon),mean(pars_xfit$C0),mean(pars_xfit$C1)),
hdilo = c(cip[1],cie[1],cic0[1],cic1[1]),
hdiup = c(cip[2],cie[2],cic0[2],cic1[2])) %>%
mutate_if(is.numeric,round,4) %>%
mutate(notes = c("\n\U00070 = ","\n\U003B5 = ","\n\U003BC = ","\n\U003BC = "),
labels = paste0(notes,round(mean,2),"\n95%CI (",round(hdilo,2),", ",round(hdiup,2),")"))
pars_ci
# ======================================================================
# create prediction intervals
# =================
# social contact interval
test_dur = max(zhejiang_data$cases$days_post)
changes <- matrix(nrow = length(combine_zj1kfit), ncol = test_dur+1, data = NA)
for (i in 1:length(combine_zj1kfit)) {
changes[i,] <- sigmoid(0:test_dur-pars_xfit$ctm_b[i], a = pars_xfit$ctm_a[i], b = 0) %>% as.numeric()
}
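# each row of 'changes' is one retained fit's sigmoid transition over time
# (pracma::sigmoid of the time index shifted by ctm_b, with steepness ctm_a);
# ctm_tab below summarises 1 - changes, i.e. the contact level relative to
# baseline, with a 95% envelope across fits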
# plot(x = , y = )
ctm_tab <- tibble(date = min(zhejiang_data$cases$date) + c(0:test_dur),
ctm_mean = 1-apply(changes,2,mean,na.rm=T),
ctm_lo = 1-apply(changes,2,quantile,0.975,na.rm=T),
ctm_up = 1-apply(changes,2,quantile,0.025,na.rm=T))
ctm_plot <- ggplot(ctm_tab,aes(x = date, y = ctm_mean)) +
geom_ribbon(aes( ymin = ctm_lo, ymax = ctm_up), fill = "grey", alpha = 0.8) +
geom_line(size = 0.5) +
geom_vline(xintercept = as_date("2020-01-23"), linetype = 2, col = "red", size = 0.5) +
geom_vline(xintercept = as_date("2020-02-01"), linetype = 2, col = "red", size = 0.5) +
scale_y_continuous(limits = c(0,1), breaks = c(0,0.5,1),labels = c("Outbreak\nstrength","Middle","Baseline\nstrength")) +
scale_x_date(date_labels = "%b %d", date_breaks = "3 day") +
labs(x = "Date", y="Contact matrix") +
theme_bw() +
theme(axis.text.x=element_text(angle=60, hjust=1),
panel.border = element_blank(),
panel.grid.major = element_blank()) +
plot_theme
ctm_plot
# =================
# daily new cases interval
dnewcases <- matrix(nrow = length(combine_zj1kfit), ncol = test_dur+1, data = NA)
for (i in 1:length(combine_zj1kfit)) {
dnewcases[i,] <- combine_zj1kfit[[i]]$mod$fit_ts$daily_newcases$dailynew
}
case_tab <- tibble(date = min(zhejiang_data$cases$date) + c(0:test_dur),
case_mean = apply(dnewcases,2,mean,na.rm=T),
case_lo = apply(dnewcases,2,quantile,0.025,na.rm=T),
case_up = apply(dnewcases,2,quantile,0.975,na.rm=T))
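# case_tab: mean and 2.5%/97.5% envelope of the model-predicted daily new cases
# across the retained fits, indexed by calendar date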
p_plot <- data.frame(pars = "p", value = pars_xfit$fp) %>%
ggplot(aes(x=value)) +
geom_histogram(aes(y=..density..), alpha=0.5, position="identity", binwidth = 0.05, color="black", fill="white") +
geom_density(adjust=2,size = 0.5,col=c("blue")) +
labs(x = paste0("Contact tracing \nproportion",pars_ci$labels[1]), y = "Density") + # , title = expression(paste("Density plot of p"))) +
scale_x_continuous(breaks = seq(0.1,0.9,0.2)) +
plot_theme +
theme_bw() +
theme(panel.border = element_blank(),
panel.grid = element_blank(),
axis.title.x = element_text(size=9),
axis.title.x.bottom = element_text(vjust = 12))
e_plot <- data.frame(pars = "e", value = pars_xfit$par_epsilon) %>%
ggplot(aes(x=value)) +
geom_histogram(aes(y=..density..), alpha=0.5, position="identity", binwidth = 1, color="black", fill="white") +
geom_density(adjust=2,size = 0.5,col=c("blue")) +
labs(x = paste0("Isolation speed (days)",pars_ci$labels[2]), y = "Density") +
plot_theme +
scale_x_continuous(breaks = seq(1,15,3)) +
theme_bw() +
theme(panel.border = element_blank(),
panel.grid = element_blank(),
axis.title.x = element_text(size=9),
axis.title.x.bottom = element_text(vjust = 12))
ode_plot <- ggplot()+
geom_vline(xintercept = as_date("2020-01-23"), linetype = 2, col = "red", size = 0.5) +
geom_vline(xintercept = as_date("2020-02-01"), linetype = 2, col = "red", size = 0.5) +
geom_col(data = zhejiang_data$cases, mapping=aes(x=date, y=dailynewcases, fill = "data"), alpha = 0.5) +
geom_line(data = case_tab, aes(x=date,y=case_mean,col = "blue"), size = 0.8) +
geom_ribbon(data = case_tab, aes(x=date,y=case_mean, ymin = case_lo, ymax = case_up), fill = "blue", linetype=2, alpha = 0.2) +
scale_color_manual(values = c("blue"), labels = c("Model"), name = " ") +
scale_fill_manual(values = c("grey"), labels = c("Observed cases"), name = " ") +
labs(x = "Date", y = "Counts of cases") +
scale_x_date(date_labels = "%b %d", date_breaks = "3 day") +
theme_bw() +
plot_theme +
theme(legend.position = c(0.85,0.75),
legend.key.size = unit(0.5, "cm"),
legend.spacing.y = unit(-0.1, "cm"),
panel.grid.major = element_blank(),
panel.border = element_blank(),
axis.text.x=element_text(angle=60, hjust=1))
ode_plot
fig2 <- (ctm_plot + theme(axis.title.x = element_blank()) + e_plot + p_plot + plot_layout(widths = c(3, 1.1, 1.1))) /
(ode_plot) + plot_layout(heights = c(3, 6)) + plot_annotation(tag_levels = 'A')
fig2
pars_ci
ggsave(file = here("figure2.png"), fig2, dpi=dpiset, units="in", width=8, height=5)
|
/codes/seir_mod_output.R
|
no_license
|
yangepi/ZJ_Covid19_EPIDEMICS
|
R
| false | false | 6,963 |
r
|
|
# Prior distribution for p1
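# (a two-component mixture of normal densities: weight w on a normal with mean
#  p11 and variance p11*(1-p11)/in1, weight 1-w on a normal with mean p12 and
#  variance p12*(1-p12)/in2; in1 and in2 act as prior effective sample sizes)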
prior_binary<-function(x, w, p11, p12, in1, in2){
w * dnorm(x, p11, sqrt(p11*(1-p11)/in1)) + (1-w) * dnorm(x, p12, sqrt(p12*(1-p12)/in2))
}
# 1,000,000 realizations of the prior distribution
box_binary<-function(w, p11, p12, in1, in2){
w*rnorm(1000000,p11,sqrt(p11*(1-p11)/in1))+(1-w)*rnorm(1000000,p12,sqrt(p12*(1-p12)/in2))
}
# auxiliary functions
t1 <- function(x, p0){((1-p0)/p0) + ((1-x)/x)}
t2 <- function(x, p0){sqrt(2*(1-((p0 + x)/2))/((p0 + x)/2))}
t3 <- function(x, p0){sqrt(((1-p0)/p0) + ((1-x)/x))}
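# t1: variance factor of the log relative-risk estimate, (1-p0)/p0 + (1-x)/x
# t2: standard-deviation term under the null, based on the pooled rate (p0+x)/2
# t3: standard-deviation term under the alternative, sqrt(t1)
# (these are the ingredients of the usual sample-size formula for a two-arm
#  comparison of event rates on the log relative-risk scale)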
# Expected probability to go to phase III: Epgo
Epgo_binary <- function(RRgo, n2, p0, w, p11, p12, in1, in2, fixed){
if(fixed){
return(
pnorm((-log(p11/p0) + log(RRgo))/sqrt((2/n2)*t1(p11, p0)))
)
}else{
return(
integrate(function(x){
sapply(x, function(x){
pnorm((-log(x/p0) + log(RRgo))/sqrt((2/n2)*t1(x, p0))) *
prior_binary(x, w, p11, p12, in1, in2)
})
}, 0, 1)$value
)
}
}
# Expected sample size for phase III when going to phase III: En3
En3_binary <- function(RRgo, n2, alpha, beta, p0, w, p11, p12, in1, in2, fixed){
if(fixed){
return(
integrate(function(y){
((2*(qnorm(1-alpha)*t2(p11, p0)+qnorm(1-beta)*t3(p11, p0))^2)/y^2) *
dnorm(y,
mean = -log(p11/p0),
sd = sqrt((2/n2)*t1(p11, p0)))
}, - log(RRgo), Inf)$value
)
}else{
return(
integrate(function(x){
sapply(x, function(x){
integrate(function(y){
((2*(qnorm(1-alpha)*t2(x, p0)+qnorm(1-beta)*t3(x, p0))^2)/y^2) *
dnorm(y,
mean = -log(x/p0),
sd = sqrt((2/n2)*t1(x, p0)))*
prior_binary(x, w, p11, p12, in1, in2)
}, - log(RRgo), Inf)$value
})
}, 0, 1)$value
)
}
}
# Expected probability of a successful program: EPsProg
EPsProg_binary <- function(RRgo, n2, alpha, beta, step1, step2, p0, w, p11, p12, in1, in2, gamma, fixed){
if(fixed){
return(
integrate(function(y){
( pnorm(qnorm(1 - alpha) -
log(step2)/sqrt((t1(p11, p0)*y^2)/(qnorm(1-alpha)*t2(p11, p0) +
qnorm(1-beta)*t3(p11, p0))^2),
mean = -log((p11+gamma)/p0)/sqrt((t1(p11, p0)*y^2)/(qnorm(1-alpha)*t2(p11, p0) +
qnorm(1-beta)*t3(p11, p0))^2),
sd = 1) -
pnorm(qnorm(1 - alpha) -
log(step1)/sqrt((t1(p11, p0)*y^2)/(qnorm(1-alpha)*t2(p11, p0) +
qnorm(1-beta)*t3(p11, p0))^2),
mean = -log((p11+gamma)/p0)/sqrt((t1(p11, p0)*y^2)/(qnorm(1-alpha)*t2(p11, p0) +
qnorm(1-beta)*t3(p11, p0))^2),
sd = 1) ) *
dnorm(y,
mean = -log(p11/p0),
sd = sqrt((2/n2)*t1(p11, p0)))
}, -log(RRgo), Inf)$value
)
}else{
return(
integrate(function(x){
sapply(x, function(x){
integrate(function(y){
( pnorm(qnorm(1 - alpha) -
log(step2)/sqrt((t1(x, p0)*y^2)/(qnorm(1-alpha)*t2(x, p0) +
qnorm(1-beta)*t3(x, p0))^2),
mean = -log((x+gamma)/p0)/sqrt((t1(x, p0)*y^2)/(qnorm(1-alpha)*t2(x, p0) +
qnorm(1-beta)*t3(x, p0))^2),
sd = 1) -
pnorm(qnorm(1 - alpha) -
log(step1)/sqrt((t1(x, p0)*y^2)/(qnorm(1-alpha)*t2(x, p0) +
qnorm(1-beta)*t3(x, p0))^2),
mean = -log((x+gamma)/p0)/sqrt((t1(x, p0)*y^2)/(qnorm(1-alpha)*t2(x, p0) +
qnorm(1-beta)*t3(x, p0))^2),
sd = 1) ) *
dnorm(y,
mean = -log(x/p0),
sd = sqrt((2/n2)*t1(x, p0))) *
prior_binary(x, w, p11, p12, in1, in2)
}, -log(RRgo), Inf)$value
})
}, 0, 1)$value
)
}
}
# Utility function
utility_binary <- function(n2, RRgo, w, p0, p11, p12, in1, in2,
alpha, beta,
c2, c3, c02, c03,
K, N, S,
steps1, stepm1, stepl1,
b1, b2, b3,
gamma, fixed){
steps2 <- stepm1
stepm2 <- stepl1
stepl2 <- 0
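# the three effect-size categories ("small", "medium", "large") assessed below
# are delimited by the threshold pairs (steps1, steps2), (stepm1, stepm2),
# (stepl1, stepl2); setting stepl2 = 0 leaves the "large" category unbounded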
n3 <- En3_binary(RRgo = RRgo, n2 = n2, alpha = alpha, beta = beta,
p0 = p0, w = w, p11 = p11, p12 = p12, in1 = in1, in2 = in2, fixed = fixed)
n3 <- ceiling(n3)
if(round(n3/2) != n3 / 2) {n3 = n3 + 1}
if(n2+n3>N){
return(c(-9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999))
}else{
pg <- Epgo_binary(RRgo = RRgo, n2 = n2, p0 = p0, w = w, p11 = p11, p12 = p12, in1 = in1, in2 = in2, fixed = fixed)
K2 <- c02 + c2 * n2 # cost phase II
K3 <- c03 * pg + c3 * n3 # cost phase III
if(K2+K3>K){
return(c(-9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999))
}else{
# probability of a successful program; small, medium, large effect size
prob1 <- EPsProg_binary(RRgo = RRgo, n2 = n2, alpha = alpha, beta = beta,
step1 = steps1, step2 = steps2,
p0 = p0, w = w, p11 = p11, p12 = p12, in1 = in1, in2 = in2,
gamma = gamma, fixed = fixed)
prob2 <- EPsProg_binary(RRgo = RRgo, n2 = n2, alpha = alpha, beta = beta,
step1 = stepm1, step2 = stepm2,
p0 = p0, w = w, p11 = p11, p12 = p12, in1 = in1, in2 = in2,
gamma = gamma, fixed = fixed)
prob3 <- EPsProg_binary(RRgo = RRgo, n2 = n2, alpha = alpha, beta = beta,
step1 = stepl1, step2 = stepl2,
p0 = p0, w = w, p11 = p11, p12 = p12, in1 = in1, in2 = in2,
gamma = gamma, fixed = fixed)
SP <- prob1 + prob2 + prob3
if(SP<S){
return(c(-9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999))
}else{
G <- b1 * prob1 + b2 * prob2 + b3 * prob3 #gain
EU <- - K2 - K3 + G
return(c(EU, n3, SP, pg, K2, K3, prob1, prob2, prob3))
}
}
}
}
#################
# skip phase II #
#################
# number of events for phase III based on median_prior
n3_skipII_binary <-function(alpha, beta, p0, median_prior){
median_RR = -log(median_prior/p0)
return(((2*(qnorm(1-alpha)*sqrt(2*(1-((p0 + median_prior)/2))/((p0 + median_prior)/2))+qnorm(1-beta)*sqrt((1-p0)/p0+(1-median_prior)/median_prior))^2)/median_RR^2))
}
# expected probability of a successful program based on median_prior
EPsProg_skipII_binary <-function(alpha, beta, step1, step2, p0, median_prior, w, p11, p12, in1, in2, gamma, fixed){
c=(qnorm(1-alpha)+qnorm(1-beta))^2
median_RR = -log(median_prior/p0)
if(fixed){
return(
pnorm(qnorm(1-alpha) - log(step2)/(sqrt(median_RR^2/c)),
mean=-log((p11+gamma)/p0)/(sqrt(median_RR^2/c)),
sd=1)-
pnorm(qnorm(1-alpha) - log(step1)/(sqrt(median_RR^2/c)),
mean=-log((p11+gamma)/p0)/(sqrt(median_RR^2/c)),
sd=1)
)
}else{
return(
integrate(function(x){
sapply(x,function(x){
( pnorm(qnorm(1-alpha) - log(step2)/(sqrt(median_RR^2/c)),
mean=-log((x+gamma)/p0)/(sqrt(median_RR^2/c)),
sd=1)-
pnorm(qnorm(1-alpha) - log(step1)/(sqrt(median_RR^2/c)),
mean=-log((x+gamma)/p0)/(sqrt(median_RR^2/c)),
sd=1) )*
prior_binary(x, w, p11, p12, in1, in2)
})
}, 0, 1)$value
)
}
}
#utility function
utility_skipII_binary <-function(alpha, beta, c03, c3, b1, b2, b3, p0, median_prior,
K, N, S,
steps1, stepm1, stepl1,
w, p11, p12, in1, in2, gamma, fixed){
steps2 <- stepm1
stepm2 <- stepl1
stepl2 <- 0
n3 <- n3_skipII_binary(alpha = alpha, beta = beta, p0 = p0, median_prior = median_prior)
n3 <- ceiling(n3)
if(round(n3/2) != n3 / 2) {n3 = n3 + 1}
if(n3>N){
return(c(-9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999))
}else{
K2 <- 0 #cost phase II
K3 <- c03 + c3 * n3 #cost phase III
if(K2+K3>K){
return(c(-9999, -9999, -9999, -9999, -9999, -9999, -9999))
}else{
# probability of a successful program; small, medium, large effect size
prob1 <- EPsProg_skipII_binary(alpha = alpha, beta = beta, step1 = steps1, step2 = steps2,
p0 = p0, median_prior = median_prior, w = w, p11 = p11, p12 = p12, in1 = in1, in2 = in2,
gamma = gamma, fixed = fixed)
prob2 <- EPsProg_skipII_binary(alpha = alpha, beta = beta, step1 = stepm1, step2 = stepm2,
p0 = p0, median_prior = median_prior, w = w, p11 = p11, p12 = p12, in1 = in1, in2 = in2,
gamma = gamma, fixed = fixed)
prob3 <- EPsProg_skipII_binary(alpha = alpha, beta = beta, step1 = stepl1, step2 = stepl2,
p0 = p0, median_prior = median_prior, w = w, p11 = p11, p12 = p12, in1 = in1, in2 = in2,
gamma = gamma, fixed = fixed)
SP <- prob1 + prob2 + prob3
if(SP<S){
return(c(-9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999, -9999))
}else{
G <- b1 * prob1 + b2 * prob2 + b3 * prob3 #gain
EU <- - K2 - K3 + G
return(c(EU, n3, SP, K3, prob1, prob2, prob3))
}
}
}
}
|
/R/functions_binary.R
|
permissive
|
Sterniii3/drugdevelopR
|
R
| false | false | 10,262 |
r
|
|
library(EvaluationMeasures)
### Name: EvaluationMeasures.MCC
### Title: EvaluationMeasures.MCC
### Aliases: EvaluationMeasures.MCC
### ** Examples
EvaluationMeasures.MCC(c(1,0,1,0,1,0,1,0),c(1,1,1,1,1,1,0,0))
|
/data/genthat_extracted_code/EvaluationMeasures/examples/EvaluationMeasures.MCC.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 216 |
r
|
|
packages <- c(
"readxl",
"readr",
"dplyr",
"tidyr",
"stringr"
)
install.packages("packrat")
packrat::init()
for (p in packages) {
if (!(p %in% rownames(installed.packages()))) {
install.packages(p)
}
}
|
/libraries/imports.R
|
no_license
|
mukulcdri/TN-Socioeconomic-factors-and-education-standards
|
R
| false | false | 227 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bartBMA_with_ITEs_exact_par.R
\name{bartBMA_with_ITEs_exact_par}
\alias{bartBMA_with_ITEs_exact_par}
\title{Prediction intervals for bart-bma output obtained using linear algebra to obtain means and variances, and using bisection to find the quantiles of the mixture of t distributions.}
\usage{
bartBMA_with_ITEs_exact_par(
l_quant,
u_quant,
newdata = NULL,
update_resids = 1,
num_cores = 1,
root_alg_precision = 1e-05,
x_covariates,
z_train,
y_train,
a = 3,
nu = 3,
sigquant = 0.9,
c = 1000,
pen = 12,
num_cp = 20,
x.test = matrix(0, 0, 0),
num_rounds = 5,
alpha = 0.95,
beta = 2,
split_rule_node = 0,
gridpoint = 0,
maxOWsize = 100,
num_splits = 5,
gridsize = 10,
zero_split = 1,
only_max_num_trees = 1,
min_num_obs_for_split = 2,
min_num_obs_after_split = 2,
exact_residuals = 1,
spike_tree = 0,
s_t_hyperprior = 1,
p_s_t = 0.5,
a_s_t = 1,
b_s_t = 3,
lambda_poisson = 10,
less_greedy = 0
)
}
\arguments{
\item{l_quant}{Lower quantile of credible intervals for the ITEs, CATT, CATNT.}
\item{u_quant}{Upper quantile of credible intervals for the ITEs, CATT, CATNT.}
\item{newdata}{Test data for which predictions are to be produced. Default = NULL. If NULL, then produces prediction intervals for training data if no test data was used in producing the bartBMA object, or produces prediction intervals for the original test data if test data was used in producing the bartBMA object.}
\item{update_resids}{Option for whether to update the partial residuals in the Gibbs sampler. If equal to 1, updates partial residuals; if equal to zero, does not update partial residuals. The default setting is to update the partial residuals.}
\item{num_cores}{Number of cores used in parallel.}
\item{root_alg_precision}{The algorithm should obtain approximate bounds that are within the distance root_alg_precision of the true quantile for the chosen average of models.}
\item{x_covariates}{Covariate matrix for training bartBMA.}
\item{z_train}{Treatment vector for training bartBMA.}
\item{y_train}{Outcome vector for training bartBMA.}
\item{a}{This is a parameter that influences the variance of terminal node parameter values. Default value a=3.}
\item{nu}{This is a hyperparameter in the distribution of the variance of the error term. The inverse of the variance is distributed as Gamma (nu/2, nu*lambda/2). Default value nu=3.}
\item{sigquant}{Calibration quantile for the inverse chi-squared prior on the variance of the error term.}
\item{c}{This determines the size of Occam's Window}
\item{pen}{This is a parameter used by the Pruned Exact Linear Time Algorithm when finding changepoints. Default value pen=12.}
\item{num_cp}{This is a number between 0 and 100 that determines the proportion of changepoints proposed by the changepoint detection algorithm to keep when growing trees. Default num_cp=20.}
\item{x.test}{Test data covariate matrix. Default x.test=matrix(0.0,0,0).}
\item{num_rounds}{Number of trees. (Maximum number of trees in a sum-of-tree model). Default num_rounds=5.}
\item{alpha}{Parameter in prior probability of tree node splitting. Default alpha=0.95}
\item{beta}{Parameter in prior probability of tree node splitting. Default beta=2.}
\item{split_rule_node}{Binary variable. If equals 1, then find a new set of potential splitting points via a changepoint algorithm after adding each split to a tree. If equals zero, use the same set of potential split points for all splits in a tree. Default split_rule_node=0.}
\item{gridpoint}{Binary variable. If equals 1, then a grid search changepoint detection algorithm will be used. If equals 0, then the Pruned Exact Linear Time (PELT) changepoint detection algorithm will be used (Killick et al. 2012). Default gridpoint=0.}
\item{maxOWsize}{Maximum number of models to keep in Occam's window. Default maxOWsize=100.}
\item{num_splits}{Maximum number of splits in a tree}
\item{gridsize}{This integer determines the size of the grid across which to search if gridpoint=1 when finding changepoints for constructing trees.}
\item{zero_split}{Binary variable. If equals 1, then zero split trees can be included in a sum-of-trees model. If equals zero, then only trees with at least one split can be included in a sum-of-trees model.}
\item{only_max_num_trees}{Binary variable. If equals 1, then only sum-of-trees models containing the maximum number of trees, num_rounds, are selected. If equals 0, then sum-of-trees models containing less than num_rounds trees can be selected. The default is only_max_num_trees=1.}
\item{min_num_obs_for_split}{This integer determines the minimum number of observations in a (parent) tree node for the algorithm to consider potential splits of the node.}
\item{min_num_obs_after_split}{This integer determines the minimum number of observations in a child node resulting from a split in order for a split to occur. If the left or right child node has fewer than this number of observations, then the split cannot occur.}
\item{exact_residuals}{Binary variable. If equal to 1, then trees are added to sum-of-tree models within each round of the algorithm by detecting changepoints in the exact residuals. If equals zero, then changepoints are detected in residuals that are constructed from approximate predictions.}
\item{spike_tree}{If equal to 1, then the Spike-and-Tree prior will be used, otherwise the standard BART prior will be used. The number of splitting variables has a beta-binomial prior. The number of terminal nodes has a truncated Poisson prior, and then a uniform prior is placed on the set of valid constructions of trees given the splitting variables and number of terminal nodes.}
\item{s_t_hyperprior}{If equals 1 and spike_tree equals 1, then a beta distribution hyperprior is placed on the variable inclusion probabilities for the spike and tree prior. The hyperprior parameters are a_s_t and b_s_t.}
\item{p_s_t}{If spike_tree=1 and s_t_hyperprior=0, then p_s_t is the prior variable inclusion probability.}
\item{a_s_t}{If spike_tree=1 and s_t_hyperprior=1, then a_s_t is a parameter of a beta distribution hyperprior.}
\item{b_s_t}{If spike_tree=1 and s_t_hyperprior=1, then b_s_t is a parameter of a beta distribution hyperprior.}
\item{lambda_poisson}{This is a parameter for the Spike-and-Tree prior. It is the parameter for the (truncated and conditional on the number of splitting variables) Poisson prior on the number of terminal nodes.}
\item{less_greedy}{If equal to one, then a less greedy model search algorithm is used.}
}
\value{
The output is a list of length 4:
\item{ITE_intervals}{A 3 by n matrix, where n is the number of observations. The first row gives the l_quant*100 quantiles of the individual treatment effects. The second row gives the medians of the ITEs. The third row gives the u_quant*100 quantiles of the ITEs.}
\item{ITE_estimates}{An n by 1 matrix containing the Individual Treatment Effect estimates.}
\item{CATE_estimate}{The Conditional Average Treatment Effect Estimates}
\item{CATE_Interval}{A 3 by 1 matrix. The first element is the l_quant*100 quantile of the CATE distribution, the second element is the median of the CATE distribution, and the third element is the u_quant*100 quantile of the CATE distribution.}
}
\description{
This function produces prediction intervals for bart-bma output.
}
\examples{
\dontrun{
#Example of BART-BMA for ITE estimation
#Applied to data simulations from Hahn et al. (2020, Bayesian Analysis)
#"Bayesian Regression Tree Models for Causal Inference: Regularization, Confounding,
# and Heterogeneous Effects"
n <- 250
x1 <- rnorm(n)
x2 <- rnorm(n)
x3 <- rnorm(n)
x4 <- rbinom(n,1,0.5)
x5 <- as.factor(sample( LETTERS[1:3], n, replace=TRUE))
p= 0
xnoise = matrix(rnorm(n*p), nrow=n)
x5A <- ifelse(x5== 'A',1,0)
x5B <- ifelse(x5== 'B',1,0)
x5C <- ifelse(x5== 'C',1,0)
x_covs_train <- cbind(x1,x2,x3,x4,x5A,x5B,x5C,xnoise)
#Treatment effect
#tautrain <- 3
tautrain <- 1+2*x_covs_train[,2]*x_covs_train[,4]
#Prognostic function
mutrain <- 1 + 2*x_covs_train[,5] -1*x_covs_train[,6]-4*x_covs_train[,7] +
x_covs_train[,1]*x_covs_train[,3]
sd_mtrain <- sd(mutrain)
utrain <- runif(n)
#pitrain <- 0.8*pnorm((3*mutrain/sd_mtrain)-0.5*x_covs_train[,1])+0.05+utrain/10
pitrain <- 0.5
ztrain <- rbinom(n,1,pitrain)
ytrain <- mutrain + tautrain*ztrain
#pihattrain <- pbart(x_covs_train,ztrain )$prob.train.mean
#set lower and upper quantiles for intervals
lbound <- 0.025
ubound <- 0.975
example_output <- bartBMA_with_ITEs_exact_par(l_quant = lbound,
u_quant= ubound,
x_covariates = x_covs_train,
z_train = ztrain,
y_train = ytrain)
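#inspect the CATE estimate and its credible interval
#(component names as listed in the Value section above)
example_output$CATE_estimate
example_output$CATE_Interval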
}
}
|
/bartBMA/man/bartBMA_with_ITEs_exact_par.Rd
|
no_license
|
akhikolla/InformationHouse
|
R
| false | true | 8,945 |
rd
|
|
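# get.geo(): collect geotagged media for an Instagram user via the v1 API.
# Assumed dependencies (not loaded in this file): curl (curl), rjson (fromJSON
# with the unexpected.escape argument), plyr (rbind.fill), plus the get.user()
# helper defined elsewhere in this repository.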
get.geo <- function(user, token){
call <- get.user(user, token)
if(!is.na(call[1])){
if(call$data$counts$media!=0){
n <- call$data$counts$media
con = curl(paste0("https://api.instagram.com/v1/users/",user,"/media/recent?access_token=",token,"&count=35"))
call <- suppressWarnings(fromJSON(readLines(con), unexpected.escape = "keep"))
close(con)
cat(user)
if(length(call$pagination)==0){
df <- rbind.fill(lapply(call$data, function(x){
df.tmp <- data.frame(
author = ifelse(!is.null(x$user$id), x$user$id, NA),
media = ifelse(!is.null(x$id), x$id, NA),
likes = ifelse(!is.null(x$likes$count), x$likes$count, NA),
created.time = ifelse(!is.null(toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01"))), toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01")), NA),
longitude = ifelse(!is.null(x$location$longitude), x$location$longitude, NA),
latitude = ifelse(!is.null(x$location$latitude), x$location$latitude, NA)
)
df.tmp <- subset(df.tmp, !is.na(longitude)|!is.na(latitude))
row.names(df.tmp) <- NULL
return(df.tmp)
}
)
)
return(df)
}
else{
df <- rbind.fill(lapply(call$data, function(x){
df.tmp <- data.frame(
author = ifelse(!is.null(x$user$id), x$user$id, NA),
media = ifelse(!is.null(x$id), x$id, NA),
likes = ifelse(!is.null(x$likes$count), x$likes$count, NA),
created.time = ifelse(!is.null(toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01"))), toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01")), NA),
longitude = ifelse(!is.null(x$location$longitude), x$location$longitude, NA),
latitude = ifelse(!is.null(x$location$latitude), x$location$latitude, NA)
)
df.tmp <- subset(df.tmp, !is.na(longitude)|!is.na(latitude))
row.names(df.tmp) <- NULL
return(df.tmp)
}
)
)
n.retrieved <- length(call$data)
while((n.retrieved<n) & (length(call$pagination)!=0)){
next.url <- paste0(call$pagination$next_url,"&count=35")
con = curl(next.url)
call <- suppressWarnings(fromJSON(readLines(con), unexpected.escape = "keep"))
close(con)
df2 <- rbind.fill(lapply(call$data, function(x){
df.tmp <- data.frame(
author = ifelse(!is.null(x$user$id), x$user$id, NA),
media = ifelse(!is.null(x$id), x$id, NA),
likes = ifelse(!is.null(x$likes$count), x$likes$count, NA),
created.time = ifelse(!is.null(toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01"))), toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01")), NA),
longitude = ifelse(!is.null(x$location$longitude), x$location$longitude, NA),
latitude = ifelse(!is.null(x$location$latitude), x$location$latitude, NA)
)
df.tmp <- subset(df.tmp, !is.na(longitude)|!is.na(latitude))
row.names(df.tmp) <- NULL
return(df.tmp)
}
)
)
df <- rbind.fill(df, df2)
n.retrieved.tmp <- length(call$data)
n.retrieved <- n.retrieved+n.retrieved.tmp
cat(paste("...", n.retrieved, sep = ""))
}
cat("... Done")
return(df)
}
}
else{
return(NULL)
}
}
else{
return(NULL)
}
}
|
/R/Basic Functions/get.geo.R
|
no_license
|
AlexYachts/instagram-mining
|
R
| false | false | 3,733 |
r
|
get.geo <- function(user, token){
call <- get.user(user, token)
if(!is.na(call[1])){
if(call$data$counts$media!=0){
n <- call$data$counts$media
con = curl(paste0("https://api.instagram.com/v1/users/",user,"/media/recent?access_token=",token,"&count=35"))
call <- suppressWarnings(fromJSON(readLines(con), unexpected.escape = "keep"))
close(con)
cat(user)
if(length(call$pagination)==0){
df <- rbind.fill(lapply(call$data, function(x){
df.tmp <- data.frame(
author = ifelse(!is.null(x$user$id), x$user$id, NA),
media = ifelse(!is.null(x$id), x$id, NA),
likes = ifelse(!is.null(x$likes$count), x$likes$count, NA),
created.time = ifelse(!is.null(toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01"))), toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01")), NA),
longitude = ifelse(!is.null(x$location$longitude), x$location$longitude, NA),
latitude = ifelse(!is.null(x$location$latitude), x$location$latitude, NA)
)
df.tmp <- subset(df.tmp, !is.na(longitude)|!is.na(latitude))
row.names(df.tmp) <- NULL
return(df.tmp)
}
)
)
return(df)
}
else{
df <- rbind.fill(lapply(call$data, function(x){
df.tmp <- data.frame(
author = ifelse(!is.null(x$user$id), x$user$id, NA),
media = ifelse(!is.null(x$id), x$id, NA),
likes = ifelse(!is.null(x$likes$count), x$likes$count, NA),
created.time = ifelse(!is.null(toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01"))), toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01")), NA),
longitude = ifelse(!is.null(x$location$longitude), x$location$longitude, NA),
latitude = ifelse(!is.null(x$location$latitude), x$location$latitude, NA)
)
df.tmp <- subset(df.tmp, !is.na(longitude)|!is.na(latitude))
row.names(df.tmp) <- NULL
return(df.tmp)
}
)
)
n.retrieved <- length(call$data)
while((n.retrieved<n) & (length(call$pagination)!=0)){
next.url <- paste0(call$pagination$next_url,"&count=35")
con = curl(next.url)
call <- suppressWarnings(fromJSON(readLines(con), unexpected.escape = "keep"))
close(con)
df2 <- rbind.fill(lapply(call$data, function(x){
df.tmp <- data.frame(
author = ifelse(!is.null(x$user$id), x$user$id, NA),
media = ifelse(!is.null(x$id), x$id, NA),
likes = ifelse(!is.null(x$likes$count), x$likes$count, NA),
created.time = ifelse(!is.null(toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01"))), toString(as.POSIXct(as.numeric(x$created_time), origin="1970-01-01")), NA),
longitude = ifelse(!is.null(x$location$longitude), x$location$longitude, NA),
latitude = ifelse(!is.null(x$location$latitude), x$location$latitude, NA)
)
df.tmp <- subset(df.tmp, !is.na(longitude)|!is.na(latitude))
row.names(df.tmp) <- NULL
return(df.tmp)
}
)
)
df <- rbind.fill(df, df2)
n.retrieved.tmp <- length(call$data)
n.retrieved <- n.retrieved+n.retrieved.tmp
cat(paste("...", n.retrieved, sep = ""))
}
cat("... Done")
return(df)
}
}
else{
return(NULL)
}
}
else{
return(NULL)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MCMC_comparisons.R
\name{combine_MCMC_comparison_results}
\alias{combine_MCMC_comparison_results}
\title{Combine multiple objects returned by compareMCMCs}
\usage{
combine_MCMC_comparison_results(..., name = "MCMCresults")
}
\arguments{
\item{...}{objects returned by \code{\link{compareMCMCs}}}
\item{name}{(default "MCMCresults") name to be given to the resulting set of comparisons. This will be used in the html generated by \code{\link{make_MCMC_comparison_pages}}. It is simply a list name and thus can easily be modified later.}
}
\value{
An object in the same format as returned by \code{\link{compareMCMCs}} with \code{summary} = TRUE.
}
\description{
Useful for running different cases separately and combining them.
}
\seealso{
\code{\link{compareMCMCs}}, \code{\link{rename_MCMC_comparison_method}}, \code{\link{make_MCMC_comparison_pages}}, \code{\link{reshape_comparison_results}}.
}
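\examples{
\dontrun{
## Hypothetical illustration: res1 and res2 are assumed to be objects returned by compareMCMCs
allres <- combine_MCMC_comparison_results(res1, res2, name = "allResults")
}
}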
|
/packages/nimble/man/combine_MCMC_comparison_results.Rd
|
no_license
|
nemochina2008/nimble
|
R
| false | true | 980 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MCMC_comparisons.R
\name{combine_MCMC_comparison_results}
\alias{combine_MCMC_comparison_results}
\title{Combine multiple objects returned by compareMCMCs}
\usage{
combine_MCMC_comparison_results(..., name = "MCMCresults")
}
\arguments{
\item{...}{objects returned by \code{\link{compareMCMCs}}}
\item{name}{(default "MCMCresults") name to be given to the resulting set of comparisons. This will be used in the html generated by \code{\link{make_MCMC_comparison_pages}}. It is simply a list name and thus can easily be modified later.}
}
\value{
An object in the same format as returned by \code{\link{compareMCMCs}} with \code{summary} = TRUE.
}
\description{
Useful for running different cases separately and combining them.
}
\seealso{
\code{\link{compareMCMCs}}, \code{\link{rename_MCMC_comparison_method}}, \code{\link{make_MCMC_comparison_pages}}, \code{\link{reshape_comparison_results}}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_market.R
\name{gp}
\alias{gp}
\title{gp}
\usage{
gp(f, p)
}
\arguments{
\item{f}{the fit}
\item{p}{the parameter name}
}
\description{
get parameter point estimates from a Stan fit (optimizing, vb or sampling)
}
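\examples{
\dontrun{
## Hypothetical illustration: 'fit' is assumed to be a Stan fit and "beta" an existing parameter name
gp(fit, "beta")
}
}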
|
/man/gp.Rd
|
no_license
|
khakieconomics/rrcl
|
R
| false | true | 300 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_market.R
\name{gp}
\alias{gp}
\title{gp}
\usage{
gp(f, p)
}
\arguments{
\item{f}{the fit}
\item{p}{the parameter name}
}
\description{
get parameter point estimates from a Stan fit (optimizing, vb or sampling)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/show.R
\name{show_palette}
\alias{show_palette}
\title{Display a color palette}
\usage{
show_palette(...)
}
\arguments{
\item{...}{the name of the palette or palettes; shows all palettes if no
argument is given.}
}
\description{
Display a palette in the graphics window by calling its name.
}
\examples{
library(vapeplot)
show_palette("cool", "sunset")
}
|
/man/show_palette.Rd
|
no_license
|
seasmith/vapeplot
|
R
| false | true | 433 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/show.R
\name{show_palette}
\alias{show_palette}
\title{Display a color palette}
\usage{
show_palette(...)
}
\arguments{
\item{...}{the name of the palette or palettes; shows all palettes if no
argument is given.}
}
\description{
Display a palette in the graphics window by calling its name.
}
\examples{
library(vapeplot)
show_palette("cool", "sunset")
}
|
##Two functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solveMatrix) m <<- solveMatrix
getInverse <- function() m
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
m <- x$getInverse()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data)
x$setInverse(m)
m
}
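## Usage sketch (added for illustration; any invertible matrix works):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(cm)   # first call computes the inverse and caches it
## cacheSolve(cm)   # second call prints "getting cached data" and returns the cached inverse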
|
/cachematrix.R
|
no_license
|
ifteki/ProgrammingAssignment2
|
R
| false | false | 736 |
r
|
##Two functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solveMatrix) m <<- solveMatrix
getInverse <- function() m
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
m <- x$getInverse()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data)
x$setInverse(m)
m
}
|
# File src/library/graphics/R/plot.design.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
plot.design <-
function(x, y = NULL, fun = mean, data = NULL, ...,
ylim = NULL, xlab = "Factors", ylab = NULL, main = NULL,
ask = NULL, xaxt = par("xaxt"), axes = TRUE, xtick = FALSE)
{
.plot.des <-
function(x, y, fun, ylab, ylim = NULL, ...) {
## Arguments: x : data.frame with only factor columns
## y : one numeric vector
if(!is.numeric(y))
stop("'y' must be a numeric vector")
if(!is.data.frame(x)) # or allow factor (see 2 lines below)?? {FIXME}
stop("'x' must be a data frame")
if(!all(sapply(x, is.factor)) & !is.factor(x)) # incl "ordered"
stop("all columns/components of 'x' must be factors")
k <- ncol(x)
if(anyNA(y)) {
FUN <- fun; fun <- function(u) FUN(u [!is.na(u)])
}
tot <- fun(y)
stats <- lapply(x, function(xc) tapply(y, xc, fun))
if(any(is.na(unlist(stats))))
warning("some levels of the factors are empty", call. = FALSE)
if(is.null(ylim))
ylim <- range(c(sapply(stats,range,na.rm = TRUE),tot))
plot(c(0,k+1), ylim, type = "n", axes = axes, xaxt = "n",
xlab = xlab, ylab = ylab, main = main, adj = 0.5, ...)
segments(0.5, tot, k+0.5, tot, ...)
for(i in 1L:k) {
si <- stats[[i]]
segments(i, min(si, na.rm = TRUE),
i, max(si, na.rm = TRUE), ...)
for(j in 1L:(length(si))) {
sij <- si[j]
segments(i-0.05, sij, i+0.05, sij, ...)
text(i-0.1, sij, labels = names(sij), adj = 1, ...)
}
}
if(axes && xaxt != "n")
axis(1, at = 1L:k, names(stats), xaxt = xaxt, tick = xtick,
mgp = {p <- par("mgp"); c(p[1L], if(xtick) p[2L] else 0, 0)},
...)
} ## .plot.des()
## 'fun' dealing
fname <- deparse(substitute(fun))
fun <- match.fun(fun)
if (!(is.data.frame(x) | inherits(x,"formula")))
stop("'x' must be a dataframe or a formula")
## case 'switch' :
if(is.data.frame(x)) {
if(is.null(y)) { ## nothing to do
} else if(inherits(y,"formula")) {
x <- stats::model.frame(y , data = x)
}
else if(is.numeric(y)) {
x <- cbind(y,x[,sapply(x, is.factor)])
tmpname <- match.call()
names(x) <- as.character(c(tmpname[[3L]],names(x[,-1])))
}
else if(is.character(y)) {
ynames <- y
y <- data.frame(x[,y])
if(sum(sapply(y, is.numeric)) != ncol(y)) {
stop("a variable in 'y' is not numeric")
}
x <- x[,sapply(x, is.factor)]
xnames <- names(x)
x <- cbind(x,y)
names(x) <- c(xnames,ynames)
}
}
else if (is.data.frame(data)) {
x <- stats::model.frame(x , data = data)
}
else {
x <- stats::model.frame(x)
}
i.fac <- sapply(x, is.factor)
i.num <- sapply(x, is.numeric)
nResp <- sum(i.num)
if (nResp == 0)
stop("there must be at least one numeric variable!")
yname <- names(x)[i.num]
if(is.null(ylab))
ylab <- paste(fname, "of", yname)
ydata <- as.matrix(x[,i.num])
if (!any(i.fac)) {
x <- data.frame(Intercept = rep.int(" ", nrow(x)))
i.fac <- 1
}
xf <- x[, i.fac, drop = FALSE]
if (is.null(ask))
ask <- prod(par("mfcol")) < nResp && dev.interactive(orNone = TRUE)
if (ask) {
oask <- devAskNewPage(ask)
on.exit(devAskNewPage(oask))
}
for(j in 1L:nResp)
.plot.des(xf, ydata[,j], fun = fun, ylab = ylab[j], ylim = ylim, ...)
invisible()
}
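## Illustrative use (not part of the original source): 'warpbreaks' has a numeric response
## (breaks) and two factors (wool, tension), so the following plots the factor-level means:
## plot.design(warpbreaks)
## plot.design(warpbreaks, fun = median)   # any other summary function also works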
|
/Rproj/src/library/graphics/R/plot.design.R
|
no_license
|
yidongwork/kentucky_intern_2
|
R
| false | false | 4,107 |
r
|
# File src/library/graphics/R/plot.design.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
plot.design <-
function(x, y = NULL, fun = mean, data = NULL, ...,
ylim = NULL, xlab = "Factors", ylab = NULL, main = NULL,
ask = NULL, xaxt = par("xaxt"), axes = TRUE, xtick = FALSE)
{
.plot.des <-
function(x, y, fun, ylab, ylim = NULL, ...) {
## Arguments: x : data.frame with only factor columns
## y : one numeric vector
if(!is.numeric(y))
stop("'y' must be a numeric vector")
if(!is.data.frame(x)) # or allow factor (see 2 lines below)?? {FIXME}
stop("'x' must be a data frame")
if(!all(sapply(x, is.factor)) & !is.factor(x)) # incl "ordered"
stop("all columns/components of 'x' must be factors")
k <- ncol(x)
if(anyNA(y)) {
FUN <- fun; fun <- function(u) FUN(u [!is.na(u)])
}
tot <- fun(y)
stats <- lapply(x, function(xc) tapply(y, xc, fun))
if(any(is.na(unlist(stats))))
warning("some levels of the factors are empty", call. = FALSE)
if(is.null(ylim))
ylim <- range(c(sapply(stats,range,na.rm = TRUE),tot))
plot(c(0,k+1), ylim, type = "n", axes = axes, xaxt = "n",
xlab = xlab, ylab = ylab, main = main, adj = 0.5, ...)
segments(0.5, tot, k+0.5, tot, ...)
for(i in 1L:k) {
si <- stats[[i]]
segments(i, min(si, na.rm = TRUE),
i, max(si, na.rm = TRUE), ...)
for(j in 1L:(length(si))) {
sij <- si[j]
segments(i-0.05, sij, i+0.05, sij, ...)
text(i-0.1, sij, labels = names(sij), adj = 1, ...)
}
}
if(axes && xaxt != "n")
axis(1, at = 1L:k, names(stats), xaxt = xaxt, tick = xtick,
mgp = {p <- par("mgp"); c(p[1L], if(xtick) p[2L] else 0, 0)},
...)
} ## .plot.des()
## 'fun' dealing
fname <- deparse(substitute(fun))
fun <- match.fun(fun)
if (!(is.data.frame(x) | inherits(x,"formula")))
stop("'x' must be a dataframe or a formula")
## case 'switch' :
if(is.data.frame(x)) {
if(is.null(y)) { ## nothing to do
} else if(inherits(y,"formula")) {
x <- stats::model.frame(y , data = x)
}
else if(is.numeric(y)) {
x <- cbind(y,x[,sapply(x, is.factor)])
tmpname <- match.call()
names(x) <- as.character(c(tmpname[[3L]],names(x[,-1])))
}
else if(is.character(y)) {
ynames <- y
y <- data.frame(x[,y])
if(sum(sapply(y, is.numeric)) != ncol(y)) {
stop("a variable in 'y' is not numeric")
}
x <- x[,sapply(x, is.factor)]
xnames <- names(x)
x <- cbind(x,y)
names(x) <- c(xnames,ynames)
}
}
else if (is.data.frame(data)) {
x <- stats::model.frame(x , data = data)
}
else {
x <- stats::model.frame(x)
}
i.fac <- sapply(x, is.factor)
i.num <- sapply(x, is.numeric)
nResp <- sum(i.num)
if (nResp == 0)
stop("there must be at least one numeric variable!")
yname <- names(x)[i.num]
if(is.null(ylab))
ylab <- paste(fname, "of", yname)
ydata <- as.matrix(x[,i.num])
if (!any(i.fac)) {
x <- data.frame(Intercept = rep.int(" ", nrow(x)))
i.fac <- 1
}
xf <- x[, i.fac, drop = FALSE]
if (is.null(ask))
ask <- prod(par("mfcol")) < nResp && dev.interactive(orNone = TRUE)
if (ask) {
oask <- devAskNewPage(ask)
on.exit(devAskNewPage(oask))
}
for(j in 1L:nResp)
.plot.des(xf, ydata[,j], fun = fun, ylab = ylab[j], ylim = ylim, ...)
invisible()
}
|
vec1<-1:16
vec2<-4*vec1+3
data1<-data.frame(vec1,vec2)
cat(names(data1),"\n")
print(data1)
cat(data1$vec1,"\n")
cat(data1$vec2,"\n")
cat(data1[,1],"\n")
cat(data1[,2],"\n")
print(data1[2,1])
|
/dataframe.R
|
no_license
|
cperez58/chem160module14
|
R
| false | false | 199 |
r
|
vec1<-1:16
vec2<-4*vec1+3
data1<-data.frame(vec1,vec2)
cat(names(data1),"\n")
print(data1)
cat(data1$vec1,"\n")
cat(data1$vec2,"\n")
cat(data1[,1],"\n")
cat(data1[,2],"\n")
print(data1[2,1])
|
## Change current working directory
setwd("~/GitHub/ExData_Assgn_2/exdata_data_NEI_data")
## Load Datasets
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## find the Baltimore city from NEI
NEI_B <-NEI[which(NEI$fips == "24510"),]
## load ggplot2
library(ggplot2)
##Subset SCC & NEI
Vehicle_SCC <- SCC[grep("Vehicle", SCC$EI.Sector, ignore.case = T),]
Vehicle <- NEI_B[NEI_B$SCC %in% Vehicle_SCC$SCC,]
#ggplot2
png("plot5.png", width = 480, height = 480)
g <- ggplot(Vehicle, aes(year, Emissions))
g <-g+ geom_point(stat = "summary", fun.y = "sum")
g <-g+ geom_line (stat = "summary", fun.y = "sum")
g <-g+ labs(title = "Motor Vehicles-related Emissions of PM2.5")+ ylab(expression('PM2.5'))
g <-g+ scale_x_continuous(breaks = seq(1, 2008, 3))
g
dev.off()
|
/plot5.R
|
no_license
|
Yunleee/ExData_Assignment_2
|
R
| false | false | 800 |
r
|
## Change current working directory
setwd("~/GitHub/ExData_Assgn_2/exdata_data_NEI_data")
## Load Datasets
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## find the Baltimore city from NEI
NEI_B <-NEI[which(NEI$fips == "24510"),]
## load ggplot2
library(ggplot2)
##Subset SCC & NEI
Vehicle_SCC <- SCC[grep("Vehicle", SCC$EI.Sector, ignore.case = T),]
Vehicle <- NEI_B[NEI_B$SCC %in% Vehicle_SCC$SCC,]
#ggplot2
png("plot5.png", width = 480, height = 480)
g <- ggplot(Vehicle, aes(year, Emissions))
g <-g+ geom_point(stat = "summary", fun.y = "sum")
g <-g+ geom_line (stat = "summary", fun.y = "sum")
g <-g+ labs(title = "Motor Vehicles-related Emissions of PM2.5")+ ylab(expression('PM2.5'))
g <-g+ scale_x_continuous(breaks = seq(1, 2008, 3))
g
dev.off()
|
# read data from downloaded file Sunil Buge
hpc_data <-read.table("./household_power_consumption.txt",header=TRUE,sep=";",
col.names=c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),
colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),na.strings = c("?","NA"))
# define Criteria to subset data between 2 dates
date_criteria<-(as.Date(hpc_data$Date, "%d/%m/%Y")=='2007-01-01' | as.Date(hpc_data$Date, "%d/%m/%Y")=='2007-01-02')
#Subset of limited rows to plot
hpc_subset<-hpc_data[date_criteria,]
# combine date & time by adding datetime column to data frame
hpc_subset$datetime <- strptime(paste(hpc_subset$Date, hpc_subset$Time), "%d/%m/%Y %H:%M:%S")
# define png file to store histogram chart
png("plot1.png", width=480, height=480)
# draw histogram for global_active_power values of limited data set
hist(hpc_subset$Global_active_power,col="red",xlab="Global Active Power (kilowatts)",
ylab="Frequency",main="Global Active Power" ,breaks = 13, ylim = c(0, 1200),
xlim = c(0, 6), xaxp = c(0, 6, 3))
dev.off()
|
/plot1.R
|
no_license
|
sunilbuge/ExData_Plotting1
|
R
| false | false | 1,205 |
r
|
# read data from downloaded file Sunil Buge
hpc_data <-read.table("./household_power_consumption.txt",header=TRUE,sep=";",
col.names=c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),
colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),na.strings = c("?","NA"))
# define Criteria to subset data between 2 dates
date_criteria<-(as.Date(hpc_data$Date, "%d/%m/%Y")=='2007-01-01' | as.Date(hpc_data$Date, "%d/%m/%Y")=='2007-01-02')
#Subset of limited rows to plot
hpc_subset<-hpc_data[date_criteria,]
# combine date & time by adding datetime column to data frame
hpc_subset$datetime <- strptime(paste(hpc_subset$Date, hpc_subset$Time), "%d/%m/%Y %H:%M:%S")
# define png file to store histogram chart
png("plot1.png", width=480, height=480)
# draw histogram for global_active_power values of limited data set
hist(hpc_subset$Global_active_power,col="red",xlab="Global Active Power (kilowatts)",
ylab="Frequency",main="Global Active Power" ,breaks = 13, ylim = c(0, 1200),
xlim = c(0, 6), xaxp = c(0, 6, 3))
dev.off()
|
library(gapminder)
library(dplyr)
# Filter for China in 2002
gapminder %>%
filter(country == "China", year == 2002)
|
/Introduction to the tidyverse/2.0.3.r
|
no_license
|
TopicosSelectos/tutoriales-2019-2-al150422
|
R
| false | false | 123 |
r
|
library(gapminder)
library(dplyr)
# Filter for China in 2002
gapminder %>%
filter(country == "China", year == 2002)
|
/R Basic02.R
|
no_license
|
Hsusir/R-Basic02
|
R
| false | false | 1,031 |
r
| ||
\name{ccmat}
\alias{ccmat}
\title{Concurrence matrix for given v, b and k}
\description{This function generates a nearly balanced concurrence matrix from given number
of treatments (v), number of blocks (b) and block size (k) by trial and error.}
\usage{ccmat(v,b,k)}
\arguments{
\item{v}{Number of treatments}
\item{b}{Number of blocks}
\item{k}{Block size}
}
\value{
A v by v matrix is returned if a desired concurrence matrix is found, else it returns a v by v
matrix of zeros. If the parameters are infeasible for a nearly balanced concurrence matrix, the
function returns the parameter values.}
\author{B N Mandal <mandal.stat@gmail.com>}
\keyword{internal}
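\examples{
\dontrun{
## Illustrative call with arbitrarily chosen parameters (ccmat is an internal helper of the ibd package):
ibd:::ccmat(v = 7, b = 7, k = 3)
}
}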
|
/man/ccmat.Rd
|
no_license
|
cran/ibd
|
R
| false | false | 688 |
rd
|
\name{ccmat}
\alias{ccmat}
\title{Concurrence matrix for given v, b and k}
\description{This function generates a nearly balanced concurrence matrix from given number
of treatments (v), number of blocks (b) and block size (k) by trial and error.}
\usage{ccmat(v,b,k)}
\arguments{
\item{v}{Number of treatments}
\item{b}{Number of blocks}
\item{k}{Block size}
}
\value{
A v by v matrix is returned if a desired concurrence matrix is found, else it returns a v by v
matrix of zeros. If the parameters are infeasible for a nearly balanced concurrence matrix, the
function returns the parameter values.}
\author{B N Mandal <mandal.stat@gmail.com>}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_functions.R
\name{getStanFit}
\alias{getStanFit}
\title{Fit the model!}
\usage{
getStanFit(
beta = NULL,
deltaM_value = NULL,
binary_value = NULL,
ar_value = NULL,
random_value = NULL,
deltaD_value = NULL,
option_num,
format_data,
save_model_file = NULL,
init_values = "random",
iter_num,
chain_num,
warmup_num,
core_num,
adapt_delta = 0.999,
stepsize = 0.01,
max_treedepth = 20,
seed_no = 1,
save_warmup = FALSE,
refresh = 500,
init_r = 0.5,
hier_value = 1,
X_sim = NULL,
cluster_rating_m = NULL,
max_tNo_prd = NULL
)
}
\arguments{
\item{beta}{Variable names to be included in the memory model, e.g., c('sameC','sameA','dist').
Note that you have to include the associated columns in the data.
For example, to fit sameC, you have to include columns with names of 'sameC_1'...'sameC_C'.
To fit sameA, you have to include 'sameA_1',...,'sameA_C'.
In the memory model, we could include sameC (cluster resampling; sameC_value== 1), sameA (decision congruence; sameA_value== 1),
and dist (semantic congruence; dist_value == 1)}
\item{deltaM_value}{1 minus memory decay; 0 for full decay (Markov property); 1 for no decay;
8 for estimating it as a free parameter}
\item{binary_value}{0 for continuous inputs; 1 for discrete inputs}
\item{ar_value, }{0 for absolute accumulation; 1 for relative accumulation}
\item{random_value, }{0 for threshold-based; 1 for time limit;}
\item{deltaD_value, }{1 minus decision decay; 0 for full decay (Markov property); 1 for no decay;
8 for estimating it as a free parameter}
\item{option_num}{Number of total clusters in each choice (C)}
\item{format_data}{This should be a formatted data.frame.
sID: participant IDs.
qID: question ID.
cID: option ID being sampled (can take 1 to C).
tNo: thought number in that trial (from 1 to T).
terminate: 0 if continue sampling; 1 if yes; 0 if no (only needed for decision models)}
\item{save_model_file}{Specify a name if you want to save the RStan samples and diagnostics to a csv file.
This is sample_file for rstan::sampling}
\item{init_values}{Default is 'random'; can be specified using a function or a list}
\item{iter_num}{Iteration number}
\item{chain_num}{Chain number}
\item{warmup_num}{Warm-up or burn-in number}
\item{core_num}{Number of cores to be used}
\item{adapt_delta}{Default 0.999}
\item{stepsize}{Default 0.01}
\item{max_treedepth}{Default 20}
\item{seed_no}{Seed number}
\item{refresh}{Default 500}
\item{hier_value}{0 for fitting a non-hierarchical model; 1 for fitting a hierarchical model. Default is 1.
Note that the hierarchical version only works for decision or memory models separately.}
\item{X_sim}{Feature matrix, for different clusters. Needed for simulation.}
\item{cluster_rating_m}{Base rates of different ratings (-3 to 3) for each cluster. Needed for simulation.}
\item{max_tNo_prd}{Maximum length of a simulated trial}
}
\value{
Use $stan_fit to access the returned object from Stan;
Use $stan_data to see the input data of the Stan model
}
\description{
\code{getStanFit} uses RStan to fit a memory model or a decision model.
A memory model will be fit if deltaM_value is specified.
A decision model will be fit if deltaD_value is specified.
If both are specified, then the two models will be fit simultaneously and posterior predictions will be simulated.
Note that for memory models the option number (C) should be the same across all questions.
}
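\examples{
\dontrun{
## Hypothetical sketch: fit a memory model with cluster resampling and semantic congruence.
## 'dat' is assumed to be a data.frame prepared with the sID/qID/cID/tNo columns described above.
fit <- getStanFit(beta = c("sameC", "dist"), deltaM_value = 8, binary_value = 0,
                  option_num = 4, format_data = dat,
                  iter_num = 2000, chain_num = 4, warmup_num = 1000, core_num = 4)
fit$stan_fit   # returned rstan object
fit$stan_data  # data passed to Stan
}
}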
|
/man/getStanFit.Rd
|
no_license
|
wjoycezhao/sampinfo
|
R
| false | true | 3,511 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_functions.R
\name{getStanFit}
\alias{getStanFit}
\title{Fit the model!}
\usage{
getStanFit(
beta = NULL,
deltaM_value = NULL,
binary_value = NULL,
ar_value = NULL,
random_value = NULL,
deltaD_value = NULL,
option_num,
format_data,
save_model_file = NULL,
init_values = "random",
iter_num,
chain_num,
warmup_num,
core_num,
adapt_delta = 0.999,
stepsize = 0.01,
max_treedepth = 20,
seed_no = 1,
save_warmup = FALSE,
refresh = 500,
init_r = 0.5,
hier_value = 1,
X_sim = NULL,
cluster_rating_m = NULL,
max_tNo_prd = NULL
)
}
\arguments{
\item{beta}{Variable names to be included in the memory model, e.g., c('sameC','sameA','dist').
Note that you have to include the associated columns in the data.
For example, to fit sameC, you have to include columns with names of 'sameC_1'...'sameC_C'.
To fit sameA, you have to include 'sameA_1',...,'sameA_C'.
In the memory model, we could include sameC (cluster resampling; sameC_value== 1), sameA (decision congruence; sameA_value== 1),
and dist (semantic congruence; dist_value == 1)}
\item{deltaM_value}{1 minus memory decay; 0 for full decay (Markov property); 1 for no decay;
8 for estimating it as a free parameter}
\item{binary_value}{0 for continuous inputs; 1 for discrete inputs}
\item{ar_value, }{0 for absolute accumulation; 1 for relative accumulation}
\item{random_value, }{0 for threshold-based; 1 for time limit;}
\item{deltaD_value, }{1 minus decision decay; 0 for full decay (Markov property); 1 for no decay;
8 for estimating it as a free parameter}
\item{option_num}{Number of total clusters in each choice (C)}
\item{format_data}{This should be a formatted data.frame.
sID: participant IDs.
qID: question ID.
cID: option ID being sampled (can take 1 to C).
tNo: thought number in that trial (from 1 to T).
terminate: 0 if continue sampling; 1 if yes; 0 if no (only needed for decision models)}
\item{save_model_file}{Specify a name if you want to save the RStan samples and diagnostics to a csv file.
This is sample_file for rstan::sampling}
\item{init_values}{Default is 'random'; can be specified using a function or a list}
\item{iter_num}{Iteration number}
\item{chain_num}{Chain number}
\item{warmup_num}{Warm-up or burn-in number}
\item{core_num}{Number of cores to be used}
\item{adapt_delta}{Default 0.999}
\item{stepsize}{Default 0.01}
\item{max_treedepth}{Default 20}
\item{seed_no}{Seed number}
\item{refresh}{Default 500}
\item{hier_value}{0 for fitting a non-hierarchical model; 1 for fitting a hierarchical model. Default is 1.
Note that the hierarchical version only works for decision or memory models separately.}
\item{X_sim}{Feature matrix, for different clusters. Needed for simulation.}
\item{cluster_rating_m}{Base rates of different ratings (-3 to 3) for each cluster. Needed for simulation.}
\item{max_tNo_prd}{Maximum length of a simulated trial}
}
\value{
Use $stan_fit to access the returned object from Stan;
Use $stan_data to see the input data of the Stan model
}
\description{
\code{getStanFit} uses RStan to fit a memory model or a decision model.
A memory model will be fit if deltaM_value is specified.
A decision model will be fit if deltaD_value is specified.
If both are specified, then the two models will be fit simultaneously and posterior predictions will be simulated.
Note that for memory models the option number (C) should be the same across all questions.
}
|
calculateRankSMAAForAll <- function(dmuData, samplesNo=10) {
dmuCount = nrow(dmuData$data)
result <- c()
source("smaa.R")
source("efficiencySMAA.R")
effResults <- calculateEfficiencySMAAForAll(dmuData, samplesNo)
result <- calculateRankSMAA(effResults)
intervals <- createSummary(result, dmuCount, samplesNo)
return (intervals)
}
calculateRankSMAA <- function (effResults) {
maxRank <- NROW(effResults) + 1
dmuCount <- ncol(effResults)
for(i in 1:ncol(effResults)) {
effResults[,i] <- maxRank - rank(effResults[,i], ties.method="min")
}
return (effResults)
}
createSummary <- function (ranks, intervalsNo, samplesNo) {
dmuCount <- NROW(ranks)
avgRank <- array(0, dim=dmuCount)
intervals <- array(0, dim=c(dmuCount,intervalsNo))
for(i in 1:dmuCount) {
for(j in 1:length(ranks[i,])){
intervals[i, ranks[i,j]] = intervals[i,ranks[i,j]] + 1
}
for(j in 1:dmuCount){
avgRank[i] <- avgRank[i] + j * (intervals[i,j])
}
}
avgRank <- avgRank / samplesNo
intervals <- intervals / samplesNo
result <- list(ranks = intervals, avgRank = avgRank)
return (result)
}
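## Usage sketch (hypothetical; assumes dmuData is a list whose $data element holds the DMU matrix
## expected by calculateEfficiencySMAAForAll from efficiencySMAA.R):
## res <- calculateRankSMAAForAll(dmuData, samplesNo = 1000)
## res$ranks    # rank acceptability indices: rows are DMUs, columns are ranks
## res$avgRank  # expected (average) rank of each DMU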
|
/DEASMAACCRRanks/rankSMAA.R
|
permissive
|
chaoshengt/DEA
|
R
| false | false | 1,124 |
r
|
calculateRankSMAAForAll <- function(dmuData, samplesNo=10) {
dmuCount = nrow(dmuData$data)
result <- c()
source("smaa.R")
source("efficiencySMAA.R")
effResults <- calculateEfficiencySMAAForAll(dmuData, samplesNo)
result <- calculateRankSMAA(effResults)
intervals <- createSummary(result, dmuCount, samplesNo)
return (intervals)
}
calculateRankSMAA <- function (effResults) {
maxRank <- NROW(effResults) + 1
dmuCount <- ncol(effResults)
for(i in 1:ncol(effResults)) {
effResults[,i] <- maxRank - rank(effResults[,i], ties.method="min")
}
return (effResults)
}
createSummary <- function (ranks, intervalsNo, samplesNo) {
dmuCount <- NROW(ranks)
avgRank <- array(0, dim=dmuCount)
intervals <- array(0, dim=c(dmuCount,intervalsNo))
for(i in 1:dmuCount) {
for(j in 1:length(ranks[i,])){
intervals[i, ranks[i,j]] = intervals[i,ranks[i,j]] + 1
}
for(j in 1:dmuCount){
avgRank[i] <- avgRank[i] + j * (intervals[i,j])
}
}
avgRank <- avgRank / samplesNo
intervals <- intervals / samplesNo
result <- list(ranks = intervals, avgRank = avgRank)
return (result)
}
|
#' Optimization & predictive modelling Toolsets
#'
#' @description optR function for solving linear systems using numerical approaches.
#' Current toolbox supports Gauss Elimination, LU decomposition, Conjugate Gradiant Decent and Gauss-Sideal methods for solving the system of form AX=b
#' For optimization using numerical methods cgm method performed faster in comparision with gaussseidel.
#' For decomposition LU is utilized for multiple responses to enhance the speed of computation.
#' @param x : Input matrix
#' @param ... : S3 method
#' @return optR : Return optR class
#' @author PKS Prakash
#' @export
#' @examples
#' # Solving equation Ax=b
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gauss") # Solve Linear model using Gauss Elimination
#'
#' # Solve Linear model using LU decomposition (Supports Multi-response)
#' Z<-optR(A, b, method="LU")
#'
#' # Solve the matrix using Gauss Elimination (1, -1, 2)
#' A<-matrix(c(2,-2,6, -2,4,3,-1,8,4), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(16,0, -1), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gauss") # Solve Linear model using Gauss Elimination
#'
#' require(utils)
#' set.seed(129)
#' n <- 10 ; p <- 4
#' X <- matrix(rnorm(n * p), n, p) # no intercept!
#' y <- rnorm(n)
#' Z<-optR(X, y, method="cgm")
optR<-function(x, ...) UseMethod("optR")
#' Optimization & predictive modelling Toolsets
#'
#' @description optR package to perform the optimization using numerical methods
#' @param formula : formula to build model
#' @param data : data used to build model
#' @param method : "gauss" for gaussian elimination and "LU" for LU factorization
#' @param iter : Number of Iterations
#' @param tol : Convergence tolerance
#' @param ... : S3 Class
#' @return U : Decomposed matrix. For Gauss elimination, Ax=b is converted into Ux=c where U is an upper triangular matrix; for LU decomposition, U contains the values of the L & U decomposition LUx=b
#' @return c : transformed b; for LU decomposition, c is y from the equation Ux=y
#' @return estimates : Return x values for linear system
#' @author PKS Prakash
#' @export
#' @examples
#' # Solving equation Ax=b
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' Z<-optR(b~A-1, method="gauss") # -1 to remove the constant vector
#'
#' Z<-optR(b~A-1, method="LU") # -1 to remove the constant vector
#'
#' require(utils)
#' set.seed(129)
#' n <- 10 ; p <- 4
#' X <- matrix(rnorm(n * p), n, p) # no intercept!
#' y <- rnorm(n)
#' data<-cbind(X, y)
#' colnames(data)<-c("var1", "var2", "var3", "var4", "y")
#' Z<-optR(y~var1+var2+var3+var4+var1*var2-1, data=data.frame(data), method="cgm")
optR.formula<-function(formula, data=list(), method=c("gauss, LU, gaussseidel", "cgm", "choleski"), iter=500, tol=1e-7, ...)
{
# Parse the call
cl <- match.call()
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "na.action"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
# Extract data
mt <- attr(mf, "terms")
x <- model.matrix(mt, mf, contrasts)
y<-model.response(mf, "numeric")
# Default Method
if(length(method)>1) method="cgm"
# Fit Models
nROWx=nrow(x)
nCOLx=ncol(x)
if(nROWx==nCOLx){
optR<-optR.fit(x, y, method, iter, tol) # Fit optimization method
} else {
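# Rectangular X: reduce to the normal equations t(X) %*% X %*% beta = t(X) %*% y before fitting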
y<-t(x)%*%y
x<-t(x)%*%x
optR<-optR.fit(x, y, method, iter, tol) # Fit optimization method
}
optR$formula<-formula
optR$na.action <- attr(mf, "na.action")
optR$xlevels <- .getXlevels(mt, mf)
optR$terms <- mt
optR$call<-cl
optR$method<-method
class(optR)<-"optR"
optR
# Call the default function
}
#' Optimization & predictive modelling Toolsets
#'
#' optR is the default function for optimization
#' @param x : Input data frame
#' @param y : Response is data frame
#' @param method : "gauss" for gaussian elimination and "LU" for LU factorization
#' @param iter : Number of Iterations
#' @param tol : Convergence tolerance
#' @param ... : S3 Class
#' @return U : Decomposed matrix. For Gauss elimination, Ax=b is converted into Ux=c where U is an upper triangular matrix; for LU decomposition, U contains the values of the L & U decomposition LUx=b
#' @return c : transformed b; for LU decomposition, c is y from the equation Ux=y
#' @return estimates : Return x values for linear system
#' @return seq : sequence of A matrix re-ordered
#' @export
#' @examples
#' # Solving equation Ax=b
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gauss")
#'
#' # Solve Linear model using LU decomposition (Supports Multi-response)
#' Z<-optR(A, b, method="LU")
#'
#' # Solving the function using numerical method
#' Z<-optR(A, b, method="cgm")
#'
#' require(utils)
#' set.seed(129)
#' n <- 7 ; p <- 2
#' X <- matrix(rnorm(n * p), n, p) # no intercept!
#' y <- rnorm(n)
#' Z<-optR(X, y, method="LU")
optR.default<-function(x, y=NULL, method=c("gauss, LU, gaussseidel", "cgm"), iter=500, tol=1e-7, ...){
if(!is.data.frame(x)) x<-data.frame(x)
if(!is.data.frame(y)) y<-data.frame(y)
# Default fitting
if(length(method)>1){
method="LU"
}
# Assign initial list
optR<-list()
# Build model
if(nrow(y)==0){
# Check for fitting models
if(method!="LU"){
warning("b is NULL matrix!!! switching to LU factorization for A decomposition to LU")
}
modelf<-as.formula(paste0("~", paste0(colnames(x), collapse="+"), "-1", sep=""))
optR<-optR(modelf, data=x, method="LU", iter, tol)
} else
{
modelf<-as.formula(paste0(colnames(y), "~", paste0(colnames(x), collapse="+"), "-1", sep=""))
optR<-optR(modelf, data=cbind.data.frame(x, y), method=method, iter, tol)
}
class(optR)<-"optR"
optR$call<-match.call()
optR
}
#' print coefficients for optR class
#'
#' optR is the default function for optimization
#' @param x : Input of optR class
#' @param ... : S3 class
#' @export
#' @examples
#' # Solving equation Ax=b
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gauss")
#' print(Z)
print.optR<-function(x, ...)
{
cat("call: \n")
print(x$call)
# Beta for the coefficients
if(!is.null(x$beta)){
cat("\n Coefficients: \n")
print(x$beta)
}
}
#' Generate Summary for optR class
#'
#' summary function generates the summary for the optR class
#' @param object : Input of optR class
#' @param ... : S3 method
#' @export
#' @examples
#' # Solving equation Ax=b
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="cgm")
#' summary(Z)
summary.optR<-function(object, ...)
{
# Print the results
print.optR(object)
# Plot convergence for cgm model
if(object$method=="cgm"){
plot(object$conv, xlab="Iterations", ylab="Error")
lines(object$conv)
title(main="CGM Convergence Plot")
}
}
|
/optR/R/optR.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 7,273 |
r
|
#' Optimization & predictive modelling Toolsets
#'
#' @description optR function for solving linear systems using numerical approaches.
#' Current toolbox supports Gauss Elimination, LU decomposition, Conjugate Gradiant Decent and Gauss-Sideal methods for solving the system of form AX=b
#' For optimization using numerical methods cgm method performed faster in comparision with gaussseidel.
#' For decomposition LU is utilized for multiple responses to enhance the speed of computation.
#' @param x : Input matrix
#' @param ... : S3 method
#' @return optR : Return optR class
#' @author PKS Prakash
#' @export
#' @examples
#' # Solving equation Ax=b
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gauss") # Solve Linear model using Gauss Elimination
#'
#' # Solve Linear model using LU decomposition (Supports Multi-response)
#' Z<-optR(A, b, method="LU")
#'
#' # Solve the matrix using Gauss Elimination (1, -1, 2)
#' A<-matrix(c(2,-2,6, -2,4,3,-1,8,4), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(16,0, -1), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gauss") # Solve Linear model using Gauss Elimination
#'
#' require(utils)
#' set.seed(129)
#' n <- 10 ; p <- 4
#' X <- matrix(rnorm(n * p), n, p) # no intercept!
#' y <- rnorm(n)
#' Z<-optR(X, y, method="cgm")
optR<-function(x, ...) UseMethod("optR")
#' Optimization & predictive modelling Toolsets
#'
#' @description optR package to perform the optimization using numerical methods
#' @param formula : formula to build model
#' @param data : data used to build model
#' @param method : "gauss" for gaussian elimination and "LU" for LU factorization
#' @param iter : Number of Iterations
#' @param tol : Convergence tolerance
#' @param ... : S3 Class
#' @return U : Decomposed matrix. For Gauss elimination, Ax=b is converted into Ux=c where U is an upper triangular matrix; for LU decomposition, U contains the values of the L & U decomposition LUx=b
#' @return c : transformed b; for LU decomposition, c is y from the equation Ux=y
#' @return estimates : Return x values for linear system
#' @author PKS Prakash
#' @export
#' @examples
#' # Solving equation Ax=b
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' Z<-optR(b~A-1, method="gauss") # -1 to remove the constant vector
#'
#' Z<-optR(b~A-1, method="LU") # -1 to remove the constant vector
#'
#' require(utils)
#' set.seed(129)
#' n <- 10 ; p <- 4
#' X <- matrix(rnorm(n * p), n, p) # no intercept!
#' y <- rnorm(n)
#' data<-cbind(X, y)
#' colnames(data)<-c("var1", "var2", "var3", "var4", "y")
#' Z<-optR(y~var1+var2+var3+var4+var1*var2-1, data=data.frame(data), method="cgm")
optR.formula<-function(formula, data=list(), method=c("gauss, LU, gaussseidel", "cgm", "choleski"), iter=500, tol=1e-7, ...)
{
# Parse the call
cl <- match.call()
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "na.action"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
# Extract data
mt <- attr(mf, "terms")
x <- model.matrix(mt, mf, contrasts)
y<-model.response(mf, "numeric")
# Default Method
if(length(method)>1) method="cgm"
# Fit Models
nROWx=nrow(x)
nCOLx=ncol(x)
if(nROWx==nCOLx){
optR<-optR.fit(x, y, method, iter, tol) # Fit optimization method
} else {
y<-t(x)%*%y
x<-t(x)%*%x
optR<-optR.fit(x, y, method, iter, tol) # Fit optimization method
}
optR$formula<-formula
optR$na.action <- attr(mf, "na.action")
optR$xlevels <- .getXlevels(mt, mf)
optR$terms <- mt
optR$call<-cl
optR$method<-method
class(optR)<-"optR"
optR
# Call the default function
}
#' Optimization & predictive modelling Toolsets
#'
#' optR is the default function for optimization
#' @param x : Input data frame
#' @param y : Response is data frame
#' @param method : "gauss" for gaussian elimination and "LU" for LU factorization
#' @param iter : Number of Iterations
#' @param tol : Convergence tolerance
#' @param ... : S3 Class
#' @return U : Decomposed matrix. For Gauss elimination, Ax=b is converted into Ux=c where U is an upper triangular matrix; for LU decomposition, U contains the values of the L & U decomposition LUx=b
#' @return c : transformed b; for LU decomposition, c is y from the equation Ux=y
#' @return estimates : Return x values for linear system
#' @return seq : sequence of A matrix re-ordered
#' @export
#' @examples
#' # Solving equation Ax=b
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gauss")
#'
#' # Solve Linear model using LU decomposition (Supports Multi-response)
#' Z<-optR(A, b, method="LU")
#'
#' # Solving the function using numerical method
#' Z<-optR(A, b, method="cgm")
#'
#' require(utils)
#' set.seed(129)
#' n <- 7 ; p <- 2
#' X <- matrix(rnorm(n * p), n, p) # no intercept!
#' y <- rnorm(n)
#' Z<-optR(X, y, method="LU")
optR.default<-function(x, y=NULL, method=c("gauss, LU, gaussseidel", "cgm"), iter=500, tol=1e-7, ...){
if(!is.data.frame(x)) x<-data.frame(x)
if(!is.data.frame(y)) y<-data.frame(y)
# Default fitting
if(length(method)>1){
method="LU"
}
# Assign initial list
optR<-list()
# Build model
if(nrow(y)==0){
# Check for fitting models
if(method!="LU"){
warning("b is NULL matrix!!! switching to LU factorization for A decomposition to LU")
}
modelf<-as.formula(paste0("~", paste0(colnames(x), collapse="+"), "-1", sep=""))
optR<-optR(modelf, data=x, method="LU", iter, tol)
} else
{
modelf<-as.formula(paste0(colnames(y), "~", paste0(colnames(x), collapse="+"), "-1", sep=""))
optR<-optR(modelf, data=cbind.data.frame(x, y), method=method, iter, tol)
}
class(optR)<-"optR"
optR$call<-match.call()
optR
}
#' print coefficients for optR class
#'
#' optR is the default function for optimization
#' @param x : Input of optR class
#' @param ... : S3 class
#' @export
#' @examples
#' # Solving equation Ax=b
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gauss")
#' print(Z)
print.optR<-function(x, ...)
{
cat("call: \n")
print(x$call)
# Beta for the coefficients
if(!is.null(x$beta)){
cat("\n Coefficients: \n")
print(x$beta)
}
}
#' Generate Summary for optR class
#'
#' summary function generates the summary for the optR class
#' @param object : Input of optR class
#' @param ... : S3 method
#' @export
#' @examples
#' # Solving equation Ax=b
#' A<-matrix(c(6,-4,1, -4,6,-4,1,-4,6), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(-14,36, 6), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="cgm")
#' summary(Z)
summary.optR<-function(object, ...)
{
# Print the results
print.optR(object)
# Plot convergence for cgm model
if(object$method=="cgm"){
plot(object$conv, xlab="Iterations", ylab="Error")
lines(object$conv)
title(main="CGM Convergence Plot")
}
}
|
#!/bin/Rscript
on_100_10 <- read.table("on_100_10/profits.dat")$V1
#op_100_10 <- read.table("op_100_10/profits.dat")$V1
#op_100_10 <- read.table("op_partial_100_10/profits.dat")$V1
op_100_10 <- read.table("op_partial_fac5_100_10/profits.dat")$V1
ov_100_10 <- read.table("ov_100_10/profits.dat")$V1
ut_100_10 <- read.table("ut_100_10/profits.dat")$V1
# Hypothesis tests
print("100 us 10%: hi == on")
wilcox.test(ut_100_10, on_100_10, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_100_10, on_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_100_10, ov_100_10, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_100_10, ov_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_100_10, ov_100_10, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_100_10, ov_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_100_10, op_100_10, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_100_10, op_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_100_10, on_100_10, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_100_10, on_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
on_100_5 <- read.table("on_100_5/profits.dat")$V1
#op_100_5 <- read.table("op_100_5/profits.dat")$V1
#op_100_5 <- read.table("op_partial_100_5/profits.dat")$V1
op_100_5 <- read.table("op_partial_fac5_100_5/profits.dat")$V1
ov_100_5 <- read.table("ov_100_5/profits.dat")$V1
ut_100_5 <- read.table("ut_100_5/profits.dat")$V1
# Hypothesis tests
print("100 us 5%: hi == on")
wilcox.test(ut_100_5, on_100_5, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_100_5, on_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_100_5, ov_100_5, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_100_5, ov_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_100_5, ov_100_5, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_100_5, ov_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_100_5, op_100_5, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_100_5, op_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_100_5, on_100_5, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_100_5, on_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
on_100_15 <- read.table("on_100_15/profits.dat")$V1
#op_100_15 <- read.table("op_100_15/profits.dat")$V1
#op_100_15 <- read.table("op_partial_100_15/profits.dat")$V1
op_100_15 <- read.table("op_partial_fac5_100_15/profits.dat")$V1
ov_100_15 <- read.table("ov_100_15/profits.dat")$V1
ut_100_15 <- read.table("ut_100_15/profits.dat")$V1
# Hypothesis tests
print("100 us 15%: hi == on")
wilcox.test(ut_100_15, on_100_15, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_100_15, on_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_100_15, ov_100_15, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_100_15, ov_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_100_15, ov_100_15, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_100_15, ov_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_100_15, op_100_15, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_100_15, op_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_100_15, on_100_15, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_100_15, on_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
on_10_10 <- read.table("on_10_10/profits.dat")$V1
#op_10_10 <- read.table("op_10_10/profits.dat")$V1
#op_10_10 <- read.table("op_partial_10_10/profits.dat")$V1
op_10_10 <- read.table("op_partial_fac5_10_10/profits.dat")$V1
ov_10_10 <- read.table("ov_10_10/profits.dat")$V1
ut_10_10 <- read.table("ut_10_10/profits.dat")$V1
# Hypothesis tests
print("10 us 10%: hi == on")
wilcox.test(ut_10_10, on_10_10, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_10_10, on_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_10_10, ov_10_10, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_10_10, ov_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_10_10, ov_10_10, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_10_10, ov_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_10_10, op_10_10, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_10_10, op_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_10_10, on_10_10, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_10_10, on_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
on_10_5 <- read.table("on_10_5/profits.dat")$V1
#op_10_5 <- read.table("op_10_5/profits.dat")$V1
#op_10_5 <- read.table("op_partial_10_5/profits.dat")$V1
op_10_5 <- read.table("op_partial_fac5_10_5/profits.dat")$V1
ov_10_5 <- read.table("ov_10_5/profits.dat")$V1
ut_10_5 <- read.table("ut_10_5/profits.dat")$V1
# Hypothesis tests
print("10 us 5%: hi == on")
wilcox.test(ut_10_5, on_10_5, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_10_5, on_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_10_5, ov_10_5, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_10_5, ov_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_10_5, ov_10_5, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_10_5, ov_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_10_5, op_10_5, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_10_5, op_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_10_5, on_10_5, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_10_5, on_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
on_10_15 <- read.table("on_10_15/profits.dat")$V1
#op_10_15 <- read.table("op_10_15/profits.dat")$V1
#op_10_15 <- read.table("op_partial_10_15/profits.dat")$V1
op_10_15 <- read.table("op_partial_fac5_10_15/profits.dat")$V1
ov_10_15 <- read.table("ov_10_15/profits.dat")$V1
ut_10_15 <- read.table("ut_10_15/profits.dat")$V1
# Hypothesis tests
print("10 us 15%: hi == on")
wilcox.test(ut_10_15, on_10_15, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_10_15, on_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_10_15, ov_10_15, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_10_15, ov_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_10_15, ov_10_15, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_10_15, ov_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_10_15, op_10_15, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_10_15, op_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_10_15, on_10_15, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_10_15, on_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
on_50_10 <- read.table("on_50_10/profits.dat")$V1
#op_50_10 <- read.table("op_50_10/profits.dat")$V1
#op_50_10 <- read.table("op_partial_50_10/profits.dat")$V1
op_50_10 <- read.table("op_partial_fac5_50_10/profits.dat")$V1
ov_50_10 <- read.table("ov_50_10/profits.dat")$V1
ut_50_10 <- read.table("ut_50_10/profits.dat")$V1
# Hypothesis tests
print("50 us 10%: hi == on")
wilcox.test(ut_50_10, on_50_10, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_50_10, on_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_50_10, ov_50_10, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_50_10, ov_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_50_10, ov_50_10, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_50_10, ov_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_50_10, op_50_10, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_50_10, op_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_50_10, on_50_10, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_50_10, on_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
on_50_5 <- read.table("on_50_5/profits.dat")$V1
#op_50_5 <- read.table("op_50_5/profits.dat")$V1
#op_50_5 <- read.table("op_partial_50_5/profits.dat")$V1
op_50_5 <- read.table("op_partial_fac5_50_5/profits.dat")$V1
ov_50_5 <- read.table("ov_50_5/profits.dat")$V1
ut_50_5 <- read.table("ut_50_5/profits.dat")$V1
# Hypothesis tests
print("50 us 5%: hi == on")
wilcox.test(ut_50_5, on_50_5, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_50_5, on_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_50_5, ov_50_5, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_50_5, ov_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_50_5, ov_50_5, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_50_5, ov_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_50_5, op_50_5, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_50_5, op_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_50_5, on_50_5, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_50_5, on_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
on_50_15 <- read.table("on_50_15/profits.dat")$V1
#op_50_15 <- read.table("op_50_15/profits.dat")$V1
#op_50_15 <- read.table("op_partial_50_15/profits.dat")$V1
op_50_15 <- read.table("op_partial_fac5_50_15/profits.dat")$V1
ov_50_15 <- read.table("ov_50_15/profits.dat")$V1
ut_50_15 <- read.table("ut_50_15/profits.dat")$V1
# Hypothesis tests (paired Wilcoxon): 50 users, 15% risk
print("50 us 15%: hi == on")
wilcox.test(ut_50_15, on_50_15, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_50_15, on_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_50_15, ov_50_15, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_50_15, ov_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_50_15, ov_50_15, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_50_15, ov_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_50_15, op_50_15, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_50_15, op_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_50_15, on_50_15, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_50_15, on_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
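# --- Hypothetical helper (a sketch, not part of the original analysis): the blocks
# above repeat the same pair of paired Wilcoxon signed-rank tests (two-sided and
# one-sided "less") for every strategy pair and scenario. Note that var.equal is a
# t.test() argument; wilcox.test() does not use it, so it has no effect above.
compare_pair <- function(label, x, y) {
  print(label)
  print(wilcox.test(x, y, paired=TRUE))                       # two-sided test
  print(wilcox.test(x, y, paired=TRUE, alternative="less"))   # one-sided test
}
# Example usage (same comparison as the "50 us 15%: hi == on" block above):
# compare_pair("50 us 15%: hi vs on", ut_50_15, on_50_15)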
# >>>>>> Profit summaries: mean and 95% confidence interval per strategy and scenario
print(">>>>>>>> LUCROS 100 10%")
paste("OP", mean(op_100_10), "[", 1.96*sd(op_100_10)/sqrt(length(op_100_10))+mean(op_100_10), ":", mean(op_100_10) - 1.96*sd(op_100_10)/sqrt(length(op_100_10)), "]")
paste("UT", mean(ut_100_10), "[", 1.96*sd(ut_100_10)/sqrt(length(ut_100_10))+mean(ut_100_10), ":", mean(ut_100_10) - 1.96*sd(ut_100_10)/sqrt(length(ut_100_10)), "]")
paste("ON", mean(on_100_10), "[", 1.96*sd(on_100_10)/sqrt(length(on_100_10))+mean(on_100_10), ":", mean(on_100_10) - 1.96*sd(on_100_10)/sqrt(length(on_100_10)), "]")
paste("OV", mean(ov_100_10), "[", 1.96*sd(ov_100_10)/sqrt(length(ov_100_10))+mean(ov_100_10), ":", mean(ov_100_10) - 1.96*sd(ov_100_10)/sqrt(length(ov_100_10)), "]")
print(">>>>>>>> LUCROS 10 10%")
paste("OP", mean(op_10_10), "[", 1.96*sd(op_10_10)/sqrt(length(op_10_10))+mean(op_10_10), ":", mean(op_10_10) - 1.96*sd(op_10_10)/sqrt(length(op_10_10)), "]")
paste("UT", mean(ut_10_10), "[", 1.96*sd(ut_10_10)/sqrt(length(ut_10_10))+mean(ut_10_10), ":", mean(ut_10_10) - 1.96*sd(ut_10_10)/sqrt(length(ut_10_10)), "]")
paste("ON", mean(on_10_10), "[", 1.96*sd(on_10_10)/sqrt(length(on_10_10))+mean(on_10_10), ":", mean(on_10_10) - 1.96*sd(on_10_10)/sqrt(length(on_10_10)), "]")
paste("OV", mean(ov_10_10), "[", 1.96*sd(ov_10_10)/sqrt(length(ov_10_10))+mean(ov_10_10), ":", mean(ov_10_10) - 1.96*sd(ov_10_10)/sqrt(length(ov_10_10)), "]")
print(">>>>>>>> LUCROS 50 10%")
paste("OP", mean(op_50_10), "[", 1.96*sd(op_50_10)/sqrt(length(op_50_10))+mean(op_50_10), ":", mean(op_50_10) - 1.96*sd(op_50_10)/sqrt(length(op_50_10)), "]")
paste("UT", mean(ut_50_10), "[", 1.96*sd(ut_50_10)/sqrt(length(ut_50_10))+mean(ut_50_10), ":", mean(ut_50_10) - 1.96*sd(ut_50_10)/sqrt(length(ut_50_10)), "]")
paste("ON", mean(on_50_10), "[", 1.96*sd(on_50_10)/sqrt(length(on_50_10))+mean(on_50_10), ":", mean(on_50_10) - 1.96*sd(on_50_10)/sqrt(length(on_50_10)), "]")
paste("OV", mean(ov_50_10), "[", 1.96*sd(ov_50_10)/sqrt(length(ov_50_10))+mean(ov_50_10), ":", mean(ov_50_10) - 1.96*sd(ov_50_10)/sqrt(length(ov_50_10)), "]")
print(">>>>>>>> LUCROS 100 15%")
paste("OP", mean(op_100_15), "[", 1.96*sd(op_100_15)/sqrt(length(op_100_15))+mean(op_100_15), ":", mean(op_100_15) - 1.96*sd(op_100_15)/sqrt(length(op_100_15)), "]")
paste("UT", mean(ut_100_15), "[", 1.96*sd(ut_100_15)/sqrt(length(ut_100_15))+mean(ut_100_15), ":", mean(ut_100_15) - 1.96*sd(ut_100_15)/sqrt(length(ut_100_15)), "]")
paste("ON", mean(on_100_15), "[", 1.96*sd(on_100_15)/sqrt(length(on_100_15))+mean(on_100_15), ":", mean(on_100_15) - 1.96*sd(on_100_15)/sqrt(length(on_100_15)), "]")
paste("OV", mean(ov_100_15), "[", 1.96*sd(ov_100_15)/sqrt(length(ov_100_15))+mean(ov_100_15), ":", mean(ov_100_15) - 1.96*sd(ov_100_15)/sqrt(length(ov_100_15)), "]")
print(">>>>>>>> LUCROS 10 15%")
paste("OP", mean(op_10_15), "[", 1.96*sd(op_10_15)/sqrt(length(op_10_15))+mean(op_10_15), ":", mean(op_10_15) - 1.96*sd(op_10_15)/sqrt(length(op_10_15)), "]")
paste("UT", mean(ut_10_15), "[", 1.96*sd(ut_10_15)/sqrt(length(ut_10_15))+mean(ut_10_15), ":", mean(ut_10_15) - 1.96*sd(ut_10_15)/sqrt(length(ut_10_15)), "]")
paste("ON", mean(on_10_15), "[", 1.96*sd(on_10_15)/sqrt(length(on_10_15))+mean(on_10_15), ":", mean(on_10_15) - 1.96*sd(on_10_15)/sqrt(length(on_10_15)), "]")
paste("OV", mean(ov_10_15), "[", 1.96*sd(ov_10_15)/sqrt(length(ov_10_15))+mean(ov_10_15), ":", mean(ov_10_15) - 1.96*sd(ov_10_15)/sqrt(length(ov_10_15)), "]")
print(">>>>>>>> LUCROS 50 15%")
paste("OP", mean(op_50_15), "[", 1.96*sd(op_50_15)/sqrt(length(op_50_15))+mean(op_50_15), ":", mean(op_50_15) - 1.96*sd(op_50_15)/sqrt(length(op_50_15)), "]")
paste("UT", mean(ut_50_15), "[", 1.96*sd(ut_50_15)/sqrt(length(ut_50_15))+mean(ut_50_15), ":", mean(ut_50_15) - 1.96*sd(ut_50_15)/sqrt(length(ut_50_15)), "]")
paste("ON", mean(on_50_15), "[", 1.96*sd(on_50_15)/sqrt(length(on_50_15))+mean(on_50_15), ":", mean(on_50_15) - 1.96*sd(on_50_15)/sqrt(length(on_50_15)), "]")
paste("OV", mean(ov_50_15), "[", 1.96*sd(ov_50_15)/sqrt(length(ov_50_15))+mean(ov_50_15), ":", mean(ov_50_15) - 1.96*sd(ov_50_15)/sqrt(length(ov_50_15)), "]")
print(">>>>>>>> LUCROS 100 5%")
paste("OP", mean(op_100_5), "[", 1.96*sd(op_100_5)/sqrt(length(op_100_5))+mean(op_100_5), ":", mean(op_100_5) - 1.96*sd(op_100_5)/sqrt(length(op_100_5)), "]")
paste("UT", mean(ut_100_5), "[", 1.96*sd(ut_100_5)/sqrt(length(ut_100_5))+mean(ut_100_5), ":", mean(ut_100_5) - 1.96*sd(ut_100_5)/sqrt(length(ut_100_5)), "]")
paste("ON", mean(on_100_5), "[", 1.96*sd(on_100_5)/sqrt(length(on_100_5))+mean(on_100_5), ":", mean(on_100_5) - 1.96*sd(on_100_5)/sqrt(length(on_100_5)), "]")
paste("OV", mean(ov_100_5), "[", 1.96*sd(ov_100_5)/sqrt(length(ov_100_5))+mean(ov_100_5), ":", mean(ov_100_5) - 1.96*sd(ov_100_5)/sqrt(length(ov_100_5)), "]")
print(">>>>>>>> LUCROS 10 5%")
paste("OP", mean(op_10_5), "[", 1.96*sd(op_10_5)/sqrt(length(op_10_5))+mean(op_10_5), ":", mean(op_10_5) - 1.96*sd(op_10_5)/sqrt(length(op_10_5)), "]")
paste("UT", mean(ut_10_5), "[", 1.96*sd(ut_10_5)/sqrt(length(ut_10_5))+mean(ut_10_5), ":", mean(ut_10_5) - 1.96*sd(ut_10_5)/sqrt(length(ut_10_5)), "]")
paste("ON", mean(on_10_5), "[", 1.96*sd(on_10_5)/sqrt(length(on_10_5))+mean(on_10_5), ":", mean(on_10_5) - 1.96*sd(on_10_5)/sqrt(length(on_10_5)), "]")
paste("OV", mean(ov_10_5), "[", 1.96*sd(ov_10_5)/sqrt(length(ov_10_5))+mean(ov_10_5), ":", mean(ov_10_5) - 1.96*sd(ov_10_5)/sqrt(length(ov_10_5)), "]")
print(">>>>>>>> LUCROS 50 5%")
paste("OP", mean(op_50_5), "[", 1.96*sd(op_50_5)/sqrt(length(op_50_5))+mean(op_50_5), ":", mean(op_50_5) - 1.96*sd(op_50_5)/sqrt(length(op_50_5)), "]")
paste("UT", mean(ut_50_5), "[", 1.96*sd(ut_50_5)/sqrt(length(ut_50_5))+mean(ut_50_5), ":", mean(ut_50_5) - 1.96*sd(ut_50_5)/sqrt(length(ut_50_5)), "]")
paste("ON", mean(on_50_5), "[", 1.96*sd(on_50_5)/sqrt(length(on_50_5))+mean(on_50_5), ":", mean(on_50_5) - 1.96*sd(on_50_5)/sqrt(length(on_50_5)), "]")
paste("OV", mean(ov_50_5), "[", 1.96*sd(ov_50_5)/sqrt(length(ov_50_5))+mean(ov_50_5), ":", mean(ov_50_5) - 1.96*sd(ov_50_5)/sqrt(length(ov_50_5)), "]")
paste("Média geral lucro UT: ", mean(c(ut_50_5, ut_50_15, ut_50_10, ut_100_5, ut_100_15, ut_100_10)))
paste("Média geral lucro RF: ", mean(c(op_50_5, op_50_15, op_50_10, op_100_5, op_100_15, op_100_10)))
paste("Média geral lucro OV: ", mean(c(ov_50_5, ov_50_15, ov_50_10, ov_100_5, ov_100_15, ov_100_10)))
# >>>>>>>>> Gains relative to the on baseline, 10% risk
gop_100_10 <- (op_100_10 - on_100_10) / abs(on_100_10)
gut_100_10 <- (ut_100_10 - on_100_10) / abs(on_100_10)
gov_100_10 <- (ov_100_10 - on_100_10) / abs(on_100_10)
print(">>>> GANHOS 100 10%")
paste("Ganhos hi --> on", mean(ut_100_10 - on_100_10), " [", 1.96*sd(ut_100_10 - on_100_10)/sqrt(length(ut_100_10 - on_100_10))+mean(ut_100_10 - on_100_10), ":", mean(ut_100_10 - on_100_10) - 1.96*sd(ut_100_10 - on_100_10)/sqrt(length(ut_100_10 - on_100_10)), "]", " ", mean(gut_100_10), " [", 1.96*sd(gut_100_10)/sqrt(length(gut_100_10))+mean(gut_100_10), ":", mean(gut_100_10) - 1.96*sd(gut_100_10)/sqrt(length(gut_100_10)), "]" )
print(paste(gut_100_10))
paste("Ganhos op --> on", mean(op_100_10 - on_100_10), " [", 1.96*sd(op_100_10 - on_100_10)/sqrt(length(op_100_10 - on_100_10))+mean(op_100_10 - on_100_10), ":", mean(op_100_10 - on_100_10) - 1.96*sd(op_100_10 - on_100_10)/sqrt(length(op_100_10 - on_100_10)), "]", " ", mean(gop_100_10), " [", 1.96*sd(gop_100_10)/sqrt(length(gop_100_10))+mean(gop_100_10), ":", mean(gop_100_10) - 1.96*sd(gop_100_10)/sqrt(length(gop_100_10)), "]" )
print(paste(gop_100_10))
paste("Ganhos ov --> on", mean(ov_100_10 - on_100_10), " [", 1.96*sd(ov_100_10 - on_100_10)/sqrt(length(ov_100_10 - on_100_10))+mean(ov_100_10 - on_100_10), ":", mean(ov_100_10 - on_100_10) - 1.96*sd(ov_100_10 - on_100_10)/sqrt(length(ov_100_10 - on_100_10)), "]", " ", mean(gov_100_10), " [", 1.96*sd(gov_100_10)/sqrt(length(gov_100_10))+mean(gov_100_10), ":", mean(gov_100_10) - 1.96*sd(gov_100_10)/sqrt(length(gov_100_10)), "]" )
print(paste(gov_100_10))
gop_10_10 <- (op_10_10 - on_10_10) / abs(on_10_10)
gut_10_10 <- (ut_10_10 - on_10_10) / abs(on_10_10)
gov_10_10 <- (ov_10_10 - on_10_10) / abs(on_10_10)
print(">>>> GANHOS 10 10%")
paste("Ganhos hi --> on", mean(ut_10_10 - on_10_10), " ", mean(gut_10_10), " [", 1.96*sd(ut_10_10 - on_10_10)/sqrt(length(ut_10_10 - on_10_10))+mean(ut_10_10 - on_10_10), ":", mean(ut_10_10 - on_10_10) - 1.96*sd(ut_10_10 - on_10_10)/sqrt(length(ut_10_10 - on_10_10)), "]", " [", 1.96*sd(gut_10_10)/sqrt(length(gut_10_10))+mean(gut_10_10), ":", mean(gut_10_10) - 1.96*sd(gut_10_10)/sqrt(length(gut_10_10)), "]" )
print(paste(gut_10_10))
paste("Ganhos op --> on", mean(op_10_10 - on_10_10), " [", 1.96*sd(op_10_10 - on_10_10)/sqrt(length(op_10_10 - on_10_10))+mean(op_10_10 - on_10_10), ":", mean(op_10_10 - on_10_10) - 1.96*sd(op_10_10 - on_10_10)/sqrt(length(op_10_10 - on_10_10)), "]", " ", mean(gop_10_10), " [", 1.96*sd(gop_10_10)/sqrt(length(gop_10_10))+mean(gop_10_10), ":", mean(gop_10_10) - 1.96*sd(gop_10_10)/sqrt(length(gop_10_10)), "]" )
print(paste(gop_10_10))
paste("Ganhos ov --> on", mean(ov_10_10 - on_10_10), " [", 1.96*sd(ov_10_10 - on_10_10)/sqrt(length(ov_10_10 - on_10_10))+mean(ov_10_10 - on_10_10), ":", mean(ov_10_10 - on_10_10) - 1.96*sd(ov_10_10 - on_10_10)/sqrt(length(ov_10_10 - on_10_10)), "]", " ", mean(gov_10_10), " [", 1.96*sd(gov_10_10)/sqrt(length(gov_10_10))+mean(gov_10_10), ":", mean(gov_10_10) - 1.96*sd(gov_10_10)/sqrt(length(gov_10_10)), "]" )
print(paste(gov_10_10))
gop_50_10 <- (op_50_10 - on_50_10) / abs(on_50_10)
gut_50_10 <- (ut_50_10 - on_50_10) / abs(on_50_10)
gov_50_10 <- (ov_50_10 - on_50_10) / abs(on_50_10)
print(">>>> GANHOS 50 10%")
paste("Ganhos hi --> on", mean(ut_50_10 - on_50_10), " [", 1.96*sd(ut_50_10 - on_50_10)/sqrt(length(ut_50_10 - on_50_10))+mean(ut_50_10 - on_50_10), ":", mean(ut_50_10 - on_50_10) - 1.96*sd(ut_50_10 - on_50_10)/sqrt(length(ut_50_10 - on_50_10)), "]", " ", mean(gut_50_10), " [", 1.96*sd(gut_50_10)/sqrt(length(gut_50_10))+mean(gut_50_10), ":", mean(gut_50_10) - 1.96*sd(gut_50_10)/sqrt(length(gut_50_10)), "]" )
print(paste(gut_50_10))
paste("Ganhos op --> on", mean(op_50_10 - on_50_10), " [", 1.96*sd(op_50_10 - on_50_10)/sqrt(length(op_50_10 - on_50_10))+mean(op_50_10 - on_50_10), ":", mean(op_50_10 - on_50_10) - 1.96*sd(op_50_10 - on_50_10)/sqrt(length(op_50_10 - on_50_10)), "]", " ", mean(gop_50_10), " [", 1.96*sd(gop_50_10)/sqrt(length(gop_50_10))+mean(gop_50_10), ":", mean(gop_50_10) - 1.96*sd(gop_50_10)/sqrt(length(gop_50_10)), "]" )
print(paste(gop_50_10))
paste("Ganhos ov --> on", mean(ov_50_10 - on_50_10), " [", 1.96*sd(ov_50_10 - on_50_10)/sqrt(length(ov_50_10 - on_50_10))+mean(ov_50_10 - on_50_10), ":", mean(ov_50_10 - on_50_10) - 1.96*sd(ov_50_10 - on_50_10)/sqrt(length(ov_50_10 - on_50_10)), "]", " ", mean(gov_50_10), " [", 1.96*sd(gov_50_10)/sqrt(length(gov_50_10))+mean(gov_50_10), ":", mean(gov_50_10) - 1.96*sd(gov_50_10)/sqrt(length(gov_50_10)), "]" )
print(paste(gov_50_10))
# Gains relative to the on baseline, 15% risk
gop_100_15 <- (op_100_15 - on_100_15) / abs(on_100_15)
gut_100_15 <- (ut_100_15 - on_100_15) / abs(on_100_15)
gov_100_15 <- (ov_100_15 - on_100_15) / abs(on_100_15)
print(">>>> GANHOS 100 15%")
paste("Ganhos hi --> on", mean(ut_100_15 - on_100_15), " [", 1.96*sd(ut_100_15 - on_100_15)/sqrt(length(ut_100_15 - on_100_15))+mean(ut_100_15 - on_100_15), ":", mean(ut_100_15 - on_100_15) - 1.96*sd(ut_100_15 - on_100_15)/sqrt(length(ut_100_15 - on_100_15)), "]", " ", mean(gut_100_15), " [", 1.96*sd(gut_100_15)/sqrt(length(gut_100_15))+mean(gut_100_15), ":", mean(gut_100_15) - 1.96*sd(gut_100_15)/sqrt(length(gut_100_15)), "]" )
print(paste(gut_100_15))
paste("Ganhos op --> on", mean(op_100_15 - on_100_15), " [", 1.96*sd(op_100_15 - on_100_15)/sqrt(length(op_100_15 - on_100_15))+mean(op_100_15 - on_100_15), ":", mean(op_100_15 - on_100_15) - 1.96*sd(op_100_15 - on_100_15)/sqrt(length(op_100_15 - on_100_15)), "]", " ", mean(gop_100_15), " [", 1.96*sd(gop_100_15)/sqrt(length(gop_100_15))+mean(gop_100_15), ":", mean(gop_100_15) - 1.96*sd(gop_100_15)/sqrt(length(gop_100_15)), "]" )
print(paste(gop_100_15))
paste("Ganhos ov --> on", mean(ov_100_15 - on_100_15), " [", 1.96*sd(ov_100_15 - on_100_15)/sqrt(length(ov_100_15 - on_100_15))+mean(ov_100_15 - on_100_15), ":", mean(ov_100_15 - on_100_15) - 1.96*sd(ov_100_15 - on_100_15)/sqrt(length(ov_100_15 - on_100_15)), "]", " ", mean(gov_100_15), " [", 1.96*sd(gov_100_15)/sqrt(length(gov_100_15))+mean(gov_100_15), ":", mean(gov_100_15) - 1.96*sd(gov_100_15)/sqrt(length(gov_100_15)), "]" )
print(paste(gov_100_15))
gop_10_15 <- (op_10_15 - on_10_15) / abs(on_10_15)
gut_10_15 <- (ut_10_15 - on_10_15) / abs(on_10_15)
gov_10_15 <- (ov_10_15 - on_10_15) / abs(on_10_15)
print(">>>> GANHOS 10 15%")
paste("Ganhos hi --> on", mean(ut_10_15 - on_10_15), " [", 1.96*sd(ut_10_15 - on_10_15)/sqrt(length(ut_10_15 - on_10_15))+mean(ut_10_15 - on_10_15), ":", mean(ut_10_15 - on_10_15) - 1.96*sd(ut_10_15 - on_10_15)/sqrt(length(ut_10_15 - on_10_15)), "]", " ", mean(gut_10_15), " [", 1.96*sd(gut_10_15)/sqrt(length(gut_10_15))+mean(gut_10_15), ":", mean(gut_10_15) - 1.96*sd(gut_10_15)/sqrt(length(gut_10_15)), "]" )
print(paste(gut_10_15))
paste("Ganhos op --> on", mean(op_10_15 - on_10_15), " [", 1.96*sd(op_10_15 - on_10_15)/sqrt(length(op_10_15 - on_10_15))+mean(op_10_15 - on_10_15), ":", mean(op_10_15 - on_10_15) - 1.96*sd(op_10_15 - on_10_15)/sqrt(length(op_10_15 - on_10_15)), "]", " ", mean(gop_10_15), " [", 1.96*sd(gop_10_15)/sqrt(length(gop_10_15))+mean(gop_10_15), ":", mean(gop_10_15) - 1.96*sd(gop_10_15)/sqrt(length(gop_10_15)), "]" )
print(paste(gop_10_15))
paste("Ganhos ov --> on", mean(ov_10_15 - on_10_15), " [", 1.96*sd(ov_10_15 - on_10_15)/sqrt(length(ov_10_15 - on_10_15))+mean(ov_10_15 - on_10_15), ":", mean(ov_10_15 - on_10_15) - 1.96*sd(ov_10_15 - on_10_15)/sqrt(length(ov_10_15 - on_10_15)), "]", " ", mean(gov_10_15), " [", 1.96*sd(gov_10_15)/sqrt(length(gov_10_15))+mean(gov_10_15), ":", mean(gov_10_15) - 1.96*sd(gov_10_15)/sqrt(length(gov_10_15)), "]" )
print(paste(gov_10_15))
gop_50_15 <- (op_50_15 - on_50_15) / abs(on_50_15)
gut_50_15 <- (ut_50_15 - on_50_15) / abs(on_50_15)
gov_50_15 <- (ov_50_15 - on_50_15) / abs(on_50_15)
print(">>>> GANHOS 50 15%")
paste("Ganhos hi --> on", mean(ut_50_15 - on_50_15), " [", 1.96*sd(ut_50_15 - on_50_15)/sqrt(length(ut_50_15 - on_50_15))+mean(ut_50_15 - on_50_15), ":", mean(ut_50_15 - on_50_15) - 1.96*sd(ut_50_15 - on_50_15)/sqrt(length(ut_50_15 - on_50_15)), "]", " ", mean(gut_50_15), " [", 1.96*sd(gut_50_15)/sqrt(length(gut_50_15))+mean(gut_50_15), ":", mean(gut_50_15) - 1.96*sd(gut_50_15)/sqrt(length(gut_50_15)), "]" )
print(paste(gut_50_15))
paste("Ganhos op --> on", mean(op_50_15 - on_50_15), " [", 1.96*sd(op_50_15 - on_50_15)/sqrt(length(op_50_15 - on_50_15))+mean(op_50_15 - on_50_15), ":", mean(op_50_15 - on_50_15) - 1.96*sd(op_50_15 - on_50_15)/sqrt(length(op_50_15 - on_50_15)), "]", " ", mean(gop_50_15), " [", 1.96*sd(gop_50_15)/sqrt(length(gop_50_15))+mean(gop_50_15), ":", mean(gop_50_15) - 1.96*sd(gop_50_15)/sqrt(length(gop_50_15)), "]" )
print(paste(gop_50_15))
paste("Ganhos ov --> on", mean(ov_50_15 - on_50_15), " [", 1.96*sd(ov_50_15 - on_50_15)/sqrt(length(ov_50_15 - on_50_15))+mean(ov_50_15 - on_50_15), ":", mean(ov_50_15 - on_50_15) - 1.96*sd(ov_50_15 - on_50_15)/sqrt(length(ov_50_15 - on_50_15)), "]", " ", mean(gov_50_15), " [", 1.96*sd(gov_50_15)/sqrt(length(gov_50_15))+mean(gov_50_15), ":", mean(gov_50_15) - 1.96*sd(gov_50_15)/sqrt(length(gov_50_15)), "]" )
print(paste(gov_50_15))
# Gains relative to the on baseline, 5% risk
gop_100_5 <- (op_100_5 - on_100_5) / abs(on_100_5)
gut_100_5 <- (ut_100_5 - on_100_5) / abs(on_100_5)
gov_100_5 <- (ov_100_5 - on_100_5) / abs(on_100_5)
print(">>>> GANHOS 100 5%")
paste("Ganhos hi --> on", mean(ut_100_5 - on_100_5), " [", 1.96*sd(ut_100_5 - on_100_5)/sqrt(length(ut_100_5 - on_100_5))+mean(ut_100_5 - on_100_5), ":", mean(ut_100_5 - on_100_5) - 1.96*sd(ut_100_5 - on_100_5)/sqrt(length(ut_100_5 - on_100_5)), "]", " ", mean(gut_100_5), " [", 1.96*sd(gut_100_5)/sqrt(length(gut_100_5))+mean(gut_100_5), ":", mean(gut_100_5) - 1.96*sd(gut_100_5)/sqrt(length(gut_100_5)), "]" )
print(paste(gut_100_5))
paste("Ganhos op --> on", mean(op_100_5 - on_100_5), " [", 1.96*sd(op_100_5 - on_100_5)/sqrt(length(op_100_5 - on_100_5))+mean(op_100_5 - on_100_5), ":", mean(op_100_5 - on_100_5) - 1.96*sd(op_100_5 - on_100_5)/sqrt(length(op_100_5 - on_100_5)), "]", " ", mean(gop_100_5), " [", 1.96*sd(gop_100_5)/sqrt(length(gop_100_5))+mean(gop_100_5), ":", mean(gop_100_5) - 1.96*sd(gop_100_5)/sqrt(length(gop_100_5)), "]" )
print(paste(gop_100_5))
paste("Ganhos ov --> on", mean(ov_100_5 - on_100_5), " [", 1.96*sd(ov_100_5 - on_100_5)/sqrt(length(ov_100_5 - on_100_5))+mean(ov_100_5 - on_100_5), ":", mean(ov_100_5 - on_100_5) - 1.96*sd(ov_100_5 - on_100_5)/sqrt(length(ov_100_5 - on_100_5)), "]", " ", mean(gov_100_5), " [", 1.96*sd(gov_100_5)/sqrt(length(gov_100_5))+mean(gov_100_5), ":", mean(gov_100_5) - 1.96*sd(gov_100_5)/sqrt(length(gov_100_5)), "]" )
print(paste(gov_100_5))
gop_10_5 <- (op_10_5 - on_10_5) / abs(on_10_5)
gut_10_5 <- (ut_10_5 - on_10_5) / abs(on_10_5)
gov_10_5 <- (ov_10_5 - on_10_5) / abs(on_10_5)
print(">>>> GANHOS 10 5%")
paste("Ganhos hi --> on", mean(ut_10_5 - on_10_5), " [", 1.96*sd(ut_10_5 - on_10_5)/sqrt(length(ut_10_5 - on_10_5))+mean(ut_10_5 - on_10_5), ":", mean(ut_10_5 - on_10_5) - 1.96*sd(ut_10_5 - on_10_5)/sqrt(length(ut_10_5 - on_10_5)), "]", " ", mean(gut_10_5), " [", 1.96*sd(gut_10_5)/sqrt(length(gut_10_5))+mean(gut_10_5), ":", mean(gut_10_5) - 1.96*sd(gut_10_5)/sqrt(length(gut_10_5)), "]" )
print(paste(gut_10_5))
paste("Ganhos op --> on", mean(op_10_5 - on_10_5), " [", 1.96*sd(op_10_5 - on_10_5)/sqrt(length(op_10_5 - on_10_5))+mean(op_10_5 - on_10_5), ":", mean(op_10_5 - on_10_5) - 1.96*sd(op_10_5 - on_10_5)/sqrt(length(op_10_5 - on_10_5)), "]", " ", mean(gop_10_5), " [", 1.96*sd(gop_10_5)/sqrt(length(gop_10_5))+mean(gop_10_5), ":", mean(gop_10_5) - 1.96*sd(gop_10_5)/sqrt(length(gop_10_5)), "]" )
print(paste(gop_10_5))
paste("Ganhos ov --> on", mean(ov_10_5 - on_10_5), " [", 1.96*sd(ov_10_5 - on_10_5)/sqrt(length(ov_10_5 - on_10_5))+mean(ov_10_5 - on_10_5), ":", mean(ov_10_5 - on_10_5) - 1.96*sd(ov_10_5 - on_10_5)/sqrt(length(ov_10_5 - on_10_5)), "]", " ", mean(gov_10_5), " [", 1.96*sd(gov_10_5)/sqrt(length(gov_10_5))+mean(gov_10_5), ":", mean(gov_10_5) - 1.96*sd(gov_10_5)/sqrt(length(gov_10_5)), "]" )
print(paste(gov_10_5))
gop_50_5 <- (op_50_5 - on_50_5) / abs(on_50_5)
gut_50_5 <- (ut_50_5 - on_50_5) / abs(on_50_5)
gov_50_5 <- (ov_50_5 - on_50_5) / abs(on_50_5)
print(">>>> GANHOS 50 5%")
paste("Ganhos hi --> on", mean(ut_50_5 - on_50_5), " [", 1.96 * sd(ut_50_5 - on_50_5)/sqrt(length(ut_50_5 - on_50_5))+mean(ut_50_5 - on_50_5), ":", mean(ut_50_5 - on_50_5) - 1.96*sd(ut_50_5 - on_50_5)/sqrt(length(ut_50_5 - on_50_5)), "]", " ", mean(gut_50_5), " [", 1.96*sd(gut_50_5)/sqrt(length(gut_50_5))+mean(gut_50_5), ":", mean(gut_50_5) - 1.96*sd(gut_50_5)/sqrt(length(gut_50_5)), "]" )
print(paste(gut_50_5))
paste("Ganhos op --> on", mean(op_50_5 - on_50_5), " [", 1.96 * sd(op_50_5 - on_50_5)/sqrt(length(op_50_5 - on_50_5))+mean(op_50_5 - on_50_5), ":", mean(op_50_5 - on_50_5) - 1.96*sd(op_50_5 - on_50_5)/sqrt(length(op_50_5 - on_50_5)), "]", " ", mean(gop_50_5), " [", 1.96*sd(gop_50_5)/sqrt(length(gop_50_5))+mean(gop_50_5), ":", mean(gop_50_5) - 1.96*sd(gop_50_5)/sqrt(length(gop_50_5)), "]" )
print(paste(gop_50_5))
paste("Ganhos ov --> on", mean(ov_50_5 - on_50_5), " [", 1.96 * sd(ov_50_5 - on_50_5)/sqrt(length(ov_50_5 - on_50_5))+mean(ov_50_5 - on_50_5), ":", mean(ov_50_5 - on_50_5) - 1.96*sd(ov_50_5 - on_50_5)/sqrt(length(ov_50_5 - on_50_5)), "]", " ", mean(gov_50_5), " [", 1.96*sd(gov_50_5)/sqrt(length(gov_50_5))+mean(gov_50_5), ":", mean(gov_50_5) - 1.96*sd(gov_50_5)/sqrt(length(gov_50_5)), "]" )
print(paste(gov_50_5))
paste("Média geral ganho % UT: ", mean(c(gut_50_5, gut_50_15, gut_50_5, gut_100_5, gut_100_15, gut_100_10)))
paste("Média geral ganho % RF: ", mean(c(gop_50_5, gop_50_15, gop_50_5, gop_100_5, gop_100_15, gop_100_10)))
paste("Média geral ganho % OV: ", mean(c(gov_50_5, gov_50_15, gov_50_5, gov_100_5, gov_100_15, gov_100_10)))
paste("Média geral ganho $ UT: ", mean(c(ut_50_5 - on_50_5, ut_50_15- on_50_15, ut_50_5 - on_50_10, ut_100_5- on_100_5, ut_100_15- on_100_15, ut_100_10- on_100_10)))
paste("Média geral ganho $ RF: ", mean(c(op_50_5 - on_50_5, op_50_15- on_50_15, op_50_5 - on_50_10, op_100_5- on_100_5, op_100_15- on_100_15, op_100_10- on_100_10)))
paste("Média geral ganho $ OV: ", mean(c(ov_50_5 - on_50_5, ov_50_15- on_50_15, ov_50_5 - on_50_10, ov_100_5- on_100_5, ov_100_15- on_100_15, ov_100_10- on_100_10)))
# Checking the influence of the risk factor on profits
print("Analise do fator risco")
print("\nAnalise OP\n")
print("10 us OP 5% == OP 10%")
wilcox.test(op_10_5, op_10_10, paired=TRUE, var.equal=FALSE)
print("10 us OP 5% > OP 10%")
wilcox.test(op_10_5, op_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us OP 10% == OP 15%")
wilcox.test(op_10_10, op_10_15, paired=TRUE, var.equal=FALSE)
print("10 us OP 10% > OP 15%")
wilcox.test(op_10_10, op_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us OP 5% == OP 15%")
wilcox.test(op_10_5, op_10_15, paired=TRUE, var.equal=FALSE)
print("10 us OP 5% > OP 15%")
wilcox.test(op_10_5, op_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 5% == OP 10%")
wilcox.test(op_50_5, op_50_10, paired=TRUE, var.equal=FALSE)
print("50 us OP 5% > OP 10%")
wilcox.test(op_50_5, op_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 10% == OP 15%")
wilcox.test(op_50_10, op_50_15, paired=TRUE, var.equal=FALSE)
print("50 us OP 10% > OP 15%")
wilcox.test(op_50_10, op_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 5% == OP 15%")
wilcox.test(op_50_5, op_50_15, paired=TRUE, var.equal=FALSE)
print("50 us OP 5% > OP 15%")
wilcox.test(op_50_5, op_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 5% == OP 10%")
wilcox.test(op_100_5, op_100_10, paired=TRUE, var.equal=FALSE)
print("100 us OP 5% > OP 10%")
wilcox.test(op_100_5, op_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 10% == OP 15%")
wilcox.test(op_100_10, op_100_15, paired=TRUE, var.equal=FALSE)
print("100 us OP 10% > OP 15%")
wilcox.test(op_100_10, op_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 5% == OP 15%")
wilcox.test(op_100_5, op_100_15, paired=TRUE, var.equal=FALSE)
print("100 us OP 5% > OP 15%")
wilcox.test(op_100_5, op_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("\nAnalise UT\n")
print("10 us UT 5% == UT 10%")
wilcox.test(ut_10_5, ut_10_10, paired=TRUE, var.equal=FALSE)
print("10 us UT 5% > UT 10%")
wilcox.test(ut_10_5, ut_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us UT 10% == UT 15%")
wilcox.test(ut_10_10, ut_10_15, paired=TRUE, var.equal=FALSE)
print("10 us UT 10% > UT 15%")
wilcox.test(ut_10_10, ut_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us UT 5% == UT 15%")
wilcox.test(ut_10_5, ut_10_15, paired=TRUE, var.equal=FALSE)
print("10 us UT 5% > UT 15%")
wilcox.test(ut_10_5, ut_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 5% == UT 10%")
wilcox.test(ut_50_5, ut_50_10, paired=TRUE, var.equal=FALSE)
print("50 us UT 5% > UT 10%")
wilcox.test(ut_50_5, ut_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 10% == UT 15%")
wilcox.test(ut_50_10, ut_50_15, paired=TRUE, var.equal=FALSE)
print("50 us UT 10% > UT 15%")
wilcox.test(ut_50_10, ut_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 5% == UT 15%")
wilcox.test(ut_50_5, ut_50_15, paired=TRUE, var.equal=FALSE)
print("50 us UT 5% > UT 15%")
wilcox.test(ut_50_5, ut_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 5% == UT 10%")
wilcox.test(ut_100_5, ut_100_10, paired=TRUE, var.equal=FALSE)
print("100 us UT 5% > UT 10%")
wilcox.test(ut_100_5, ut_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 10% == UT 15%")
wilcox.test(ut_100_10, ut_100_15, paired=TRUE, var.equal=FALSE)
print("100 us UT 10% > UT 15%")
wilcox.test(ut_100_10, ut_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 5% == UT 15%")
wilcox.test(ut_100_5, ut_100_15, paired=TRUE, var.equal=FALSE)
print("100 us UT 5% > UT 15%")
wilcox.test(ut_100_5, ut_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
# Checking the influence of the risk factor on gains
print("Analise do fator risco nos ganhos")
print("\nAnalise ganho OP\n")
print("10 us OP 5% == OP 10%")
wilcox.test(gop_10_5, gop_10_10, paired=TRUE, var.equal=FALSE)
print("10 us OP 5% > OP 10%")
wilcox.test(gop_10_5, gop_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us OP 10% == OP 15%")
wilcox.test(gop_10_10, gop_10_15, paired=TRUE, var.equal=FALSE)
print("10 us OP 10% > OP 15%")
wilcox.test(gop_10_10, gop_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us OP 5% == OP 15%")
wilcox.test(gop_10_5, gop_10_15, paired=TRUE, var.equal=FALSE)
print("10 us OP 5% > OP 15%")
wilcox.test(gop_10_5, gop_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 5% == OP 10%")
wilcox.test(gop_50_5, gop_50_10, paired=TRUE, var.equal=FALSE)
print("50 us OP 5% > OP 10%")
wilcox.test(gop_50_5, gop_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 10% == OP 15%")
wilcox.test(gop_50_10, gop_50_15, paired=TRUE, var.equal=FALSE)
print("50 us OP 10% > OP 15%")
wilcox.test(gop_50_10, gop_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 5% == OP 15%")
wilcox.test(gop_50_5, gop_50_15, paired=TRUE, var.equal=FALSE)
print("50 us OP 5% > OP 15%")
wilcox.test(gop_50_5, gop_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 5% == OP 10%")
wilcox.test(gop_100_5, gop_100_10, paired=TRUE, var.equal=FALSE)
print("100 us OP 5% > OP 10%")
wilcox.test(gop_100_5, gop_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 10% == OP 15%")
wilcox.test(gop_100_10, gop_100_15, paired=TRUE, var.equal=FALSE)
print("100 us OP 10% > OP 15%")
wilcox.test(gop_100_10, gop_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 5% == OP 15%")
wilcox.test(gop_100_5, gop_100_15, paired=TRUE, var.equal=FALSE)
print("100 us OP 5% > OP 15%")
wilcox.test(gop_100_5, gop_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("\nAnalise UT\n")
print("10 us UT 5% == UT 10%")
wilcox.test(gut_10_5, gut_10_10, paired=TRUE, var.equal=FALSE)
print("10 us UT 5% > UT 10%")
wilcox.test(gut_10_5, gut_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us UT 10% == UT 15%")
wilcox.test(gut_10_10, gut_10_15, paired=TRUE, var.equal=FALSE)
print("10 us UT 10% > UT 15%")
wilcox.test(gut_10_10, gut_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us UT 5% == UT 15%")
wilcox.test(gut_10_5, gut_10_15, paired=TRUE, var.equal=FALSE)
print("10 us UT 5% > UT 15%")
wilcox.test(gut_10_5, gut_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 5% == UT 10%")
wilcox.test(gut_50_5, gut_50_10, paired=TRUE, var.equal=FALSE)
print("50 us UT 5% > UT 10%")
wilcox.test(gut_50_5, gut_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 10% == UT 15%")
wilcox.test(gut_50_10, gut_50_15, paired=TRUE, var.equal=FALSE)
print("50 us UT 10% > UT 15%")
wilcox.test(gut_50_10, gut_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 5% == UT 15%")
wilcox.test(gut_50_5, gut_50_15, paired=TRUE, var.equal=FALSE)
print("50 us UT 5% > UT 15%")
wilcox.test(gut_50_5, gut_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 5% == UT 10%")
wilcox.test(gut_100_5, gut_100_10, paired=TRUE, var.equal=FALSE)
print("100 us UT 5% > UT 10%")
wilcox.test(gut_100_5, gut_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 10% == UT 15%")
wilcox.test(gut_100_10, gut_100_15, paired=TRUE, var.equal=FALSE)
print("100 us UT 10% > UT 15%")
wilcox.test(gut_100_10, gut_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 5% == UT 15%")
wilcox.test(gut_100_5, gut_100_15, paired=TRUE, var.equal=FALSE)
print("100 us UT 5% > UT 15%")
wilcox.test(gut_100_5, gut_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
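# --- Hypothetical refactoring sketch (assumption, not part of the original script):
# the risk-factor analysis above repeats the same pairwise comparisons (5% vs 10%,
# 10% vs 15%, 5% vs 15%) for each strategy and scenario size. A compact loop over a
# named list of profit vectors would cover one such strategy/scenario combination:
risk_levels <- list("5" = op_10_5, "10" = op_10_10, "15" = op_10_15)  # OP, 10 users
for (p in combn(names(risk_levels), 2, simplify = FALSE)) {
  print(paste("10 us OP", p[1], "% vs", p[2], "%"))
  print(wilcox.test(risk_levels[[p[1]]], risk_levels[[p[2]]], paired = TRUE))
  print(wilcox.test(risk_levels[[p[1]]], risk_levels[[p[2]]], paired = TRUE, alternative = "less"))
}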
|
/SaaSim/src/main/resources/scripts/planning/avaliacao/scenario/estatistica.R
|
no_license
|
davidcmm/SaaSim_planning
|
R
| false | false | 40,003 |
r
|
#!/bin/Rscript
on_100_10 <- read.table("on_100_10/profits.dat")$V1
#op_100_10 <- read.table("op_100_10/profits.dat")$V1
#op_100_10 <- read.table("op_partial_100_10/profits.dat")$V1
op_100_10 <- read.table("op_partial_fac5_100_10/profits.dat")$V1
ov_100_10 <- read.table("ov_100_10/profits.dat")$V1
ut_100_10 <- read.table("ut_100_10/profits.dat")$V1
# Testes de hipoteses
print("100 us 10%: hi == on")
wilcox.test(ut_100_10, on_100_10, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_100_10, on_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_100_10, ov_100_10, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_100_10, ov_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_100_10, ov_100_10, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_100_10, ov_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_100_10, op_100_10, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_100_10, op_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_100_10, on_100_10, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_100_10, on_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
on_100_5 <- read.table("on_100_5/profits.dat")$V1
#op_100_5 <- read.table("op_100_5/profits.dat")$V1
#op_100_5 <- read.table("op_partial_100_5/profits.dat")$V1
op_100_5 <- read.table("op_partial_fac5_100_5/profits.dat")$V1
ov_100_5 <- read.table("ov_100_5/profits.dat")$V1
ut_100_5 <- read.table("ut_100_5/profits.dat")$V1
# Testes de hipoteses
print("100 us 5%: hi == on")
wilcox.test(ut_100_5, on_100_5, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_100_5, on_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_100_5, ov_100_5, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_100_5, ov_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_100_5, ov_100_5, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_100_5, ov_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_100_5, op_100_5, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_100_5, op_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_100_5, on_100_5, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_100_5, on_100_5, paired=TRUE, var.equal=FALSE, alternative="less")
on_100_15 <- read.table("on_100_15/profits.dat")$V1
#op_100_15 <- read.table("op_100_15/profits.dat")$V1
#op_100_15 <- read.table("op_partial_100_15/profits.dat")$V1
op_100_15 <- read.table("op_partial_fac5_100_15/profits.dat")$V1
ov_100_15 <- read.table("ov_100_15/profits.dat")$V1
ut_100_15 <- read.table("ut_100_15/profits.dat")$V1
# Testes de hipoteses
print("100 us 15%: hi == on")
wilcox.test(ut_100_15, on_100_15, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_100_15, on_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_100_15, ov_100_15, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_100_15, ov_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_100_15, ov_100_15, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_100_15, ov_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_100_15, op_100_15, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_100_15, op_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_100_15, on_100_15, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_100_15, on_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
on_10_10 <- read.table("on_10_10/profits.dat")$V1
#op_10_10 <- read.table("op_10_10/profits.dat")$V1
#op_10_10 <- read.table("op_partial_10_10/profits.dat")$V1
op_10_10 <- read.table("op_partial_fac5_10_10/profits.dat")$V1
ov_10_10 <- read.table("ov_10_10/profits.dat")$V1
ut_10_10 <- read.table("ut_10_10/profits.dat")$V1
# Testes de hipoteses
print("10 us 10%: hi == on")
wilcox.test(ut_10_10, on_10_10, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_10_10, on_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_10_10, ov_10_10, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_10_10, ov_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_10_10, ov_10_10, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_10_10, ov_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_10_10, op_10_10, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_10_10, op_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_10_10, on_10_10, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_10_10, on_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
on_10_5 <- read.table("on_10_5/profits.dat")$V1
#op_10_5 <- read.table("op_10_5/profits.dat")$V1
#op_10_5 <- read.table("op_partial_10_5/profits.dat")$V1
op_10_5 <- read.table("op_partial_fac5_10_5/profits.dat")$V1
ov_10_5 <- read.table("ov_10_5/profits.dat")$V1
ut_10_5 <- read.table("ut_10_5/profits.dat")$V1
# Testes de hipoteses
print("10 us 5%: hi == on")
wilcox.test(ut_10_5, on_10_5, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_10_5, on_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_10_5, ov_10_5, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_10_5, ov_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_10_5, ov_10_5, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_10_5, ov_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_10_5, op_10_5, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_10_5, op_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_10_5, on_10_5, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_10_5, on_10_5, paired=TRUE, var.equal=FALSE, alternative="less")
on_10_15 <- read.table("on_10_15/profits.dat")$V1
#op_10_15 <- read.table("op_10_15/profits.dat")$V1
#op_10_15 <- read.table("op_partial_10_15/profits.dat")$V1
op_10_15 <- read.table("op_partial_fac5_10_15/profits.dat")$V1
ov_10_15 <- read.table("ov_10_15/profits.dat")$V1
ut_10_15 <- read.table("ut_10_15/profits.dat")$V1
# Testes de hipoteses
print("10 us 15%: hi == on")
wilcox.test(ut_10_15, on_10_15, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_10_15, on_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_10_15, ov_10_15, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_10_15, ov_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_10_15, ov_10_15, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_10_15, ov_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_10_15, op_10_15, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_10_15, op_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_10_15, on_10_15, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_10_15, on_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
on_50_10 <- read.table("on_50_10/profits.dat")$V1
#op_50_10 <- read.table("op_50_10/profits.dat")$V1
#op_50_10 <- read.table("op_partial_50_10/profits.dat")$V1
op_50_10 <- read.table("op_partial_fac5_50_10/profits.dat")$V1
ov_50_10 <- read.table("ov_50_10/profits.dat")$V1
ut_50_10 <- read.table("ut_50_10/profits.dat")$V1
# Testes de hipoteses
print("50 us 10%: hi == on")
wilcox.test(ut_50_10, on_50_10, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_50_10, on_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_50_10, ov_50_10, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_50_10, ov_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_50_10, ov_50_10, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_50_10, ov_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_50_10, op_50_10, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_50_10, op_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_50_10, on_50_10, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_50_10, on_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
on_50_5 <- read.table("on_50_5/profits.dat")$V1
#op_50_5 <- read.table("op_50_5/profits.dat")$V1
#op_50_5 <- read.table("op_partial_50_5/profits.dat")$V1
op_50_5 <- read.table("op_partial_fac5_50_5/profits.dat")$V1
ov_50_5 <- read.table("ov_50_5/profits.dat")$V1
ut_50_5 <- read.table("ut_50_5/profits.dat")$V1
# Testes de hipoteses
print("50 us 5%: hi == on")
wilcox.test(ut_50_5, on_50_5, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_50_5, on_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_50_5, ov_50_5, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_50_5, ov_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_50_5, ov_50_5, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_50_5, ov_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_50_5, op_50_5, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_50_5, op_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_50_5, on_50_5, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_50_5, on_50_5, paired=TRUE, var.equal=FALSE, alternative="less")
on_50_15 <- read.table("on_50_15/profits.dat")$V1
#op_50_15 <- read.table("op_50_15/profits.dat")$V1
#op_50_15 <- read.table("op_partial_50_15/profits.dat")$V1
op_50_15 <- read.table("op_partial_fac5_50_15/profits.dat")$V1
ov_50_15 <- read.table("ov_50_15/profits.dat")$V1
ut_50_15 <- read.table("ut_50_15/profits.dat")$V1
# Testes de hipoteses
print("50 us 15%: hi == on")
wilcox.test(ut_50_15, on_50_15, paired=TRUE, var.equal=FALSE)
print("hi > on")
wilcox.test(ut_50_15, on_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == ov")
wilcox.test(ut_50_10, ov_50_15, paired=TRUE, var.equal=FALSE)
print("hi > ov")
wilcox.test(ut_50_10, ov_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("ov == on")
wilcox.test(on_50_15, ov_50_15, paired=TRUE, var.equal=FALSE)
print("on > ov")
wilcox.test(on_50_15, ov_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("hi == op")
wilcox.test(ut_50_15, op_50_15, paired=TRUE, var.equal=FALSE)
print("hi > op")
wilcox.test(ut_50_15, op_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("op == on")
wilcox.test(op_50_15, on_50_15, paired=TRUE, var.equal=FALSE)
print("op > on")
wilcox.test(op_50_15, on_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
# >>>>>> Resumos dos profits
print(">>>>>>>> LUCROS 100 10%")
paste("OP", mean(op_100_10), "[", 1.96*sd(op_100_10)/sqrt(length(op_100_10))+mean(op_100_10), ":", mean(op_100_10) - 1.96*sd(op_100_10)/sqrt(length(op_100_10)), "]")
paste("UT", mean(ut_100_10), "[", 1.96*sd(ut_100_10)/sqrt(length(ut_100_10))+mean(ut_100_10), ":", mean(ut_100_10) - 1.96*sd(ut_100_10)/sqrt(length(ut_100_10)), "]")
paste("ON", mean(on_100_10), "[", 1.96*sd(on_100_10)/sqrt(length(on_100_10))+mean(on_100_10), ":", mean(on_100_10) - 1.96*sd(on_100_10)/sqrt(length(on_100_10)), "]")
paste("OV", mean(ov_100_10), "[", 1.96*sd(ov_100_10)/sqrt(length(ov_100_10))+mean(ov_100_10), ":", mean(ov_100_10) - 1.96*sd(ov_100_10)/sqrt(length(ov_100_10)), "]")
print(">>>>>>>> LUCROS 10 10%")
paste("OP", mean(op_10_10), "[", 1.96*sd(op_10_10)/sqrt(length(op_10_10))+mean(op_10_10), ":", mean(op_10_10) - 1.96*sd(op_10_10)/sqrt(length(op_10_10)), "]")
paste("UT", mean(ut_10_10), "[", 1.96*sd(ut_10_10)/sqrt(length(ut_10_10))+mean(ut_10_10), ":", mean(ut_10_10) - 1.96*sd(ut_10_10)/sqrt(length(ut_10_10)), "]")
paste("ON", mean(on_10_10), "[", 1.96*sd(on_10_10)/sqrt(length(on_10_10))+mean(on_10_10), ":", mean(on_10_10) - 1.96*sd(on_10_10)/sqrt(length(on_10_10)), "]")
paste("OV", mean(ov_10_10), "[", 1.96*sd(ov_10_10)/sqrt(length(ov_10_10))+mean(ov_10_10), ":", mean(ov_10_10) - 1.96*sd(ov_10_10)/sqrt(length(ov_10_10)), "]")
print(">>>>>>>> LUCROS 50 10%")
paste("OP", mean(op_50_10), "[", 1.96*sd(op_50_10)/sqrt(length(op_50_10))+mean(op_50_10), ":", mean(op_50_10) - 1.96*sd(op_50_10)/sqrt(length(op_50_10)), "]")
paste("UT", mean(ut_50_10), "[", 1.96*sd(ut_50_10)/sqrt(length(ut_50_10))+mean(ut_50_10), ":", mean(ut_50_10) - 1.96*sd(ut_50_10)/sqrt(length(ut_50_10)), "]")
paste("ON", mean(on_50_10), "[", 1.96*sd(on_50_10)/sqrt(length(on_50_10))+mean(on_50_10), ":", mean(on_50_10) - 1.96*sd(on_50_10)/sqrt(length(on_50_10)), "]")
paste("OV", mean(ov_50_10), "[", 1.96*sd(ov_50_10)/sqrt(length(ov_50_10))+mean(ov_50_10), ":", mean(ov_50_10) - 1.96*sd(ov_50_10)/sqrt(length(ov_50_10)), "]")
print(">>>>>>>> LUCROS 100 15%")
paste("OP", mean(op_100_15), "[", 1.96*sd(op_100_15)/sqrt(length(op_100_15))+mean(op_100_15), ":", mean(op_100_15) - 1.96*sd(op_100_15)/sqrt(length(op_100_15)), "]")
paste("UT", mean(ut_100_15), "[", 1.96*sd(ut_100_15)/sqrt(length(ut_100_15))+mean(ut_100_15), ":", mean(ut_100_15) - 1.96*sd(ut_100_15)/sqrt(length(ut_100_15)), "]")
paste("ON", mean(on_100_15), "[", 1.96*sd(on_100_15)/sqrt(length(on_100_15))+mean(on_100_15), ":", mean(on_100_15) - 1.96*sd(on_100_15)/sqrt(length(on_100_15)), "]")
paste("OV", mean(ov_100_15), "[", 1.96*sd(ov_100_15)/sqrt(length(ov_100_15))+mean(ov_100_15), ":", mean(ov_100_15) - 1.96*sd(ov_100_15)/sqrt(length(ov_100_15)), "]")
print(">>>>>>>> LUCROS 10 15%")
paste("OP", mean(op_10_15), "[", 1.96*sd(op_10_15)/sqrt(length(op_10_15))+mean(op_10_15), ":", mean(op_10_15) - 1.96*sd(op_10_15)/sqrt(length(op_10_15)), "]")
paste("UT", mean(ut_10_15), "[", 1.96*sd(ut_10_15)/sqrt(length(ut_10_15))+mean(ut_10_15), ":", mean(ut_10_15) - 1.96*sd(ut_10_15)/sqrt(length(ut_10_15)), "]")
paste("ON", mean(on_10_15), "[", 1.96*sd(on_10_15)/sqrt(length(on_10_15))+mean(on_10_15), ":", mean(on_10_15) - 1.96*sd(on_10_15)/sqrt(length(on_10_15)), "]")
paste("OV", mean(ov_10_15), "[", 1.96*sd(ov_10_15)/sqrt(length(ov_10_15))+mean(ov_10_15), ":", mean(ov_10_15) - 1.96*sd(ov_10_15)/sqrt(length(ov_10_15)), "]")
print(">>>>>>>> LUCROS 50 15%")
paste("OP", mean(op_50_15), "[", 1.96*sd(op_50_15)/sqrt(length(op_50_15))+mean(op_50_15), ":", mean(op_50_15) - 1.96*sd(op_50_15)/sqrt(length(op_50_15)), "]")
paste("UT", mean(ut_50_15), "[", 1.96*sd(ut_50_15)/sqrt(length(ut_50_15))+mean(ut_50_15), ":", mean(ut_50_15) - 1.96*sd(ut_50_15)/sqrt(length(ut_50_15)), "]")
paste("ON", mean(on_50_15), "[", 1.96*sd(on_50_15)/sqrt(length(on_50_15))+mean(on_50_15), ":", mean(on_50_15) - 1.96*sd(on_50_15)/sqrt(length(on_50_15)), "]")
paste("OV", mean(ov_50_15), "[", 1.96*sd(ov_50_15)/sqrt(length(ov_50_15))+mean(ov_50_15), ":", mean(ov_50_15) - 1.96*sd(ov_50_15)/sqrt(length(ov_50_15)), "]")
print(">>>>>>>> LUCROS 100 5%")
paste("OP", mean(op_100_5), "[", 1.96*sd(op_100_5)/sqrt(length(op_100_5))+mean(op_100_5), ":", mean(op_100_5) - 1.96*sd(op_100_5)/sqrt(length(op_100_5)), "]")
paste("UT", mean(ut_100_5), "[", 1.96*sd(ut_100_5)/sqrt(length(ut_100_5))+mean(ut_100_5), ":", mean(ut_100_5) - 1.96*sd(ut_100_5)/sqrt(length(ut_100_5)), "]")
paste("ON", mean(on_100_5), "[", 1.96*sd(on_100_5)/sqrt(length(on_100_5))+mean(on_100_5), ":", mean(on_100_5) - 1.96*sd(on_100_5)/sqrt(length(on_100_5)), "]")
paste("OV", mean(ov_100_5), "[", 1.96*sd(ov_100_5)/sqrt(length(ov_100_5))+mean(ov_100_5), ":", mean(ov_100_5) - 1.96*sd(ov_100_5)/sqrt(length(ov_100_5)), "]")
print(">>>>>>>> LUCROS 10 5%")
paste("OP", mean(op_10_5), "[", 1.96*sd(op_10_5)/sqrt(length(op_10_5))+mean(op_10_5), ":", mean(op_10_5) - 1.96*sd(op_10_5)/sqrt(length(op_10_5)), "]")
paste("UT", mean(ut_10_5), "[", 1.96*sd(ut_10_5)/sqrt(length(ut_10_5))+mean(ut_10_5), ":", mean(ut_10_5) - 1.96*sd(ut_10_5)/sqrt(length(ut_10_5)), "]")
paste("ON", mean(on_10_5), "[", 1.96*sd(on_10_5)/sqrt(length(on_10_5))+mean(on_10_5), ":", mean(on_10_5) - 1.96*sd(on_10_5)/sqrt(length(on_10_5)), "]")
paste("OV", mean(ov_10_5), "[", 1.96*sd(ov_10_5)/sqrt(length(ov_10_5))+mean(ov_10_5), ":", mean(ov_10_5) - 1.96*sd(ov_10_5)/sqrt(length(ov_10_5)), "]")
print(">>>>>>>> LUCROS 50 5%")
paste("OP", mean(op_50_5), "[", 1.96*sd(op_50_5)/sqrt(length(op_50_5))+mean(op_50_5), ":", mean(op_50_5) - 1.96*sd(op_50_5)/sqrt(length(op_50_5)), "]")
paste("UT", mean(ut_50_5), "[", 1.96*sd(ut_50_5)/sqrt(length(ut_50_5))+mean(ut_50_5), ":", mean(ut_50_5) - 1.96*sd(ut_50_5)/sqrt(length(ut_50_5)), "]")
paste("ON", mean(on_50_5), "[", 1.96*sd(on_50_5)/sqrt(length(on_50_5))+mean(on_50_5), ":", mean(on_50_5) - 1.96*sd(on_50_5)/sqrt(length(on_50_5)), "]")
paste("OV", mean(ov_50_5), "[", 1.96*sd(ov_50_5)/sqrt(length(ov_50_5))+mean(ov_50_5), ":", mean(ov_50_5) - 1.96*sd(ov_50_5)/sqrt(length(ov_50_5)), "]")
paste("Média geral lucro UT: ", mean(c(ut_50_5, ut_50_15, ut_50_10, ut_100_5, ut_100_15, ut_100_10)))
paste("Média geral lucro RF: ", mean(c(op_50_5, op_50_15, op_50_10, op_100_5, op_100_15, op_100_10)))
paste("Média geral lucro OV: ", mean(c(ov_50_5, ov_50_15, ov_50_10, ov_100_5, ov_100_15, ov_100_10)))
#>>>>>>>>> Ganhos para risco de 10%
gop_100_10 <- (op_100_10 - on_100_10) / abs(on_100_10)
gut_100_10 <- (ut_100_10 - on_100_10) / abs(on_100_10)
gov_100_10 <- (ov_100_10 - on_100_10) / abs(on_100_10)
print(">>>> GANHOS 100 10%")
paste("Ganhos hi --> on", mean(ut_100_10 - on_100_10), " [", 1.96*sd(ut_100_10 - on_100_10)/sqrt(length(ut_100_10 - on_100_10))+mean(ut_100_10 - on_100_10), ":", mean(ut_100_10 - on_100_10) - 1.96*sd(ut_100_10 - on_100_10)/sqrt(length(ut_100_10 - on_100_10)), "]", " ", mean(gut_100_10), " [", 1.96*sd(gut_100_10)/sqrt(length(gut_100_10))+mean(gut_100_10), ":", mean(gut_100_10) - 1.96*sd(gut_100_10)/sqrt(length(gut_100_10)), "]" )
print(paste(gut_100_10))
paste("Ganhos op --> on", mean(op_100_10 - on_100_10), " [", 1.96*sd(op_100_10 - on_100_10)/sqrt(length(op_100_10 - on_100_10))+mean(op_100_10 - on_100_10), ":", mean(op_100_10 - on_100_10) - 1.96*sd(op_100_10 - on_100_10)/sqrt(length(op_100_10 - on_100_10)), "]", " ", mean(gop_100_10), " [", 1.96*sd(gop_100_10)/sqrt(length(gop_100_10))+mean(gop_100_10), ":", mean(gop_100_10) - 1.96*sd(gop_100_10)/sqrt(length(gop_100_10)), "]" )
print(paste(gop_100_10))
paste("Ganhos ov --> on", mean(ov_100_10 - on_100_10), " [", 1.96*sd(ov_100_10 - on_100_10)/sqrt(length(ov_100_10 - on_100_10))+mean(ov_100_10 - on_100_10), ":", mean(ov_100_10 - on_100_10) - 1.96*sd(ov_100_10 - on_100_10)/sqrt(length(ov_100_10 - on_100_10)), "]", " ", mean(gov_100_10), " [", 1.96*sd(gov_100_10)/sqrt(length(gov_100_10))+mean(gov_100_10), ":", mean(gov_100_10) - 1.96*sd(gov_100_10)/sqrt(length(gov_100_10)), "]" )
print(paste(gov_100_10))
gop_10_10 <- (op_10_10 - on_10_10) / abs(on_10_10)
gut_10_10 <- (ut_10_10 - on_10_10) / abs(on_10_10)
gov_10_10 <- (ov_10_10 - on_10_10) / abs(on_10_10)
print(">>>> GANHOS 10 10%")
paste("Ganhos hi --> on", mean(ut_10_10 - on_10_10), " ", mean(gut_10_10), " [", 1.96*sd(ut_10_10 - on_10_10)/sqrt(length(ut_10_10 - on_10_10))+mean(ut_10_10 - on_10_10), ":", mean(ut_10_10 - on_10_10) - 1.96*sd(ut_10_10 - on_10_10)/sqrt(length(ut_10_10 - on_10_10)), "]", " [", 1.96*sd(gut_10_10)/sqrt(length(gut_10_10))+mean(gut_10_10), ":", mean(gut_10_10) - 1.96*sd(gut_10_10)/sqrt(length(gut_10_10)), "]" )
print(paste(gut_10_10))
paste("Ganhos op --> on", mean(op_10_10 - on_10_10), " [", 1.96*sd(op_10_10 - on_10_10)/sqrt(length(op_10_10 - on_10_10))+mean(op_10_10 - on_10_10), ":", mean(op_10_10 - on_10_10) - 1.96*sd(op_10_10 - on_10_10)/sqrt(length(op_10_10 - on_10_10)), "]", " ", mean(gop_10_10), " [", 1.96*sd(gop_10_10)/sqrt(length(gop_10_10))+mean(gop_10_10), ":", mean(gop_10_10) - 1.96*sd(gop_10_10)/sqrt(length(gop_10_10)), "]" )
print(paste(gop_10_10))
paste("Ganhos ov --> on", mean(ov_10_10 - on_10_10), " [", 1.96*sd(ov_10_10 - on_10_10)/sqrt(length(ov_10_10 - on_10_10))+mean(ov_10_10 - on_10_10), ":", mean(ov_10_10 - on_10_10) - 1.96*sd(ov_10_10 - on_10_10)/sqrt(length(ov_10_10 - on_10_10)), "]", " ", mean(gov_10_10), " [", 1.96*sd(gov_10_10)/sqrt(length(gov_10_10))+mean(gov_10_10), ":", mean(gov_10_10) - 1.96*sd(gov_10_10)/sqrt(length(gov_10_10)), "]" )
print(paste(gov_10_10))
gop_50_10 <- (op_50_10 - on_50_10) / abs(on_50_10)
gut_50_10 <- (ut_50_10 - on_50_10) / abs(on_50_10)
gov_50_10 <- (ov_50_10 - on_50_10) / abs(on_50_10)
print(">>>> GANHOS 50 10%")
paste("Ganhos hi --> on", mean(ut_50_10 - on_50_10), " [", 1.96*sd(ut_50_10 - on_50_10)/sqrt(length(ut_50_10 - on_50_10))+mean(ut_50_10 - on_50_10), ":", mean(ut_50_10 - on_50_10) - 1.96*sd(ut_50_10 - on_50_10)/sqrt(length(ut_50_10 - on_50_10)), "]", " ", mean(gut_50_10), " [", 1.96*sd(gut_50_10)/sqrt(length(gut_50_10))+mean(gut_50_10), ":", mean(gut_50_10) - 1.96*sd(gut_50_10)/sqrt(length(gut_50_10)), "]" )
print(paste(gut_50_10))
paste("Ganhos op --> on", mean(op_50_10 - on_50_10), " [", 1.96*sd(op_50_10 - on_50_10)/sqrt(length(op_50_10 - on_50_10))+mean(op_50_10 - on_50_10), ":", mean(op_50_10 - on_50_10) - 1.96*sd(op_50_10 - on_50_10)/sqrt(length(op_50_10 - on_50_10)), "]", " ", mean(gop_50_10), " [", 1.96*sd(gop_50_10)/sqrt(length(gop_50_10))+mean(gop_50_10), ":", mean(gop_50_10) - 1.96*sd(gop_50_10)/sqrt(length(gop_50_10)), "]" )
print(paste(gop_50_10))
paste("Ganhos ov --> on", mean(ov_50_10 - on_50_10), " [", 1.96*sd(ov_50_10 - on_50_10)/sqrt(length(ov_50_10 - on_50_10))+mean(ov_50_10 - on_50_10), ":", mean(ov_50_10 - on_50_10) - 1.96*sd(ov_50_10 - on_50_10)/sqrt(length(ov_50_10 - on_50_10)), "]", " ", mean(gov_50_10), " [", 1.96*sd(gov_50_10)/sqrt(length(gov_50_10))+mean(gov_50_10), ":", mean(gov_50_10) - 1.96*sd(gov_50_10)/sqrt(length(gov_50_10)), "]" )
print(paste(gov_50_10))
#Ganhos para risco de 15%
gop_100_15 <- (op_100_15 - on_100_15) / abs(on_100_15)
gut_100_15 <- (ut_100_15 - on_100_15) / abs(on_100_15)
gov_100_15 <- (ov_100_15 - on_100_15) / abs(on_100_15)
print(">>>> GANHOS 100 15%")
paste("Ganhos hi --> on", mean(ut_100_15 - on_100_15), " [", 1.96*sd(ut_100_15 - on_100_15)/sqrt(length(ut_100_15 - on_100_15))+mean(ut_100_15 - on_100_15), ":", mean(ut_100_15 - on_100_15) - 1.96*sd(ut_100_15 - on_100_15)/sqrt(length(ut_100_15 - on_100_15)), "]", " ", mean(gut_100_15), " [", 1.96*sd(gut_100_15)/sqrt(length(gut_100_15))+mean(gut_100_15), ":", mean(gut_100_15) - 1.96*sd(gut_100_15)/sqrt(length(gut_100_15)), "]" )
print(paste(gut_100_15))
paste("Ganhos op --> on", mean(op_100_15 - on_100_15), " [", 1.96*sd(op_100_15 - on_100_15)/sqrt(length(op_100_15 - on_100_15))+mean(op_100_15 - on_100_15), ":", mean(op_100_15 - on_100_15) - 1.96*sd(op_100_15 - on_100_15)/sqrt(length(op_100_15 - on_100_15)), "]", " ", mean(gop_100_15), " [", 1.96*sd(gop_100_15)/sqrt(length(gop_100_15))+mean(gop_100_15), ":", mean(gop_100_15) - 1.96*sd(gop_100_15)/sqrt(length(gop_100_15)), "]" )
print(paste(gop_100_15))
paste("Ganhos ov --> on", mean(ov_100_15 - on_100_15), " [", 1.96*sd(ov_100_15 - on_100_15)/sqrt(length(ov_100_15 - on_100_15))+mean(ov_100_15 - on_100_15), ":", mean(ov_100_15 - on_100_15) - 1.96*sd(ov_100_15 - on_100_15)/sqrt(length(ov_100_15 - on_100_15)), "]", " ", mean(gov_100_15), " [", 1.96*sd(gov_100_15)/sqrt(length(gov_100_15))+mean(gov_100_15), ":", mean(gov_100_15) - 1.96*sd(gov_100_15)/sqrt(length(gov_100_15)), "]" )
print(paste(gov_100_15))
gop_10_15 <- (op_10_15 - on_10_15) / abs(on_10_15)
gut_10_15 <- (ut_10_15 - on_10_15) / abs(on_10_15)
gov_10_15 <- (ov_10_15 - on_10_15) / abs(on_10_15)
print(">>>> GANHOS 10 15%")
paste("Ganhos hi --> on", mean(ut_10_15 - on_10_15), " [", 1.96*sd(ut_10_15 - on_10_15)/sqrt(length(ut_10_15 - on_10_15))+mean(ut_10_15 - on_10_15), ":", mean(ut_10_15 - on_10_15) - 1.96*sd(ut_10_15 - on_10_15)/sqrt(length(ut_10_15 - on_10_15)), "]", " ", mean(gut_10_15), " [", 1.96*sd(gut_10_15)/sqrt(length(gut_10_15))+mean(gut_10_15), ":", mean(gut_10_15) - 1.96*sd(gut_10_15)/sqrt(length(gut_10_15)), "]" )
print(paste(gut_10_15))
paste("Ganhos op --> on", mean(op_10_15 - on_10_15), " [", 1.96*sd(op_10_15 - on_10_15)/sqrt(length(op_10_15 - on_10_15))+mean(op_10_15 - on_10_15), ":", mean(op_10_15 - on_10_15) - 1.96*sd(op_10_15 - on_10_15)/sqrt(length(op_10_15 - on_10_15)), "]", " ", mean(gop_10_15), " [", 1.96*sd(gop_10_15)/sqrt(length(gop_10_15))+mean(gop_10_15), ":", mean(gop_10_15) - 1.96*sd(gop_10_15)/sqrt(length(gop_10_15)), "]" )
print(paste(gop_10_15))
paste("Ganhos ov --> on", mean(ov_10_15 - on_10_15), " [", 1.96*sd(ov_10_15 - on_10_15)/sqrt(length(ov_10_15 - on_10_15))+mean(ov_10_15 - on_10_15), ":", mean(ov_10_15 - on_10_15) - 1.96*sd(ov_10_15 - on_10_15)/sqrt(length(ov_10_15 - on_10_15)), "]", " ", mean(gov_10_15), " [", 1.96*sd(gov_10_15)/sqrt(length(gov_10_15))+mean(gov_10_15), ":", mean(gov_10_15) - 1.96*sd(gov_10_15)/sqrt(length(gov_10_15)), "]" )
print(paste(gov_10_15))
gop_50_15 <- (op_50_15 - on_50_15) / abs(on_50_15)
gut_50_15 <- (ut_50_15 - on_50_15) / abs(on_50_15)
gov_50_15 <- (ov_50_15 - on_50_15) / abs(on_50_15)
print(">>>> GANHOS 50 15%")
paste("Ganhos hi --> on", mean(ut_50_15 - on_50_15), " [", 1.96*sd(ut_50_15 - on_50_15)/sqrt(length(ut_50_15 - on_50_15))+mean(ut_50_15 - on_50_15), ":", mean(ut_50_15 - on_50_15) - 1.96*sd(ut_50_15 - on_50_15)/sqrt(length(ut_50_15 - on_50_15)), "]", " ", mean(gut_50_15), " [", 1.96*sd(gut_50_15)/sqrt(length(gut_50_15))+mean(gut_50_15), ":", mean(gut_50_15) - 1.96*sd(gut_50_15)/sqrt(length(gut_50_15)), "]" )
print(paste(gut_50_15))
paste("Ganhos op --> on", mean(op_50_15 - on_50_15), " [", 1.96*sd(op_50_15 - on_50_15)/sqrt(length(op_50_15 - on_50_15))+mean(op_50_15 - on_50_15), ":", mean(op_50_15 - on_50_15) - 1.96*sd(op_50_15 - on_50_15)/sqrt(length(op_50_15 - on_50_15)), "]", " ", mean(gop_50_15), " [", 1.96*sd(gop_50_15)/sqrt(length(gop_50_15))+mean(gop_50_15), ":", mean(gop_50_15) - 1.96*sd(gop_50_15)/sqrt(length(gop_50_15)), "]" )
print(paste(gop_50_15))
paste("Ganhos ov --> on", mean(ov_50_15 - on_50_15), " [", 1.96*sd(ov_50_15 - on_50_15)/sqrt(length(ov_50_15 - on_50_15))+mean(ov_50_15 - on_50_15), ":", mean(ov_50_15 - on_50_15) - 1.96*sd(ov_50_15 - on_50_15)/sqrt(length(ov_50_15 - on_50_15)), "]", " ", mean(gov_50_15), " [", 1.96*sd(gov_50_15)/sqrt(length(gov_50_15))+mean(gov_50_15), ":", mean(gov_50_15) - 1.96*sd(gov_50_15)/sqrt(length(gov_50_15)), "]" )
print(paste(gov_50_15))
# Gains at the 5% risk level
gop_100_5 <- (op_100_5 - on_100_5) / abs(on_100_5)
gut_100_5 <- (ut_100_5 - on_100_5) / abs(on_100_5)
gov_100_5 <- (ov_100_5 - on_100_5) / abs(on_100_5)
print(">>>> GANHOS 100 5%")
paste("Ganhos hi --> on", mean(ut_100_5 - on_100_5), " [", 1.96*sd(ut_100_5 - on_100_5)/sqrt(length(ut_100_5 - on_100_5))+mean(ut_100_5 - on_100_5), ":", mean(ut_100_5 - on_100_5) - 1.96*sd(ut_100_5 - on_100_5)/sqrt(length(ut_100_5 - on_100_5)), "]", " ", mean(gut_100_5), " [", 1.96*sd(gut_100_5)/sqrt(length(gut_100_5))+mean(gut_100_5), ":", mean(gut_100_5) - 1.96*sd(gut_100_5)/sqrt(length(gut_100_5)), "]" )
print(paste(gut_100_5))
paste("Ganhos op --> on", mean(op_100_5 - on_100_5), " [", 1.96*sd(op_100_5 - on_100_5)/sqrt(length(op_100_5 - on_100_5))+mean(op_100_5 - on_100_5), ":", mean(op_100_5 - on_100_5) - 1.96*sd(op_100_5 - on_100_5)/sqrt(length(op_100_5 - on_100_5)), "]", " ", mean(gop_100_5), " [", 1.96*sd(gop_100_5)/sqrt(length(gop_100_5))+mean(gop_100_5), ":", mean(gop_100_5) - 1.96*sd(gop_100_5)/sqrt(length(gop_100_5)), "]" )
print(paste(gop_100_5))
paste("Ganhos ov --> on", mean(ov_100_5 - on_100_5), " [", 1.96*sd(ov_100_5 - on_100_5)/sqrt(length(ov_100_5 - on_100_5))+mean(ov_100_5 - on_100_5), ":", mean(ov_100_5 - on_100_5) - 1.96*sd(ov_100_5 - on_100_5)/sqrt(length(ov_100_5 - on_100_5)), "]", " ", mean(gov_100_5), " [", 1.96*sd(gov_100_5)/sqrt(length(gov_100_5))+mean(gov_100_5), ":", mean(gov_100_5) - 1.96*sd(gov_100_5)/sqrt(length(gov_100_5)), "]" )
print(paste(gov_100_5))
gop_10_5 <- (op_10_5 - on_10_5) / abs(on_10_5)
gut_10_5 <- (ut_10_5 - on_10_5) / abs(on_10_5)
gov_10_5 <- (ov_10_5 - on_10_5) / abs(on_10_5)
print(">>>> GANHOS 10 5%")
paste("Ganhos hi --> on", mean(ut_10_5 - on_10_5), " [", 1.96*sd(ut_10_5 - on_10_5)/sqrt(length(ut_10_5 - on_10_5))+mean(ut_10_5 - on_10_5), ":", mean(ut_10_5 - on_10_5) - 1.96*sd(ut_10_5 - on_10_5)/sqrt(length(ut_10_5 - on_10_5)), "]", " ", mean(gut_10_5), " [", 1.96*sd(gut_10_5)/sqrt(length(gut_10_5))+mean(gut_10_5), ":", mean(gut_10_5) - 1.96*sd(gut_10_5)/sqrt(length(gut_10_5)), "]" )
print(paste(gut_10_5))
paste("Ganhos op --> on", mean(op_10_5 - on_10_5), " [", 1.96*sd(op_10_5 - on_10_5)/sqrt(length(op_10_5 - on_10_5))+mean(op_10_5 - on_10_5), ":", mean(op_10_5 - on_10_5) - 1.96*sd(op_10_5 - on_10_5)/sqrt(length(op_10_5 - on_10_5)), "]", " ", mean(gop_10_5), " [", 1.96*sd(gop_10_5)/sqrt(length(gop_10_5))+mean(gop_10_5), ":", mean(gop_10_5) - 1.96*sd(gop_10_5)/sqrt(length(gop_10_5)), "]" )
print(paste(gop_10_5))
paste("Ganhos ov --> on", mean(ov_10_5 - on_10_5), " [", 1.96*sd(ov_10_5 - on_10_5)/sqrt(length(ov_10_5 - on_10_5))+mean(ov_10_5 - on_10_5), ":", mean(ov_10_5 - on_10_5) - 1.96*sd(ov_10_5 - on_10_5)/sqrt(length(ov_10_5 - on_10_5)), "]", " ", mean(gov_10_5), " [", 1.96*sd(gov_10_5)/sqrt(length(gov_10_5))+mean(gov_10_5), ":", mean(gov_10_5) - 1.96*sd(gov_10_5)/sqrt(length(gov_10_5)), "]" )
print(paste(gov_10_5))
gop_50_5 <- (op_50_5 - on_50_5) / abs(on_50_5)
gut_50_5 <- (ut_50_5 - on_50_5) / abs(on_50_5)
gov_50_5 <- (ov_50_5 - on_50_5) / abs(on_50_5)
print(">>>> GANHOS 50 5%")
paste("Ganhos hi --> on", mean(ut_50_5 - on_50_5), " [", 1.96 * sd(ut_50_5 - on_50_5)/sqrt(length(ut_50_5 - on_50_5))+mean(ut_50_5 - on_50_5), ":", mean(ut_50_5 - on_50_5) - 1.96*sd(ut_50_5 - on_50_5)/sqrt(length(ut_50_5 - on_50_5)), "]", " ", mean(gut_50_5), " [", 1.96*sd(gut_50_5)/sqrt(length(gut_50_5))+mean(gut_50_5), ":", mean(gut_50_5) - 1.96*sd(gut_50_5)/sqrt(length(gut_50_5)), "]" )
print(paste(gut_50_5))
paste("Ganhos op --> on", mean(op_50_5 - on_50_5), " [", 1.96 * sd(op_50_5 - on_50_5)/sqrt(length(op_50_5 - on_50_5))+mean(op_50_5 - on_50_5), ":", mean(op_50_5 - on_50_5) - 1.96*sd(op_50_5 - on_50_5)/sqrt(length(op_50_5 - on_50_5)), "]", " ", mean(gop_50_5), " [", 1.96*sd(gop_50_5)/sqrt(length(gop_50_5))+mean(gop_50_5), ":", mean(gop_50_5) - 1.96*sd(gop_50_5)/sqrt(length(gop_50_5)), "]" )
print(paste(gop_50_5))
paste("Ganhos ov --> on", mean(ov_50_5 - on_50_5), " [", 1.96 * sd(ov_50_5 - on_50_5)/sqrt(length(ov_50_5 - on_50_5))+mean(ov_50_5 - on_50_5), ":", mean(ov_50_5 - on_50_5) - 1.96*sd(ov_50_5 - on_50_5)/sqrt(length(ov_50_5 - on_50_5)), "]", " ", mean(gov_50_5), " [", 1.96*sd(gov_50_5)/sqrt(length(gov_50_5))+mean(gov_50_5), ":", mean(gov_50_5) - 1.96*sd(gov_50_5)/sqrt(length(gov_50_5)), "]" )
print(paste(gov_50_5))
paste("Média geral ganho % UT: ", mean(c(gut_50_5, gut_50_15, gut_50_5, gut_100_5, gut_100_15, gut_100_10)))
paste("Média geral ganho % RF: ", mean(c(gop_50_5, gop_50_15, gop_50_5, gop_100_5, gop_100_15, gop_100_10)))
paste("Média geral ganho % OV: ", mean(c(gov_50_5, gov_50_15, gov_50_5, gov_100_5, gov_100_15, gov_100_10)))
paste("Média geral ganho $ UT: ", mean(c(ut_50_5 - on_50_5, ut_50_15- on_50_15, ut_50_5 - on_50_10, ut_100_5- on_100_5, ut_100_15- on_100_15, ut_100_10- on_100_10)))
paste("Média geral ganho $ RF: ", mean(c(op_50_5 - on_50_5, op_50_15- on_50_15, op_50_5 - on_50_10, op_100_5- on_100_5, op_100_15- on_100_15, op_100_10- on_100_10)))
paste("Média geral ganho $ OV: ", mean(c(ov_50_5 - on_50_5, ov_50_15- on_50_15, ov_50_5 - on_50_10, ov_100_5- on_100_5, ov_100_15- on_100_15, ov_100_10- on_100_10)))
# Checking the influence of the risk factor on profits
print("Analise do fator risco")
print("\nAnalise OP\n")
print("10 us OP 5% == OP 10%")
wilcox.test(op_10_5, op_10_10, paired=TRUE, var.equal=FALSE)
print("10 us OP 5% > OP 10%")
wilcox.test(op_10_5, op_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us OP 10% == OP 15%")
wilcox.test(op_10_10, op_10_15, paired=TRUE, var.equal=FALSE)
print("10 us OP 10% > OP 15%")
wilcox.test(op_10_10, op_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us OP 5% == OP 15%")
wilcox.test(op_10_5, op_10_15, paired=TRUE, var.equal=FALSE)
print("10 us OP 5% > OP 15%")
wilcox.test(op_10_5, op_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 5% == OP 10%")
wilcox.test(op_50_5, op_50_10, paired=TRUE, var.equal=FALSE)
print("50 us OP 5% > OP 10%")
wilcox.test(op_50_5, op_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 10% == OP 15%")
wilcox.test(op_50_10, op_50_15, paired=TRUE, var.equal=FALSE)
print("50 us OP 10% > OP 15%")
wilcox.test(op_50_10, op_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 5% == OP 15%")
wilcox.test(op_50_5, op_50_15, paired=TRUE, var.equal=FALSE)
print("50 us OP 5% > OP 15%")
wilcox.test(op_50_5, op_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 5% == OP 10%")
wilcox.test(op_100_5, op_100_10, paired=TRUE, var.equal=FALSE)
print("100 us OP 5% > OP 10%")
wilcox.test(op_100_5, op_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 10% == OP 15%")
wilcox.test(op_100_10, op_100_15, paired=TRUE, var.equal=FALSE)
print("100 us OP 10% > OP 15%")
wilcox.test(op_100_10, op_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 5% == OP 15%")
wilcox.test(op_100_5, op_100_15, paired=TRUE, var.equal=FALSE)
print("100 us OP 5% > OP 15%")
wilcox.test(op_100_5, op_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("\nAnalise UT\n")
print("10 us UT 5% == UT 10%")
wilcox.test(ut_10_5, ut_10_10, paired=TRUE, var.equal=FALSE)
print("10 us UT 5% > UT 10%")
wilcox.test(ut_10_5, ut_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us UT 10% == UT 15%")
wilcox.test(ut_10_10, ut_10_15, paired=TRUE, var.equal=FALSE)
print("10 us UT 10% > UT 15%")
wilcox.test(ut_10_10, ut_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us UT 5% == UT 15%")
wilcox.test(ut_10_5, ut_10_15, paired=TRUE, var.equal=FALSE)
print("10 us UT 5% > UT 15%")
wilcox.test(ut_10_5, ut_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 5% == UT 10%")
wilcox.test(ut_50_5, ut_50_10, paired=TRUE, var.equal=FALSE)
print("50 us UT 5% > UT 10%")
wilcox.test(ut_50_5, ut_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 10% == UT 15%")
wilcox.test(ut_50_10, ut_50_15, paired=TRUE, var.equal=FALSE)
print("50 us UT 10% > UT 15%")
wilcox.test(ut_50_10, ut_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 5% == UT 15%")
wilcox.test(ut_50_5, ut_50_15, paired=TRUE, var.equal=FALSE)
print("50 us UT 5% > UT 15%")
wilcox.test(ut_50_5, ut_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 5% == UT 10%")
wilcox.test(ut_100_5, ut_100_10, paired=TRUE, var.equal=FALSE)
print("100 us UT 5% > UT 10%")
wilcox.test(ut_100_5, ut_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 10% == UT 15%")
wilcox.test(ut_100_10, ut_100_15, paired=TRUE, var.equal=FALSE)
print("100 us UT 10% > UT 15%")
wilcox.test(ut_100_10, ut_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 5% == UT 15%")
wilcox.test(ut_100_5, ut_100_15, paired=TRUE, var.equal=FALSE)
print("100 us UT 5% > UT 15%")
wilcox.test(ut_100_5, ut_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
# Checking the influence of the risk factor on gains
print("Analise do fator risco nos ganhos")
print("\nAnalise ganho OP\n")
print("10 us OP 5% == OP 10%")
wilcox.test(gop_10_5, gop_10_10, paired=TRUE, var.equal=FALSE)
print("10 us OP 5% > OP 10%")
wilcox.test(gop_10_5, gop_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us OP 10% == OP 15%")
wilcox.test(gop_10_10, gop_10_15, paired=TRUE, var.equal=FALSE)
print("10 us OP 10% > OP 15%")
wilcox.test(gop_10_10, gop_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us OP 5% == OP 15%")
wilcox.test(gop_10_5, gop_10_15, paired=TRUE, var.equal=FALSE)
print("10 us OP 5% > OP 15%")
wilcox.test(gop_10_5, gop_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 5% == OP 10%")
wilcox.test(gop_50_5, gop_50_10, paired=TRUE, var.equal=FALSE)
print("50 us OP 5% > OP 10%")
wilcox.test(gop_50_5, gop_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 10% == OP 15%")
wilcox.test(gop_50_10, gop_50_15, paired=TRUE, var.equal=FALSE)
print("50 us OP 10% > OP 15%")
wilcox.test(gop_50_10, gop_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us OP 5% == OP 15%")
wilcox.test(gop_50_5, gop_50_15, paired=TRUE, var.equal=FALSE)
print("50 us OP 5% > OP 15%")
wilcox.test(gop_50_5, gop_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 5% == OP 10%")
wilcox.test(gop_100_5, gop_100_10, paired=TRUE, var.equal=FALSE)
print("100 us OP 5% > OP 10%")
wilcox.test(gop_100_5, gop_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 10% == OP 15%")
wilcox.test(gop_100_10, gop_100_15, paired=TRUE, var.equal=FALSE)
print("100 us OP 10% > OP 15%")
wilcox.test(gop_100_10, gop_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us OP 5% == OP 15%")
wilcox.test(gop_100_5, gop_100_15, paired=TRUE, var.equal=FALSE)
print("100 us OP 5% > OP 15%")
wilcox.test(gop_100_5, gop_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("\nAnalise UT\n")
print("10 us UT 5% == UT 10%")
wilcox.test(gut_10_5, gut_10_10, paired=TRUE, var.equal=FALSE)
print("10 us UT 5% > UT 10%")
wilcox.test(gut_10_5, gut_10_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us UT 10% == UT 15%")
wilcox.test(gut_10_10, gut_10_15, paired=TRUE, var.equal=FALSE)
print("10 us UT 10% > UT 15%")
wilcox.test(gut_10_10, gut_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("10 us UT 5% == UT 15%")
wilcox.test(gut_10_5, gut_10_15, paired=TRUE, var.equal=FALSE)
print("10 us UT 5% > UT 15%")
wilcox.test(gut_10_5, gut_10_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 5% == UT 10%")
wilcox.test(gut_50_5, gut_50_10, paired=TRUE, var.equal=FALSE)
print("50 us UT 5% > UT 10%")
wilcox.test(gut_50_5, gut_50_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 10% == UT 15%")
wilcox.test(gut_50_10, gut_50_15, paired=TRUE, var.equal=FALSE)
print("50 us UT 10% > UT 15%")
wilcox.test(gut_50_10, gut_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("50 us UT 5% == UT 15%")
wilcox.test(gut_50_5, gut_50_15, paired=TRUE, var.equal=FALSE)
print("50 us UT 5% > UT 15%")
wilcox.test(gut_50_5, gut_50_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 5% == UT 10%")
wilcox.test(gut_100_5, gut_100_10, paired=TRUE, var.equal=FALSE)
print("100 us UT 5% > UT 10%")
wilcox.test(gut_100_5, gut_100_10, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 10% == UT 15%")
wilcox.test(gut_100_10, gut_100_15, paired=TRUE, var.equal=FALSE)
print("100 us UT 10% > UT 15%")
wilcox.test(gut_100_10, gut_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
print("100 us UT 5% == UT 15%")
wilcox.test(gut_100_5, gut_100_15, paired=TRUE, var.equal=FALSE)
print("100 us UT 5% > UT 15%")
wilcox.test(gut_100_5, gut_100_15, paired=TRUE, var.equal=FALSE, alternative="less")
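# The pairwise comparisons above could also be generated in a loop instead of being written
# out by hand. A sketch (kept commented out; looking the vectors up by name with get() is an
# assumption about the surrounding workspace, not something the original script does):
# for (n in c(10, 50, 100)) {
#   for (pair in list(c(5, 10), c(10, 15), c(5, 15))) {
#     a <- get(sprintf("ut_%d_%d", n, pair[1]))
#     b <- get(sprintf("ut_%d_%d", n, pair[2]))
#     print(wilcox.test(a, b, paired = TRUE, alternative = "less"))
#   }
# }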
|
library(reshape2)
# Download the compressed dataset to a temporary location.
zipFile <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", zipFile)
# Decompress the dataset to another temporary location.
expansionDir <- tempfile()
unzip(zipFile, exdir = expansionDir)
tidyFile <- paste(getwd(), "UCI HAR Dataset (Tidy).csv", sep = "/")
setwd(paste(expansionDir, "UCI HAR Dataset", sep = "/"))
# Read the activity & feature mappings.
activity_labels <- read.table("activity_labels.txt", col.names = c("ID", "Label"))
features <- read.table("features.txt", col.names = c("Index", "Name"))
# Read & concatenate the observations from the training & test subsets.
readSet <- function(kind, name, colNames) {
read.table(sprintf("%s/%s_%s.txt", kind, name, kind), col.names = colNames, check.names = F)
}
readMerged <- function(name, colNames) {
rbind(readSet("test", name, colNames), readSet("train", name, colNames))
}
subject <- readMerged("subject", "Subject")
X <- readMerged("X", features$Name)
y <- readMerged("y", "ActivityID")
# Map activity ID to activity label.
y$Activity <- activity_labels[match(y$ActivityID, activity_labels$ID), "Label"]
# Filter down to just the mean & stddev for each measurement.
filteredX <- X[,grep("(-mean\\(\\)|-std\\(\\))", features$Name)]
# Stitch together the subject column, activity label column, & measurement vector.
merged <- cbind(subject, Activity = y$Activity, filteredX)
# Group the dataset by subject & activity, using mean for aggregation.
melted <- melt(merged, id.vars = c("Subject", "Activity"))
dcasted <- dcast(melted, Subject + Activity ~ variable, mean)
# Write the new dataset and print its location.
write.csv(dcasted, tidyFile, row.names = F)
print(tidyFile)
|
/project
|
no_license
|
ColinLiiii/run_analysis.R
|
R
| false | false | 1,793 |
|
|
#' Find variables by name
#'
#' Function to search for a variable by its name. See the "Value" section for more details on the different functions.
#'
#' @param df A data.frame from where the column names should be identified when returned function applied
#' @param pattern A character string with name (or part of name) of the variables to find.
#' @param envir environment holding data.frames where to search for the variables (the Global environment as default).
#' @param ... Arguments passed to \code{grep}
#' @return
#' \itemize{
#' \item \code{findvar_fun}: A function with argument \code{pattern} to search for \code{pattern} in \code{df}. See example!
#' \item \code{findvar_in_df}: A vector with variable names from df matching the pattern.
#' \item \code{findvar_anywhere}: Does not return anything but prints a message where variables matching the pattern can be found.
#' }
#' @examples
#' find_cars <- findvar_fun(cars)
#' find_cars("sp")
#'
#' findvar_in_df("sp", cars)
#'
#' cars <- cars; iris <- iris
#' findvar_anywhere("petal")
#' @name findvar
NULL
#' @rdname findvar
#' @export
findvar_fun <- function(df, ...) function(pattern, ...) findvar_in_df(pattern, df, ...)
#' @rdname findvar
#' @export
findvar_in_df <- function(pattern, df, ...) names(df)[grep(pattern, names(df), ignore.case = TRUE, ...)]
#' @rdname findvar
#' @export
findvar_anywhere <- function(pattern, envir = .GlobalEnv, ...) {
ls. <- ls(envir = envir)
msg <- ""
for (df_name in ls.) {
df <- get(df_name)
if (is.data.frame(df)) {
vars <- findvar_in_df(pattern, df, ...)
if (!identical(vars, character(0)))
msg <- paste0(msg, df_name, ": ", paste(vars, collapse = ", "), "\n")
}
}
if (msg != "") message(msg) else message("No variables found!")
}
|
/R/findvar.R
|
no_license
|
cran/rccmisc
|
R
| false | false | 1,793 |
r
|
|
#R program for Gibbs sampling from full conditionals in OLS example
#number of iterations
m=5000
#read only observations with complete information, n=2313
x=as.matrix(read.table("c:\\ols_examp.dat")[1:2313,2:10])
y=as.matrix(read.table("c:\\ols_examp.dat")[1:2313,11])
#establish parameter vectors and constant quantities
s2=matrix(1,m); b=matrix(0,m,9)
xtxi=solve(t(x)%*%x)
pars=coefficients(lm(y ~ x-1))
#Gibbs sampling begins
for(i in 2:m){
#simulate beta from its multivariate normal conditional
b[i,]=pars+t(rnorm(9,mean=0,sd=1))%*%chol(s2[i-1]*xtxi)
#simulate sigma from its inverse gamma distribution
s2[i]=1/rgamma(1,2313/2,.5*t(y-x%*%(b[i,]))%*%(y-x%*%(b[i,])))
#write output to file and screen
write(c(b[i,],s2[i]),file="c:\\ols_examp.out", append=T, ncol=10)
if(i%%50==0){print(c(i,b[i,1],s2[i]))}
}
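# Reading the sampler output back in for a quick posterior summary (a sketch, kept commented
# out; the 1000-draw burn-in is an assumption, not part of the original program):
# draws <- read.table("c:\\ols_examp.out")
# colMeans(draws[-(1:1000), ])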
|
/BOOK/bookprogs/p171.R
|
no_license
|
sursu/BayesEcon
|
R
| false | false | 816 |
r
|
|
### cancer tools ####
# Import patients months
timesurvival<-read.delim("/Volumes/grcmc/TERESA LJ/Experiments/Resistencias PDT005/2. RNAseq/Cancertool/TCGA_p53core/time_survival.txt")
normcounts_TCGA<-read.delim("/Volumes/grcmc/TERESA LJ/Experiments/Resistencias PDT005/2. RNAseq/Cancertool/TCGA_p53core/normcounts_TCGA.txt")
row.names(normcounts_TCGA)<-normcounts_TCGA$Sample
normcounts_TCGA$Sample=NULL
# matrix
normcounts_TCGA<-as.matrix(normcounts_TCGA)
# heatmap.2() is provided by gplots; morecols() is never defined in this script, so a plain
# blue-white-red ramp is assumed here as a stand-in for the original (unshown) palette
library(gplots)
morecols <- colorRampPalette(c("blue", "white", "red"))
heatmap.2(normcounts_TCGA,col=rev(morecols(50)),trace="none", main="TCGA p53 genes",scale="row")
library(heatmaply)
heatmaply(normcounts_TCGA,
plot_method = "plotly",
scale_fill_gradient_fun = ggplot2::scale_fill_gradient2(low = "blue",
high = "red"),
k_row = 2,
k_col = 2)
|
/cancertool.R
|
no_license
|
yguillen/bigaslab
|
R
| false | false | 790 |
r
|
|
library(dplyr)
years = c(1918:2018)
data <- data.frame(matrix(nrow=101, ncol=2))
names(data) <- c("year", "count")
i = 1
for(y in years){
fileName = paste0("names/yob", y, ".txt")
df <- read.table(fileName, header = FALSE, sep=",")
data[i,] <- c(y ,df[df$V1 == "Karen",]$V3)
i = i + 1
}
birth_rate_US = read.csv("NCHS_-_Births_and_General_Fertility_Rates__United_States.csv")
birth_rate_US = birth_rate_US %>% filter(Year >= 1918)
data$normalized_count = (data$count/birth_rate_US$Birth.Number)*100
write.csv(data, "karen1918-2018.csv")
|
/data.R
|
no_license
|
kyaralucas/karen
|
R
| false | false | 549 |
r
|
|
library(timevis)
### Name: centerTime
### Title: Move the window such that the given time is centered
### Aliases: centerTime
### ** Examples
timevis() %>%
centerTime(Sys.Date() - 1)
if (interactive()) {
library(shiny)
shinyApp(
ui = fluidPage(
timevisOutput("timeline"),
actionButton("btn", "Center around 24 hours ago")
),
server = function(input, output) {
output$timeline <- renderTimevis(
timevis()
)
observeEvent(input$btn, {
centerTime("timeline", Sys.Date() - 1)
})
}
)
}
|
/data/genthat_extracted_code/timevis/examples/centerTime.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 535 |
r
|
|
###########################################################################################
# Load packages and utility functions
###########################################################################################
rm(list = ls())
setwd("/Users/RossTyzackPitman/Documents/OneDrive/Data/GitHub/Databases/LEOPARD-MP/code")
library(leopard)
library(ggplot2)
library(reshape2)
source('utils/saver.r')
source('utils/loader.r')
source('utils/reader.r')
source('utils/pdfr.r')
source('params.R')
#source('params.kzn.R')
source('aging_error.r')
###########################################################################################
# Setup model objects
###########################################################################################
# dimensions
## number of monte carlo samples
nreps <- 1000
## number of projection years
nyr.proj <- 50
## matrix of parameter values
params <- matrix(param,nrow=length(param),ncol=nreps)
rownames(params) <- names(param)
colnames(params) <- 1:nreps
# initial population size (Sabi Sands average population structure estimate from 2013-2015; using 70 leopard)
x.initial <- c(nc = 14,
nj = 7,
saf = 3,
f36 = 2,
f48 = 2,
f60 = 2,
f72 = 3,
f84 = 17,
sam = 1,
m36 = 2,
m48 = 2,
m60 = 3,
m72 = 3,
m84 = 9)
# initial population size (Phinda average population structure estimate from 2002-2012; using 30 leopard)
#x.initial <- c(nc = 7,
# nj = 5,
# saf = 2,
# f36 = 1,
# f48 = 1,
# f60 = 1,
# f72 = 1,
# f84 = 3,
# sam = 3,
# m36 = 1,
# m48 = 1,
# m60 = 1,
# m72 = 1,
# m84 = 2)
# population projection array
x <- array(x.initial,dim=c(length(x.initial),nreps,nyr.proj))
dimnames(x) <- list(age.class = names(x.initial),rep = 1:nreps, year = 1:nyr.proj)
x[,,2:nyr.proj] <- NA
# harvest rate
#harvest.rate <- seq(0, 1, length = 101)
harvest.rate <- 0
# setup selectivity object
selectivity <- matrix(data = c(0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, # all males
#0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, # males ≥ 3
#0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, # males ≥ 6
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, # males ≥ 7
#0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, # males & females ≥ 6
#0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, # males & females ≥ 7
#0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # excl dependent young
#0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, # only adults
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1), # all males with females ≥ 7
nrow = 14)
selectivity <- t(selectivity) ; rownames(selectivity) = c("all.males",
#"males≥3",
#"males≥6",
"males≥7",
#"male.female≥6",
#"male.female≥7",
#"excl.dep.young",
#"only.adults",
"all.males.w.females≥7")
# create population size object
population.size <- array(0, dim = c(length(harvest.rate), nreps, 1))
# create extinction prob object
extinction.probability <- as.data.frame(NA)
total.removed <- data.frame()
#pop.size <- data.frame()
# query number killed
total.harvested <- array(0, dim = c(length(harvest.rate), nreps, 1))
total.harvested.df <- as.data.frame(NA)
total.population.size <- data.frame()
###########################################################################################
# Functions
###########################################################################################
prob.ext.func <- function(x){
prob.extinction <- 1 - mean(x > 0)
return(prob.extinction)
}
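# e.g. prob.ext.func(c(0, 12, 0, 5)) = 0.5: half of these replicate population sizes are zero (extinct)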
###########################################################################################
# Run model
###########################################################################################
# loop over range of selectivities
for(z in 1:nrow(selectivity)){
# projection
for (i in 1:nreps) {
# sample parameters
param.sample <- params[,i]
# create new object to hold leopard numbers
# and vital rates
xx <- leopard(x.initial, param.sample[1:14], param.sample[15:19])
# assign multiplicative maternal effects
xx@maternal.effect[] <- matrix(maternal.effects, nrow = 2, ncol = 5, byrow = T)
# loop forward over years
for (y in 1:nyr.proj) {
# correlated deviation in survival:
# log-normal with cv = 0.2
# truncated at 0 and 1
sdev <- rnorm(1)
sigma <- sqrt(log(1+0.20^2))
param.sample[1:14] <- exp(log(param.sample[1:14]) + sigma * sdev - sigma^2/2)
param.sample[1:14] <- vapply(vapply(param.sample[1:14],
function(x) max(x,0),numeric(1)),function(x) min(x,1),numeric(1))
# create list of sequential hunting scenarios
removals <- list(trophy = list(rate = harvest.rate, preference = selectivity[z,]),
problem_animal = list(rate = 0.0))
removals <- harvest(xx, removals)
# include trophy hunting aging error
#source('incorp.aging.error.final.r')
total.removals <- removals$trophy@kills + removals$problem_animal@kills
# add recovery years
#source('two.years.recovery.r')
#source('three.years.recovery.r')
# calculate stochastic survival
xx <- survival(xx, total.removals)
# calculate stochastic birth
xx <- birth(xx)
# -----
population.size[,i,] <- sum(xx@.Data)
#total.harvested[,i,] <- sum(total.removals)
# -----
# step forward
xx <- transition(xx)
# record numbers
x[,i,y] <- xx
}
total.population.size <- rbind(total.population.size, mean(population.size))
#extinction.probability <- cbind(extinction.probability, apply(population.size, c(1, 3), prob.ext.func))
#total.harvested.df <- cbind(total.harvested.df, apply(total.harvested, c(1, 3), mean))
}
}
x.tot <- apply(x, 2:3, mean)
boxplot(x.tot,
ylab="Extinction Probability",
xaxt="n",
xlab="Year",
outline=FALSE)
axis(side=1, at=1:nyr.proj)
# save output
#saver(extinction.probability,
# total.harvested.df,
# cub.surv.df,
# prob.inf.df,
# name = 'model_run_2')
#
#loader('model_run_1')
#loader('model_run_2')
extinction.probability[1] <- NULL
all.males <- extinction.probability[1,1:100]
all.males$group <- rownames(selectivity)[1]
males7 <- extinction.probability[1,101:200]
males7$group <- rownames(selectivity)[2]
all.males.w.females7 <- extinction.probability[1,201:300]
all.males.w.females7$group <- rownames(selectivity)[3]
all.data <- rbind(all.males,
males7,
all.males.w.females7)
colnames(all.data)[1:100] <- seq(1,100,1)
all.data.melt <- melt(all.data, id = c("group"))
colnames(all.data.melt)[1:3] <- c("Scenario", "Year", "ExtinctionProb")
###########################################################################################
# Plots
###########################################################################################
# plot extinction probability for multiple ages of harvest
cbPalette <- c("#000000", "#999999", "#E69F00") #, "#56B4E9", "#009E73") #, "#F0E442", "#0072B2", "#39e600", "#CC79A7", "#D55E00")
ext.prob.plot <- ggplot(all.data.melt) +
geom_point(aes(Year, ExtinctionProb, group = "Scenario")) +
#scale_color_manual(values = cbPalette) +
#facet_wrap("Scenario") +
theme_bw() +
theme(strip.background = element_rect(fill="white")) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle('Extinction probability') +
labs(y = '', col = 'Year')
ext.prob.plot
# -----Extinction probability (101)--------
# clean up output and prepare for plotting
extinction.probability[1] <- NULL
all.males <- extinction.probability[1:101,1:5] ; all.males$group <- rownames(selectivity)[1]
#males3 <- extinction.probability[1:101,6:10] ; males3$group <- rownames(selectivity)[2]
#males6 <- extinction.probability[1:101,11:15] ; males6$group <- rownames(selectivity)[3]
#males7 <- extinction.probability[1:101,16:20] ; males7$group <- rownames(selectivity)[4]
#male.female6 <- extinction.probability[1:101,21:25] ; male.female6$group <- rownames(selectivity)[5]
#male.female7 <- extinction.probability[1:101,26:30] ; male.female7$group <- rownames(selectivity)[6]
#excl.dep.young <- extinction.probability[1:101,31:35] ; excl.dep.young$group <- rownames(selectivity)[7]
#only.adults <- extinction.probability[1:101,36:40] ; only.adults$group <- rownames(selectivity)[8]
#all.males.w.females7 <- extinction.probability[1:101,41:45] ; all.males.w.females7$group <- rownames(selectivity)[9]
all.males.w.females7 <- extinction.probability[1:101,6:10] ; all.males.w.females7$group <- rownames(selectivity)[2]
all.data <- rbind(all.males,
#males3,
#males6,
#males7,
#male.female6,
#male.female7,
#excl.dep.young,
#only.adults,
all.males.w.females7)
colnames(all.data)[1:5] <- c(10, 20, 30, 40, 50)
all.data$H <- c(harvest.rate,
#harvest.rate,
#harvest.rate,
#harvest.rate,
#harvest.rate,
#harvest.rate,
#harvest.rate,
#harvest.rate,
harvest.rate)
# reshape and change factor levels
all.data.melt <- melt(all.data, id = c("group", "H"))
all.data.melt$group <- factor(all.data.melt$group, levels = c(#"excl.dep.young",
#"only.adults",
"all.males",
#"males≥3",
#"males≥6",
#"male.female≥6",
#"males≥7",
#"male.female≥7",
"all.males.w.females≥7"))
# plot extinction probability for multiple ages of harvest
cbPalette <- c("#000000", "#999999", "#E69F00", "#56B4E9", "#009E73") #, "#F0E442", "#0072B2", "#39e600", "#CC79A7", "#D55E00")
ext.prob.plot <- ggplot(all.data.melt) +
geom_line(aes(H, value, color = variable)) +
scale_color_manual(values = cbPalette) +
facet_wrap("group") +
theme_bw() +
theme(strip.background = element_rect(fill="white")) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle('Extinction probability') +
labs(y = '', col = 'Year')
ext.prob.plot
pdfr(ext.prob.plot, width = 10, name = 'extinction probability')
###########################################################################################
# End
###########################################################################################
|
/code/exploratory_code/sim.6.R
|
no_license
|
cttedwards/LEOPARD-MP
|
R
| false | false | 12,341 |
r
|
|
## These functions cache the inverse of a matrix so that the inverse only has
## to be computed once, however many times it is requested afterwards.
## makeCacheMatrix creates a special "matrix" object: a list of functions that
## get/set the underlying matrix and get/set its cached inverse (the cache is
## reset to NULL whenever the matrix changes).
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix: it reuses the cached inverse when one exists, otherwise it
## computes the inverse with solve() and stores it in the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
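# Usage sketch (the 2x2 example matrix is an assumption; any invertible matrix works):
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(m)   # first call computes the inverse and caches it
# cacheSolve(m)   # second call prints "getting cached data" and returns the cache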
|
/cachematrix.R
|
no_license
|
utsawk/ProgrammingAssignment2
|
R
| false | false | 790 |
r
|
|
library(EMD)
library(quantstrat)
library(dtw)
library(plyr)
###### Window extraction function #####
## Inputs
## x: start position of the window
## y: series the window is taken from
## k: window length
linestep<-function(x,y,k)
{
# window start date + window end date + the values inside the window
c(as.numeric(.indexDate(y[x])),as.numeric(.indexDate(y[x+k-1])),as.numeric(y[x:(x+k-1)]))
}
###### Window-matrix function #####
# Converts a time series into a matrix of fixed-length windows, to simplify the later least-squares transformation and distance calculations
## Inputs
## list_data: series to convert
## k: window length
list2matrix_stepbystep<-function(list_data,k)
{
list_len <-length(list_data)
if(list_len < k)
{
matrix_tmp<-matrix(c(as.numeric(.indexDate(list_data[1])),as.numeric(list_data)),nrow=1,ncol=list_len+1)
}
else
{
#matrix_tmp<-matrix(nrow=list_len-k+1,ncol=k+1)
#for(i in 1:(list_len-k+1))
#{
# matrix_tmp[i,]<-c(as.numeric(.indexDate(list_data[i])),as.numeric(list_data[seq(i,(i+k-1))]))
#}
matrix_tmp<-sapply(1:(list_len-k+1),linestep,y=list_data,k=k)
}
matrix_tmp
}
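# e.g. a 5-element daily series with k = 3 gives a matrix with 3 columns, each column
# holding (window start date, window end date, the 3 values inside that window)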
###### Least-squares transformation plus DTW distance #####
## Inputs
## x: vector to be fitted; its length is the sample vector length plus the prediction length (currently fixed at 3 days)
## y: sample vector
## i: prediction length
lsfit_x<-function(x,y)
{
i=length(x[c(-1,-2)])-length(y)
if(all(x[c(-1,-2,((length(x)-(i-1)):length(x))*-1)]==y))
{
print("dfdfdfdfdfdf")
}
# fit the least-squares linear model, with x stripped of its start date, end date and the prediction-length tail
tmpmodel<-lsfit(x=as.numeric(x[c(-1,-2,((length(x)-(i-1)):length(x))*-1)]),y=as.numeric(y))
# DTW distance between this fitted candidate vector and the sample vector
dis<-dtw(x[c(-1,-2,((length(x)-(i-1)):length(x))*-1)],y)$distance
if(dis>0)
{
sim_rato<- 1/dis
}
else
{
sim_rato<- 0
}
return(c(x[1:2],sim_rato,x[c(-1,-2)]*tmpmodel$coefficients["X"]+tmpmodel$coefficients["Intercept"]))
}
###### Analogue-matching prediction model #####
## Inputs
## x: series to be predicted, a vector
## y: sample series, a matrix
## j: prediction length
## i: number of top-ranked (most similar) windows combined in the weighted prediction
ac_model<-function(x,y,i)
{
print(as.Date(x[2],format="%Y-%m-%d",origin = "1970-01-01"))
dfddfd<-aaply(y,1,"lsfit_x",as.numeric(x)[3:length(x)],.parallel = F)
# dfddfd<-t(apply(y,1,"lsfit_x",as.numeric(x)[3:length(x)]))
t<-ncol(y)-length(x)
# write.table(dfddfd,'~/Downloads/log.txt',sep=",")
dfddfd<-dfddfd[dfddfd[,3]!=0,]
c("start_date"=x[1],"end_date"=x[2],apply((head(dfddfd[order(-dfddfd[,3]),],i)[,3])/sum(head(dfddfd[order(-dfddfd[,3]),],i)[,3])*(head(dfddfd[order(-dfddfd[,3]),],i)[,(ncol(dfddfd)-t+1):ncol(dfddfd)]),2,"sum"))
print( c("start_date"=x[1],"end_date"=x[2],apply((head(dfddfd[order(-dfddfd[,3]),],i)[,3])/sum(head(dfddfd[order(-dfddfd[,3]),],i)[,3])*(head(dfddfd[order(-dfddfd[,3]),],i)[,(ncol(dfddfd)-t+1):ncol(dfddfd)]),2,"sum"))
)
}
###### Analogue-matching prediction indicator #####
## Inputs
## x: series to be predicted
## y: sample series
## i: sliding-window length for the prediction series
## j: prediction length
ac_indicator<-function(x,y,i,j)
{
# convert the prediction series into a matrix of windows of length i
pred_data<-t(list2matrix_stepbystep(x,i))
# convert the sample series into a matrix of windows of length i+j
sample_data<-t(list2matrix_stepbystep(y,(i+j)))
#result<-t(apply(pred_data,1,"ac_model",y=sample_data,i=5))
result<-aaply(pred_data,1,"ac_model",y=sample_data,i=5,.parallel = F)
#result_ts<-xts(result[,c(-1,-2)],order.by=as.Date(result[,2],format="%Y-%m-%d",origin = "1970-01-01"))
#colnames(result_ts)<-sapply(c(1:ncol(result_ts)),function(x){paste("pred_result",x,sep="")})
result
#result_ts
}
# variant with EMD decomposition added
EMD_test_by_step<-function(emd_data,s,k,j)
{
emd_result<-emd(as.numeric(emd_data))
date_seq<-as.numeric(.indexDate(emd_data))
prd_result <- NULL
for(i in 1:emd_result$nimf)
{
emd_tmp<- cbind(date_seq,emd_result$imf[,i])
emd_tmp<-xts(emd_tmp[,-1],order.by=as.Date(emd_tmp[,1],format="%Y-%m-%d",origin = "1970-01-01"))
if(is.null(prd_result))
{
prd_result <- ac_indicator(emd_tmp[s],emd_tmp,k,j)
}
else
{
prd_result <- cbind(prd_result,ac_indicator(emd_tmp[s],emd_tmp,k,j))
}
}
emd_tmp<- cbind(date_seq,emd_result$residue)
emd_tmp<-xts(emd_tmp[,-1],order.by=as.Date(emd_tmp[,1],format="%Y-%m-%d",origin = "1970-01-01"))
if(is.null(prd_result))
{
prd_result <- ac_indicator(emd_tmp[s],emd_tmp,k,j)
}
else
{
prd_result <- cbind(prd_result, ac_indicator(emd_tmp[s],emd_tmp,k,j))
}
prd_result
}
SZZS<-read.table('~/Downloads/stock_data/SH999999.txt',header=F,sep=",",skip = 2,fill=T)
SZZS<-SZZS[-nrow(SZZS),]
SZZS<-xts(SZZS[,-1],order.by=as.Date(SZZS[,1],format="%Y-%m-%d",origin = "1970-01-01"))
colnames(SZZS)<-c("open","high","low","close","volume","total")
SZZS<-SMA(Cl(SZZS), 5)
SZZS<-SZZS[!is.na(SZZS)]
pre_date<-"2014-10-31/"
stockdata_dir<-"~/Downloads/stock_data/"
result_data<-NULL
get_pred_data<-function(stock_id,stockdata_dir,pre_date,SZZS)
{
print(stock_id)
test111<-read.table(paste(stockdata_dir,stock_id,sep=""),header=F,sep=",",skip = 2,fill=T)
if(nrow(test111)>20)
{
test111<-test111[-nrow(test111),]
test111<-xts(test111[,-1],order.by=as.Date(test111[,1],format="%Y-%m-%d",origin = "1970-01-01"))
colnames(test111)<-c("open","high","low","close","volume","total")
testdata<-SMA(Cl(test111), 5)
testdata<-testdata[!is.na(testdata)]
if( all(testdata[pre_date] != 0) & ( length(testdata[pre_date])>=10 ) )
{
result_data_tmp<-ac_indicator(testdata[pre_date],SZZS,10,3)
result_data<-c(unlist(strsplit(stock_id,".", fixed = TRUE))[1],result_data_tmp)
write.table(t(result_data),"~/sma5_pred_res.txt",sep=",",append=T,row.names=F,col.names=F)
t(result_data)
#if(is.null(result_data))
#{
# write.table(t(result_data),"~/sma5_pred_res.txt",sep=",",col.names=F,row.names=F)
#}
#else
#{
# result_data<-c(unlist(strsplit(stock_id,".", fixed = TRUE))[1],result_data_tmp)
# write.table(as.matrix(result_data,row=1),"~/sma5_pred_res.txt",sep=",",append=T)
#}
}
}
}
library("parallel")
library("foreach")
library("doParallel")
cl <- makeCluster(mc <- getOption("cl.cores", 4))
registerDoParallel(cl)
clusterEvalQ(cl,library(xts))
clusterEvalQ(cl,library(quantstrat))
clusterEvalQ(cl,library(dtw))
clusterEvalQ(cl,library(plyr))
clusterExport(cl,"xts")
clusterExport(cl,"ac_indicator")
clusterExport(cl,"linestep")
clusterExport(cl,"list2matrix_stepbystep")
clusterExport(cl,"lsfit_x")
clusterExport(cl,"ac_model")
clusterExport(cl,"get_pred_data")
clusterExport(cl,"SZZS")
clusterExport(cl,"pre_date")
clusterExport(cl,"stockdata_dir")
system.time({testdata112121<-laply(list.files(stockdata_dir),"get_pred_data",stockdata_dir,pre_date,SZZS,.parallel = F)
})
|
/sma5_pred.R
|
no_license
|
ssh352/sanshizhinian
|
R
| false | false | 6,805 |
r
|
library(EMD)
library(quantstrat)
library(dtw)
library(plyr)
######时间窗口截取函数#####
##输入
##x:截取开始位置
##y:被截取序列
##k:截取长度
linestep<-function(x,y,k)
{
#窗口开始时间+窗口结束时间+窗口中数据内容
c(as.numeric(.indexDate(y[x])),as.numeric(.indexDate(y[x+k-1])),as.numeric(y[x:(x+k-1)]))
}
######时间窗矩阵函数#####
#将时间序列按制定长度转换成矩阵,以方便后续最小二乘转换和距离计算
##输入
##list_data:待转换序列
##k:截取长度
list2matrix_stepbystep<-function(list_data,k)
{
list_len <-length(list_data)
if(list_len < k)
{
matrix_tmp<-matrix(c(as.numeric(.indexDate(list_data[1])),as.numeric(list_data)),nrow=1,ncol=list_len+1)
}
else
{
#matrix_tmp<-matrix(nrow=list_len-k+1,ncol=k+1)
#for(i in 1:(list_len-k+1))
#{
# matrix_tmp[i,]<-c(as.numeric(.indexDate(list_data[i])),as.numeric(list_data[seq(i,(i+k-1))]))
#}
matrix_tmp<-sapply(1:(list_len-k+1),linestep,y=list_data,k=k)
}
matrix_tmp
}
######最小二乘转换并计算dtw距离#####
##输入
##x:被拟合向量,被拟合向量长度为样本向量长度+预测长度,目前预测长度固定为3天
##y:样本向量
##i:预测长度
lsfit_x<-function(x,y)
{
i=length(x[c(-1,-2)])-length(y)
if(all(x[c(-1,-2,((length(x)-(i-1)):length(x))*-1)]==y))
{
print("dfdfdfdfdfdf")
}
#确定最小二乘线性拟合参数,其中x去除开始时间、结束时间以及预测长度
tmpmodel<-lsfit(x=as.numeric(x[c(-1,-2,((length(x)-(i-1)):length(x))*-1)]),y=as.numeric(y))
#计算本拟合向量与样本向量的距离
dis<-dtw(x[c(-1,-2,((length(x)-(i-1)):length(x))*-1)],y)$distance
if(dis>0)
{
sim_rato<- 1/dis
}
else
{
sim_rato<- 0
}
return(c(x[1:2],sim_rato,x[c(-1,-2)]*tmpmodel$coefficients["X"]+tmpmodel$coefficients["Intercept"]))
}
######相似体拟合模型#####
##输入
##x:预测时间序列,向量
##y:样本时间序列,矩阵
##j:预测长度
##i:相似度排名
ac_model<-function(x,y,i)
{
print(as.Date(x[2],format="%Y-%m-%d",origin = "1970-01-01"))
dfddfd<-aaply(y,1,"lsfit_x",as.numeric(x)[3:length(x)],.parallel = F)
# dfddfd<-t(apply(y,1,"lsfit_x",as.numeric(x)[3:length(x)]))
t<-ncol(y)-length(x)
# write.table(dfddfd,'~/Downloads/log.txt',sep=",")
dfddfd<-dfddfd[dfddfd[,3]!=0,]
c("start_date"=x[1],"end_date"=x[2],apply((head(dfddfd[order(-dfddfd[,3]),],i)[,3])/sum(head(dfddfd[order(-dfddfd[,3]),],i)[,3])*(head(dfddfd[order(-dfddfd[,3]),],i)[,(ncol(dfddfd)-t+1):ncol(dfddfd)]),2,"sum"))
print( c("start_date"=x[1],"end_date"=x[2],apply((head(dfddfd[order(-dfddfd[,3]),],i)[,3])/sum(head(dfddfd[order(-dfddfd[,3]),],i)[,3])*(head(dfddfd[order(-dfddfd[,3]),],i)[,(ncol(dfddfd)-t+1):ncol(dfddfd)]),2,"sum"))
)
}
######相似体拟合模型#####
##输入
##x:预测时间序列
##y:样本时间序列
##i:预测时间滑动窗口
##j:预测长度
ac_indicator<-function(x,y,i,j)
{
#将预测数据序列按预测滑动窗口长度i转换为矩阵
pred_data<-t(list2matrix_stepbystep(x,i))
#将样本数据按i+j长度转换为矩阵
sample_data<-t(list2matrix_stepbystep(y,(i+j)))
#result<-t(apply(pred_data,1,"ac_model",y=sample_data,i=5))
result<-aaply(pred_data,1,"ac_model",y=sample_data,i=5,.parallel = F)
#result_ts<-xts(result[,c(-1,-2)],order.by=as.Date(result[,2],format="%Y-%m-%d",origin = "1970-01-01"))
#colnames(result_ts)<-sapply(c(1:ncol(result_ts)),function(x){paste("pred_result",x,sep="")})
result
#result_ts
}
#EMD分解加入
EMD_test_by_step<-function(emd_data,s,k,j)
{
emd_result<-emd(as.numeric(emd_data))
date_seq<-as.numeric(.indexDate(emd_data))
prd_result <- NULL
for(i in 1:emd_result$nimf)
{
emd_tmp<- cbind(date_seq,emd_result$imf[,i])
emd_tmp<-xts(emd_tmp[,-1],order.by=as.Date(emd_tmp[,1],format="%Y-%m-%d",origin = "1970-01-01"))
if(is.null(prd_result))
{
prd_result <- ac_indicator(emd_tmp[s],emd_tmp,k,j)
}
else
{
prd_result <- cbind(prd_result,ac_indicator(emd_tmp[s],emd_tmp,k,j))
}
}
emd_tmp<- cbind(date_seq,emd_result$residue)
emd_tmp<-xts(emd_tmp[,-1],order.by=as.Date(emd_tmp[,1],format="%Y-%m-%d",origin = "1970-01-01"))
if(is.null(prd_result))
{
prd_result <- ac_indicator(emd_tmp[s],emd_tmp,k,j)
}
else
{
prd_result <- cbind(prd_result, ac_indicator(emd_tmp[s],emd_tmp,k,j))
}
prd_result
}
SZZS<-read.table('~/Downloads/stock_data/SH999999.txt',header=F,sep=",",skip = 2,fill=T)
SZZS<-SZZS[-nrow(SZZS),]
SZZS<-xts(SZZS[,-1],order.by=as.Date(SZZS[,1],format="%Y-%m-%d",origin = "1970-01-01"))
colnames(SZZS)<-c("open","high","low","close","volume","total")
SZZS<-SMA(Cl(SZZS), 5)
SZZS<-SZZS[!is.na(SZZS)]
pre_date<-"2014-10-31/"
stockdata_dir<-"~/Downloads/stock_data/"
result_data<-NULL
get_pred_data<-function(stock_id,stockdata_dir,pre_date,SZZS)
{
print(stock_id)
test111<-read.table(paste(stockdata_dir,stock_id,sep=""),header=F,sep=",",skip = 2,fill=T)
if(nrow(test111)>20)
{
test111<-test111[-nrow(test111),]
test111<-xts(test111[,-1],order.by=as.Date(test111[,1],format="%Y-%m-%d",origin = "1970-01-01"))
colnames(test111)<-c("open","high","low","close","volume","total")
testdata<-SMA(Cl(test111), 5)
testdata<-testdata[!is.na(testdata)]
if( all(testdata[pre_date] != 0) & ( length(testdata[pre_date])>=10 ) )
{
result_data_tmp<-ac_indicator(testdata[pre_date],SZZS,10,3)
result_data<-c(unlist(strsplit(stock_id,".", fixed = TRUE))[1],result_data_tmp)
write.table(t(result_data),"~/sma5_pred_res.txt",sep=",",append=T,row.names=F,col.names=F)
t(result_data)
#if(is.null(result_data))
#{
# write.table(t(result_data),"~/sma5_pred_res.txt",sep=",",col.names=F,row.names=F)
#}
#else
#{
# result_data<-c(unlist(strsplit(stock_id,".", fixed = TRUE))[1],result_data_tmp)
# write.table(as.matrix(result_data,row=1),"~/sma5_pred_res.txt",sep=",",append=T)
#}
}
}
}
library("parallel")
library("foreach")
library("doParallel")
cl <- makeCluster(mc <- getOption("cl.cores", 4))
registerDoParallel(cl)
clusterEvalQ(cl,library(xts))
clusterEvalQ(cl,library(quantstrat))
clusterEvalQ(cl,library(dtw))
clusterEvalQ(cl,library(plyr))
clusterExport(cl,"xts")
clusterExport(cl,"ac_indicator")
clusterExport(cl,"linestep")
clusterExport(cl,"list2matrix_stepbystep")
clusterExport(cl,"lsfit_x")
clusterExport(cl,"ac_model")
clusterExport(cl,"get_pred_data")
clusterExport(cl,"SZZS")
clusterExport(cl,"pre_date")
clusterExport(cl,"stockdata_dir")
system.time({testdata112121<-laply(list.files(stockdata_dir),"get_pred_data",stockdata_dir,pre_date,SZZS,.parallel = F)
})
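# Release the worker processes once the batch run has finished (added here as a sketch;
# the original script leaves the cluster running)
stopCluster(cl)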
|
---
class: title center pipe-page
# `|>`
```{r echo=FALSE, tidy=FALSE}
decorate('
leave_house(get_dressed(get_out_of_bed(wake_up(me, time = "8:00"), side = "correct"), pants = TRUE, shirt = TRUE), car = TRUE, bike = FALSE)
', eval = FALSE) |>
flair_args(color = "#005f86") |>
flair_funs(color = "#333f48", background = "#FFC0DC", before = "<b>", after = "</b>") |>
flair_input_vals(color = "#bf5700") |>
knit_print.with_flair()
```
--
```{r echo=FALSE, tidy=FALSE}
decorate('
me |>
wake_up(time = "8:00") |>
get_out_of_bed(side = "correct") |>
get_dressed(pants = TRUE, shirt = TRUE) |>
leave_house(car = TRUE, bike = FALSE)
', eval = FALSE) |>
flair("|>") |>
flair_args(color = "#005f86") |>
flair_funs(color = "#333f48", background = "#FFC0DC", before = "<b>", after = "</b>") |>
flair_input_vals(color = "#bf5700") |>
knit_print.with_flair()
```
|
/course_workshops/Wasem/prework/pipe_snippet.R
|
no_license
|
utexas-lbjp-data/slides
|
R
| false | false | 894 |
r
|
|
# This file contains scripts for preliminary exploration of the Titanic dataset.
# Open dataset
data<-read.csv("train.csv", na.strings = c(""))
# Show summaries and sample data
# ---
head(data)
str(data)
summary(data)
# ---
# Check number and percentage of missing values in each field
# ---
numNAs<-1:ncol(data)
for (x in 1:ncol(data)) {
missing <- is.na(data[[x]])
numNAs[[x]]<-sum(missing)
}
percentNAs<-numNAs/nrow(data)
print(numNAs)
print(percentNAs)
# A similar analysis can be done with sapply(data,function(x) sum(is.na(x)))
sapply(data,function(x) sum(is.na(x)))
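# A one-line alternative (sketch, base R only) that returns the NA proportion per column
colMeans(is.na(data))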
|
/Exploration.R
|
permissive
|
haalberto/MachineLearning-Titanic
|
R
| false | false | 585 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workmail_operations.R
\name{workmail_list_access_control_rules}
\alias{workmail_list_access_control_rules}
\title{Lists the access control rules for the specified organization}
\usage{
workmail_list_access_control_rules(OrganizationId)
}
\arguments{
\item{OrganizationId}{[required] The identifier for the organization.}
}
\description{
Lists the access control rules for the specified organization.
See \url{https://www.paws-r-sdk.com/docs/workmail_list_access_control_rules/} for full documentation.
}
\keyword{internal}
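% Illustrative call (sketch added for clarity; the organization id below is a placeholder)
\examples{
\dontrun{
workmail_list_access_control_rules(
  OrganizationId = "m-1234567890abcdef1234567890abcdef"
)
}
}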
|
/cran/paws.end.user.computing/man/workmail_list_access_control_rules.Rd
|
permissive
|
paws-r/paws
|
R
| false | true | 602 |
rd
|
|
setClass("ChIPQCsample",contains = "GRanges",
slots=c(AveragePeakSignal="list",
CrossCoverage="numeric",CrossCorrelation="numeric",SSD="numeric",SSDBL="numeric",CountsInPeaks="numeric",
CountsInBlackList="numeric",CountsInFeatures="list",PropInFeatures="list",
CoverageHistogram="numeric",FlagAndTagCounts="numeric",
readlength="numeric"
))
setMethod("show","ChIPQCsample",
function (object){
message("\t\t\t\t\t",class(object),"")
message("Number of Mapped reads: ",object@FlagAndTagCounts[2],"")
message("Number of Mapped reads passing MapQ filter: ",object@FlagAndTagCounts[4],"")
message("Percentage Of Reads as Non-Duplicates (NRF): ",round((((object@FlagAndTagCounts[2])-(object@FlagAndTagCounts[5]))/object@FlagAndTagCounts[2])*100, digits=2),"(",round(object@FlagAndTagCounts[5]/object@FlagAndTagCounts[2],digits=2),")","")
message("Percentage Of Reads in Blacklisted Regions: ",round((object@CountsInBlackList/object@FlagAndTagCounts[2])*100),"")
message("SSD: ",object@SSD,"")
message("Fragment Length Cross-Coverage: ",FragmentLengthCrossCoverage(object),"")
message("Relative Cross-Coverage: ",RelativeCrossCoverage(object),"")
message("Percentage Of Reads in GenomicFeature: ")
print(data.frame(ProportionOfCounts=unlist(object@CountsInFeatures),row.names=gsub("CountsIn","",names(object@CountsInFeatures)))/object@FlagAndTagCounts[4])
message("Percentage Of Reads in Peaks: ",round((object@CountsInPeaks/object@FlagAndTagCounts[4])*100,digits=2),"")
message("Number of Peaks: ",length(object),"")
print(granges(object, use.mcols=TRUE))
}
)
setGeneric("crosscoverage", function(object="ChIPQCsample") standardGeneric("crosscoverage"))
setMethod("crosscoverage", signature(object="ChIPQCsample"), function(object) ((object@CrossCoverage[1]-object@CrossCoverage)/object@CrossCoverage[1]))
setGeneric("ssd", function(object="ChIPQCsample") standardGeneric("ssd"))
setMethod("ssd", "ChIPQCsample", function(object) object@SSD)
setGeneric("fragmentlength", function(object="ChIPQCsample",width) standardGeneric("fragmentlength"))
setMethod("fragmentlength", "ChIPQCsample", function(object,width){
if(missing(width)) {
width = readlength(object)
}
MaxShift <- which.max(running(crosscoverage(object)[-seq(1:(2*readlength(object)))],width=width,allow.fewer=TRUE))+2*readlength(object)
return(unname(MaxShift))
})
setGeneric("FragmentLengthCrossCoverage", function(object="ChIPQCsample",width) standardGeneric("FragmentLengthCrossCoverage"))
setMethod("FragmentLengthCrossCoverage", signature(object="ChIPQCsample"), function(object){
FragmentLengthCrossCoverage <- crosscoverage(object)[fragmentlength(object,10)]-crosscoverage(object)[1]
return(FragmentLengthCrossCoverage)
}
)
setGeneric("ReadLengthCrossCoverage", function(object="ChIPQCsample",width) standardGeneric("ReadLengthCrossCoverage"))
setMethod("ReadLengthCrossCoverage", signature(object="ChIPQCsample"), function(object){
ReadLengthCrossCoverage <- crosscoverage(object)[readlength(object)]-crosscoverage(object)[1]
return(ReadLengthCrossCoverage)
}
)
setGeneric("RelativeCrossCoverage", function(object="ChIPQCsample",width) standardGeneric("RelativeCrossCoverage"))
setMethod("RelativeCrossCoverage", signature(object="ChIPQCsample"), function(object){
RelativeCrossCoverage <- FragmentLengthCrossCoverage(object)/ReadLengthCrossCoverage(object)
return(RelativeCrossCoverage)
}
)
setGeneric("flagtagcounts", function(object="ChIPQCsample") standardGeneric("flagtagcounts"))
setMethod("flagtagcounts", "ChIPQCsample", function(object) object@FlagAndTagCounts)
setGeneric("flagtagcounts", function(object="ChIPQCsample") standardGeneric("flagtagcounts"))
setMethod("flagtagcounts", "ChIPQCsample", function(object) object@FlagAndTagCounts)
setGeneric("coveragehistogram", function(object="ChIPQCsample") standardGeneric("coveragehistogram"))
setMethod("coveragehistogram", "ChIPQCsample", function(object) object@CoverageHistogram)
setGeneric("averagepeaksignal", function(object="ChIPQCsample") standardGeneric("averagepeaksignal"))
setMethod("averagepeaksignal", "ChIPQCsample", function(object) object@AveragePeakSignal[[1]])
setGeneric("Normalisedaveragepeaksignal", function(object="ChIPQCsample") standardGeneric("Normalisedaveragepeaksignal"))
setMethod("Normalisedaveragepeaksignal", "ChIPQCsample", function(object) object@AveragePeakSignal[[2]])
setGeneric("peaks", function(object="ChIPQCsample") standardGeneric("peaks"))
setMethod("peaks", "ChIPQCsample", function(object) granges(object, use.mcols=TRUE))
setGeneric("readlength", function(object="ChIPQCsample") standardGeneric("readlength"))
setMethod("readlength", "ChIPQCsample", function(object) object@readlength)
setGeneric("PropGenomeInFeature", function(object="ChIPQCsample") standardGeneric("PropGenomeInFeature"))
setMethod("PropGenomeInFeature", "ChIPQCsample", function(object) {
PropInFeatures <- object@PropInFeatures
names(PropInFeatures) <- gsub("PropIn","",names(PropInFeatures))
return(PropInFeatures)
})
setGeneric("CountsInFeatures", function(object="ChIPQCsample") standardGeneric("CountsInFeatures"))
setMethod("CountsInFeatures", "ChIPQCsample", function(object) {
CountsInFeatures <- object@CountsInFeatures
names(CountsInFeatures) <- gsub("CountsIn","",names(CountsInFeatures))
return(CountsInFeatures)
})
setGeneric("PropCountsInFeatures", function(object="ChIPQCsample") standardGeneric("PropCountsInFeatures"))
setMethod("PropCountsInFeatures", "ChIPQCsample", function(object){
return(as.list(unlist(CountsInFeatures(object))/mapped(object)))
})
setGeneric("regi", function(object="ChIPQCsample") standardGeneric("regi"))
setMethod("regi", "ChIPQCsample", function(object){
PropCountInFeatures <- data.frame(PropCountInFeatures=unlist(PropCountsInFeatures(object)),row.names=names(PropCountsInFeatures(object)))
if(sum(is.na(PropCountInFeatures))>0) {
#warning('No genomic features computed',call.=FALSE)
savenames = rownames(PropCountInFeatures)
PropCountInFeatures = PropCountInFeatures[,1]
names(PropCountInFeatures) = savenames
return(PropCountInFeatures)
}
PropGenomeInFeatures <- data.frame(PropGenomeInFeature=unlist(PropGenomeInFeature(object)),row.names=names(PropGenomeInFeature(object)))
regiFrame <- merge(PropCountInFeatures,PropGenomeInFeatures,by=0,all=FALSE,sort=FALSE)
regi <- log2(regiFrame[,"PropCountInFeatures"]/regiFrame[,"PropGenomeInFeature"])
names(regi) <- regiFrame[,"Row.names"]
#regi = regi[c(1,2,3,7,6,5,4)]
#names(regi)[2] = "5UTRs"
return(regi)
})
setGeneric("frip", function(object="ChIPQCsample") standardGeneric("frip"))
setMethod("frip", "ChIPQCsample", function(object){
CountsInPeaks <- object@CountsInPeaks
TotalCounts <- object@FlagAndTagCounts["Mapped"]
FRIP <- unname(CountsInPeaks/TotalCounts)
return(FRIP)
}
)
setGeneric("rip", function(object="ChIPQCsample") standardGeneric("rip"))
setMethod("rip", "ChIPQCsample", function(object){
CountsInPeaks <- unname(object@CountsInPeaks)
return(CountsInPeaks)
}
)
setGeneric("ribl", function(object="ChIPQCsample") standardGeneric("ribl"))
setMethod("ribl", "ChIPQCsample", function(object){
CountsInBlackList <- unname(object@CountsInBlackList)
return(CountsInBlackList)
}
)
setGeneric("mapped", function(object="ChIPQCsample") standardGeneric("mapped"))
setMethod("mapped", "ChIPQCsample", function(object){
MappedCounts <- unname(object@FlagAndTagCounts[2])
return(MappedCounts)
}
)
setGeneric("QCmetrics", function(object="ChIPQCsample") standardGeneric("QCmetrics"))
setMethod("QCmetrics", "ChIPQCsample", function(object){
fl = fragmentlength(object,width=readlength(object))
rcc = signif(RelativeCrossCoverage(object),3)
if (length(fl) == 0){
fl = 0
}
if (length(rcc) == 0){
rcc = 0
}
res = c(reads(object,FALSE),
signif((mapped(object)/reads(object,FALSE))*100,3),
signif((1-reads(object,TRUE)/reads(object,FALSE))*100,3),
signif(duplicateRate(object)*100,3),
readlength(object),
fl,
rcc,
#signif(FragmentLengthCrossCoverage(object),3),
#signif(ReadLengthCrossCoverage(object),3),
signif(ssd(object),3),
signif(frip(object)*100,3))
names(res) = c("Reads",
"Map%",
"Filt%",
"Dup%",
"ReadL",
"FragL",
"RelCC",
#"FragLenCC",
#"ReadLenCC",
"SSD",
"RiP%")
blk = ribl(object)
if(!is.na(blk)) {
names(blk) <- "RiBL%"
blk = signif(blk/res[1]*100,3)
res = c(res,blk)
}
return(res)
})
setGeneric("reads", function(object="ChIPQCsample", bFiltered) standardGeneric("reads"))
setMethod("reads", "ChIPQCsample", function(object,bFiltered){
if(missing(bFiltered)) bFiltered=TRUE
if(!bFiltered) {
res = object@FlagAndTagCounts[1] + object@FlagAndTagCounts[2]
} else {
res = object@FlagAndTagCounts[4]
}
return(res)
})
setGeneric("duplicates", function(object="ChIPQCsample", bFiltered) standardGeneric("duplicates"))
setMethod("duplicates", "ChIPQCsample", function(object,bFiltered){
if(missing(bFiltered)) bFiltered=TRUE
if(bFiltered) {
res = object@FlagAndTagCounts[5]
} else {
res = object@FlagAndTagCounts[3]
}
return(res)
})
setGeneric("duplicateRate", function(object="ChIPQCsample", bFiltered) standardGeneric("duplicateRate"))
setMethod("duplicateRate", "ChIPQCsample", function(object, bFiltered){
if(missing(bFiltered)) bFiltered=TRUE
res = duplicates(object,bFiltered) / reads(object, bFiltered)
return(res)
})
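# Illustrative use of the accessors defined above (sketch; `qc` is assumed to be a
# ChIPQCsample object, e.g. the result of ChIPQCsample("reads.bam")):
#   QCmetrics(qc)      # one-line summary of the QC metrics
#   frip(qc)           # fraction of reads in peaks
#   duplicateRate(qc)  # duplication rate for filtered reads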
|
/R/ChIPQCsample-class.R
|
no_license
|
ejh243/ChIPQC
|
R
| false | false | 10,227 |
r
|
setClass("ChIPQCsample",contains = "GRanges",
slots=c(AveragePeakSignal="list",
CrossCoverage="numeric",CrossCorrelation="numeric",SSD="numeric",SSDBL="numeric",CountsInPeaks="numeric",
CountsInBlackList="numeric",CountsInFeatures="list",PropInFeatures="list",
CoverageHistogram="numeric",FlagAndTagCounts="numeric",
readlength="numeric"
))
setMethod("show","ChIPQCsample",
function (object){
message("\t\t\t\t\t",class(object),"")
message("Number of Mapped reads: ",object@FlagAndTagCounts[2],"")
message("Number of Mapped reads passing MapQ filter: ",object@FlagAndTagCounts[4],"")
message("Percentage Of Reads as Non-Duplicates (NRF): ",round((((object@FlagAndTagCounts[2])-(object@FlagAndTagCounts[5]))/object@FlagAndTagCounts[2])*100, digits=2),"(",round(object@FlagAndTagCounts[5]/object@FlagAndTagCounts[2],digits=2),")","")
message("Percentage Of Reads in Blacklisted Regions: ",round((object@CountsInBlackList/object@FlagAndTagCounts[2])*100),"")
message("SSD: ",object@SSD,"")
message("Fragment Length Cross-Coverage: ",FragmentLengthCrossCoverage(object),"")
message("Relative Cross-Coverage: ",RelativeCrossCoverage(object),"")
message("Percentage Of Reads in GenomicFeature: ")
print(data.frame(ProportionOfCounts=unlist(object@CountsInFeatures),row.names=gsub("CountsIn","",names(object@CountsInFeatures)))/object@FlagAndTagCounts[4])
message("Percentage Of Reads in Peaks: ",round((object@CountsInPeaks/object@FlagAndTagCounts[4])*100,digits=2),"")
message("Number of Peaks: ",length(object),"")
print(granges(object, use.mcols=TRUE))
}
)
setGeneric("crosscoverage", function(object="ChIPQCsample") standardGeneric("crosscoverage"))
setMethod("crosscoverage", signature(object="ChIPQCsample"), function(object) ((object@CrossCoverage[1]-object@CrossCoverage)/object@CrossCoverage[1]))
setGeneric("ssd", function(object="ChIPQCsample") standardGeneric("ssd"))
setMethod("ssd", "ChIPQCsample", function(object) object@SSD)
setGeneric("fragmentlength", function(object="ChIPQCsample",width) standardGeneric("fragmentlength"))
setMethod("fragmentlength", "ChIPQCsample", function(object,width){
if(missing(width)) {
width = readlength(object)
}
MaxShift <- which.max(running(crosscoverage(object)[-seq(1:(2*readlength(object)))],width=width,allow.fewer=TRUE))+2*readlength(object)
return(unname(MaxShift))
})
setGeneric("FragmentLengthCrossCoverage", function(object="ChIPQCsample",width) standardGeneric("FragmentLengthCrossCoverage"))
setMethod("FragmentLengthCrossCoverage", signature(object="ChIPQCsample"), function(object){
FragmentLengthCrossCoverage <- crosscoverage(object)[fragmentlength(object,10)]-crosscoverage(object)[1]
return(FragmentLengthCrossCoverage)
}
)
setGeneric("ReadLengthCrossCoverage", function(object="ChIPQCsample",width) standardGeneric("ReadLengthCrossCoverage"))
setMethod("ReadLengthCrossCoverage", signature(object="ChIPQCsample"), function(object){
ReadLengthCrossCoverage <- crosscoverage(object)[readlength(object)]-crosscoverage(object)[1]
return(ReadLengthCrossCoverage)
}
)
setGeneric("RelativeCrossCoverage", function(object="ChIPQCsample",width) standardGeneric("RelativeCrossCoverage"))
setMethod("RelativeCrossCoverage", signature(object="ChIPQCsample"), function(object){
RelativeCrossCoverage <- FragmentLengthCrossCoverage(object)/ReadLengthCrossCoverage(object)
return(RelativeCrossCoverage)
}
)
setGeneric("flagtagcounts", function(object="ChIPQCsample") standardGeneric("flagtagcounts"))
setMethod("flagtagcounts", "ChIPQCsample", function(object) object@FlagAndTagCounts)
setGeneric("flagtagcounts", function(object="ChIPQCsample") standardGeneric("flagtagcounts"))
setMethod("flagtagcounts", "ChIPQCsample", function(object) object@FlagAndTagCounts)
setGeneric("coveragehistogram", function(object="ChIPQCsample") standardGeneric("coveragehistogram"))
setMethod("coveragehistogram", "ChIPQCsample", function(object) object@CoverageHistogram)
setGeneric("averagepeaksignal", function(object="ChIPQCsample") standardGeneric("averagepeaksignal"))
setMethod("averagepeaksignal", "ChIPQCsample", function(object) object@AveragePeakSignal[[1]])
setGeneric("Normalisedaveragepeaksignal", function(object="ChIPQCsample") standardGeneric("Normalisedaveragepeaksignal"))
setMethod("Normalisedaveragepeaksignal", "ChIPQCsample", function(object) object@AveragePeakSignal[[2]])
setGeneric("peaks", function(object="ChIPQCsample") standardGeneric("peaks"))
setMethod("peaks", "ChIPQCsample", function(object) granges(object, use.mcols=TRUE))
setGeneric("readlength", function(object="ChIPQCsample") standardGeneric("readlength"))
setMethod("readlength", "ChIPQCsample", function(object) object@readlength)
setGeneric("PropGenomeInFeature", function(object="ChIPQCsample") standardGeneric("PropGenomeInFeature"))
setMethod("PropGenomeInFeature", "ChIPQCsample", function(object) {
PropInFeatures <- object@PropInFeatures
names(PropInFeatures) <- gsub("PropIn","",names(PropInFeatures))
return(PropInFeatures)
})
setGeneric("CountsInFeatures", function(object="ChIPQCsample") standardGeneric("CountsInFeatures"))
setMethod("CountsInFeatures", "ChIPQCsample", function(object) {
CountsInFeatures <- object@CountsInFeatures
names(CountsInFeatures) <- gsub("CountsIn","",names(CountsInFeatures))
return(CountsInFeatures)
})
setGeneric("PropCountsInFeatures", function(object="ChIPQCsample") standardGeneric("PropCountsInFeatures"))
setMethod("PropCountsInFeatures", "ChIPQCsample", function(object){
return(as.list(unlist(CountsInFeatures(object))/mapped(object)))
})
setGeneric("regi", function(object="ChIPQCsample") standardGeneric("regi"))
setMethod("regi", "ChIPQCsample", function(object){
PropCountInFeatures <- data.frame(PropCountInFeatures=unlist(PropCountsInFeatures(object)),row.names=names(PropCountsInFeatures(object)))
if(sum(is.na(PropCountInFeatures))>0) {
#warning('No genomic features computed',call.=FALSE)
savenames = rownames(PropCountInFeatures)
PropCountInFeatures = PropCountInFeatures[,1]
names(PropCountInFeatures) = savenames
return(PropCountInFeatures)
}
PropGenomeInFeatures <- data.frame(PropGenomeInFeature=unlist(PropGenomeInFeature(object)),row.names=names(PropGenomeInFeature(object)))
regiFrame <- merge(PropCountInFeatures,PropGenomeInFeatures,by=0,all=FALSE,sort=FALSE)
regi <- log2(regiFrame[,"PropCountInFeatures"]/regiFrame[,"PropGenomeInFeature"])
names(regi) <- regiFrame[,"Row.names"]
#regi = regi[c(1,2,3,7,6,5,4)]
#names(regi)[2] = "5UTRs"
return(regi)
})
setGeneric("frip", function(object="ChIPQCsample") standardGeneric("frip"))
setMethod("frip", "ChIPQCsample", function(object){
CountsInPeaks <- object@CountsInPeaks
TotalCounts <- object@FlagAndTagCounts["Mapped"]
FRIP <- unname(CountsInPeaks/TotalCounts)
return(FRIP)
}
)
setGeneric("rip", function(object="ChIPQCsample") standardGeneric("rip"))
setMethod("rip", "ChIPQCsample", function(object){
CountsInPeaks <- unname(object@CountsInPeaks)
return(CountsInPeaks)
}
)
setGeneric("ribl", function(object="ChIPQCsample") standardGeneric("ribl"))
setMethod("ribl", "ChIPQCsample", function(object){
CountsInBlackList <- unname(object@CountsInBlackList)
return(CountsInBlackList)
}
)
setGeneric("mapped", function(object="ChIPQCsample") standardGeneric("mapped"))
setMethod("mapped", "ChIPQCsample", function(object){
MappedCounts <- unname(object@FlagAndTagCounts[2])
return(MappedCounts)
}
)
setGeneric("QCmetrics", function(object="ChIPQCsample") standardGeneric("QCmetrics"))
setMethod("QCmetrics", "ChIPQCsample", function(object){
fl = fragmentlength(object,width=readlength(object))
rcc = signif(RelativeCrossCoverage(object),3)
if (length(fl) == 0){
fl = 0
}
if (length(rcc) == 0){
rcc = 0
}
res = c(reads(object,FALSE),
signif((mapped(object)/reads(object,FALSE))*100,3),
signif((1-reads(object,TRUE)/reads(object,FALSE))*100,3),
signif(duplicateRate(object)*100,3),
readlength(object),
fl,
rcc,
#signif(FragmentLengthCrossCoverage(object),3),
#signif(ReadLengthCrossCoverage(object),3),
signif(ssd(object),3),
signif(frip(object)*100,3))
names(res) = c("Reads",
"Map%",
"Filt%",
"Dup%",
"ReadL",
"FragL",
"RelCC",
#"FragLenCC",
#"ReadLenCC",
"SSD",
"RiP%")
blk = ribl(object)
if(!is.na(blk)) {
names(blk) <- "RiBL%"
blk = signif(blk/res[1]*100,3)
res = c(res,blk)
}
return(res)
})
setGeneric("reads", function(object="ChIPQCsample", bFiltered) standardGeneric("reads"))
setMethod("reads", "ChIPQCsample", function(object,bFiltered){
if(missing(bFiltered)) bFiltered=TRUE
if(!bFiltered) {
res = object@FlagAndTagCounts[1] + object@FlagAndTagCounts[2]
} else {
res = object@FlagAndTagCounts[4]
}
return(res)
})
setGeneric("duplicates", function(object="ChIPQCsample", bFiltered) standardGeneric("duplicates"))
setMethod("duplicates", "ChIPQCsample", function(object,bFiltered){
if(missing(bFiltered)) bFiltered=TRUE
if(bFiltered) {
res = object@FlagAndTagCounts[5]
} else {
res = object@FlagAndTagCounts[3]
}
return(res)
})
setGeneric("duplicateRate", function(object="ChIPQCsample", bFiltered) standardGeneric("duplicateRate"))
setMethod("duplicateRate", "ChIPQCsample", function(object, bFiltered){
if(missing(bFiltered)) bFiltered=TRUE
res = duplicates(object,bFiltered) / reads(object, bFiltered)
return(res)
})
|
source("readdata.R")
data <- readData()
png("./plot4.png", width = 480, height = 480)
par(mfrow=c(2,2)) # draw 4 plots in a 2 x 2 grid.
# plot 1
plot(data$DateTime, data$Global_active_power, type="l", xlab="", ylab="Global Active Power")
# plot 2
plot(data$DateTime, data$Voltage, type="l", xlab="datetime", ylab="Voltage")
# plot 3
plot(data$DateTime, data$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", col="black")
lines(data$DateTime, data$Sub_metering_2, col="red")
lines(data$DateTime, data$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, box.lwd=0, col=c("black", "red", "blue"))
# plot 4
plot(data$DateTime, data$Global_reactive_power, type="n", xlab="datetime", ylab="Global_reactive_power")
lines(data$DateTime, data$Global_reactive_power)
dev.off()
|
/plot4.R
|
no_license
|
lebdave-coursera/ExData_Plotting1
|
R
| false | false | 844 |
r
|
source("readdata.R")
data <- readData()
png("./plot4.png", width = 480, height = 480)
par(mfrow=c(2,2)) # draw 4 plots in a 2 x 2 grid.
# plot 1
plot(data$DateTime, data$Global_active_power, type="l", xlab="", ylab="Global Active Power")
# plot 2
plot(data$DateTime, data$Voltage, type="l", xlab="datetime", ylab="Voltage")
# plot 3
plot(data$DateTime, data$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", col="black")
lines(data$DateTime, data$Sub_metering_2, col="red")
lines(data$DateTime, data$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, box.lwd=0, col=c("black", "red", "blue"))
# plot 4
plot(data$DateTime, data$Global_reactive_power, type="n", xlab="datetime", ylab="Global_reactive_power")
lines(data$DateTime, data$Global_reactive_power)
dev.off()
|
#' @param par.id [\code{character(1)}]\cr
#' Optional name of parameter vector.
#' Default is \dQuote{x}.
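# Usage sketch (not part of the template itself): other roxygen blocks in the package can
# pull this fragment in with the @template tag, e.g.
#   #' @template arg_par.id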
|
/man-roxygen/arg_par.id.R
|
permissive
|
jakobbossek/smoof
|
R
| false | false | 110 |
r
|
|
# hard code the US repo for CRAN
r <- getOption("repos")
r["CRAN"] <- "http://cran.us.r-project.org"
options(repos = r)
rm(r)
# Don't spam large data sets
options(max.print = 1000)
# Stick these in their own environment so that rm(list=ls()) doesn't kill them
attach(list(table0 = function(...) { table(list(...), useNA='ifany') } ,
sum0 = function(...) { sum(..., na.rm=TRUE) } ,
mean0 = function(...) { mean(..., na.rm=TRUE) } ,
peek = function(x, n=3) {
if(is.null(dim(x))) {
n <- min(n, length(x)); return(x[1:n]) };
n <- min(n, dim(x)); x[1:n, 1:n] }),
name = 'MyFunctions')
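# Example calls for the helpers attached above (sketch, shown as comments so nothing runs at startup):
#   table0(c(1, 2, NA, 2))      # tabulates and keeps the NA as a level
#   mean0(c(1, NA, 3))          # 2, NAs removed
#   peek(matrix(1:100, 10))     # top-left 3 x 3 corner of the matrix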
|
/.Rprofile
|
no_license
|
umeshu/dotfiles
|
R
| false | false | 674 |
rprofile
|
|
`ensemble.batch` <- function(
x=NULL, xn=c(x), ext=NULL,
species.presence=NULL, species.absence=NULL,
presence.min=20,
an=1000, excludep=FALSE, CIRCLES.at=FALSE, CIRCLES.d=100000,
k.splits=4, k.test=0,
n.ensembles=1,
SINK=FALSE,
RASTER.format="raster", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
models.save=FALSE,
threshold.method="spec_sens", threshold.sensitivity=0.9, threshold.PresenceAbsence=FALSE,
ENSEMBLE.best=0, ENSEMBLE.min=0.7, ENSEMBLE.exponent=1,
input.weights=NULL,
MAXENT=1, GBM=1, GBMSTEP=1, RF=1, GLM=1, GLMSTEP=1, GAM=1, GAMSTEP=1, MGCV=1, MGCVFIX=0,
EARTH=1, RPART=1, NNET=1, FDA=1, SVM=1, SVME=1, BIOCLIM=1, DOMAIN=1, MAHAL=1,
PROBIT=FALSE, AUC.weights=TRUE,
Yweights="BIOMOD",
layer.drops=NULL, factors=NULL, dummy.vars=NULL,
formulae.defaults=TRUE, maxit=100,
MAXENT.a=NULL, MAXENT.an=10000, MAXENT.BackData=NULL, MAXENT.path=paste(getwd(), "/models/maxent", sep=""),
GBM.formula=NULL, GBM.n.trees=2001,
GBMSTEP.gbm.x=2:(1+raster::nlayers(x)), GBMSTEP.tree.complexity=5, GBMSTEP.learning.rate=0.005,
GBMSTEP.bag.fraction=0.5, GBMSTEP.step.size=100,
RF.formula=NULL, RF.ntree=751, RF.mtry=floor(sqrt(raster::nlayers(x))),
GLM.formula=NULL, GLM.family=binomial(link="logit"),
GLMSTEP.steps=1000, STEP.formula=NULL, GLMSTEP.scope=NULL, GLMSTEP.k=2,
GAM.formula=NULL, GAM.family=binomial(link="logit"),
GAMSTEP.steps=1000, GAMSTEP.scope=NULL, GAMSTEP.pos=1,
MGCV.formula=NULL, MGCV.select=FALSE,
MGCVFIX.formula=NULL,
EARTH.formula=NULL, EARTH.glm=list(family=binomial(link="logit"), maxit=maxit),
RPART.formula=NULL, RPART.xval=50,
NNET.formula=NULL, NNET.size=8, NNET.decay=0.01,
FDA.formula=NULL,
SVM.formula=NULL, SVME.formula=NULL,
MAHAL.shape=1
)
{
.BiodiversityR <- new.env()
k.test <- as.integer(k.test)
k.splits <- as.integer(k.splits)
if (k.splits < 1) {
cat(paste("\n", "NOTE: parameter k.splits was set to be smaller than 1", sep = ""))
cat(paste("\n", "default value of 4 therefore set for parameter k.splits", sep = ""))
k.splits <- 4
}
n.ensembles <- as.integer(n.ensembles)
if (n.ensembles < 1) {n.ensembles <- 1}
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(xn) == T) {
cat(paste("\n", "NOTE: new rasterStack assumed to be equal to the base rasterStack", sep = ""))
xn <- x
}
xn <- c(xn)
# need to recalculate threshold for mean of ensembles
# therefore put x as first of new stacks
if (n.ensembles > 1) {
xn <- c(x, xn)
i <- 1
while (i < length(xn)) {
i <- i+1
if(identical(x, xn[[i]])) {xn[[i]] <- NULL}
}
}
species.presence <- data.frame(species.presence)
species.absence <- data.frame(species.absence)
if (ncol(species.presence) < 2) {stop("species.presence expected to be 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns")}
if (ncol(species.presence) == 2) {
cat(paste("\n", "species.presence was expected to be 3-column data.frame with columns representing species, x (e.g., lon) and y (e.g., lat)", sep = ""))
cat(paste("\n", "only two columns were provided, it is therefore assumed that these reflect x and y coordinates for a single species", "\n\n", sep = ""))
species.name <- rep("Species001", nrow(species.presence))
species.presence <- cbind(species.name, species.presence)
species.presence <- data.frame(species.presence)
species.presence[,2] <- as.numeric(species.presence[,2])
species.presence[,3] <- as.numeric(species.presence[,3])
}
if (ncol(species.presence) > 3) {
cat(paste("\n", "species.presence was expected to be 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns", sep = ""))
cat(paste("\n", "only first three columns used", "\n\n", sep = ""))
species.presence <- species.presence[,c(1:3)]
species.presence[,2] <- as.numeric(species.presence[,2])
species.presence[,3] <- as.numeric(species.presence[,3])
}
if (is.null(species.absence)==F && ncol(species.absence) < 2) {stop("species.absence expected to be a 2-column data.frame with x (e.g., lon) and y (e.g., lat), or 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns")}
if (is.null(species.absence)==F && ncol(species.absence)> 3) {
cat(paste("\n", "species.absence was expected to be 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns", sep = ""))
cat(paste("\n", "only first three columns used", "\n\n", sep = ""))
species.absence <- species.absence[,c(1:3)]
species.absence[,2] <- as.numeric(species.absence[,2])
species.absence[,3] <- as.numeric(species.absence[,3])
}
if (is.null(species.absence)==F && ncol(species.absence) == 2) {
cat(paste("\n", "species.absence was expected to be 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns", sep = ""))
cat(paste("\n", "only two columns were provided, it is therefore assumed that these reflect x and y coordinates for absence locations to be used for each species run", "\n\n", sep = ""))
species.absence[,1] <- as.numeric(species.absence[,1])
species.absence[,2] <- as.numeric(species.absence[,2])
as <- species.absence
}
#
# process species by species
species.names <- levels(droplevels(factor(species.presence[,1])))
for (s in 1:length(species.names)) {
focal.species <- species.names[s]
# check if species has required minimum number of presence points
n.pres <- nrow(species.presence[species.presence[,1]==focal.species,])
if (n.pres < presence.min) {
cat(paste("\n", "Species: ", focal.species, " only has ", n.pres, " presence locations", sep = ""))
cat(paste("\n", "This species therefore not included in batch processing", "\n\n", sep = ""))
}else{
# create output file
if (s==1) {dir.create("outputs", showWarnings = F)}
paste.file <- paste(getwd(), "/outputs/", focal.species, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.batch function)", "\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
cat(paste("\n", "Evaluations for species: ", focal.species, "\n", sep = ""))
ps <- species.presence[species.presence[,1]==focal.species, c(2:3)]
if (is.null(species.absence)==F && ncol(species.absence) == 3) {
as <- species.absence[species.absence[,1]==focal.species, c(2:3)]
}
if (is.null(species.absence)==T) {
if (excludep == T) {
as <- dismo::randomPoints(x[[1]], n=an, p=ps, ext=ext, excludep=T)
}else{
as <- dismo::randomPoints(x[[1]], n=an, p=NULL, ext=ext, excludep=F)
}
}
assign("ps", ps, envir=.BiodiversityR)
assign("as", as, envir=.BiodiversityR)
# repeat the whole process for n.ensembles
RASTER.species.name1 <- focal.species
for (runs in 1:n.ensembles) {
if (n.ensembles > 1) {
cat(paste("\n", focal.species, ": ENSEMBLE ", runs, "\n\n", sep = ""))
RASTER.species.name1 <- paste(focal.species, "_ENSEMBLE_", runs, sep="")
}
#1. first ensemble tests
calibration.1 <- ensemble.test.splits(x=x, p=ps, a=as, ext=ext, k=k.splits,
CIRCLES.at=CIRCLES.at, CIRCLES.d=CIRCLES.d,
ENSEMBLE.tune=T,
ENSEMBLE.best=ENSEMBLE.best, ENSEMBLE.min=ENSEMBLE.min,
ENSEMBLE.exponent=ENSEMBLE.exponent,
species.name = RASTER.species.name1,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence,
input.weights=input.weights,
MAXENT=MAXENT, GBM=GBM, GBMSTEP=GBMSTEP, RF=RF, GLM=GLM, GLMSTEP=GLMSTEP,
GAM=GAM, GAMSTEP=GAMSTEP, MGCV=MGCV, MGCVFIX=MGCVFIX, EARTH=EARTH, RPART=RPART,
NNET=NNET, FDA=FDA, SVM=SVM, SVME=SVME, BIOCLIM=BIOCLIM, DOMAIN=DOMAIN, MAHAL=MAHAL,
PROBIT=PROBIT, VIF=T,
Yweights=Yweights,
layer.drops=layer.drops, factors=factors, dummy.vars=dummy.vars,
maxit=maxit,
MAXENT.a=MAXENT.a, MAXENT.an=MAXENT.an,
MAXENT.BackData=MAXENT.BackData, MAXENT.path=MAXENT.path,
GBM.formula=GBM.formula, GBM.n.trees=GBM.n.trees,
GBMSTEP.gbm.x=GBMSTEP.gbm.x, GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=RF.formula, RF.ntree=RF.ntree, RF.mtry=RF.mtry,
GLM.formula=GLM.formula, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=STEP.formula, GLMSTEP.scope=GLMSTEP.scope,
GAM.formula=GAM.formula, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=GAMSTEP.scope, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=MGCV.formula, MGCV.select=MGCV.select,
MGCVFIX.formula=MGCVFIX.formula,
EARTH.formula=EARTH.formula, EARTH.glm=EARTH.glm,
RPART.formula=RPART.formula, RPART.xval=RPART.xval,
NNET.formula=NNET.formula, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=FDA.formula, SVM.formula=SVM.formula, SVME.formula=SVME.formula,
MAHAL.shape=MAHAL.shape)
#2. calibrate final model
# xn.f <- eval(as.name(xn.focal))
cat(paste("\n", "Final model calibrations for species: ", RASTER.species.name1, "\n", sep = ""))
cat(paste("\n", "Minimum input weight is 0.05", "\n", sep=""))
if (AUC.weights == TRUE) {
output.weights <- calibration.1$output.weights.AUC
}else{
output.weights <- calibration.1$output.weights
}
output.weights[output.weights < 0.05] <- 0
print(output.weights)
if (sum(output.weights) > 0) {
calibration.2 <- ensemble.test(
x=x, p=ps, a=as, ext=ext, k=k.test, pt=NULL, at=NULL,
models.keep=TRUE, evaluations.keep=TRUE,
PLOTS=F,
models.save=models.save, species.name=RASTER.species.name1,
AUC.weights=F, ENSEMBLE.tune=F,
input.weights=output.weights,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence,
RASTER.format=RASTER.format,
PROBIT=PROBIT, VIF=T,
Yweights=Yweights,
layer.drops=layer.drops, factors=factors, dummy.vars=dummy.vars,
maxit=maxit,
MAXENT.BackData=MAXENT.BackData, MAXENT.path=MAXENT.path,
GBM.formula=GBM.formula, GBM.n.trees=GBM.n.trees,
GBMSTEP.gbm.x=GBMSTEP.gbm.x, GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=RF.formula, RF.ntree=RF.ntree, RF.mtry=RF.mtry,
GLM.formula=GLM.formula, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=STEP.formula, GLMSTEP.scope=GLMSTEP.scope,
GAM.formula=GAM.formula, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=GAMSTEP.scope, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=MGCV.formula, MGCV.select=MGCV.select,
MGCVFIX.formula=MGCVFIX.formula,
EARTH.formula=EARTH.formula, EARTH.glm=EARTH.glm,
RPART.formula=RPART.formula, RPART.xval=RPART.xval,
NNET.formula=NNET.formula, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=FDA.formula, SVM.formula=SVM.formula, SVME.formula=SVME.formula,
MAHAL.shape=MAHAL.shape)
#3. predict for all the other rasters
for (n in 1:length(xn)) {
xn.f <- xn[[n]]
if(length(xn.f@title) == 0) {xn.f@title <- paste("stack", n, sep="")}
if (gsub(".", "_", xn.f@title, fixed=T) != xn.f@title) {cat(paste("\n", "WARNING: title of stack (", xn.f@title, ") contains '.'", "\n\n", sep = ""))}
cat(paste("\n", "Predictions for species: ", RASTER.species.name1, " for rasterStack: ", xn.f@title, sep = ""))
tryCatch(rasters2 <- ensemble.raster(xn=xn.f, ext=ext,
models.list=calibration.2$models,
RASTER.species.name=RASTER.species.name1,
RASTER.format=RASTER.format, RASTER.datatype=RASTER.datatype, RASTER.NAflag=RASTER.NAflag,
KML.out=KML.out, KML.maxpixels=KML.maxpixels, KML.blur=KML.blur),
error= function(err) {print(paste("WARNING: prediction failed for stack: ", xn.f@title, sep=""))},
silent=T)
if(runs==n.ensembles && n.ensembles>1 && RASTER.format=="raster") {
# recalculate threshold for mean of predictions with calibration stack (xn[[1]])
if (n == 1) {
calibrate.mean <- NULL
calibrate.mean <- ensemble.mean(RASTER.species.name=focal.species, RASTER.stack.name=xn.f@title,
positive.filters = c("grd", "_ENSEMBLE_"), negative.filters = c("xml"),
RASTER.format=RASTER.format, RASTER.datatype=RASTER.datatype, RASTER.NAflag=RASTER.NAflag,
KML.out=KML.out, KML.maxpixels=KML.maxpixels, KML.blur=KML.blur,
p=ps, a=as,
pt = NULL, at = NULL,
threshold = -1,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence)
cat(paste("\n", "threshold for mean suitability: ", calibrate.mean$threshold, "\n", sep = ""))
}else{
ensemble.mean(RASTER.species.name=focal.species, RASTER.stack.name=xn.f@title,
positive.filters = c("grd", "_ENSEMBLE_"), negative.filters = c("xml"),
RASTER.format=RASTER.format, RASTER.datatype=RASTER.datatype, RASTER.NAflag=RASTER.NAflag,
KML.out=KML.out, KML.maxpixels=KML.maxpixels, KML.blur=KML.blur,
p=NULL, a=NULL,
pt = NULL, at = NULL,
threshold = calibrate.mean$threshold,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence)
}
}
}
# sum output weights > 0 loop
}
# n ensembles loop
}
# if (sufficient presence locations) loop
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
}
# s (species) loop
}
result <- list(species=species.names, call=match.call())
return(result)
}
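# Illustrative call (sketch; `predictors` is assumed to be a RasterStack of environmental
# layers and `pa.data` a 3-column data.frame of species, lon and lat):
#   ensemble.batch(x = predictors, xn = c(predictors),
#       species.presence = pa.data, an = 1000,
#       k.splits = 4, n.ensembles = 1)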
|
/BiodiversityR/R/ensemble.batch.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 15,344 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/node_functions.R
\name{insert_parameters}
\alias{insert_parameters}
\title{Insert values of parameters into a DAG}
\usage{
insert_parameters(parameters, dag)
}
\arguments{
\item{parameters}{a named list of parameters and their values}
\item{dag}{the DAG the parameters are going to be inserted into}
}
\value{
the altered DAG
}
\description{
Insert values of parameters into a DAG
}
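% Illustrative usage (sketch; assumes `dag` holds the DAG extracted from a greta model)
\examples{
\dontrun{
insert_parameters(list(beta = 0.5, sigma = 1), dag)
}
}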
|
/man/insert_parameters.Rd
|
no_license
|
jeffreypullin/rgreta
|
R
| false | true | 456 |
rd
|
|
testlist <- list(A = structure(c(2.17107980444229e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613121058-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 323 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aqp-label-placement-solvers.R
\name{SANN_1D}
\alias{SANN_1D}
\title{Fix Overlap within a Sequence via Simulated Annealing}
\usage{
SANN_1D(
x,
thresh = 0.6,
adj = thresh * 2/3,
min.x = min(x) - 0.2,
max.x = max(x) + 0.2,
maxIter = 1000,
trace = FALSE,
tiny = 1e-04,
T0 = 500,
k = 10,
...
)
}
\arguments{
\item{x}{vector of horizontal positions, pre-sorted}
\item{thresh}{horizontal threshold defining "overlap" or distance between elements of \code{x}. For adjusting soil profile sketches values are typically < 1 and likely in (0.3, 0.8).}
\item{adj}{specifies the size of perturbations within \code{runif(min = adj * -1, max = adj)}. Larger values will sometimes reduce the number of iterations required to solve particularly difficult overlap conditions. See \code{coolingRate} argument when \code{adj} is large}
\item{min.x}{left-side boundary condition, consider expanding if a solution cannot be found within \code{maxIter}.}
\item{max.x}{right-side boundary condition, consider expanding if a solution cannot be found within \code{maxIter}.}
\item{maxIter}{maximum number of iterations to attempt before giving up and returning a regularly-spaced sequence}
\item{trace}{print diagnostics, result is a \code{list} vs \code{vector}}
\item{tiny}{the smallest allowable overlap}
\item{T0}{starting temperature}
\item{k}{cooling constant}
\item{\dots}{not used, absorbs additional arguments to \code{\link[=fixOverlap]{fixOverlap()}}}
}
\value{
When \code{trace = FALSE}, a vector of the same length as \code{x}, preserving rank-ordering and boundary conditions. When \code{trace = TRUE} a list containing the new sequence along with information about objective functions and decisions made during iteration.
}
\description{
This function makes small adjustments to elements of \code{x} until overlap defined by \code{thresh} is removed, or until \code{maxIter} is reached. Rank order and boundary conditions (defined by \code{min.x} and \code{max.x}) are preserved. The underlying algorithm is based on simulated annealing. The "cooling schedule" parameters \code{T0} and \code{k} can be used to tune the algorithm for specific applications.
}
\details{
Ideas for solving difficult overlap scenarios:
\itemize{
\item widen the boundary conditions by adjusting \code{min.x} and \code{max.x} beyond the original scale of \code{x}
\item reduce the allowable overlap threshold \code{thresh}
\item reduce the magnitude of perturbations (\code{adj}) and increase \code{maxIter}
\item increase \code{k}
}
}
\examples{
x <- c(1, 2, 3, 3.4, 3.5, 5, 6, 10)
# easy
z <- fixOverlap(x, thresh = 0.2, trace = TRUE)
# harder
z <- fixOverlap(x, thresh = 0.6, trace = TRUE)
# much harder
z <- fixOverlap(x, thresh = 0.9, trace = TRUE)
# interpret `trace` output
# relatively challenging
x <- c(1, 2, 3.4, 3.4, 3.4, 3.4, 6, 8, 10, 12, 13, 13, 15, 15.5)
# fix overlap, return debugging information
set.seed(10101)
z <- fixOverlap(x, thresh = 0.8, trace = TRUE)
# setup plot device
par(mar = c(4, 4, 1, 1))
layout(matrix(c(1,2,3)), widths = 1, heights = c(1,1,2))
# objective function = overlap + SSD
plot(
seq_along(z$stats), z$stats,
type = 'h', las = 1,
xlab = 'Iteration', ylab = 'Overlap',
cex.axis = 0.8
)
# SSD: deviation from original configuration
plot(
seq_along(z$ssd), z$ssd,
type = 'h', las = 1,
xlab = 'Iteration', ylab = 'Deviation',
cex.axis = 0.8
)
# adjustments at each iteration
matplot(
z$states, type = 'l',
lty = 1, las = 1,
xlab = 'Iteration', ylab = 'x-position'
)
# trace log
# B: boundary condition violation
# O: rank (order) violation
# +: accepted perturbation
# -: rejected perturbation
table(z$log)
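
# Illustrative only: how the cooling constants `T0` and `k` shape a typical
# logarithmic annealing temperature schedule (the exact schedule used
# internally by SANN_1D may differ).
T0 <- 500
k <- 10
i <- 1:1000
Temp <- T0 / log(1 + k * i)
plot(i, Temp, type = 'l', las = 1,
     xlab = 'Iteration', ylab = 'Temperature (illustrative)')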
}
\seealso{
\code{\link[=electroStatics_1D]{electroStatics_1D()}}, \code{\link[=fixOverlap]{fixOverlap()}}
}
\author{
D.E. Beaudette and K.C. Thompson
}
|
/man/SANN_1D.Rd
|
no_license
|
ncss-tech/aqp
|
R
| false | true | 3,916 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ci.R
\name{ci}
\alias{ci}
\alias{ci_get_branch}
\alias{ci_is_tag}
\alias{ci_get_slug}
\alias{ci_get_build_number}
\alias{ci_get_build_url}
\alias{ci_get_commit}
\alias{ci_get_env}
\alias{ci_is_env}
\alias{ci_has_env}
\alias{ci_can_push}
\alias{ci_is_interactive}
\alias{ci_cat_with_color}
\alias{ci_on_circle}
\alias{ci_on_ghactions}
\title{The current CI environment}
\usage{
ci_get_branch()
ci_is_tag()
ci_get_slug()
ci_get_build_number()
ci_get_build_url()
ci_get_commit()
ci_get_env(env)
ci_is_env(env, value)
ci_has_env(env)
ci_can_push(private_key_name = "TIC_DEPLOY_KEY")
ci_is_interactive()
ci_cat_with_color(code)
ci_on_circle()
ci_on_ghactions()
ci()
}
\arguments{
\item{env}{Name of the environment variable to check.}
\item{value}{Value for the environment variable to compare against.}
\item{private_key_name}{\code{string}\cr
Only needed when deploying from builds on GitHub Actions.
If you have set a custom name for the private key during creation of the
SSH key pair via \code{\link[=use_ghactions_deploy]{use_ghactions_deploy()}} or \code{\link[=use_tic]{use_tic()}}, pass this
name here.}
\item{code}{Code that should be colored.}
}
\description{
Functions that return environment settings that describe the CI
environment. The value is retrieved only once and then cached.
\code{ci_get_branch()}: Returns the current branch. Returns nothing if operating
on a tag.
\code{ci_is_tag()}: Returns the current tag name. Returns nothing if a branch is
selected.
\code{ci_get_slug()}: Returns the repo slug in the format \code{user/repo} or
\code{org/repo}
\code{ci_get_build_number()}: Returns the CI build number.
\code{ci_get_build_url()}: Returns the URL of the current build.
\code{ci_get_commit()}: Returns the SHA1 of the current commit.
\code{ci_get_env()}: Return an environment or configuration variable.
\code{ci_is_env()}: Checks if an environment or configuration variable is set to a
particular value.
\code{ci_has_env()}: Checks if an environment or configuration variable is set to
any value.
\code{ci_can_push()}: Checks if push deployment is possible. Always true
for local environments, CI environments require an environment
variable (by default \code{TIC_DEPLOY_KEY}).
\code{ci_is_interactive()}: Returns whether the current build is run interactively
or not. Global setup operations shouldn't be run on interactive CIs.
\code{ci_cat_with_color()}: Colored output targeted to the CI log.
The code argument can be an unevaluated call to a crayon function, the
style will be applied even if it normally wouldn't be.
\code{ci_on_circle()}: Are we running on Circle CI?
\code{ci_on_ghactions()}: Are we running on GitHub Actions?
\code{ci()}: Return the current CI environment
}
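\examples{
# A minimal, purely illustrative sketch of combining these helpers inside a
# tic configuration; the actual deployment step is omitted.
\dontrun{
if (ci_on_ghactions() && ci_has_env("TIC_DEPLOY_KEY")) {
  message("Deploying from branch: ", ci_get_branch())
  message("Build URL: ", ci_get_build_url())
}
}
}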
|
/man/ci.Rd
|
no_license
|
ropensci/tic
|
R
| false | true | 2,787 |
rd
|
# Jake Yeung
# Date of Creation: 2020-10-24
# File: ~/projects/scchic/scripts/rstudioserver_analysis/k4me1_k9me3/4-match_UMAPs_K4me1_and_K9me3.R
#
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(scchicFuncs)
library(hash)
library(igraph)
library(umap)
library(topicmodels)
# Paths -------------------------------------------------------------------
remove.na <- TRUE
joutsuffix <- "UnionRows_KeepAllCells_FewerRepressedClusters"
outpdf <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/pdfs_all/H3K4me1_H3K9me3_scChIX_output/check_louvs_umaps.", joutsuffix, ".pdf")
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#851663", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf", "#28f9ff", "#88497e", "#bcf5c3", "#86f115", "#c3c89d", "#ff010b", "#664754", "#2af022", "#3afde0", "#b9b2a8", "#f6af7c", "#c3f582", "#3b3a9e", "#71a1ee", "#df5ba4", "#3a592e", "#010233", "#686cc2", "#9b114d", "#e6e6ba", "#b9f6c5")
hubprefix <- "/home/jyeung/hub_oudenaarden"
projmain <- file.path(hubprefix, paste0("jyeung/data/scChiC/raw_data_spikeins/H3K4me1_H3K9me3_analysis/projects_after_unmixing.H3K4me1xH3K9me3/SetupObjs_AllMerged_", joutsuffix))
inf.dbl <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/ldaAnalysisBins_mouse_spikein_BMround2all.dbl_common_rows/lda_outputs.count_mat.H3K4me1xH3K9me3.match_dbl.K-30.binarize.FALSE/ldaOut.count_mat.H3K4me1xH3K9me3.match_dbl.K-30.Robj"
assertthat::assert_that(file.exists(inf.dbl))
# inf.input <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_data_spikeins/H3K4me1_H3K9me3_analysis/double_staining_input/mouse_spikein_BMround2all.dbl_common_rows_match_dbl/mouse_spikein_BMround2all.dbl_common_rows_match_dbl_clstr_by_louvain_H3K4me1xH3K9me3.removeNA_TRUE.RData"
inf.input <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_data_spikeins/H3K4me1_H3K9me3_analysis/double_staining_input/SetupObjs_AllMerged_", joutsuffix, "/SetupObjs_AllMerged_", joutsuffix, ".clstr_by_louvain_H3K4me1xH3K9me3.removeNA_", remove.na, ".RData")
# inf.output <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_data_spikeins/H3K4me1_H3K9me3_analysis/double_staining_output/mouse_spikein_BMround2all.dbl_common_rows_match_dbl/unmix_mouse_spikein_BMround2all.dbl_common_rows_match_dbl_clstr_by_louvain_H3K4me1xH3K9me3.removeNA_TRUE.RData"
inf.output <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_data_spikeins/H3K4me1_H3K9me3_analysis/double_staining_output/SetupObjs_AllMerged_", joutsuffix, "/unmix_SetupObjs_AllMerged_", joutsuffix, ".clstr_by_louvain_H3K4me1xH3K9me3.removeNA_", remove.na, ".RData")
assertthat::assert_that(file.exists(inf.input))
assertthat::assert_that(file.exists(inf.output))
inf.annot <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_data_spikeins/H3K4me1_H3K9me3_analysis/double_staining_input/SetupObjs_AllMerged_", joutsuffix, "/SetupObjs_AllMerged_", joutsuffix, ".clstr_by_louvain_H3K4me1xH3K9me3.removeNA_", remove.na, ".RData")
# indir.annot <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/H3K4me1_H3K9me3_analyses/cluster_tables.withdbl"
# assertthat::assert_that(dir.exists(inf.annot))
assertthat::assert_that(file.exists(inf.annot))
# inf.annot.k9me3 <- file.path(indir.annot, "cluster_tables_H3K9me3_BM_all_round2.txt")
# inf.annot.k4me1 <- file.path(indir.annot, "cluster_tables_H3K4me1_BM_all_round2.txt")
load(inf.annot, v=T)
dat.annot.k4me1 <- dat.louv$H3K4me1
dat.annot.k9me3 <- dat.louv$H3K9me3
# Load LDAs projections ---------------------------------------------------
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
jmarks <- c("H3K4me1", "H3K9me3")
names(jmarks) <- jmarks
# jmark <- "H3K4me1"
dat.umaps.lst <- lapply(jmarks, function(jmark){
inf.projs <- file.path(projmain, paste0("project_unmixed_", jmark, ".RData"))
load(inf.projs, v=T)
tm.result <- posterior(out.objs$out.lda)
tm.result <- AddTopicToTmResult(tm.result)
dat.umap <- DoUmapAndLouvain(tm.result$topics, jsettings = jsettings) %>%
rowwise() %>%
mutate(stain = "single")
tm.result.dbl <- AddTopicToTmResult(out.lda.predict)
dat.umap.dbl <- DoUmapAndLouvain(tm.result.dbl$topics, jsettings = jsettings) %>%
rowwise() %>%
mutate(stain = "dbl")
# Merge on one umap -------------------------------------------------------
umap.out.orig <- umap(tm.result$topics, config = jsettings)
umap.out.pred <- predict(umap.out.orig, data = tm.result.dbl$topics)
dat.umap.out.orig <- data.frame(cell = rownames(umap.out.orig$layout), umap1 = umap.out.orig$layout[, 1], umap2 = umap.out.orig$layout[, 2], stain = "single", stringsAsFactors = FALSE)
dat.umap.out.pred <- data.frame(cell = rownames(umap.out.pred), umap1 = umap.out.pred[, 1], umap2 = umap.out.pred[, 2], stain = "dbl", stringsAsFactors = FALSE)
dat.umap.out.merge <- bind_rows(dat.umap.out.orig, dat.umap.out.pred)
dat.umap.out.merge$mark <- jmark
return(dat.umap.out.merge)
})
for (jmark in jmarks){
m <- ggplot(dat.umaps.lst[[jmark]], aes(x = umap1, y = umap2, color = stain)) +
ggtitle(jmark) +
geom_point() +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
facet_wrap(~stain)
print(m)
}
# Load UMAP from double stain ---------------------------------------------
load(inf.dbl, v=T)
tm.result.dbl <- posterior(out.lda)
tm.result.dbl <- AddTopicToTmResult(tm.result.dbl)
dat.umap.dbl <- DoUmapAndLouvain(topics.mat = tm.result.dbl$topics, jsettings = jsettings)
# Get assignments by clusters ---------------------------------------------
# inf.input <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/dblchic/double_staining_input/", jprefix.input, "/", jprefix, "_", jmarks.dbl, ".removeNA_TRUE.RData")
# inf.output <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/dblchic/double_staining_output/", jprefix.output, "/unmix_", jprefix, "_", jmarks.dbl, ".removeNA_TRUE.RData")
load(inf.input, v=T)
load(inf.output, v=T)
# Make grid ---------------------------------------------------------------
fits.out <- act.repress.coord.lst
w.lst <- sapply(fits.out, function(x) x$w)
# remove 0.01 or 0.99
cells.remove.i <- which(w.lst >= 0.99 | w.lst <= 0.01)
if (length(cells.remove.i) > 0){
cells.remove <- names(w.lst)[cells.remove.i]
fits.out[cells.remove] <- NULL  # single-bracket assignment handles one or more cells
}
# if louvains are now from clusters, need to rethink jcoord
cell.vec <- names(fits.out)
names(cell.vec) <- cell.vec
coords.dbl <- lapply(cell.vec, function(jcell){
jfit <- fits.out[[jcell]]
jweight <- fits.out[[jcell]]$w
p.mat <- SoftMax(jfit$ll.mat)
jcoord <- which(jfit$ll.mat == max(jfit$ll.mat), arr.ind = TRUE)
jmax <- max(p.mat)
# rows are active, columns are repress I THINK?
# TODO: assumes underscores be careful!
jlouv.act <- rownames(p.mat)[[jcoord[[1]]]]
jlouv.repress <- colnames(p.mat)[[jcoord[[2]]]]
if (grepl("_", jlouv.act)){
jlouv.act <- strsplit(jlouv.act, split = "_")[[1]][[2]]
}
if (grepl("_", jlouv.repress)){
jlouv.repress <- strsplit(jlouv.repress, split = "_")[[1]][[2]]
}
out.dat <- data.frame(cell = jcell, louv.act = jlouv.act, louv.repress = jlouv.repress, lnprob = jmax, w = jweight, stringsAsFactors = FALSE)
return(out.dat)
}) %>%
bind_rows()
# coords.dbl.annots <- left_join(coords.dbl, annots.dat)
coords.dbl.annots <- coords.dbl
dat.umap.dbl.merge <- left_join(dat.umap.dbl, coords.dbl.annots) %>%
rowwise() %>%
mutate(plate = ClipLast(cell, jsep = "_"))
m.check.repress <- ggplot(dat.umap.dbl.merge, aes(x = umap1, y = umap2, color = louv.repress)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values = cbPalette, na.value = "grey85") + facet_wrap(~plate)
m.check.act <- ggplot(dat.umap.dbl.merge, aes(x = umap1, y = umap2, color = louv.act)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values = cbPalette, na.value = "grey85") + facet_wrap(~plate)
m.check.w <- ggplot(dat.umap.dbl.merge, aes(x = umap1, y = umap2, color = w)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c() +
facet_wrap(~plate)
print(m.check.repress)
print(m.check.act)
print(m.check.w)
# coords.dbl.annots$louv.act <- factor(coords.dbl.annots$louv.act, levels = louv.act.ordering)
# coords.dbl.annots$louv.repress <- factor(coords.dbl.annots$louv.repress, levels = louv.repress.ordering)
library(ggforce)
m.grid <- ggplot(coords.dbl.annots, aes(x = louv.act, y = louv.repress, colour = louv.act)) +
geom_point(alpha = 0.25, position = position_jitternormal(sd_x = 0.08, sd_y = 0.08)) +
theme_bw() +
theme(aspect.ratio=0.6) +
scale_color_manual(values = cbPalette) + xlab(paste0(jmarks[[1]], " Clusters (arbitrary names)")) + ylab(paste0(jmarks[[2]], " Clusters (arbitrary names)")) +
ggtitle("Each dot is a doubel stained cell,\nX-Y shows the cluster pair it is assigned")
print(m.grid)
m.grid.w <- ggplot(coords.dbl.annots, aes(x = louv.act, y = louv.repress, col = w)) +
geom_point(alpha = 0.25, position = position_jitternormal(sd_x = 0.08, sd_y = 0.08)) +
theme_bw() +
scale_color_viridis_c() +
theme(aspect.ratio=0.6) +
xlab(paste0(jmarks[[1]], " Clusters (arbitrary names)")) + ylab(paste0(jmarks[[2]], " Clusters (arbitrary names)")) +
ggtitle("Each dot is a doubel stained cell,\nX-Y shows the cluster pair it is assigned")
print(m.grid.w)
# Rename -----------------------------------------------------------------
# dat.annot.k9me3 <- fread(inf.annot.k9me3) %>%
# mutate(louvain = paste("louvain", louvain, sep = ""))
m.k9me3 <- ggplot(dat.annot.k9me3, aes(x = umap1, y = umap2, color = cluster)) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
# dat.annot.k4me1 <- fread(inf.annot.k4me1) %>%
# mutate(louvain = paste("louvain", louvain, sep = ""))
m.k4me1 <- ggplot(dat.annot.k4me1, aes(x = umap1, y = umap2, color = cluster)) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
print(m.k4me1)
print(m.k9me3)
JFuncs::multiplot(m.grid, m.k9me3, cols = 2)
JFuncs::multiplot(m.grid, m.k4me1, cols = 2)
# Plot UMAP H3K4me1 single and double -------------------------------------
jmark <- "H3K4me1"
m <- ggplot(dat.umaps.lst[[jmark]], aes(x = umap1, y = umap2, color = stain)) +
ggtitle(jmark) +
geom_point() +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~stain)
print(m)
# label from dat.annot
dat.umap.k4me1 <- left_join(dat.umaps.lst[["H3K4me1"]], subset(dat.annot.k4me1, select = c(cell, cluster))) %>%
left_join(., coords.dbl.annots) %>%
rowwise() %>%
mutate(cluster = ifelse(is.na(cluster), louv.act, cluster))
dat.umap.k9me3 <- left_join(dat.umaps.lst[["H3K9me3"]], subset(dat.annot.k9me3, select = c(cell, cluster))) %>%
left_join(., coords.dbl.annots) %>%
rowwise()
m <- ggplot(dat.umap.k4me1, aes(x = umap1, y = umap2, color = cluster)) +
ggtitle("H3K4me1") +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~stain)
print(m)
m <- ggplot(dat.umap.k4me1, aes(x = umap1, y = umap2, color = louv.act)) +
ggtitle(jmark) +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~stain)
print(m)
m <- ggplot(dat.umap.k9me3, aes(x = umap1, y = umap2, color = cluster)) +
ggtitle("H3K9me3") +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~stain)
print(m)
m <- ggplot(dat.umap.k9me3, aes(x = umap1, y = umap2, color = louv.act)) +
ggtitle("H3K9me3") +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~stain)
print(m)
# Check if there are double cells that are bad ----------------------------
print(m.check.act)
print(m.check.repress)
JFuncs::multiplot(m.grid, m.k4me1, cols = 2)
JFuncs::multiplot(m.grid, m.k9me3, cols = 2)
# Connect the UMAPs ------------------------------------------------------
# shift k4me1 left, shift k9me3 right
dat.umap.k4me1.shift <- dat.umap.k4me1 %>%
ungroup() %>%
mutate(umap1 = unlist(scale(umap1, center = TRUE, scale = TRUE)),
umap1.shift = umap1 - 2)
dat.umap.k9me3.shift <- dat.umap.k9me3 %>%
ungroup() %>%
mutate(umap1 = unlist(scale(umap1, center = TRUE, scale = TRUE)),
umap1.shift = umap1 + 2)
dat.umap.merged <- rbind(dat.umap.k4me1.shift, dat.umap.k9me3.shift)
# put everything in one UMAP
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf", "#28f9ff", "#88497e", "#bcf5c3", "#86f115", "#c3c89d", "#ff010b", "#664754", "#2af022", "#3afde0", "#b9b2a8", "#f6af7c", "#c3f582", "#3b3a9e", "#71a1ee", "#df5ba4", "#3a592e", "#010233", "#686cc2", "#9b114d", "#e6e6ba", "#b9f6c5")
# check louvs active
jlouvs <- unique(dat.umap.merged$louv.act)
jlouvs.act <- jlouvs[!is.na(jlouvs)]
jlouvs <- unique(dat.umap.merged$louv.repress)
jlouvs.repress <- jlouvs[!is.na(jlouvs)]
pdf(file = outpdf, useDingbats = FALSE)
# do acts
m.k4me1.act <- ggplot(dat.annot.k4me1, aes(x = umap1, y = umap2, color = cluster)) +
scale_color_manual(values = cbPalette) +
ggtitle("H3K4me1 clusters") +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
print(m.k4me1.act)
m.k9me3.rep <- ggplot(dat.annot.k9me3, aes(x = umap1, y = umap2, color = cluster)) +
scale_color_manual(values = cbPalette) +
ggtitle("H3K9me3 clusters") +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
print(m.k9me3.rep)
for (jlouv in jlouvs.act){
m <- ggplot(dat.umap.merged %>% filter(stain == "dbl") %>% mutate(louv.act = louv.act == jlouv), aes(x = umap1.shift, y = umap2, group = cell, color = louv.act)) +
geom_line(alpha = 0.01) +
geom_point() +
geom_vline(xintercept = 0, linetype = "dotted", alpha = 0.25, size = 1) +
scale_color_manual(values = cbPalette) +
ggtitle(paste("Active:", jlouv)) +
theme_bw() + theme(aspect.ratio=0.75, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(m)
}
m.k9me3.rep <- ggplot(dat.annot.k9me3, aes(x = umap1, y = umap2, color = cluster)) +
scale_color_manual(values = cbPalette) +
ggtitle("H3K9me3 Repress clusters") +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
print(m.k9me3.rep)
for (jlouv in jlouvs.repress){
m <- ggplot(dat.umap.merged %>% filter(stain == "dbl") %>% mutate(louv.repress = louv.repress == jlouv), aes(x = umap1.shift, y = umap2, group = cell, color = louv.repress)) +
geom_line(alpha = 0.01) +
geom_point() +
geom_vline(xintercept = 0, linetype = "dotted", alpha = 0.25, size = 1) +
scale_color_manual(values = cbPalette) +
ggtitle(paste("Repres:", jlouv)) +
theme_bw() + theme(aspect.ratio=0.75, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(m)
}
dev.off()
# Filter out bad cells ? -------------------------------------------------
|
/scripts/rstudioserver_analysis/k4me1_k9me3/4-match_UMAPs_K4me1_and_K9me3.KeepAllCellFewerRepressed.R
|
no_license
|
jakeyeung/sortchicAllScripts
|
R
| false | false | 16,681 |
r
|
ComposicaoIDM <- function(Eixo){
Municipio <- IDM %>%
filter(!is.na(Valor) & Localidade == LocRef$Localidade & IDM == Eixo)
# Number of evaluation axes in each year
eixos <- Municipio %>%
group_by(Ano) %>%
count()
Municipio %>%
select(Ano, Variável, Valor) %>%
merge(eixos) %>%
ggplot(aes(x = Ano, y = Valor/n)) +
geom_bar(aes(fill = Variável), stat="identity") +
scale_fill_manual(values = mypallete) +
theme_bw() +
theme(strip.text = element_text(size = 6),
legend.title = element_blank(),
legend.text = element_text(size=6),
aspect.ratio = 1,
legend.direction = "vertical") +
labs(y = paste("IDM",Eixo), x = NULL,
caption = "Fonte: Elaborado pelo núcleo de base do OMT/GYN
a partir de dados do BDE/IMB, com acesso em 19/03/2020.")
}
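
# Usage sketch (kept as a comment so sourcing this file has no side effects):
# assumes the data frame `IDM`, the reference `LocRef` and the palette
# `mypallete` already exist in the workspace; "Economia" is a hypothetical
# axis (Eixo) name.
# ComposicaoIDM("Economia")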
|
/Functions/ComposicaoIDM.R
|
no_license
|
supervedovatto/AnexoA
|
R
| false | false | 871 |
r
|
library(extRemes)
### Name: strip
### Title: Strip Fitted EVD Object of Everything but the Parameter
### Estimates
### Aliases: strip strip.fevd
### Keywords: manip attribute
### ** Examples
z <- revd(100, loc=20, scale=0.5, shape=-0.2)
fit <- fevd(z)
fit
strip( fit )
strip( fit, use.names = FALSE )
# Compare with ...
distill( fit )
distill( fit, cov = FALSE )
## Not run:
##D data( "Fort" )
##D fit <- fevd(Prec, Fort, threshold=0.395,
##D scale.fun=~sin(2 * pi * (year - 1900)/365.25) +
##D cos(2 * pi * (year - 1900)/365.25),
##D type="PP", method="Bayesian", iter=1999, use.phi=TRUE, verbose=TRUE)
##D
##D fit
##D
##D strip( fit )
##D strip( fit, burn.in = 700 )
##D strip( fit, FUN = "postmode" )
##D
## End(Not run)
|
/data/genthat_extracted_code/extRemes/examples/strip.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 758 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 4759
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4759
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipfixpoint/nusmv.brp.B-f2.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1713
c no.of clauses 4759
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 4759
c
c QBFLIB/Biere/tipfixpoint/nusmv.brp.B-f2.qdimacs 1713 4759 E1 [] 0 58 1655 4759 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Biere/tipfixpoint/nusmv.brp.B-f2/nusmv.brp.B-f2.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 625 |
r
|
# decomposition and forecast of the filtered series
library(dplyr)
library(xts)
library(TTR)
library(forecast)
fechas <- data.frame(fechas= seq(as.Date("2016-04-01"), as.Date("2017-03-31"), by="days"))
agrupada_completa <- fechas %>% left_join(agrupada[,c(1,3)],by=c("fechas"="fecha"))
agrupada_ts <- ts(agrupada_completa[,c(2)],start = c(2016,4,1),frequency = 365)
plot(testDF.inv)
plot(agrupada_ts)
x <- xts(testDF.inv,agrupada$fecha)
plot(x)
componentes_agrupada <- as.ts(x)
plot.ts(componentes_agrupada)
plot.ts(ts(x,start=c(2016,4,1),frequency=365))
plot(componentes_agrupada)
attr(testDF.inv, 'frequency') <- 1
periodicity(x) # check periodicity: weekly
plot(decompose(x)) # Decompose after conversion to ts
xtimeseriescomponents <- decompose(as.ts(x))
xtimeseriesseasonallyadjusted <- as.ts(x) - xtimeseriescomponents$seasonal
plot(xtimeseriesseasonallyadjusted)
#forecast
cobrosseriesforecasts <- HoltWinters(x, beta=FALSE, gamma=FALSE)
cobrosseriesforecasts
cobrosseriesforecasts$fitted
cobrosseriesforecasts$SSE
plot(cobrosseriesforecasts)
library(forecast)
cobro.forecast <- forecast(cobrosseriesforecasts, h = 20)  # forecast() dispatches to the HoltWinters method
plot(cobro.forecast)
# autocorrelation of the forecast residuals
acf(cobro.forecast$residuals,na.action = na.pass,lag.max = 20)
Box.test(cobro.forecast$residuals, type="Ljung-Box")
plotForecastErrors <- function(forecasterrors) {
# make a histogram of the forecast errors:
mybinsize <- IQR(forecasterrors)/4
mysd <- sd(forecasterrors)
mymin <- min(forecasterrors) - mysd*5
mymax <- max(forecasterrors) + mysd*3
# generate normally distributed data with mean 0 and standard deviation mysd
mynorm <- rnorm(10000, mean = 0, sd = mysd)
mymin2 <- min(mynorm)
mymax2 <- max(mynorm)
if (mymin2 < mymin) { mymin <- mymin2 }
if (mymax2 > mymax) { mymax <- mymax2 }
# make a red histogram of the forecast errors, with the normally distributed data overlaid:
mybins <- seq(mymin, mymax, mybinsize)
hist(forecasterrors, col="red", freq=FALSE, breaks=mybins)
# freq=FALSE ensures the area under the histogram = 1
# bin the normally distributed data without plotting it
myhist <- hist(mynorm, plot = FALSE, breaks = mybins)
# plot the normal curve as a blue line on top of the histogram of forecast errors:
points(myhist$mids, myhist$density, type = "l", col = "blue", lwd = 2)
}
plotForecastErrors(cobro.forecast$residuals)
# check stationarity (ADF and KPSS tests)
library(tseries)
adf.test(x)
kpss.test(agrupada_ts)
#forecasting
fit.tslm <- tslm(componentes_agrupada ~ trend)
f <- forecast( fit.tslm, h=20,level=c(80,95),fan = TRUE)
plot(f, ylab="x", xlab="tiempo")
lines ( fitted ( fit.tslm),col="blue")
summary(fit.tslm)
res <- ts(resid( fit.tslm))
plot.ts(res,ylab="res (x)")
abline (0,0)
Acf(res)
library(lmtest)
print(dwtest(fit.tslm, alt="two.sided"))
par(mfrow=c(1,1))
bins <- hist(res, breaks="FD", xlab="Residuos",
main="Histograma de residuos")
xx <- seq(-0.06, 0.08, length.out = 200)
lines(xx, 1300*dnorm(xx,0,sd(res)),col=2)
|
/forecasting.R
|
no_license
|
pmtempone/series_temporales
|
R
| false | false | 3,032 |
r
|
#' Continuous colour bar guide
#'
#' Colour bar guide shows continuous colour scales mapped onto values.
#' Colour bar is available with `scale_fill` and `scale_colour`.
#' For more information, see the inspiration for this function:
#' \href{http://www.mathworks.com/help/techdoc/ref/colorbar.html}{Matlab's colorbar function}.
#'
#' Guides can be specified in each `scale_*` or in [guides()].
#' `guide="legend"` in `scale_*` is syntactic sugar for
#' `guide=guide_legend()` (e.g. `scale_colour_manual(guide = "legend")`).
#' As for how to specify the guide for each scale in more detail,
#' see [guides()].
#'
#' @inheritParams guide_legend
#' @param barwidth A numeric or a [grid::unit()] object specifying
#' the width of the colourbar. Default value is `legend.key.width` or
#' `legend.key.size` in [theme()] or theme.
#' @param barheight A numeric or a [grid::unit()] object specifying
#' the height of the colourbar. Default value is `legend.key.height` or
#' `legend.key.size` in [theme()] or theme.
#' @param frame.colour A string specifying the colour of the frame
#' drawn around the bar. If `NULL` (the default), no frame is drawn.
#' @param frame.linewidth A numeric specifying the width of the frame
#' drawn around the bar.
#' @param frame.linetype A numeric specifying the linetype of the frame
#' drawn around the bar.
#' @param nbin A numeric specifying the number of bins for drawing the
#' colourbar. A smoother colourbar results from a larger value.
#' @param raster A logical. If `TRUE` then the colourbar is rendered as a
#' raster object. If `FALSE` then the colourbar is rendered as a set of
#' rectangles. Note that not all graphics devices are capable of rendering
#' raster image.
#' @param ticks A logical specifying if tick marks on the colourbar should be
#' visible.
#' @param ticks.colour A string specifying the colour of the tick marks.
#' @param ticks.linewidth A numeric specifying the width of the tick marks.
#' @param draw.ulim A logical specifying if the upper limit tick marks should
#' be visible.
#' @param draw.llim A logical specifying if the lower limit tick marks should
#' be visible.
#' @param direction A character string indicating the direction of the guide.
#' One of "horizontal" or "vertical."
#' @param default.unit A character string indicating [grid::unit()]
#' for `barwidth` and `barheight`.
#' @param reverse logical. If `TRUE` the colourbar is reversed. By default,
#' the highest value is on the top and the lowest value is on the bottom
#' @param available_aes A vector of character strings listing the aesthetics
#' for which a colourbar can be drawn.
#' @param ... ignored.
#' @return A guide object
#' @export
#' @family guides
#' @examples
#' df <- expand.grid(X1 = 1:10, X2 = 1:10)
#' df$value <- df$X1 * df$X2
#'
#' p1 <- ggplot(df, aes(X1, X2)) + geom_tile(aes(fill = value))
#' p2 <- p1 + geom_point(aes(size = value))
#'
#' # Basic form
#' p1 + scale_fill_continuous(guide = "colourbar")
#' p1 + scale_fill_continuous(guide = guide_colourbar())
#' p1 + guides(fill = guide_colourbar())
#'
#' # Control styles
#'
#' # bar size
#' p1 + guides(fill = guide_colourbar(barwidth = 0.5, barheight = 10))
#'
#' # no label
#' p1 + guides(fill = guide_colourbar(label = FALSE))
#'
#' # no tick marks
#' p1 + guides(fill = guide_colourbar(ticks = FALSE))
#'
#' # label position
#' p1 + guides(fill = guide_colourbar(label.position = "left"))
#'
#' # label theme
#' p1 + guides(fill = guide_colourbar(label.theme = element_text(colour = "blue", angle = 0)))
#'
#' # small number of bins
#' p1 + guides(fill = guide_colourbar(nbin = 3))
#'
#' # large number of bins
#' p1 + guides(fill = guide_colourbar(nbin = 100))
#'
#' # make top- and bottom-most ticks invisible
#' p1 +
#' scale_fill_continuous(
#' limits = c(0,20), breaks = c(0, 5, 10, 15, 20),
#' guide = guide_colourbar(nbin = 100, draw.ulim = FALSE, draw.llim = FALSE)
#' )
#'
#' # guides can be controlled independently
#' p2 +
#' scale_fill_continuous(guide = "colourbar") +
#' scale_size(guide = "legend")
#' p2 + guides(fill = "colourbar", size = "legend")
#'
#' p2 +
#' scale_fill_continuous(guide = guide_colourbar(direction = "horizontal")) +
#' scale_size(guide = guide_legend(direction = "vertical"))
guide_colourbar <- function(
# title
title = waiver(),
title.position = NULL,
title.theme = NULL,
title.hjust = NULL,
title.vjust = NULL,
# label
label = TRUE,
label.position = NULL,
label.theme = NULL,
label.hjust = NULL,
label.vjust = NULL,
# bar
barwidth = NULL,
barheight = NULL,
nbin = 300,
raster = TRUE,
# frame
frame.colour = NULL,
frame.linewidth = 0.5,
frame.linetype = 1,
# ticks
ticks = TRUE,
ticks.colour = "white",
ticks.linewidth = 0.5,
draw.ulim= TRUE,
draw.llim = TRUE,
# general
direction = NULL,
default.unit = "line",
reverse = FALSE,
order = 0,
available_aes = c("colour", "color", "fill"),
...) {
if (!is.null(barwidth) && !is.unit(barwidth)) barwidth <- unit(barwidth, default.unit)
if (!is.null(barheight) && !is.unit(barheight)) barheight <- unit(barheight, default.unit)
structure(list(
# title
title = title,
title.position = title.position,
title.theme = title.theme,
title.hjust = title.hjust,
title.vjust = title.vjust,
# label
label = label,
label.position = label.position,
label.theme = label.theme,
label.hjust = label.hjust,
label.vjust = label.vjust,
# bar
barwidth = barwidth,
barheight = barheight,
nbin = nbin,
raster = raster,
# frame
frame.colour = frame.colour,
frame.linewidth = frame.linewidth,
frame.linetype = frame.linetype,
# ticks
ticks = ticks,
ticks.colour = ticks.colour,
ticks.linewidth = ticks.linewidth,
draw.ulim = draw.ulim,
draw.llim = draw.llim,
# general
direction = direction,
default.unit = default.unit,
reverse = reverse,
order = order,
# parameter
available_aes = available_aes,
...,
name = "colorbar"),
class = c("guide", "colorbar")
)
}
#' @export
guide_train.colorbar <- function(guide, scale, aesthetic = NULL) {
  # do nothing if the scale is inappropriate
if (length(intersect(scale$aesthetics, guide$available_aes)) == 0) {
warn(glue(
"colourbar guide needs appropriate scales: ",
glue_collapse(guide$available_aes, ", ", last = " or ")
))
return(NULL)
}
if (scale$is_discrete()) {
warn("colourbar guide needs continuous scales.")
return(NULL)
}
# create data frame for tick display
breaks <- scale$get_breaks()
if (length(breaks) == 0 || all(is.na(breaks)))
return()
ticks <- new_data_frame(setNames(list(scale$map(breaks)), aesthetic %||% scale$aesthetics[1]))
ticks$.value <- breaks
ticks$.label <- scale$get_labels(breaks)
guide$key <- ticks
# bar specification (number of divs etc)
.limits <- scale$get_limits()
.bar <- seq(.limits[1], .limits[2], length.out = guide$nbin)
if (length(.bar) == 0) {
.bar = unique(.limits)
}
guide$bar <- new_data_frame(list(colour = scale$map(.bar), value = .bar), n = length(.bar))
if (guide$reverse) {
guide$key <- guide$key[nrow(guide$key):1, ]
guide$bar <- guide$bar[nrow(guide$bar):1, ]
}
guide$hash <- with(guide, digest::digest(list(title, key$.label, bar, name)))
guide
}
# simply discards the new guide
#' @export
guide_merge.colorbar <- function(guide, new_guide) {
guide
}
# this guide is not geom-based.
#' @export
guide_geom.colorbar <- function(guide, layers, default_mapping) {
# Layers that use this guide
guide_layers <- lapply(layers, function(layer) {
matched <- matched_aes(layer, guide)
if (length(matched) == 0) {
# This layer does not use this guide
return(NULL)
}
# check if this layer should be included
if (include_layer_in_guide(layer, matched)) {
layer
} else {
NULL
}
})
# Remove this guide if no layer uses it
if (length(compact(guide_layers)) == 0) guide <- NULL
guide
}
#' @export
guide_gengrob.colorbar <- function(guide, theme) {
# settings of location and size
if (guide$direction == "horizontal") {
label.position <- guide$label.position %||% "bottom"
if (!label.position %in% c("top", "bottom")) {
abort(glue("label position '{label.position}' is invalid"))
}
barwidth <- width_cm(guide$barwidth %||% (theme$legend.key.width * 5))
barheight <- height_cm(guide$barheight %||% theme$legend.key.height)
} else { # guide$direction == "vertical"
label.position <- guide$label.position %||% "right"
if (!label.position %in% c("left", "right")) {
abort(glue("label position '{label.position}' is invalid"))
}
barwidth <- width_cm(guide$barwidth %||% theme$legend.key.width)
barheight <- height_cm(guide$barheight %||% (theme$legend.key.height * 5))
}
barlength <- switch(guide$direction, "horizontal" = barwidth, "vertical" = barheight)
nbreak <- nrow(guide$key)
# make the bar grob (`grob.bar`)
if (guide$raster) {
image <- switch(guide$direction, horizontal = t(guide$bar$colour), vertical = rev(guide$bar$colour))
    grob.bar <- rasterGrob(image = image, width = barwidth, height = barheight,
                           default.units = "cm", gp = gpar(col = NA), interpolate = TRUE)
} else {
if (guide$direction == "horizontal") {
bw <- barwidth / nrow(guide$bar)
bx <- (seq(nrow(guide$bar)) - 1) * bw
      grob.bar <- rectGrob(x = bx, y = 0, vjust = 0, hjust = 0, width = bw, height = barheight, default.units = "cm",
gp = gpar(col = NA, fill = guide$bar$colour))
} else { # guide$direction == "vertical"
bh <- barheight / nrow(guide$bar)
by <- (seq(nrow(guide$bar)) - 1) * bh
      grob.bar <- rectGrob(x = 0, y = by, vjust = 0, hjust = 0, width = barwidth, height = bh, default.units = "cm",
gp = gpar(col = NA, fill = guide$bar$colour))
}
}
# make frame around color bar if requested (colour is not NULL)
if (!is.null(guide$frame.colour)) {
grob.bar <- grobTree(
grob.bar,
rectGrob(
width = barwidth,
height = barheight,
default.units = "cm",
gp = gpar(
col = guide$frame.colour,
lwd = guide$frame.linewidth,
lty = guide$frame.linetype,
fill = NA)
)
)
}
# tick and label position
tick_pos <- rescale(guide$key$.value, c(0.5, guide$nbin - 0.5), guide$bar$value[c(1, nrow(guide$bar))]) * barlength / guide$nbin
label_pos <- unit(tick_pos, "cm")
if (!guide$draw.ulim) tick_pos <- tick_pos[-1]
if (!guide$draw.llim) tick_pos <- tick_pos[-length(tick_pos)]
# title
# obtain the theme for the legend title. We need this both for the title grob
# and to obtain the title fontsize.
title.theme <- guide$title.theme %||% calc_element("legend.title", theme)
title.hjust <- guide$title.hjust %||% theme$legend.title.align %||% title.theme$hjust %||% 0
title.vjust <- guide$title.vjust %||% title.theme$vjust %||% 0.5
grob.title <- ggname("guide.title",
element_grob(
title.theme,
label = guide$title,
hjust = title.hjust,
vjust = title.vjust,
margin_x = TRUE,
margin_y = TRUE
)
)
title_width <- width_cm(grob.title)
title_height <- height_cm(grob.title)
title_fontsize <- title.theme$size %||% calc_element("legend.title", theme)$size %||%
calc_element("text", theme)$size %||% 11
# gap between keys etc
# the default horizontal and vertical gap need to be the same to avoid strange
# effects for certain guide layouts
hgap <- width_cm(theme$legend.spacing.x %||% (0.5 * unit(title_fontsize, "pt")))
vgap <- height_cm(theme$legend.spacing.y %||% (0.5 * unit(title_fontsize, "pt")))
# Labels
# get the defaults for label justification. The defaults are complicated and depend
# on the direction of the legend and on label placement
just_defaults <- label_just_defaults.colorbar(guide$direction, label.position)
# don't set expressions left-justified
if (just_defaults$hjust == 0 && any(is.expression(guide$key$.label))) just_defaults$hjust <- 1
# get the label theme
label.theme <- guide$label.theme %||% calc_element("legend.text", theme)
# We break inheritance for hjust and vjust, because that's more intuitive here; it still allows manual
# setting of hjust and vjust if desired. The alternative is to ignore hjust and vjust altogether, which
# seems worse
if (is.null(guide$label.theme$hjust) && is.null(theme$legend.text$hjust)) label.theme$hjust <- NULL
if (is.null(guide$label.theme$vjust) && is.null(theme$legend.text$vjust)) label.theme$vjust <- NULL
# label.theme in param of guide_legend() > theme$legend.text.align > default
hjust <- guide$label.hjust %||% theme$legend.text.align %||% label.theme$hjust %||%
just_defaults$hjust
vjust <- guide$label.vjust %||% label.theme$vjust %||%
just_defaults$vjust
# make the label grob (`grob.label`)
if (!guide$label)
grob.label <- zeroGrob()
else {
if (guide$direction == "horizontal") {
x <- label_pos
y <- rep(vjust, length(label_pos))
margin_x <- FALSE
margin_y <- TRUE
} else { # guide$direction == "vertical"
x <- rep(hjust, length(label_pos))
y <- label_pos
margin_x <- TRUE
margin_y <- FALSE
}
label <- guide$key$.label
# If any of the labels are quoted language objects, convert them
# to expressions. Labels from formatter functions can return these
if (any(vapply(label, is.call, logical(1)))) {
label <- lapply(label, function(l) {
if (is.call(l)) substitute(expression(x), list(x = l))
else l
})
label <- do.call(c, label)
}
grob.label <- element_grob(
element = label.theme,
label = label,
x = x,
y = y,
hjust = hjust,
vjust = vjust,
margin_x = margin_x,
margin_y = margin_y
)
grob.label <- ggname("guide.label", grob.label)
}
label_width <- width_cm(grob.label)
label_height <- height_cm(grob.label)
# make the ticks grob (`grob.ticks`)
if (!guide$ticks)
    grob.ticks <- zeroGrob()
else {
if (guide$direction == "horizontal") {
x0 <- rep(tick_pos, 2)
y0 <- c(rep(0, nbreak), rep(barheight * (4/5), nbreak))
x1 <- rep(tick_pos, 2)
y1 <- c(rep(barheight * (1/5), nbreak), rep(barheight, nbreak))
} else { # guide$direction == "vertical"
x0 <- c(rep(0, nbreak), rep(barwidth * (4/5), nbreak))
y0 <- rep(tick_pos, 2)
x1 <- c(rep(barwidth * (1/5), nbreak), rep(barwidth, nbreak))
y1 <- rep(tick_pos, 2)
}
grob.ticks <- segmentsGrob(
x0 = x0, y0 = y0, x1 = x1, y1 = y1,
default.units = "cm",
gp = gpar(
col = guide$ticks.colour,
lwd = guide$ticks.linewidth,
lineend = "butt"
)
)
}
# layout of bar and label
if (guide$direction == "horizontal") {
if (label.position == "top") {
bl_widths <- barwidth
bl_heights <- c(label_height, vgap, barheight)
vps <- list(bar.row = 3, bar.col = 1,
label.row = 1, label.col = 1)
} else { # label.position == "bottom" or other
bl_widths <- barwidth
bl_heights <- c(barheight, vgap, label_height)
vps <- list(bar.row = 1, bar.col = 1,
label.row = 3, label.col = 1)
}
} else { # guide$direction == "vertical"
if (label.position == "left") {
bl_widths <- c(label_width, hgap, barwidth)
bl_heights <- barheight
vps <- list(bar.row = 1, bar.col = 3,
label.row = 1, label.col = 1)
} else { # label.position == "right" or other
bl_widths <- c(barwidth, hgap, label_width)
bl_heights <- barheight
vps <- list(bar.row = 1, bar.col = 1,
label.row = 1, label.col = 3)
}
}
# layout of title and bar+label
switch(guide$title.position,
"top" = {
widths <- c(bl_widths, max(0, title_width - sum(bl_widths)))
heights <- c(title_height, vgap, bl_heights)
vps <- with(vps,
list(bar.row = bar.row + 2, bar.col = bar.col,
label.row = label.row + 2, label.col = label.col,
title.row = 1, title.col = 1:length(widths)))
},
"bottom" = {
widths <- c(bl_widths, max(0, title_width - sum(bl_widths)))
heights <- c(bl_heights, vgap, title_height)
vps <- with(vps,
list(bar.row = bar.row, bar.col = bar.col,
label.row = label.row, label.col = label.col,
title.row = length(heights), title.col = 1:length(widths)))
},
"left" = {
widths <- c(title_width, hgap, bl_widths)
heights <- c(bl_heights, max(0, title_height - sum(bl_heights)))
vps <- with(vps,
list(bar.row = bar.row, bar.col = bar.col + 2,
label.row = label.row, label.col = label.col + 2,
title.row = 1:length(heights), title.col = 1))
},
"right" = {
widths <- c(bl_widths, hgap, title_width)
heights <- c(bl_heights, max(0, title_height - sum(bl_heights)))
vps <- with(vps,
list(bar.row = bar.row, bar.col = bar.col,
label.row = label.row, label.col = label.col,
title.row = 1:length(heights), title.col = length(widths)))
})
# background
grob.background <- element_render(theme, "legend.background")
# padding
padding <- convertUnit(theme$legend.margin %||% margin(), "cm", valueOnly = TRUE)
widths <- c(padding[4], widths, padding[2])
heights <- c(padding[1], heights, padding[3])
gt <- gtable(widths = unit(widths, "cm"), heights = unit(heights, "cm"))
gt <- gtable_add_grob(gt, grob.background, name = "background", clip = "off",
t = 1, r = -1, b = -1, l = 1)
gt <- gtable_add_grob(gt, grob.bar, name = "bar", clip = "off",
t = 1 + min(vps$bar.row), r = 1 + max(vps$bar.col),
b = 1 + max(vps$bar.row), l = 1 + min(vps$bar.col))
gt <- gtable_add_grob(
gt,
grob.label,
name = "label",
clip = "off",
t = 1 + min(vps$label.row), r = 1 + max(vps$label.col),
b = 1 + max(vps$label.row), l = 1 + min(vps$label.col))
gt <- gtable_add_grob(
gt,
justify_grobs(
grob.title,
hjust = title.hjust,
vjust = title.vjust,
int_angle = title.theme$angle,
debug = title.theme$debug
),
name = "title",
clip = "off",
t = 1 + min(vps$title.row), r = 1 + max(vps$title.col),
b = 1 + max(vps$title.row), l = 1 + min(vps$title.col))
gt <- gtable_add_grob(gt, grob.ticks, name = "ticks", clip = "off",
t = 1 + min(vps$bar.row), r = 1 + max(vps$bar.col),
b = 1 + max(vps$bar.row), l = 1 + min(vps$bar.col))
gt
}
#' @export
#' @rdname guide_colourbar
guide_colorbar <- guide_colourbar
| /R/guide-colorbar.r | permissive | banfai/ggplot2 | R |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statsBam.R
\name{reads_aligned}
\alias{reads_aligned}
\title{Count aligned reads in a bam file.}
\usage{
reads_aligned(bamfiles, idx, primary_align_only = F, return_tibble = T,
millions = F, ...)
}
\value{
A list of integer values corresponding to counts.
}
\description{
Convenience wrapper for counting aligned reads in one or more bam files.
}
\details{
When primary_align_only is set to FALSE and no extra arguments
are given this will use an index if one exists.
}
| /man/reads_aligned.Rd | permissive | mal2017/spaar | R |
AnswerKeyToDataFrame <-
function (xml = NULL, xml.parsed = NULL) {
if(!is.null(xml))
xml.parsed <- xmlParse(xml)
nodes <- xmlChildren(xmlChildren(xml.parsed)$AnswerKey)
    questionnodes <- nodes[names(nodes) == "Question"]
    # one row per (Question, AnswerOption) pair, matching how the loop below fills `answerkey`
    noptions <- sum(sapply(questionnodes,
                           function(q) sum(names(xmlChildren(q)) == "AnswerOption")))
    answerkey <- data.frame(matrix(nrow = noptions, ncol = 3))
names(answerkey) <- c("QuestionIdentifier", "SelectionIdentifier",
"AnswerScore")
k <- 1
for(i in 1:length(nodes[names(nodes) == "Question"])) {
question <- xmlChildren(nodes[names(nodes) == "Question"][[i]])
qid <- xmlValue(question$QuestionIdentifier)
answeroptions <- question[names(question) == "AnswerOption"]
for(j in 1:length(answeroptions)) {
answerkey$QuestionIdentifier[k] <- qid
answerkey$SelectionIdentifier[k] <- xmlValue(xmlChildren(answeroptions[[j]])$SelectionIdentifier)
answerkey$AnswerScore[k] <- xmlValue(xmlChildren(answeroptions[[j]])$AnswerScore)
k <- k + 1
}
}
if(!is.null(nodes$QualificationValueMapping)) {
map <- xmlChildren(nodes$QualificationValueMapping)
mapping <- list()
if("PercentageMapping" %in% names(map)) {
mapping$Type <- "PercentageMapping"
mapping$MaximumSummedScore <- xmlValue(xmlChildren(map$PercentageMapping)$MaximumSummedScore)
}
else if("ScaleMapping" %in% names(map)) {
mapping$Type <- "ScaleMapping"
        mapping$SummedScoreMultiplier <- xmlValue(xmlChildren(map$ScaleMapping)$SummedScoreMultiplier)
}
else if("RangeMapping" %in% names(map)) {
mapping$Type <- "RangeMapping"
ranges.xml <- xmlChildren(map$RangeMapping)
scoreranges <- ranges.xml[names(ranges.xml) == "SummedScoreRange"]
mapping$Ranges <- data.frame(matrix(nrow=length(scoreranges), ncol=3))
names(mapping$Ranges) <- c("InclusiveLowerBound",
"InclusiveUpperBound", "QualificationValue")
for(i in 1:length(scoreranges)) {
mapping$Ranges[i, ] <- c(xmlValue(xmlChildren(scoreranges[[i]])$InclusiveLowerBound),
xmlValue(xmlChildren(scoreranges[[i]])$InclusiveUpperBound),
xmlValue(xmlChildren(scoreranges[[i]])$QualificationValue))
}
mapping$OutOfRangeQualificationValue <- xmlValue(ranges.xml$OutOfRangeQualificationValue)
}
return(list(Questions = answerkey, Scoring = mapping))
}
else
return(list(Questions = answerkey))
}
| /R/AnswerKeyToDataFrame.R | no_license | imclab/MTurkR | R |
## This function creates a special matrix object that caches its inverse
makeCacheMatrix <- function(x = matrix()) {
j<-NULL
set<-function(y){
x<<-y
j<<-NULL
}
get<-function() x
setmatrix<-function(solve) j<<- solve
getmatrix<-function() j
list(set=set, get=get,
setmatrix=setmatrix,
getmatrix=getmatrix)
}
## This function computes the inverse of the special matrix returned by makeCacheMatrix
## If inverse has already been calculated, cachesolve should retrieve the inverse from the cache
cacheSolve <- function(x=matrix(), ...) {
j<-x$getmatrix()
if(!is.null(j)){
message("getting cached data")
return(j)
}
  matrix <- x$get()
j<-solve(matrix, ...)
x$setmatrix(j)
j
}
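## Minimal usage sketch (not part of the original assignment file): wrap an
## invertible matrix, then call cacheSolve() twice -- the second call should
## print "getting cached data" and return the stored inverse.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
inv1 <- cacheSolve(m)   # computes and caches the inverse
inv2 <- cacheSolve(m)   # retrieved from the cache
identical(inv1, inv2)   # TRUE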
| /cachematrix.R | no_license | hoffm834/ProgrammingAssignment2 | R |
### merge results
library(tidyverse)
library(tidyr)
### work flow
## upload time stats
## converge all nodes per species
## converge all species
## calculate suitability per node position
## calculate suitability per node (if 1 position suitable, node is suitable)
# setwd("/Users/katieirving/Documents/git/SOC_tier_3/output_data")
## water year types
getwd()
### santa ana sucker
## velocity
## upload all time stats csvs
## time stats
ts <- list.files("output_data/", pattern="time_stats")
length(ts) ## 219
ts
ts <- Filter(function(x) grepl("Depth", x), ts)
ts <- Filter(function(x) grepl("Breeding", x), ts)
ts <- Filter(function(x) grepl("Toad", x), ts)
time_statsx <- NULL
j=1
j
for(j in 1: length(ts)) {
time_stats <- read.csv(file=paste("output_data/", ts[j], sep=""))
######## juvenile
time_stats <- time_stats %>%
select(-X) %>%
# filter(Probability_Threshold == ann_stats) %>%
rename(TimePeriod = season, TimePercentage = value, TimePeriod2 = Probability_Threshold) %>%
# mutate(Species = "Willow", Life_Stage = "Seedling", Hydraulic = "Depth", Node = paste(stringx[3])) %>%
distinct()
time_statsx <- rbind(time_statsx, time_stats)
}
head(time_stats)
unique(time_statsx$TimePeriod2)
## change time period to seasonal
time_stats_seas <- time_statsx %>%
filter(TimePeriod == "critical") %>%
select(-TimePeriod) %>%
distinct()
head(time_stats_seas)
## calculate suitability
time_stats_seas <- time_stats_seas %>%
mutate(Suitability_Class = NA)
# group_by(Node, position, Species, Life_Stage, water_year) %>%
probs <- seq(1, dim(time_stats_seas)[1], 1)
for(p in 1: length(probs)) {
time_stats_seas$Suitability_Class[p] = if(time_stats_seas$TimePercentage[p] >= 75) {
paste("High")
} else if(time_stats_seas$TimePercentage[p] >= 25 & time_stats_seas$TimePercentage[p] <= 75 ){
paste("Partial")
} else if(time_stats_seas$TimePercentage[p] < 25){
paste("Low")
} else {
paste("Partial")
}
}
time_stats_seas
head(time_stats_seas)
## join back together and save
time_stats_all <-time_stats_seas
write.csv(time_stats_all, "/Users/katieirving/Documents/git/SOC_tier_3/output_data/results/Toad_Breeding_Depth_time_stats.csv")
# Days per month ----------------------------------------------------------
### days per month
td <- list.files("output_data/", pattern="total_days")
length(td) ## 153
td <- Filter(function(x) grepl("Depth", x), td)
td <- Filter(function(x) grepl("Breeding", x), td)
td <- Filter(function(x) grepl("Toad", x), td)
td
total_daysx <- NULL
for(j in 1: length(td)) {
total_days <- read.csv(file=paste("output_data/", td[j], sep=""))
######## juvenile
total_days <- total_days %>%
select(-X,-Probability_Threshold) %>%
# filter(Probability_Threshold == ann_stats) %>%
rename(TimePeriod = season, DaysPerMonth = n_days) %>%
# mutate(Species = "Willow", Life_Stage = "Seedling", Hydraulic = "Depth", Node = paste(stringx[2])) %>%
distinct()
total_daysx <- rbind(total_daysx, total_days)
}
head(total_days)
## change time period to seasonal and add bottom and water year type
total_days_seas <- total_daysx
total_days_seas <- total_days_seas %>%
mutate(Suitability_Class = NA)
probs <- seq(1, dim(total_days_seas)[1], 1)
for(p in 1: length(probs)) {
total_days_seas$Suitability_Class[p] = if(total_days_seas$DaysPerMonth[p] >= 21) {
paste("High")
} else if(total_days_seas$DaysPerMonth[p] >= 7 & total_days_seas$DaysPerMonth[p] <= 21 ){
paste("Partial")
} else if(total_days_seas$DaysPerMonth[p] < 7){
paste("Low")
} else {
paste("Partial")
}
}
total_days_seas
### bind together and save
total_days_all <- total_days_seas
write.csv(total_days_all,"/Users/katieirving/Documents/git/SOC_tier_3/output_data/results/Toad_Breeding_Depth_total_days.csv")
total_days_all
| /code/S1f_suitability_toad_depth.R | no_license | ksirving/SOC_tier_3 | R |
library(RNeo4j)
subs <- unique(data$SubCategories)   # `data` is assumed to be loaded in the session beforehand
graph = startGraph("http://localhost:7474/db/data/")
for (i in subs){
query <- paste0("MATCH (a:SubCategories {title:'",i,"'})-[r:`In SubCategory`]-() WITH a, count(r) AS weight SET a.weight = weight")
cypher(graph,query)
}
Agencies <- cypher(graph, "MATCH (a:Agencies) RETURN a.title")$a.title
for (i in Agencies){
  query <- paste0("MATCH (a:Agencies {title:'",i,"'})-[r:Performed]-() WITH a, count(r) AS weight SET a.weight = weight")
  cypher(graph, query)
}
| /R/WeightNodes.R | permissive | davidmeza1/KA_Interns | R |
library(shiny)
library(DT)
shinyServer(function(input, output) {
filedata <- read.csv("sf_nbh_summary.csv")
format_cols <- c("Black.Diff","Hispanic.Latino.Diff","Juvenile.Diff","White.Diff")
numeric_cols <- c("Black.stop_and_frisk","Black.census","Black.Diff",
"Hispanic.Latino.stop_and_frisk","Hispanic.Latino.census","Hispanic.Latino.Diff",
"Juvenile.stop_and_frisk","Juvenile.census","Juvenile.Diff","White.stop_and_frisk",
"White.census","White.Diff")
output$tbl = renderDT(
datatable(filedata,rownames = FALSE, extensions ="FixedColumns",options = list(
scrollX=TRUE,
scrollY=500,
fixedColumns = list(leftColumns = 2),
autoWidth = TRUE,
columnDefs = list(list(width = '250px', targets = c(1)),
list(className = 'dt-center', targets = 0:13),
list(visible=FALSE, targets=c(0))))) %>%
formatStyle(format_cols,
backgroundColor = styleInterval(0, c('lightpink', 'lightgreen'))) %>%
#formatStyle("neighborhood","white-space"="nowrap") %>%
#formatStyle(columns = c(2), width='200%') %>%
formatPercentage(numeric_cols, 1)
)
})
# columnDefs = list(list(visible=FALSE, targets=c(4)
#
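# Hypothetical ui.R counterpart (not included in this dump; shown commented out so
# it does not interfere with server.R): the server above only needs a DT output
# slot named "tbl"; the panel title is illustrative.
# library(shiny)
# library(DT)
# fluidPage(
#   titlePanel("Stop and frisk summary by neighborhood"),
#   DTOutput("tbl")
# )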
| /03_stop_frisk/scripts/shiny/server.R | no_license | GWarrenn/dc_data | R |
library(nlme)      # provides gls(), corAR1(), and the Ovary data
library(ggplot2)
ggplot(Ovary,
aes(x = Time, y = follicles, colour = Mare)
) + geom_point() + facet_wrap(~Mare)
fm1 <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), Ovary,
correlation = corAR1(form = ~ 1 | Mare))
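# Follow-up sketch (not in the original script): refit without the AR(1) term and
# compare; both models share the same fixed effects, so the anova() comparison of
# the two correlation structures is legitimate.
fm0 <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), Ovary)
summary(fm1)     # reports Phi, the estimated within-mare AR(1) correlation
anova(fm0, fm1)  # AIC/BIC/likelihood-ratio comparison of the two fits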
| /scripts/scripts_5.R | no_license | mark-andrews/gnam01 | R |
library(tidyverse)
library(broom)   # tidy() below comes from broom, which tidyverse does not attach by default
#https://www.datacamp.com/community/tutorials/survival-analysis-R
#This is the GWAS results
t <- read.csv("manuscript/tables/TableS6_geneP-allSigCP.csv")
t.ddn <- split(t, t$gene)
byGene <- data.frame(gene= names(t.ddn))
byGene$gene <- as.character(byGene$gene)
byGene$numVar <- unlist(lapply(t.ddn, function(x) nrow(x)))
byGene$numPhen <- unlist(lapply(t.ddn, function(x) sum(x$numPhen)))
#This is all sig genes (GWAS + PCA)
t2 <- read_csv("manuscript/tables/TableS9_UgCl-allSigThings.csv")
t2.gwas <- subset(t2, Gene %in% byGene$gene)
names(t2.gwas)[6] <- "KOSurv"
byGene <- cbind(byGene, t2.gwas)
tVir <- subset(byGene, !is.na(byGene$KOSurv))
tVir$KONum <- ifelse(tVir$KOSurv == "no effect", 0, 1)
test <- glm(KONum~numVar, data=tVir, family=binomial)
tidy(test)
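# Follow-up sketch (not in the original script): report the logistic-regression
# slope as an odds ratio with a Wald confidence interval.
exp(cbind(OR = coef(test), confint.default(test)))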
| /scripts/10-extra.R | no_license | acgerstein/UgClGenomics | R |
# Hierarchical clustering
# 1. Single linkage (minimum distance)
#    Take the smaller of: the distance from point a to cluster C, and the distance from point b to cluster C.
# 2. Complete linkage (maximum distance)
#    Take the larger of: the distance from point a to cluster C, and the distance from point b to cluster C.
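# Illustrative example (not in the original notes): the two linkage rules above
# correspond to method = "single" and method = "complete" in base R's hclust().
d <- dist(scale(iris[, 1:4]))                  # pairwise distances on standardised data
hc_single   <- hclust(d, method = "single")    # 1. minimum-distance (single) linkage
hc_complete <- hclust(d, method = "complete")  # 2. maximum-distance (complete) linkage
par(mfrow = c(1, 2))
plot(hc_single,   main = "Single linkage",   labels = FALSE)
plot(hc_complete, main = "Complete linkage", labels = FALSE)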
| /5. Machine Learning/08. Welcome MachineLearning/ch03. Clustering.R | no_license | byungjun0689/DataScience | R |
#!/mnt/software/bin/Rscript-3.1.0 --slave
argv <- commandArgs(TRUE)
data <- read.table(argv[1], header=T, check.names=F)
write.table(t(data), file=argv[2], quote=F, sep= "\t", row.names=T, col.names=T)
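# Example invocation (hypothetical file names): the script reads exactly two
# positional arguments via commandArgs(TRUE) -- the input matrix and the output path.
#   Rscript transpose_matrix.R expression_matrix.txt expression_matrix.transposed.txt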
| /transpose_matrix.R | no_license | CSB5/TCGA-preprocessing | R |
library(shiny)
library(ggplot2)
library(reshape)
library(grid)
library(ExomeDepth)
load('Data.RData') #wrapper script copies data to this hard path
# Define the server logic for the DECoN/ExomeDepth results viewer
shinyServer(function(input, output) {
bed.file.disp<-cbind(1:nrow(bed.file),bed.file)
  if(ncol(bed.file.disp)==4){colnames(bed.file.disp)=c("Exon Index","Chromosome","Start","Stop")}
  if(ncol(bed.file.disp)==5){colnames(bed.file.disp)=c("Exon Index","Chromosome","Start","Stop","Gene")}
# bed.file.disp=cbind(bed.file.disp,ExomeCount$names)
# colnames(bed.file.disp)[ncol(bed.file.disp)]="Gene.Exon"
names_files<-data.frame(sample.names,bams)
colnames(names_files)<-c("Sample Name","File")
Index=vector(length=nrow(bed.file))
Index[1]=1
for(i in 2:nrow(bed.file)){
if(bed.file[i,4]==bed.file[i-1,4]){
Index[i]=Index[i-1]+1
}else{
Index[i]=1
}
}
if(!is.null(exon_numbers)){
for(i in 1:nrow(exons)){
x=which(paste(bed.file[,4])==paste(exons[i,4]) & bed.file[,2]<=exons[i,3] & bed.file[,3]>=exons[i,2])
Index[x]=exons[i,5]
}
}
bed.file.disp=cbind(bed.file.disp,Index)
colnames(bed.file.disp)[ncol(bed.file.disp)]="Custom.exon"
#####Data tab
output$bed.file<-renderDataTable({
if(input$readBed%%2==0){bed.file.disp}
})
output$bamlist <- renderDataTable({
if(input$readBams%%2==0){names_files}
})
output$fasta <- renderPrint({
if(input$readfasta%%2==0){fasta}
})
#####Coverage Evaluation tab
output$downloadCov<-downloadHandler(
filename=function(){
paste('Coverage-',Sys.Date(),'.txt',sep='')
},
content = function(file){
write.table(ExomeCount[,sample.names],file,sep="\t",quote=F,row.names=F)
}
)
output$FailedSamples<-renderDataTable({
ReadDepths<-ExomeCount[,sample.names] #extracts just the read depths
Sample<-vector()
Details<-vector()
Correlation<-vector()
MedCov<-vector()
Corr<-cor(ReadDepths) #calculates correlation matrix
MaxCorr<-apply(Corr,1,function(x)max(x[x!=1])) #finds the maximum correlation for each sample
SampleMedian<-apply(ReadDepths,2,median)
for(i in 1:length(MaxCorr)){ #tests correlation and median coverage for each sample; if below threshold, adds to list of fails and adds reason to Details list
if(MaxCorr[i]<input$SampleCorLimit[1]|SampleMedian[i]<input$SampleCovLimit[1]){
Sample<-c(Sample,sample.names[i])
Correlation<-c(Correlation,MaxCorr[i])
MedCov<-c(MedCov,SampleMedian[i])
if(MaxCorr[i]<input$SampleCorLimit[1] ){
if(SampleMedian[i]<input$SampleCovLimit[1]){
Details<-c(Details,"Low correlation and low median coverage ")
}else{Details<-c(Details,"Low correlation")}
}else{
Details<-c(Details,"Low median coverage")
}
}
}
Failures<-data.frame(Sample,Details,Correlation,MedCov)
names(Failures)[4]="Median Coverage"
Failures
})
output$FailedExons<-renderDataTable({
ReadDepths<-ExomeCount[,sample.names]
ExonMedian<-apply(ReadDepths,1,median)
Exon<-which(ExonMedian<input$ExonCovLimit[1])
Custom.exon<-Index[Exon]
Gene<-bed.file[Exon,4]
Coverage<-ExonMedian[Exon]
Failures<-data.frame(Exon,Gene,Custom.exon,Coverage)
names(Failures)=c("Exon number (bed file)","Gene","Exon number (custom)","Coverage")
Failures
})
output$PlotSampleHighlight<-renderUI({
selectInput("SampHigh",choices=as.list(c(sample.names)),label="Select sample to highlight",multiple=F,selected=sample.names[1])
})
output$PlotSamplesInput<-renderUI({
if(input$ChooseFrom==1){
selectInput("PlotSamp",choices=as.list(refs[[input$SampHigh]]),label="Select samples to display",multiple=T,selected=paste(refs[[input$SampHigh]]))
}else if(input$ChooseFrom==2){
selectInput("PlotSamp",choices=as.list(sample.names),label="Select samples to display",multiple=T,selected=paste(sample.names))
}
})
output$PlotGenesInput<-renderUI({
selectInput("PlotGenes",choices=as.list(paste(unique(bed.file[,4]))),label="Select genes to plot",multiple=T,selected=paste(unique(bed.file[,4])[1:2]))
})
output$CovPlot<-renderPlot({
if(input$plotType==1){
exons<-which(bed.file[,4]%in%input$PlotGenes)
Data<-cbind(exons,ExomeCount[exons,sample.names])
Data1<-melt(Data,id=c("exons"))
Data1$exons<-as.factor(Data1$exons)
if(input$plotScale==2){Data1$value=log(Data1$value)}
p<-ggplot(data=Data1[Data1$variable%in%input$PlotSamp,],aes(x=exons,y=value)) + geom_boxplot(width=0.75) + theme_bw() + xlab(NULL)+ ggtitle("")
if(input$plotScale==1){p <- p + ylab("Coverage")}
if(input$plotScale==2){p <- p + ylab("Log (Coverage)")}
p<-p + geom_point(data=Data1[Data1$variable==input$SampHigh,],aes(x=exons,y=value),colour="blue",cex=2.5)
p
}else if(input$plotType==2){
exons<-which(bed.file[,4]%in%input$PlotGenes)
Data<-cbind(exons,ExomeCount[exons,sample.names])
Data1<-melt(Data,id=c("exons"))
Data1$exons<-as.factor(Data1$exons)
if(input$plotScale==2){Data1$value=log(Data1$value)}
p<-ggplot(data=Data1[Data1$variable%in%input$PlotSamp,],aes(x=exons,y=value)) + geom_point(width=0.75) + theme_bw() + xlab(NULL)+ ggtitle("")
if(input$plotScale==1){p <- p + ylab("Coverage")}
if(input$plotScale==2){p <- p + ylab("Log (Coverage)")}
for(gene in input$PlotGenes){
p<-p + geom_line(data=Data1[Data1$variable%in%input$PlotSamp & Data1$exons%in%which(bed.file[,4]==gene),],aes(x=exons,y=value,group=variable),colour="grey")
}
p<-p + geom_point(data=Data1[Data1$variable==input$SampHigh,],aes(x=exons,y=value),colour="blue",cex=2.5)
for(gene in input$PlotGenes){
p<-p + geom_line(data=Data1[Data1$variable==input$SampHigh & Data1$exons%in%which(bed.file[,4]==gene),],aes(x=exons,y=value,group=variable),colour="blue")
}
p
}
})
#####CNV calls tab
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
if(!exists("cnv.calls_ids") | !exists("cnv.calls_ids$Confidence")){ #Formats CNV calls if generated by old DECoN version
if(!exists("cnv.calls$Gene")){
cnv.calls$Gene=apply(cnv.calls,1,function(x)paste(unique(bed.file[x[4]:x[5],4]),collapse=", "))
}
if(!exists("cnv.calls$Confidence")){
Flag<-rep("HIGH",nrow(cnv.calls))
a<-which(cnv.calls$correlation<.985)
if(length(a)>0){Flag[a]="LOW"}
a<-which(cnv.calls$reads.ratio<1.25 & cnv.calls$reads.ratio>.75)
if(length(a)>0){Flag[a]="LOW"}
Ncomp<-lapply(refs,length)
a<-which(Ncomp<=3)
if(length(a)>0){
samples_a<-sample.names[a]
b<-which(cnv.calls$sample%in%samples_a)
Flag[b]="LOW"
}
a<-which(cnv.calls$Gene=="PMS2")
if(length(a)>0){Flag[a]="LOW"}
cnv.calls$Confidence=Flag
}
cnv.calls_ids=cbind(1:nrow(cnv.calls),cnv.calls)
names(cnv.calls_ids)[1]="ID"
for(i in 1:nrow(cnv.calls_ids)){
genes=strsplit(paste(cnv.calls_ids[i,]$Gene),",")[[1]]
genes=trim(genes)
whole.index=cnv.calls_ids[i,]$start.p:cnv.calls_ids[i,]$end.p
if(length(genes)>1){
temp=cnv.calls_ids[rep(i,length(genes)),]
temp$Gene=genes
for(j in 1:length(genes)){
gene.index=which(bed.file[,4]==genes[j])
overlap=gene.index[gene.index%in%whole.index]
temp[j,]$start.p=min(overlap)
temp[j,]$end.p=max(overlap)
}
if(i==1){
cnv.calls_ids=rbind(temp,cnv.calls_ids[(i+1):nrow(cnv.calls_ids),])
}else if(i==nrow(cnv.calls_ids)){
cnv.calls_ids=rbind(cnv.calls_ids[1:(i-1),],temp)
}else{
cnv.calls_ids=rbind(cnv.calls_ids[1:(i-1),],temp,cnv.calls_ids[(i+1):nrow(cnv.calls_ids),])
}
}
}
cnv.calls_ids$Gene=trim(cnv.calls_ids$Gene)
}
output$downloadCNVs<-downloadHandler(
filename=function(){
paste('CNVs-',Sys.Date(),'.txt',sep='')
},
content = function(file){
write.table(cnv.calls_ids,file,sep="\t",quote=F,row.names=F)
}
)
genes.exons.short<-apply(cnv.calls,1,function(x)paste(ExomeCount$names[x[4]:x[5]],collapse=", "))
genes.exons.short<-strsplit(genes.exons.short,",")
genes.exons.short<-lapply(genes.exons.short,trim)
genes.exons.split<-vector(length=nrow(cnv.calls_ids))
for(i in 1:nrow(cnv.calls_ids)){
j=cnv.calls_ids$ID[i]
single.gene=cnv.calls_ids$Gene[i]
all.genes=unlist(lapply(strsplit(genes.exons.short[[j]],".",fixed=T),function(x)x[1]))
genes.exons.split[i]=paste(genes.exons.short[[j]][single.gene==all.genes],collapse=", ")
}
# CNVs<-cbind(cnv.calls_ids[,c("ID","sample","start.p","end.p","nexons","Gene")],genes.exons.split,cnv.calls_ids[,c("type","reads.ratio","correlation","Confidence")])
# names(CNVs)<-c("CNV identifier","Sample","First exon","Last exon","Number of exons","Gene","Gene.Exon","Type","Read ratio","Correlation","Confidence")
# CNVs<-cbind(cnv.calls_ids[,c("ID","sample","start.p","end.p","nexons","Gene")],genes.exons.split,cnv.calls_ids[,c("type","reads.ratio","correlation")])
# names(CNVs)<-c("CNV ID","Sample","First exon","Last exon","Number of exons","Gene","Gene.Exon","Type","Read ratio","Correlation")
CNVs<-cbind(cnv.calls_ids[,c("ID","sample","start.p","end.p","nexons","Gene")],Index[cnv.calls_ids$start.p],Index[cnv.calls_ids$end.p],cnv.calls_ids[,c("type","reads.ratio","correlation")])
names(CNVs)<-c("CNV ID","Sample","First exon (BED file)","Last exon (BED file)","Number of exons","Gene","First exon (custom)","Last exon (custom)","Type","Read ratio","Correlation")
output$CNVcalls<-renderDataTable(
CNVs,options=list(iDisplayLength=10)
)
output$selVar<-renderUI({
selectInput("selVar1","Use the CNV ID given in the table above",c('None',1:nrow(cnv.calls)))
})
output$minEx <- renderUI({
if(input$selVar1!="None"){
numericInput("minEx1",min=1,max=nrow(bed.file)-1,value=max(cnv.calls[input$selVar1,]$start.p-5,1),label="First exon")
}
})
output$maxEx <- renderUI({
if(input$selVar1!="None"){
numericInput("maxEx1",min=2,max=nrow(bed.file),value=min(cnv.calls[input$selVar1,]$end.p+5,nrow(bed.file)),label="Last")
}
})
output$plot<-renderPlot({
if(input$selVar1=="None"){
plot(NULL,xlim=c(1,10),ylim=c(0,1000))
}else{
Sample<-cnv.calls[input$selVar1,]$sample
exonRange<-input$minEx1:input$maxEx1
VariantExon<- unlist(mapply(function(x,y)x:y,cnv.calls[cnv.calls$sample==Sample,]$start.p,cnv.calls[cnv.calls$sample==Sample,]$end.p))
if(input$chSamp==1){
refs_sample<-refs[[Sample]]
Data<-cbind(ExomeCount[exonRange,c(Sample,refs_sample)],exonRange)
if(input$chScale==2){Data[,-ncol(Data)]=log(Data[,-ncol(Data)])}
# if(input$chScale==3){Data_temp<-data.frame(cbind(t(apply(Data[,1:(ncol(Data)-1)],1,function(x)x-median(x))),exonRange));names(Data_temp)<-names(Data);Data<-Data_temp}
# if(input$chScale==4){Data_temp<-data.frame(cbind(apply(Data[,1:(ncol(Data)-1)],2,function(x)x-median(x)),exonRange));names(Data_temp)<-names(Data);Data<-Data_temp}
# if(input$chScale==5){Data_temp<-t(apply(Data[,1:(ncol(Data)-1)],1,function(x)x-median(x)));Data_temp<-apply(Data_temp,2,function(x)x-median(x));Data_temp<-data.frame(Data_temp,exonRange);names(Data_temp)<-names(Data);Data<-Data_temp}
Data1<-melt(Data,id=c("exonRange"))
testref<-rep("gray",nrow(Data1))
testref[Data1$variable==Sample]="blue"
Data1<-data.frame(Data1,testref)
levels(Data1$variable)=c(levels(Data1$variable),"VAR")
Data1$testref<-as.factor(Data1$testref)
levels(Data1$testref)=c(levels(Data1$testref),"red")
data_temp<-Data1[Data1$variable==Sample & Data1$exonRange%in%VariantExon,]
if(nrow(data_temp)>0){
data_temp$variable="VAR"
data_temp$testref="red"
Data1<-rbind(Data1,data_temp)
}
levels(Data1$testref)=c("Test Sample","Reference Sample","Affected exon")
new_cols=c("blue","gray","red")
A1<-ggplot(data=Data1,aes(x=exonRange,y=value,group=variable,colour=testref))
A1<-A1 + geom_point(cex=2.5,lwd=1.5)
A1<-A1 + scale_colour_manual(values=new_cols)
A1<-A1 + geom_line(data=subset(Data1,testref=="Reference Sample"),lty="dashed",lwd=1.5,col="grey")
A1<-A1 + geom_point(data=subset(Data1,testref=="Reference Sample"),cex=2.5,col="grey")
A1<-A1 + geom_line(data=subset(Data1,testref=="Test Sample"),lty="dashed",lwd=1.5,col="blue")
A1<-A1 + geom_point(data=subset(Data1,testref=="Test Sample"),cex=2.5,col="blue")
A1<-A1 + geom_point(data=subset(Data1,testref=="Affected exon"),cex=3.5,col="red")
if(input$chScale==1){
A1<-A1 + ylab("Coverage") + xlab("")
}
if(input$chScale==2){
A1<-A1 + ylab("Log (Coverage)") + xlab("")
}
A1<-A1 + theme_bw() + theme(legend.position="none",axis.text.x=element_blank())
A1<-A1 + scale_x_continuous(breaks=exonRange)#,labels=paste(exonRange))
Data2<-Data1[Data1$testref=="Affected exon",]
if(nrow(Data2)>1){
for(i in 1:(nrow(Data2)-1)){
if((Data2$exonRange[i]+1)==Data2$exonRange[i+1]){ A1<-A1 + geom_line(data=Data2[i:(i+1),],aes(x=exonRange,y=value,group=1),lwd=1.5,col="red")}
}
}
print(A1)
}else if(input$chSamp==2){
refs_sample<-sample.names[sample.names!=Sample]
Data<-cbind(ExomeCount[exonRange,c(Sample,refs_sample)],exonRange)
if(input$chScale==2){Data[,-ncol(Data)]=log(Data[,-ncol(Data)])}
#if(input$chScale==3){Data_temp<-data.frame(cbind(t(apply(Data[,1:(ncol(Data)-1)],1,function(x)x-median(x))),exonRange));names(Data_temp)<-names(Data);Data<-Data_temp}
#if(input$chScale==4){Data_temp<-data.frame(cbind(apply(Data[,1:(ncol(Data)-1)],2,function(x)x-median(x)),exonRange));names(Data_temp)<-names(Data);Data<-Data_temp}
#if(input$chScale==5){Data_temp<-t(apply(Data[,1:(ncol(Data)-1)],1,function(x)x-median(x)));Data_temp<-apply(Data_temp,2,function(x)x-median(x));Data_temp<-data.frame(Data_temp,exonRange);names(Data_temp)<-names(Data);Data<-Data_temp}
Data1<-melt(Data,id=c("exonRange"))
testref<-rep("gray",nrow(Data1))
testref[Data1$variable==Sample]="blue"
Data1<-data.frame(Data1,testref)
levels(Data1$variable)=c(levels(Data1$variable),"VAR")
Data1$testref<-as.factor(Data1$testref)
levels(Data1$testref)=c(levels(Data1$testref),"red")
data_temp<-Data1[Data1$variable==Sample & Data1$exonRange%in%VariantExon,]
if(nrow(data_temp)>0){
data_temp$variable="VAR"
data_temp$testref="red"
Data1<-rbind(Data1,data_temp)
}
levels(Data1$testref)=c("Test Sample","Reference Sample","Affected exon")
new_cols=c("blue","gray","red")
A1<-ggplot(data=Data1,aes(x=exonRange,y=value,group=variable,colour=testref))
A1<-A1 + geom_point(cex=2.5,lwd=1.5)
A1<-A1 + scale_colour_manual(values=new_cols)
A1<-A1 + geom_line(data=subset(Data1,testref=="Reference Sample"),lty="dashed",lwd=1.5,col="grey")
A1<-A1 + geom_point(data=subset(Data1,testref=="Reference Sample"),cex=2.5,col="grey")
A1<-A1 + geom_line(data=subset(Data1,testref=="Test Sample"),lty="dashed",lwd=1.5,col="blue")
A1<-A1 + geom_point(data=subset(Data1,testref=="Test Sample"),cex=2.5,col="blue")
A1<-A1 + geom_point(data=subset(Data1,testref=="Affected exon"),cex=3.5,col="red")
if(input$chScale==1){
A1<-A1 + ylab("Coverage") + xlab("")
}
if(input$chScale==2){
A1<-A1 + ylab("Log (Coverage)") + xlab("")
}
A1<-A1 + theme_bw() + theme(legend.position="none",axis.text.x=element_blank())
A1<-A1 + scale_x_continuous(breaks=exonRange)#,labels=paste(exonRange))
Data2<-Data1[Data1$testref=="Affected exon",]
if(nrow(Data2)>1){
for(i in 1:(nrow(Data2)-1)){
if((Data2$exonRange[i]+1)==Data2$exonRange[i+1]){ A1<-A1 + geom_line(data=Data2[i:(i+1),],aes(x=exonRange,y=value,group=1),lwd=1.5,col="red") }
}
}
print(A1)
}else if(input$chSamp==3){
Data1<-data.frame(rep(Sample,length(exonRange)),ExomeCount[exonRange,Sample],exonRange)
names(Data1)<-c("variable","value","exonRange")
if(input$chScale==2){Data1$value=log(Data1$value)}
# if(input$chScale==4){Data1$value = Data1$value - median(ExomeCount[,Sample])}
testref<-rep("gray",nrow(Data1))
testref[Data1$variable==Sample]="blue"
Data1<-data.frame(Data1,testref)
levels(Data1$variable)=c(levels(Data1$variable),"VAR")
Data1$testref=as.factor(Data1$testref)
levels(Data1$testref)=c(levels(Data1$testref),"red")
data_temp<-Data1[Data1$variable==Sample & Data1$exonRange%in%VariantExon,]
if(nrow(data_temp)>0){
data_temp$variable="VAR"
data_temp$testref="red"
Data1<-rbind(Data1,data_temp)
}
levels(Data1$testref)=c("Test Sample","Affected exon")
new_cols=c("blue","red")
A1<-ggplot(data=Data1,aes(x=exonRange,y=value,group=variable,colour=testref))
A1<-A1 + geom_point(cex=2.5,lwd=1.5)
A1<-A1 + scale_colour_manual(values=new_cols)
A1<-A1 + geom_line(data=subset(Data1,testref=="Test Sample"),lty="dashed",lwd=1.5,col="blue")
A1<-A1 + geom_point(data=subset(Data1,testref=="Affected exon"),cex=3.5,col="red")
if(input$chScale==1){
A1<-A1 + ylab("Coverage") + xlab("")
}
if(input$chScale==2){
A1<-A1 + ylab("Log (Coverage)") + xlab("")
}
A1<-A1 + theme_bw() + theme(legend.position="none",axis.text.x=element_blank())
A1<-A1 + scale_x_continuous(breaks=exonRange)#,labels=paste(exonRange))
Data2<-Data1[Data1$testref=="Affected exon",]
if(nrow(Data2)>1){
for(i in 1:(nrow(Data2)-1)){
if((Data2$exonRange[i]+1)==Data2$exonRange[i+1]){ A1<-A1 + geom_line(data=Data2[i:(i+1),],aes(x=exonRange,y=value,group=1),lwd=1.5,col="red") }
}
}
print(A1)
}
}
})
output$genes<-renderPlot({
if(input$selVar1=="None"){
par(mar=rep(0,4))
plot.new()
}else{
exonRange<-input$minEx1:input$maxEx1
genes_sel = unique(bed.file[exonRange,4])
temp<-cbind(1:nrow(bed.file),bed.file)[exonRange,]
len<-table(temp$name)
mp<-tapply(exonRange,temp[,5],mean)
mp<-mp[genes_sel]
len<-len[genes_sel]
Genes<-data.frame(genes_sel,as.vector(mp),as.vector(len-.5),1)
names(Genes)=c("Gene","MP","Length","Ind")
if(!is.null(exon_numbers)){
qplot(data=Genes,MP,Ind,fill=Gene,geom="tile",width=Length,label=Gene) + geom_text() + theme_bw() + theme(legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(),axis.text.y = element_blank(),axis.ticks.y=element_blank(),plot.margin=unit(c(.5,.5,.5,.55),"cm")) + ylab(" ") + xlab("Custom Numbering") + scale_x_continuous(breaks=exonRange,labels=paste(Index[exonRange]))
}else{
qplot(data=Genes,MP,Ind,fill=Gene,geom="tile",width=Length,label=Gene) + geom_text() + theme_bw() + theme(legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_blank(),axis.text.y = element_blank(),axis.ticks.y=element_blank(),plot.margin=unit(c(.5,.5,.5,.55),"cm")) + ylab(" ") + xlab(" ")
}
}
})
output$CIplot<-renderPlot({
if(input$selVar1=="None"){
plot(NULL,xlim=c(1,10),ylim=c(0,1000))
}else{
Sample<-cnv.calls[input$selVar1,]$sample
exonRange<-input$minEx1:input$maxEx1
refs_sample<-refs[[Sample]]
Totals<-rowSums(ExomeCount[exonRange,c(Sample,refs_sample)])
ratio = (ExomeCount[exonRange,Sample]/Totals)/models[[Sample]][1]
mins <- vector(length=length(exonRange))
maxs <-vector(length=length(exonRange))
for(i in 1:length(exonRange)){
temp = qbetabinom(p=0.025,Totals[i],models[[Sample]][2],models[[Sample]][1])
mins[i] = (temp/Totals[i])/models[[Sample]][1]
temp = qbetabinom(p=0.975,Totals[i],models[[Sample]][2],models[[Sample]][1])
maxs[i] = (temp/Totals[i])/models[[Sample]][1]
}
CIData<-data.frame(exonRange,ratio,mins,maxs)
names(CIData)<-c("Exon","Ratio","Min","Max")
CIPlot<-ggplot(CIData,aes(x=Exon,y=Ratio))+geom_ribbon(aes(ymin=Min,ymax=Max),fill="grey")+geom_point(col="blue",cex=3.5) + theme_bw()+xlab("Exon Index")+ylab("Observed/Expected") + scale_x_continuous(breaks=exonRange,labels=paste(exonRange))
temp = cnv.calls[cnv.calls$sample==Sample,]
if(sum(temp$start.p%in%exonRange |temp$end.p%in%exonRange)>0){
temp = temp[temp$start.p%in%exonRange|temp$end.p%in%exonRange,]
for(i in 1:nrow(temp)){
start.temp = temp[i,]$start.p
end.temp = temp[i,]$end.p
CIPlot<-CIPlot + geom_point(data=CIData[CIData$Exon%in%start.temp:end.temp,], aes(x=Exon,y=Ratio),color="red",cex=3.5)
}
}
# my_grob = grobTree(textGrob("Confidence Region", x=0.8, y=0.9, hjust=0,gp=gpar(col="black", fontsize=12, fontface="bold")))
# CIPlot<-CIPlot + annotation_custom(my_grob)
print(CIPlot)
}
})
})
## File: /Windows/scripts/shinyGUI/server.R | repo: RahmanTeam/DECoN | language: R | license: permissive | 24,938 bytes
require(shiny)
shinyUI(fluidPage(navbarPage("Cancers", id="nav",
                tabPanel('Cancer Maps',
sidebarPanel(
selectInput('cancerm', 'Cancerm', names(Rate3)[-c(1,2)],selected=names(Rate3)[[3]])
),
mainPanel(
h4("Quantidade"),
plotOutput("plot2mapa"),
h4("Rate"),
plotOutput('plot1mapa'),
h4("SMR"),
plotOutput("plot3mapa")
)
),
                tabPanel('Descriptive statistics and general plots',
                         img(src="incidencia-de-canceres-analise.jpg", height = "1800px", width = "900px"),
                         p("The cancers are shown in decreasing order of frequency. The most common cancer was breast cancer among white women, followed by prostate cancer among white men. This apparently contradicts the expectation that lung cancer would be the most frequent; however, the SEER website indicates that the most frequent cancers are prostate and breast, surpassing even lung."),
                         p("In decreasing order, the most frequent cancers are breast among white women, prostate among white men, lung and bronchus among white men followed by white women, bladder among white men, and female breast in situ among white women."),
                         img(src="porcentagem-acumulada-da-incidencia-analise.jpg", height = "1800px", width = "900px"),
                         p("We see that 8 cancers account for 30% of the incidence. 50 cancers are needed to reach 90% of the incidence."),
                         img(src="mapa-de-correlacao-analise.jpg", height = "900px", width = "900px"),
                         p("There is a high correlation between the cancers, as can be seen in the heat map. The correlation is greater than 0.75 for 942 cancer combinations and greater than 0.9 for 68 combinations."),
                         img(src="correlacao-entre-os-pares-de-canceres-por-quantidade.jpg", height = "1800px", width = "900px"),
                         p("There are 47 pairs of cancers (female and male for the same cancer within the same race). We can observe that the correlation decays as the frequency of the cancers decreases. However, even with this decay, only 2 pairs of cancers showed a correlation below 0.5, though still above 0.4. 19 cancers have a correlation above 0.9 and 30 have a correlation above 0.8. In the plot of points ordered by correlation, we see that the correlation decays slowly up to the 43rd pair and drops sharply after it."),
                         img(src="correlacao-entre-os-pares-de-canceres-por-correlacao.jpg"),
                         p("The next tab contains the boxplots for count, Bayesian rate and SMR of each cancer.")
),
                tabPanel('Cancer Boxplots',
sidebarPanel(
selectInput('cancer', 'Cancer', names(Rate3)[-c(1,2)],selected=names(Rate3)[[3]])
),
mainPanel(
h4("Quantidade"),
plotOutput("plot2"),
h4("Rate"),
plotOutput('plot1'),
h4("SMR"),
plotOutput("plot3")
)
),
                tabPanel('Cancer Groups',
                         sidebarPanel(
                           numericInput('ngrupo', 'Number of Groups', value=2,min=1,max=100,step=1),
                           numericInput('grupo','Group of Interest',value=1,min=1,max=100,step=1),
                           p("Below is the plot showing the decay of the within-group sum of squares as the groups are formed. The calculation uses 1000 repetitions, because of the random seed of kmeans (see the commented sketch after this UI definition)."),
                           img(src="soma-dos-quadrados-interna.jpg"),
                           p("We see that there is no rapid decay followed by a plateau, as would be desired. However, there are two regimes: a rapid decay between 1 and 7 groups, and from the eighth onwards the rate of decay starts to slow down.")
                         ),
                         mainPanel(
                           h4("SMR of the group centroids"),
                           plotOutput("plot1g"),
                           h4("Cancer Table"),
                           dataTableOutput(outputId="tab1")
)
),
                tabPanel('Cancer Groups, altering the areas with values above 4',
                         sidebarPanel(
                           numericInput('ngrupoa', 'Number of Groups', value=2,min=1,max=100,step=1),
                           numericInput('grupoa','Group of Interest',value=1,min=1,max=100,step=1),
                           p("Below is the plot showing the decay of the within-group sum of squares as the groups are formed. The calculation uses 1000 repetitions, because of the random seed of kmeans."),
                           img(src="soma-dos-quadrados-interna-alterada.jpg"),
                           p("We see that there is no rapid decay followed by a plateau, as would be desired. However, there are two regimes: a rapid decay between 1 and 7 groups, and from the eighth onwards the rate of decay starts to slow down.")
                         ),
                         mainPanel(
                           h4("SMR of the group centroids"),
                           plotOutput("plot1ga"),
                           h4("Cancer Table"),
                           dataTableOutput(outputId="tab1a")
)
),
                tabPanel('Comparison between clusterings',
                         sidebarPanel(
                           selectInput('grupotab1', 'First Cluster', names(grupo.geral)),
                           numericInput('ngrupo1','Number of Clusters (First)',value=2,min=2,max=100,step=1),
                           selectInput('grupotab2', 'Second Cluster', names(grupo.geral)),
                           numericInput('ngrupo2','Number of Clusters (Second)',value=2,min=2,max=100,step=1)
                         ),
                         mainPanel(
                           h4("Table of Classifications"),
                           tableOutput(outputId="tab.comp"),
                           br(),
                           h4("Chi-Squared Test for the Groups"),
                           verbatimTextOutput(outputId="qui.teste")
)
)
)))
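# Editor sketch (hedged; kept as comments so the shinyUI() expression above remains this
# file's return value): the 'Cancer Groups' panels reference a within-group sum-of-squares
# plot built from repeated kmeans runs (1000 repetitions, to smooth out the random seed).
# Assuming the matrix used for clustering is available as `smr`, that curve could be
# generated roughly as follows; `smr` and the range 1:15 are assumptions, not from this file.
#   wss <- sapply(1:15, function(k)
#     mean(replicate(1000, kmeans(smr, centers = k)$tot.withinss)))
#   plot(1:15, wss, type = "b", xlab = "Number of groups", ylab = "Within-group sum of squares")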
## File: /ui.R | repo: Recca2012/geral | language: R | license: no_license | 7,720 bytes
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_local_gateway_route_table_vpc_associations}
\alias{ec2_describe_local_gateway_route_table_vpc_associations}
\title{Describes the specified associations between VPCs and local gateway
route tables}
\usage{
ec2_describe_local_gateway_route_table_vpc_associations(
LocalGatewayRouteTableVpcAssociationIds = NULL,
Filters = NULL,
MaxResults = NULL,
NextToken = NULL,
DryRun = NULL
)
}
\arguments{
\item{LocalGatewayRouteTableVpcAssociationIds}{The IDs of the associations.}
\item{Filters}{One or more filters.
\itemize{
\item \code{local-gateway-id} - The ID of a local gateway.
\item \code{local-gateway-route-table-arn} - The Amazon Resource Name (ARN) of
the local gateway route table for the association.
\item \code{local-gateway-route-table-id} - The ID of the local gateway route
table.
\item \code{local-gateway-route-table-vpc-association-id} - The ID of the
association.
\item \code{owner-id} - The ID of the Amazon Web Services account that owns the
local gateway route table for the association.
\item \code{state} - The state of the association.
\item \code{vpc-id} - The ID of the VPC.
}}
\item{MaxResults}{The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned \code{nextToken}
value.}
\item{NextToken}{The token for the next page of results.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Describes the specified associations between VPCs and local gateway route tables.
See \url{https://www.paws-r-sdk.com/docs/ec2_describe_local_gateway_route_table_vpc_associations/} for full documentation.
}
\keyword{internal}
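% Editor sketch (hedged, kept as Rd comments so this generated file still parses unchanged):
% a minimal illustration of calling this operation through a paws EC2 client. The client
% constructor and the example VPC id below are assumptions, not part of the documentation above.
% \dontrun{
%   svc <- paws.compute::ec2()
%   svc$describe_local_gateway_route_table_vpc_associations(
%     Filters = list(list(Name = "vpc-id", Values = list("vpc-0123456789abcdef0")))
%   )
% }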
## File: /cran/paws.compute/man/ec2_describe_local_gateway_route_table_vpc_associations.Rd | repo: paws-r/paws | language: R | license: permissive | generated file | 1,982 bytes
#Test 3
#================Test3===============================
folder<-"C:/Users/HPDM1/Documents/CanadaUofT/4thYear/BCB420/ekplektoR/R/"
fPath<-paste(folder,"FINDSUB_Test_Data.R",sep="")
source(fPath) #executing script generating test data. See script for variables generated, network structure
#delta set to 3.1, minOrd to 2
igraph::graph_attr(EGG,"delta")<-3.1 #setting delta parameter (score threshold for edge inclusion)
minOrd <- 2
#Here, we expect three subnetworks to form. UVW8 and XYZ9 have directed edges going each way of Influence 3.5
#Thus, they were eliminated in Test1 with a delta of 4
outputGraphs<-findsub(method="Leis",EGG,minOrd,noLog = TRUE,silent=FALSE)
#MethType: method of edge removal; Thresh: influence threshold; inputgraph: iGraph object containing
#network with assigned 'influence' to each edge; logResults: do we save a log of the process? ; silent
#if false, print what appears in the process log while the process is in action, as well as additional
#material (e.g. list edges removed, vertices removed)
#Note
#With a threshold influence of 3.5, LMN5 OPQ6 RST7 should form a network,
#ABC1,DEF2,GHI4,JKL3 should form a network
#When this is lowered to 3, LMN5,OPQ6,RST7,UVW8,and XYZ9 should form a network
#ABC1 DEF2 JKL3 GHI4 from same subnetwork
#Lower further to 2, and the 'discovered subnetwork' will include all vertices, but not all edges
#Lower influence threshold (delta) to 1, and every edge included
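# Editor sketch (hedged): the comments above describe pruning edges whose influence falls
# below the graph attribute "delta". In igraph that step could look like the guarded block
# below; the edge attribute name "Influence" is an assumption, not taken from this file,
# so the block is wrapped in if (FALSE) and never executes when the test is sourced.
if (FALSE) {
  delta <- igraph::graph_attr(EGG, "delta")
  EGG_pruned <- igraph::delete_edges(EGG, igraph::E(EGG)[Influence < delta])
  igraph::ecount(EGG_pruned) #number of edges surviving the threshold
}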
#below 6 lines: set up expected edges and vertices
expectNet1Edges<-BigNetEdges[1:10,]
expectNet1Edges<-expectNet1Edges[order(expectNet1Edges$edgeID),]
expectNet1Verts<-BigNetVertices[1:4,]
expectNet2Edges<-BigNetEdges[c(15:16,19:20),]
expectNet2Edges<-expectNet2Edges[order(expectNet2Edges$edgeID),]
expectNet2Verts<-BigNetVertices[5:7,]
expectNet3Edges<-BigNetEdges[21:22,]
expectNet3Edges<-expectNet3Edges[order(expectNet3Edges$edgeID),]
expectNet3Verts<-BigNetVertices[8:9,]
#below 9 lines: set up edges and vertices from output as data frames
netwk1<-outputGraphs[[1]]
netwk1Edges<-as_data_frame(netwk1,what="edges")
netwk1Verts<-as_data_frame(netwk1,what="vertices")
netwk2<-outputGraphs[[2]]
netwk2Edges<-as_data_frame(netwk2,what="edges")
netwk2Verts<-as_data_frame(netwk2,what="vertices")
netwk3<-outputGraphs[[3]]
netwk3Edges<-as_data_frame(netwk3,what="edges")
netwk3Verts<-as_data_frame(netwk3,what="vertices")
#edgeCompar: vector of falses with length(ncol(expectNetXEdges)). If all column in the expected
#dataset are identical to that of the test dataset (netwkXEdges), edgeCompar becomes a vector of
#TRUES with the same length. If this condition is fulfilled, then we know the output for
#the given network is identical to the expected edges.
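# Editor sketch (hedged, not used by the test): the column-by-column loops below amount to
# checking that two data frames have identical contents; an equivalent helper, ignoring the
# row-name attributes introduced by subsetting, would be:
same_table <- function(a, b) {
  isTRUE(all.equal(a, b, check.attributes = FALSE))
}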
edgeCompar1<-vector(length=ncol(expectNet1Edges))
if (ncol(expectNet1Edges)==ncol(netwk1Edges) && nrow(expectNet1Edges)==nrow(netwk1Edges)) {
for (i in 1:ncol(expectNet1Edges)) {
truRows<-expectNet1Edges[,i]==netwk1Edges[,i]
sumTruRows<-sum(as.numeric(truRows))
if (sumTruRows==nrow(expectNet1Edges)) {
edgeCompar1[i]<-TRUE
}
}
}
edgeCompar2<-vector(length=ncol(expectNet2Edges))
if (ncol(expectNet2Edges)==ncol(netwk2Edges) && nrow(expectNet2Edges)==nrow(netwk2Edges)) {
for (i in 1:ncol(expectNet2Edges)) {
truRows<-expectNet2Edges[,i]==netwk2Edges[,i]
sumTruRows<-sum(as.numeric(truRows))
if (sumTruRows==nrow(expectNet2Edges)) {
edgeCompar2[i]<-TRUE
}
}
}
edgeCompar3<-vector(length=ncol(expectNet3Edges))
if (ncol(expectNet3Edges)==ncol(netwk3Edges) && nrow(expectNet3Edges)==nrow(netwk3Edges)) {
  for (i in 1:ncol(expectNet3Edges)) { #fixed copy-paste error: loop over the columns of the third expected edge table
truRows<-expectNet3Edges[,i]==netwk3Edges[,i]
sumTruRows<-sum(as.numeric(truRows))
if (sumTruRows==nrow(expectNet3Edges)) {
edgeCompar3[i]<-TRUE
}
}
}
#vertCompar: because of how unique() sorts things, you can get your vertices out of original order
#Therefore, the aim here is to see that for every vertex/gene score pair in the expected vertex dataframe
#is present in the output vertex data frame (by vertex dataframe I mean dataframe for vertices of
#a given network, expected or produced by the FINDSUB function)
#If that condition is fulfilled, AND the nrow of the expected vertex set is the same as that of the
#output vertex set, then you know that the two data frames of vertices have identical contents, and
# by extension the network's iGraph object's vertices/attributes were added correctly
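# Editor sketch (hedged, not used by the test): the pairwise matching described above can also
# be expressed with merge(); if every expected (HGNC_symbol, Gene_Score) pair appears in the
# output vertex frame and the row counts agree, the two vertex sets are identical. Column
# names follow the expected/output data frames used below.
verts_match <- function(expected, observed) {
  hits <- merge(expected[, c("HGNC_symbol", "Gene_Score")],
                observed[, c("name", "Gene_Score")],
                by.x = c("HGNC_symbol", "Gene_Score"),
                by.y = c("name", "Gene_Score"))
  nrow(hits) == nrow(expected) && nrow(expected) == nrow(observed)
}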
vertCompar1<-vector(length=nrow(expectNet1Verts))
for (i in 1:nrow(expectNet1Verts)) {
ind1<-grepl(expectNet1Verts$HGNC_symbol[i],netwk1Verts$name)
if (sum(as.numeric(ind1))>0) {
if (netwk1Verts$Gene_Score[i]==expectNet1Verts$Gene_Score[ind1]) {
vertCompar1[i]<-TRUE
}
}
}
vertCompar2<-vector(length=nrow(expectNet2Verts))
for (i in 1:nrow(expectNet2Verts)) {
ind2<-grepl(expectNet2Verts$HGNC_symbol[i],netwk2Verts$name)
  if (sum(as.numeric(ind2))>0) { #fixed copy-paste error: test ind2, the match vector for network 2
if (netwk2Verts$Gene_Score[i]==expectNet2Verts$Gene_Score[ind2]) {
vertCompar2[i]<-TRUE
}
}
}
vertCompar3<-vector(length=nrow(expectNet3Verts))
for (i in 1:nrow(expectNet3Verts)) {
ind3<-grepl(expectNet3Verts$HGNC_symbol[i],netwk3Verts$name)
  if (sum(as.numeric(ind3))>0) { #fixed copy-paste error: test ind3, the match vector for network 3
if (netwk3Verts$Gene_Score[i]==expectNet3Verts$Gene_Score[ind3]) {
vertCompar3[i]<-TRUE
}
}
}
context("Check that you have 3 subnetworks")
test_that("3 subnetworks created with expected edges and vertices", {
expect_equal(nrow(expectNet1Verts),nrow(netwk1Verts))
expect_equal(nrow(expectNet2Verts),nrow(netwk2Verts))
expect_equal(nrow(expectNet3Verts),nrow(netwk3Verts))
expect_equal(sum(as.numeric(vertCompar1)),nrow(expectNet1Verts))
expect_equal(sum(as.numeric(vertCompar2)),nrow(expectNet2Verts))
expect_equal(sum(as.numeric(vertCompar3)),nrow(expectNet3Verts))
expect_equal(sum(as.numeric(edgeCompar1)),ncol(expectNet1Edges))
expect_equal(sum(as.numeric(edgeCompar2)),ncol(expectNet2Edges))
expect_equal(sum(as.numeric(edgeCompar3)),ncol(expectNet3Edges))
expect_equal(length(outputGraphs),3)
})
context("Check Heat score")
test_that("Heat scores are as expected", {
expect_equal(graph_attr(netwk1,"aggHeatScore"),22)
expect_equal(graph_attr(netwk2,"aggHeatScore"),17)
expect_equal(graph_attr(netwk3,"aggHeatScore"),14)
})
#[END]
## File: /R/FINSUB_Test3.R | repo: whitleyo/reteFINDSUB | language: R | license: no_license | 6,451 bytes
#Test 3
#================Test3===============================
folder<-"C:/Users/HPDM1/Documents/CanadaUofT/4thYear/BCB420/ekplektoR/R/"
fPath<-paste(folder,"FINDSUB_Test_Data.R",sep="")
source(fPath) #executing script generating test data. See script for variables generated, network structure
#delta set to 3.1, minOrd to 2
igraph::graph_attr(EGG,"delta")<-3.1 #setting delta parameter (score threshold for edge inclusion)
minOrd <- 2
#Here, we expect three subnetworks to form. UVW8 and XYZ9 have directed edges going each way of Influence 3.5
#Thus, they were eliminated in Test1 with a delta of 4
outputGraphs<-findsub(method="Leis",EGG,minOrd,noLog = TRUE,silent=FALSE)
#MethType: method of edge removal; Thresh: influence threshold; inputgraph: iGraph object containing
#network with assigned 'influence' to each edge; logResults: do we save a log of the process? ; silent
#if false, print what appears in the process log while the process is in action, as well as additional
#material (e.g. list edges removed, vertices removed)
#Note
#With a threshold influence of 3.5, LMN5 OPQ6 RST7 should form a network,
#ABC1,DEF2,GHI4,JKL3 should form a network
#When this is lowered to 3, LMN5,OPQ6,RST7,UVW8,and XYZ9 should form a network
#ABC1 DEF2 JKL3 GHI4 from same subnetwork
#Lower further to 2, and the 'discovered subnetwork' will include all vertices, but not all edges
#Lower influence threshold (delta) to 1, and every edge included
#below 6 lines: set up expected edges and vertices
expectNet1Edges<-BigNetEdges[1:10,]
expectNet1Edges<-expectNet1Edges[order(expectNet1Edges$edgeID),]
expectNet1Verts<-BigNetVertices[1:4,]
expectNet2Edges<-BigNetEdges[c(15:16,19:20),]
expectNet2Edges<-expectNet2Edges[order(expectNet2Edges$edgeID),]
expectNet2Verts<-BigNetVertices[5:7,]
expectNet3Edges<-BigNetEdges[21:22,]
expectNet3Edges<-expectNet3Edges[order(expectNet3Edges$edgeID),]
expectNet3Verts<-BigNetVertices[8:9,]
#below 9 lines: set up edges and vertices from output as data frames
netwk1<-outputGraphs[[1]]
netwk1Edges<-as_data_frame(netwk1,what="edges")
netwk1Verts<-as_data_frame(netwk1,what="vertices")
netwk2<-outputGraphs[[2]]
netwk2Edges<-as_data_frame(netwk2,what="edges")
netwk2Verts<-as_data_frame(netwk2,what="vertices")
netwk3<-outputGraphs[[3]]
netwk3Edges<-as_data_frame(netwk3,what="edges")
netwk3Verts<-as_data_frame(netwk3,what="vertices")
#edgeCompar: vector of FALSEs of length ncol(expectNetXEdges). If all columns in the expected
#dataset are identical to those of the test dataset (netwkXEdges), edgeCompar becomes a vector of
#TRUEs of the same length. If this condition is fulfilled, then we know the output for
#the given network is identical to the expected edges.
edgeCompar1<-vector(length=ncol(expectNet1Edges))
if (ncol(expectNet1Edges)==ncol(netwk1Edges) && nrow(expectNet1Edges)==nrow(netwk1Edges)) {
for (i in 1:ncol(expectNet1Edges)) {
truRows<-expectNet1Edges[,i]==netwk1Edges[,i]
sumTruRows<-sum(as.numeric(truRows))
if (sumTruRows==nrow(expectNet1Edges)) {
edgeCompar1[i]<-TRUE
}
}
}
edgeCompar2<-vector(length=ncol(expectNet2Edges))
if (ncol(expectNet2Edges)==ncol(netwk2Edges) && nrow(expectNet2Edges)==nrow(netwk2Edges)) {
for (i in 1:ncol(expectNet2Edges)) {
truRows<-expectNet2Edges[,i]==netwk2Edges[,i]
sumTruRows<-sum(as.numeric(truRows))
if (sumTruRows==nrow(expectNet2Edges)) {
edgeCompar2[i]<-TRUE
}
}
}
edgeCompar3<-vector(length=ncol(expectNet3Edges))
if (ncol(expectNet3Edges)==ncol(netwk3Edges) && nrow(expectNet3Edges)==nrow(netwk3Edges)) {
for (i in 1:ncol(expectNet3Edges)) {
truRows<-expectNet3Edges[,i]==netwk3Edges[,i]
sumTruRows<-sum(as.numeric(truRows))
if (sumTruRows==nrow(expectNet3Edges)) {
edgeCompar3[i]<-TRUE
}
}
}
#vertCompar: because of how unique() sorts things, you can get your vertices out of original order
#Therefore, the aim here is to check that every vertex/gene score pair in the expected vertex dataframe
#is present in the output vertex data frame (by vertex dataframe I mean the dataframe for vertices of
#a given network, expected or produced by the FINDSUB function)
#If that condition is fulfilled, AND the nrow of the expected vertex set is the same as that of the
#output vertex set, then you know that the two data frames of vertices have identical contents, and
# by extension the network's iGraph object's vertices/attributes were added correctly
vertCompar1<-vector(length=nrow(expectNet1Verts))
for (i in 1:nrow(expectNet1Verts)) {
ind1<-grepl(expectNet1Verts$HGNC_symbol[i],netwk1Verts$name)
if (sum(as.numeric(ind1))>0) {
if (netwk1Verts$Gene_Score[ind1]==expectNet1Verts$Gene_Score[i]) {
vertCompar1[i]<-TRUE
}
}
}
vertCompar2<-vector(length=nrow(expectNet2Verts))
for (i in 1:nrow(expectNet2Verts)) {
ind2<-grepl(expectNet2Verts$HGNC_symbol[i],netwk2Verts$name)
if (sum(as.numeric(ind2))>0) {
if (netwk2Verts$Gene_Score[ind2]==expectNet2Verts$Gene_Score[i]) {
vertCompar2[i]<-TRUE
}
}
}
vertCompar3<-vector(length=nrow(expectNet3Verts))
for (i in 1:nrow(expectNet3Verts)) {
ind3<-grepl(expectNet3Verts$HGNC_symbol[i],netwk3Verts$name)
if (sum(as.numeric(ind3))>0) {
if (netwk3Verts$Gene_Score[ind3]==expectNet3Verts$Gene_Score[i]) {
vertCompar3[i]<-TRUE
}
}
}
context("Check that you have 3 subnetworks")
test_that("3 subnetworks created with expected edges and vertices", {
expect_equal(nrow(expectNet1Verts),nrow(netwk1Verts))
expect_equal(nrow(expectNet2Verts),nrow(netwk2Verts))
expect_equal(nrow(expectNet3Verts),nrow(netwk3Verts))
expect_equal(sum(as.numeric(vertCompar1)),nrow(expectNet1Verts))
expect_equal(sum(as.numeric(vertCompar2)),nrow(expectNet2Verts))
expect_equal(sum(as.numeric(vertCompar3)),nrow(expectNet3Verts))
expect_equal(sum(as.numeric(edgeCompar1)),ncol(expectNet1Edges))
expect_equal(sum(as.numeric(edgeCompar2)),ncol(expectNet2Edges))
expect_equal(sum(as.numeric(edgeCompar3)),ncol(expectNet3Edges))
expect_equal(length(outputGraphs),3)
})
context("Check Heat score")
test_that("Heat scores are as expected", {
expect_equal(graph_attr(netwk1,"aggHeatScore"),22)
expect_equal(graph_attr(netwk2,"aggHeatScore"),17)
expect_equal(graph_attr(netwk3,"aggHeatScore"),14)
})
#[END]
|
#want to determine whether the number and type of TF motifs are different in the different triad categories
#first read in all the triad sets
all_111_triads <- "W:/Jemima/companion_paper/triads_1-1-1_HC_only.tsv"
all_111_triads_stacked <- "W:/Jemima/companion_paper/triads_1-1-1_HC_only_stacked.tsv"
fimo_unique_path <- "W:/Jemima/companion_paper/out_2018_03_12_12_03/fimo_results/IWGSCv1.0_UTR.HC.firstCDS_consensus._1500bp_upstream_triads_only.fa/unique_hits_analysis/fimo_unique_TF_motifs_per_gene.txt"
#then read in the promoter motif calling
#start with 1.5 kb (1500 bp) upstream, but will do 2kb upstream eventually as well
fimo_unique <- read.table(fimo_unique_path, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
triads <- read.table(all_111_triads, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
triads_stacked <- read.table(all_111_triads_stacked, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
#extract only the fimo results in triads
fimo_triads <- subset(fimo_unique, gene %in% unique(triads_stacked$gene))
#fimo_triads_ids <- merge(triads_stacked, fimo_triads, all.x = TRUE, all.y = FALSE)
#Now we need to do the breakdown of motif per triads
triad_ids <- unique(triads_stacked$group_id)
motif_ids <- unique(fimo_triads$motif_id)
motif_triad_categ <- data.frame(group_id = character(), motif_id = character(), motif_cat = character())
for (i in seq(1, length(triad_ids))){
triad <- triad_ids[i]
group_id <- rep(triad, times = length(motif_ids))
motif_id <- character()
motif_cat <- character()
triad_genes <- triads_stacked[triads_stacked$group_id == triad,]
#extract all the motifs
motifs_triad <- subset(fimo_triads, gene %in% triad_genes$gene)
motifs_triad_id <- merge(motifs_triad, triad_genes)
for (j in seq(1, length(motif_ids))){
motif <- motif_ids[j]
triad_motif <- motifs_triad_id[motifs_triad_id$motif_id == motif,]
motif_no <- nrow(triad_motif)
if (motif_no == 0){
motif_categ <- "none"
} else if (motif_no == 1){
motif_categ <- triad_motif[1,"genome"]
} else {
motif_categ <- paste(triad_motif$genome, collapse = "")
}
motif_id[j] <- motif
motif_cat[j] <- motif_categ
}
triad_motif_cats <- data.frame(group_id, motif_id, motif_cat)
motif_triad_categ <- rbind(motif_triad_categ, triad_motif_cats)
}
#correct for things that are the wrong way around
motif_triad_categ$motif_cat <- gsub("BA", "AB", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("DA", "AD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("BDA", "ABD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("ADB", "ABD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("DBA", "ABD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("DB", "BD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("BAD", "ABD", motif_triad_categ$motif_cat)
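#Editor's note (not in the original script): the order of the substitutions above matters, because the
#two-letter rules rewrite three-letter strings before their own rules can match (e.g. "DBA" -> "DAB" -> "ADB" -> "ABD").
#A hypothetical order-independent alternative would sort the letters of each non-"none" category, e.g.:
# canon <- function(x) ifelse(x == "none", x,
# sapply(strsplit(x, ""), function(ch) paste(sort(ch), collapse = "")))
# canon("DBA") # "ABD"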
motif_triad_categ$simple_category <- motif_triad_categ$motif_cat
motif_triad_categ$simple_category <- gsub("\\<ABD\\>", "all_same", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<none\\>", "all_same", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<BD\\>", "A_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<AD\\>", "B_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<AB\\>", "D_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<D\\>", "D_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<B\\>", "B_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<A\\>", "A_diff", motif_triad_categ$simple_category)
motif_triad_categ_dedup <- unique(motif_triad_categ)
write.table(motif_triad_categ_dedup, file = "W:/Jemima/companion_paper/TF_motifs/triad_categories_per_motif_stacked_1500bp.tsv", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE)
summarised_categories <- data.frame(table(motif_triad_categ$group_id, motif_triad_categ$motif_cat))
colnames(summarised_categories) <- c("group_id", "motif_cat", "count")
summarised_simple_categories <- data.frame(table(motif_triad_categ_dedup$group_id, motif_triad_categ_dedup$simple_category))
colnames(summarised_simple_categories) <- c("group_id", "simple_category", "count")
write.table(summarised_categories, file = "W:/Jemima/companion_paper/TF_motifs/triad_categories_per_triad_stacked_1500bp.tsv", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE)
write.table(summarised_simple_categories, file = "W:/Jemima/companion_paper/TF_motifs/triad_categories_per_triad_stacked_simple_categories_1500bp.tsv", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE)
#create a simple summary with no none values included
motif_triad_categ_dedup_no_none <- motif_triad_categ_dedup[!motif_triad_categ_dedup$motif_cat == "none",]
summarised_simple_categories_no_none <- data.frame(table(motif_triad_categ_dedup_no_none$group_id, motif_triad_categ_dedup_no_none$simple_category))
colnames(summarised_simple_categories_no_none) <- c("group_id", "simple_category", "count")
write.table(summarised_simple_categories_no_none, file = "W:/Jemima/companion_paper/TF_motifs/triad_categories_per_triad_stacked_simple_categories_no_none_1500bp.tsv", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE)
|
/scripts/TFBS/generate_TF_motif_groupings_1500bp.r
|
no_license
|
Musketeer-D/WheatHomoeologExpression
|
R
| false | false | 5,523 |
r
|
#want to determine whether the number and type of TF motifs are different in the different triad categories
#first read in all the triad sets
all_111_triads <- "W:/Jemima/companion_paper/triads_1-1-1_HC_only.tsv"
all_111_triads_stacked <- "W:/Jemima/companion_paper/triads_1-1-1_HC_only_stacked.tsv"
fimo_unique_path <- "W:/Jemima/companion_paper/out_2018_03_12_12_03/fimo_results/IWGSCv1.0_UTR.HC.firstCDS_consensus._1500bp_upstream_triads_only.fa/unique_hits_analysis/fimo_unique_TF_motifs_per_gene.txt"
#then read in the promoter motif calling
#start with 1.5 kb (1500 bp) upstream, but will do 2kb upstream eventually as well
fimo_unique <- read.table(fimo_unique_path, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
triads <- read.table(all_111_triads, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
triads_stacked <- read.table(all_111_triads_stacked, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
#extract only the fimo results in triads
fimo_triads <- subset(fimo_unique, gene %in% unique(triads_stacked$gene))
#fimo_triads_ids <- merge(triads_stacked, fimo_triads, all.x = TRUE, all.y = FALSE)
#Now we need to do the breakdown of motif per triads
triad_ids <- unique(triads_stacked$group_id)
motif_ids <- unique(fimo_triads$motif_id)
motif_triad_categ <- data.frame(group_id = character(), motif_id = character(), motif_cat = character())
for (i in seq(1, length(triad_ids))){
triad <- triad_ids[i]
group_id <- rep(triad, times = length(motif_ids))
motif_id <- character()
motif_cat <- character()
triad_genes <- triads_stacked[triads_stacked$group_id == triad,]
#extract all the motifs
motifs_triad <- subset(fimo_triads, gene %in% triad_genes$gene)
motifs_triad_id <- merge(motifs_triad, triad_genes)
for (j in seq(1, length(motif_ids))){
motif <- motif_ids[j]
triad_motif <- motifs_triad_id[motifs_triad_id$motif_id == motif,]
motif_no <- nrow(triad_motif)
if (motif_no == 0){
motif_categ <- "none"
} else if (motif_no == 1){
motif_categ <- triad_motif[1,"genome"]
} else {
motif_categ <- paste(triad_motif$genome, collapse = "")
}
motif_id[j] <- motif
motif_cat[j] <- motif_categ
}
triad_motif_cats <- data.frame(group_id, motif_id, motif_cat)
motif_triad_categ <- rbind(motif_triad_categ, triad_motif_cats)
}
#correct for things that are the wrong way around
motif_triad_categ$motif_cat <- gsub("BA", "AB", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("DA", "AD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("BDA", "ABD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("ADB", "ABD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("DBA", "ABD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("DB", "BD", motif_triad_categ$motif_cat)
motif_triad_categ$motif_cat <- gsub("BAD", "ABD", motif_triad_categ$motif_cat)
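#Editor's note (not in the original script): the order of the substitutions above matters, because the
#two-letter rules rewrite three-letter strings before their own rules can match (e.g. "DBA" -> "DAB" -> "ADB" -> "ABD").
#A hypothetical order-independent alternative would sort the letters of each non-"none" category, e.g.:
# canon <- function(x) ifelse(x == "none", x,
# sapply(strsplit(x, ""), function(ch) paste(sort(ch), collapse = "")))
# canon("DBA") # "ABD"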
motif_triad_categ$simple_category <- motif_triad_categ$motif_cat
motif_triad_categ$simple_category <- gsub("\\<ABD\\>", "all_same", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<none\\>", "all_same", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<BD\\>", "A_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<AD\\>", "B_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<AB\\>", "D_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<D\\>", "D_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<B\\>", "B_diff", motif_triad_categ$simple_category)
motif_triad_categ$simple_category <- gsub("\\<A\\>", "A_diff", motif_triad_categ$simple_category)
motif_triad_categ_dedup <- unique(motif_triad_categ)
write.table(motif_triad_categ_dedup, file = "W:/Jemima/companion_paper/TF_motifs/triad_categories_per_motif_stacked_1500bp.tsv", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE)
summarised_categories <- data.frame(table(motif_triad_categ$group_id, motif_triad_categ$motif_cat))
colnames(summarised_categories) <- c("group_id", "motif_cat", "count")
summarised_simple_categories <- data.frame(table(motif_triad_categ_dedup$group_id, motif_triad_categ_dedup$simple_category))
colnames(summarised_simple_categories) <- c("group_id", "simple_category", "count")
write.table(summarised_categories, file = "W:/Jemima/companion_paper/TF_motifs/triad_categories_per_triad_stacked_1500bp.tsv", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE)
write.table(summarised_simple_categories, file = "W:/Jemima/companion_paper/TF_motifs/triad_categories_per_triad_stacked_simple_categories_1500bp.tsv", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE)
#create a simple summary with no none values included
motif_triad_categ_dedup_no_none <- motif_triad_categ_dedup[!motif_triad_categ_dedup$motif_cat == "none",]
summarised_simple_categories_no_none <- data.frame(table(motif_triad_categ_dedup_no_none$group_id, motif_triad_categ_dedup_no_none$simple_category))
colnames(summarised_simple_categories_no_none) <- c("group_id", "simple_category", "count")
write.table(summarised_simple_categories_no_none, file = "W:/Jemima/companion_paper/TF_motifs/triad_categories_per_triad_stacked_simple_categories_no_none_1500bp.tsv", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE)
|
# Read in Arlequin jackknifes from 1/12/10
# Calc RMA regression and output to file
setwd("/Users/mpinsky/Documents/Stanford/Philippines/2008/Genotyping/Analysis/APCL/090507/Arlequin/")
source("../../../readArlFstBatch 090415.R")
library(vegan)
library(smatr)
file = "jackknife/FSTdist.sum"
# read in geographic distance
geo = read.csv("../Aclarkii_2009-05-14 geo.csv", row.names=1)
cebugeo = as.matrix(geo[1:10,1:10])
leytegeo = as.matrix(geo[11:18, 11:18])
# read in fsts
fsts = readArlFstBatch(file)
num = length(fsts)
mant = data.frame(b = numeric(num), r = numeric(num), p = numeric(num), region=character(num))
mant$region = as.character(mant$region)
for(i in 1:num){
thesefsts = as.matrix(fsts[[i]])
thesefsts = thesefsts/(1-thesefsts) # linearize fst
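# Editor's note (added): FST/(1-FST) is the standard linearisation used when regressing
# genetic differentiation against geographic distance (isolation by distance, Rousset 1997)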
if(length(grep("Cebu", names(fsts)[i]))>0){ # if Cebu
m = mantel(cebugeo, thesefsts, method="pearson")
l = line.cis(y=thesefsts[lower.tri(thesefsts)], x= cebugeo[lower.tri(cebugeo)])
mant$region[i] = "Cebu"
}
if(length(grep("Leyte", names(fsts)[i]))>0){ # if Leyte
m = mantel(leytegeo, thesefsts, method="pearson")
l = line.cis(y=thesefsts[lower.tri(thesefsts)], x= leytegeo[lower.tri(leytegeo)])
mant$region[i] = "Leyte"
}
mant$p[i] = m$signif
mant$r[i] = m$statistic
mant$b[i] = l$coef[2]
}
row.names(mant) = names(fsts)
write.csv(mant, paste("JackKnifeResults",Sys.Date(),".csv", sep=""))
# diagnostic plots
hist(mant$b[mant$region=="Cebu"], breaks=20)
|
/Genotyping/Analysis/APCL/090507/Arlequin/readArlJackknifes 100112.R
|
permissive
|
pinskylab/PhilippinesAnemonefish2008
|
R
| false | false | 1,449 |
r
|
# Read in Arlequin jackknifes from 1/12/10
# Calc RMA regression and output to file
setwd("/Users/mpinsky/Documents/Stanford/Philippines/2008/Genotyping/Analysis/APCL/090507/Arlequin/")
source("../../../readArlFstBatch 090415.R")
library(vegan)
library(smatr)
file = "jackknife/FSTdist.sum"
# read in geographic distance
geo = read.csv("../Aclarkii_2009-05-14 geo.csv", row.names=1)
cebugeo = as.matrix(geo[1:10,1:10])
leytegeo = as.matrix(geo[11:18, 11:18])
# read in fsts
fsts = readArlFstBatch(file)
num = length(fsts)
mant = data.frame(b = numeric(num), r = numeric(num), p = numeric(num), region=character(num))
mant$region = as.character(mant$region)
for(i in 1:num){
thesefsts = as.matrix(fsts[[i]])
thesefsts = thesefsts/(1-thesefsts) # linearize fst
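# Editor's note (added): FST/(1-FST) is the standard linearisation used when regressing
# genetic differentiation against geographic distance (isolation by distance, Rousset 1997)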
if(length(grep("Cebu", names(fsts)[i]))>0){ # if Cebu
m = mantel(cebugeo, thesefsts, method="pearson")
l = line.cis(y=thesefsts[lower.tri(thesefsts)], x= cebugeo[lower.tri(cebugeo)])
mant$region[i] = "Cebu"
}
if(length(grep("Leyte", names(fsts)[i]))>0){ # if Leyte
m = mantel(leytegeo, thesefsts, method="pearson")
l = line.cis(y=thesefsts[lower.tri(thesefsts)], x= leytegeo[lower.tri(leytegeo)])
mant$region[i] = "Leyte"
}
mant$p[i] = m$signif
mant$r[i] = m$statistic
mant$b[i] = l$coef[2]
}
row.names(mant) = names(fsts)
write.csv(mant, paste("JackKnifeResults",Sys.Date(),".csv", sep=""))
# diagnostic plots
hist(mant$b[mant$region=="Cebu"], breaks=20)
|
library(igraph)
library(Matrix)
mmatrix <- matrix(c(0,0.000386665749550218,0.0089162378817095,0.00125239665365081,0.00571388670360401,0.0271825279137305,0.0151792221956226,0.00841610623478111,0.00745645221609359,0.00915116098900969,0.0161713918217335,0.0127182942388623,0.00733954382498813,0.00510186215637128,0.00125125118758389,0.0197010580604511,0.0048080569765917,0,0,0,0,0.00146983692656043,0,0,0,0,0.00528711614835667,0,0.00524557795202013,0.000386665749550218,0,0.0037755571155261,0.00112648136196275,0.000483906012542723,0.00457935406511062,0.0189869295828335,0.00750918829914344,0.0105765817151743,0.00358950580682936,0.0182042971183108,0.00324555150979055,0.000828776060958619,0.00510831769449959,0.00103848438759091,0.0166002660898445,0,0,0,0.000533984764913786,0,0.00132778351057615,0,0,0,0,0.0122224248155727,0,0.00215108199463338,0.0089162378817095,0.0037755571155261,0,0.000702810094006201,0.00232469361739829,0.00616981003776192,0.00426414853423318,0.0100835437863768,0.000601877357118082,0.00190356436325524,0.0069958757261915,0.00526561484467535,0,0.00173412206078099,0,0.00477431184955667,0,0,0,0,0,0,0,0,0,0,0.00161823020797657,0,0.00134789197843045,0.00125239665365081,0.00112648136196275,0.000702810094006201,0,0,0.00906894626358056,0.00831839277575157,0.00417464265927406,0.0117089991491381,0.00809622417252334,0.0126814856682681,0.00513528294796573,0.0054613172539932,0.00581535413984163,0.00224872160913446,0.0149605697060244,0.00227393056614436,0.000426978646758249,0,0,0,0.000556117721703494,0.00153699451492292,0,0,0,0.00275895883952282,0,0.00245473823674855,0.00571388670360401,0.000483906012542723,0.00232469361739829,0,0,0.0136477018270643,0.00113074727651417,0.00414670180781662,0.000223959141794321,0.0103331114723106,0.00724847631752466,0.0081073714377548,0.00612354794869377,0.00323286027279988,0.000782960442678707,0.00460749863790614,0,0,0,0,0,0,0,0,0,0,0.000657627208553685,0,0.00431891912016351,0.0271825279137305,0.00457935406511062,0.00616981003776192,0.00906894626358056,0.0136477018270643,0,0.0481286297378402,0.08399643596187,0.0162965101965557,0.0200874046628238,0.0526809593598904,0.0991709605629716,0.00445520586874179,0.0181954711431042,0.004565185116383,0.0625738968097483,0.00069367133337333,0.00238794400608451,0.00318089919616704,0.00257739680406785,0,0.00391127970262102,0.000607789433545311,0,0,0,0.0166926609405317,0,0.00652347286308901,0.0151792221956226,0.0189869295828335,0.00426414853423318,0.00831839277575157,0.00113074727651417,0.0481286297378402,0,0.0294457240121131,0.0433501258069067,0.0222217693030455,0.0689019428220739,0.0325864103878595,0.0133625954656555,0.0232342914961071,0.00402788437854488,0.108139543703767,0.00745332636785608,0.00163773611898964,0.0016967787834815,0.00415922240390994,0,0.0115056343503632,0,0,0,0,0.0383085677705769,0,0.0129303643892581,0.00841610623478111,0.00750918829914344,0.0100835437863768,0.00417464265927406,0.00414670180781662,0.08399643596187,0.0294457240121131,0,0.0168279143679243,0.0118842750053832,0.0271108272108017,0.018337947883559,0.00383506514619379,0.00673018884434486,0.00366857201694062,0.0466339849173185,0.00139572795829799,0.000982790619933932,0.00293344790952753,0,0,0.00305785953344081,0,0,0,0,0.0143787176515509,0,0.00392705511859761,0.00745645221609359,0.0105765817151743,0.000601877357118082,0.0117089991491381,0.000223959141794321,0.0162965101965557,0.0433501258069067,0.0168279143679243,0,0.0088745352014167,0.0289655818393987,0.0167001496660237,0.00503755663242696,0.00694243508103071,0.0011116532204211,0.0591224977071608,0.0115585496694893,0.00035386347030308
8,0.00076815774196397,0.000411893930438181,0,0.0116758537988399,0.00431677311656683,0,0,0,0.0215605382378359,0,0.00470363107342849,0.00915116098900969,0.00358950580682936,0.00190356436325524,0.00809622417252334,0.0103331114723106,0.0200874046628238,0.0222217693030455,0.0118842750053832,0.0088745352014167,0,0.0325837653340463,0.0254064181957125,0.0106183868786443,0.0214307409924143,0.00452557730730302,0.0698925644169922,0,0,0.00110754850087255,0.0059387867914857,0,0.000590685254912491,0,0,0,0,0.00800188840926637,0,0.00807160160248371,0.0161713918217335,0.0182042971183108,0.0069958757261915,0.0126814856682681,0.00724847631752466,0.0526809593598904,0.0689019428220739,0.0271108272108017,0.0289655818393987,0.0325837653340463,0,0.087993115124211,0.014840914042358,0.0757405233860745,0.0184828519039751,0.0740081464872808,0.0109546689440226,0.0029037685144904,0.0097456603196083,0.0106950265164466,0,0.0136772098440261,0.0052944734084333,0,0,0,0.0532146365532304,0,0.0338206273789281,0.0127182942388623,0.00324555150979055,0.00526561484467535,0.00513528294796573,0.0081073714377548,0.0991709605629716,0.0325864103878595,0.018337947883559,0.0167001496660237,0.0254064181957125,0.087993115124211,0,0.00420312735164954,0.0218650275927043,0.00543556213641944,0.0286940002926094,0.00162255340206219,0.000796293407460614,0.0152189605562937,0.00140240737901115,0,0.00583198412961861,0.000332338134424545,0,0,0,0.00864877636554844,0,0.0108075468631893,0.00733954382498813,0.000828776060958619,0,0.0054613172539932,0.00612354794869377,0.00445520586874179,0.0133625954656555,0.00383506514619379,0.00503755663242696,0.0106183868786443,0.014840914042358,0.00420312735164954,0,0.00816686044662259,0.00268192109521428,0.0136448890961324,0,0,0,0,0,0,0.0145119234867097,0,0,0,0.00351753667643504,0,0.00118350809726861,0.00510186215637128,0.00510831769449959,0.00173412206078099,0.00581535413984163,0.00323286027279988,0.0181954711431042,0.0232342914961071,0.00673018884434486,0.00694243508103071,0.0214307409924143,0.0757405233860745,0.0218650275927043,0.00816686044662259,0,0.0180880069719006,0.030192431937561,0.00170021926353316,0.0035915871730527,0.0103953670438899,0.00847263303009105,0,0.0164244775340738,0.00383070577018204,0,0,0,0.00748535534607467,0,0.0211657834794219,0.00125125118758389,0.00103848438759091,0,0.00224872160913446,0.000782960442678707,0.004565185116383,0.00402788437854488,0.00366857201694062,0.0011116532204211,0.00452557730730302,0.0184828519039751,0.00543556213641944,0.00268192109521428,0.0180880069719006,0,0.00890435558425234,0.001317673455598,0.00293812570319453,0.00100705387745603,0.00172797583449605,0,0.00214835513152548,0.00148440245850577,0,0,0,0.00188320769575039,0,0.00847414632249922,0.0197010580604511,0.0166002660898445,0.00477431184955667,0.0149605697060244,0.00460749863790614,0.0625738968097483,0.108139543703767,0.0466339849173185,0.0591224977071608,0.0698925644169922,0.0740081464872808,0.0286940002926094,0.0136448890961324,0.030192431937561,0.00890435558425234,0,0.0037854817815895,0.00200630278751669,0.00205318014648616,0.00332282473774885,0,0.0362599589149451,0.000825381223052437,0,0,0,0.0629683165868872,0,0.0157204586869218,0.0048080569765917,0,0,0.00227393056614436,0,0.00069367133337333,0.00745332636785608,0.00139572795829799,0.0115585496694893,0,0.0109546689440226,0.00162255340206219,0,0.00170021926353316,0.001317673455598,0.0037854817815895,0,0,0,0,0,0,0,0,0,0,0.00446954897872978,0,0.00290738830280826,0,0,0,0.000426978646758249,0,0.00238794400608451,0.00163773611898964,0.000982790619933932,0.000353863
470303088,0,0.0029037685144904,0.000796293407460614,0,0.0035915871730527,0.00293812570319453,0.00200630278751669,0,0,0,0,0,0.00242203357602801,0,0,0,0,0,0,0.00204721409750219,0,0,0,0,0,0.00318089919616704,0.0016967787834815,0.00293344790952753,0.00076815774196397,0.00110754850087255,0.0097456603196083,0.0152189605562937,0,0.0103953670438899,0.00100705387745603,0.00205318014648616,0,0,0,0,0,0,0,0,0,0,0.00130130560919781,0,0.0066660597536692,0,0.000533984764913786,0,0,0,0.00257739680406785,0.00415922240390994,0,0.000411893930438181,0.0059387867914857,0.0106950265164466,0.00140240737901115,0,0.00847263303009105,0.00172797583449605,0.00332282473774885,0,0,0,0,0,0.0067661412697656,0,0,0,0,0.000893149689932518,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.00146983692656043,0.00132778351057615,0,0.000556117721703494,0,0.00391127970262102,0.0115056343503632,0.00305785953344081,0.0116758537988399,0.000590685254912491,0.0136772098440261,0.00583198412961861,0,0.0164244775340738,0.00214835513152548,0.0362599589149451,0,0.00242203357602801,0,0.0067661412697656,0,0,0,0,0,0,0.00284548653446618,0,0.00296265671567724,0,0,0,0.00153699451492292,0,0.000607789433545311,0,0,0.00431677311656683,0,0.0052944734084333,0.000332338134424545,0.0145119234867097,0.00383070577018204,0.00148440245850577,0.000825381223052437,0,0,0,0,0,0,0,0,0,0,0.000671345844627587,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.00528711614835667,0.0122224248155727,0.00161823020797657,0.00275895883952282,0.000657627208553685,0.0166926609405317,0.0383085677705769,0.0143787176515509,0.0215605382378359,0.00800188840926637,0.0532146365532304,0.00864877636554844,0.00351753667643504,0.00748535534607467,0.00188320769575039,0.0629683165868872,0.00446954897872978,0,0.00130130560919781,0.000893149689932518,0,0.00284548653446618,0.000671345844627587,0,0,0,0,0,0.00614971765927358,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.00524557795202013,0.00215108199463338,0.00134789197843045,0.00245473823674855,0.00431891912016351,0.00652347286308901,0.0129303643892581,0.00392705511859761,0.00470363107342849,0.00807160160248371,0.0338206273789281,0.0108075468631893,0.00118350809726861,0.0211657834794219,0.00847414632249922,0.0157204586869218,0.00290738830280826,0.00204721409750219,0.0066660597536692,0,0,0.00296265671567724,0,0,0,0,0.00614971765927358,0,0), nrow = 29, ncol = 29, byrow = TRUE)
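# Editor's note (assumption): the original script never defines the graph object 'g' that is plotted
# below; one plausible reconstruction builds it from the weighted adjacency matrix, with
# mode = "undirected" assumed from the file name (sample_DegreeUndirected.R).
g <- graph_from_adjacency_matrix(mmatrix, mode = "undirected", weighted = TRUE)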
plot.igraph(g, main = 'He was and independent and proud', sub = 'bright',
vertex.shape = 'rectangle', vertex.size = 45, vertex.color = 'yellow', edge.width = 0.2, edge.arrow.size = 0.2,
edge.color = 'black', vertex.label = c('brilliant','sunny','vivid','smart','dazzling','light','clear','intense','intelligent','quick','lively','dark','clever','happy','cheerful','positive','unintelligent','perky','gloomy','upbeat','sharp-witted','optimistic','brainy','He','was','and','independent','and','proud'), edge.curved = TRUE)
cs <- colSums(mmatrix) # column sums of the weighted adjacency matrix (weighted degree per vertex)
rs <- rowSums(mmatrix) # row sums of the weighted adjacency matrix (weighted degree per vertex)
sink(file = "4.out")
print(cs)
print(rs)
sink()
|
/sample_DegreeUndirected.R
|
no_license
|
ambidextrousTx/Explorations-in-R
|
R
| false | false | 10,356 |
r
|
library(igraph)
library(Matrix)
mmatrix <- matrix(c(0,0.000386665749550218,0.0089162378817095,0.00125239665365081,0.00571388670360401,0.0271825279137305,0.0151792221956226,0.00841610623478111,0.00745645221609359,0.00915116098900969,0.0161713918217335,0.0127182942388623,0.00733954382498813,0.00510186215637128,0.00125125118758389,0.0197010580604511,0.0048080569765917,0,0,0,0,0.00146983692656043,0,0,0,0,0.00528711614835667,0,0.00524557795202013,0.000386665749550218,0,0.0037755571155261,0.00112648136196275,0.000483906012542723,0.00457935406511062,0.0189869295828335,0.00750918829914344,0.0105765817151743,0.00358950580682936,0.0182042971183108,0.00324555150979055,0.000828776060958619,0.00510831769449959,0.00103848438759091,0.0166002660898445,0,0,0,0.000533984764913786,0,0.00132778351057615,0,0,0,0,0.0122224248155727,0,0.00215108199463338,0.0089162378817095,0.0037755571155261,0,0.000702810094006201,0.00232469361739829,0.00616981003776192,0.00426414853423318,0.0100835437863768,0.000601877357118082,0.00190356436325524,0.0069958757261915,0.00526561484467535,0,0.00173412206078099,0,0.00477431184955667,0,0,0,0,0,0,0,0,0,0,0.00161823020797657,0,0.00134789197843045,0.00125239665365081,0.00112648136196275,0.000702810094006201,0,0,0.00906894626358056,0.00831839277575157,0.00417464265927406,0.0117089991491381,0.00809622417252334,0.0126814856682681,0.00513528294796573,0.0054613172539932,0.00581535413984163,0.00224872160913446,0.0149605697060244,0.00227393056614436,0.000426978646758249,0,0,0,0.000556117721703494,0.00153699451492292,0,0,0,0.00275895883952282,0,0.00245473823674855,0.00571388670360401,0.000483906012542723,0.00232469361739829,0,0,0.0136477018270643,0.00113074727651417,0.00414670180781662,0.000223959141794321,0.0103331114723106,0.00724847631752466,0.0081073714377548,0.00612354794869377,0.00323286027279988,0.000782960442678707,0.00460749863790614,0,0,0,0,0,0,0,0,0,0,0.000657627208553685,0,0.00431891912016351,0.0271825279137305,0.00457935406511062,0.00616981003776192,0.00906894626358056,0.0136477018270643,0,0.0481286297378402,0.08399643596187,0.0162965101965557,0.0200874046628238,0.0526809593598904,0.0991709605629716,0.00445520586874179,0.0181954711431042,0.004565185116383,0.0625738968097483,0.00069367133337333,0.00238794400608451,0.00318089919616704,0.00257739680406785,0,0.00391127970262102,0.000607789433545311,0,0,0,0.0166926609405317,0,0.00652347286308901,0.0151792221956226,0.0189869295828335,0.00426414853423318,0.00831839277575157,0.00113074727651417,0.0481286297378402,0,0.0294457240121131,0.0433501258069067,0.0222217693030455,0.0689019428220739,0.0325864103878595,0.0133625954656555,0.0232342914961071,0.00402788437854488,0.108139543703767,0.00745332636785608,0.00163773611898964,0.0016967787834815,0.00415922240390994,0,0.0115056343503632,0,0,0,0,0.0383085677705769,0,0.0129303643892581,0.00841610623478111,0.00750918829914344,0.0100835437863768,0.00417464265927406,0.00414670180781662,0.08399643596187,0.0294457240121131,0,0.0168279143679243,0.0118842750053832,0.0271108272108017,0.018337947883559,0.00383506514619379,0.00673018884434486,0.00366857201694062,0.0466339849173185,0.00139572795829799,0.000982790619933932,0.00293344790952753,0,0,0.00305785953344081,0,0,0,0,0.0143787176515509,0,0.00392705511859761,0.00745645221609359,0.0105765817151743,0.000601877357118082,0.0117089991491381,0.000223959141794321,0.0162965101965557,0.0433501258069067,0.0168279143679243,0,0.0088745352014167,0.0289655818393987,0.0167001496660237,0.00503755663242696,0.00694243508103071,0.0011116532204211,0.0591224977071608,0.0115585496694893,0.00035386347030308
8,0.00076815774196397,0.000411893930438181,0,0.0116758537988399,0.00431677311656683,0,0,0,0.0215605382378359,0,0.00470363107342849,0.00915116098900969,0.00358950580682936,0.00190356436325524,0.00809622417252334,0.0103331114723106,0.0200874046628238,0.0222217693030455,0.0118842750053832,0.0088745352014167,0,0.0325837653340463,0.0254064181957125,0.0106183868786443,0.0214307409924143,0.00452557730730302,0.0698925644169922,0,0,0.00110754850087255,0.0059387867914857,0,0.000590685254912491,0,0,0,0,0.00800188840926637,0,0.00807160160248371,0.0161713918217335,0.0182042971183108,0.0069958757261915,0.0126814856682681,0.00724847631752466,0.0526809593598904,0.0689019428220739,0.0271108272108017,0.0289655818393987,0.0325837653340463,0,0.087993115124211,0.014840914042358,0.0757405233860745,0.0184828519039751,0.0740081464872808,0.0109546689440226,0.0029037685144904,0.0097456603196083,0.0106950265164466,0,0.0136772098440261,0.0052944734084333,0,0,0,0.0532146365532304,0,0.0338206273789281,0.0127182942388623,0.00324555150979055,0.00526561484467535,0.00513528294796573,0.0081073714377548,0.0991709605629716,0.0325864103878595,0.018337947883559,0.0167001496660237,0.0254064181957125,0.087993115124211,0,0.00420312735164954,0.0218650275927043,0.00543556213641944,0.0286940002926094,0.00162255340206219,0.000796293407460614,0.0152189605562937,0.00140240737901115,0,0.00583198412961861,0.000332338134424545,0,0,0,0.00864877636554844,0,0.0108075468631893,0.00733954382498813,0.000828776060958619,0,0.0054613172539932,0.00612354794869377,0.00445520586874179,0.0133625954656555,0.00383506514619379,0.00503755663242696,0.0106183868786443,0.014840914042358,0.00420312735164954,0,0.00816686044662259,0.00268192109521428,0.0136448890961324,0,0,0,0,0,0,0.0145119234867097,0,0,0,0.00351753667643504,0,0.00118350809726861,0.00510186215637128,0.00510831769449959,0.00173412206078099,0.00581535413984163,0.00323286027279988,0.0181954711431042,0.0232342914961071,0.00673018884434486,0.00694243508103071,0.0214307409924143,0.0757405233860745,0.0218650275927043,0.00816686044662259,0,0.0180880069719006,0.030192431937561,0.00170021926353316,0.0035915871730527,0.0103953670438899,0.00847263303009105,0,0.0164244775340738,0.00383070577018204,0,0,0,0.00748535534607467,0,0.0211657834794219,0.00125125118758389,0.00103848438759091,0,0.00224872160913446,0.000782960442678707,0.004565185116383,0.00402788437854488,0.00366857201694062,0.0011116532204211,0.00452557730730302,0.0184828519039751,0.00543556213641944,0.00268192109521428,0.0180880069719006,0,0.00890435558425234,0.001317673455598,0.00293812570319453,0.00100705387745603,0.00172797583449605,0,0.00214835513152548,0.00148440245850577,0,0,0,0.00188320769575039,0,0.00847414632249922,0.0197010580604511,0.0166002660898445,0.00477431184955667,0.0149605697060244,0.00460749863790614,0.0625738968097483,0.108139543703767,0.0466339849173185,0.0591224977071608,0.0698925644169922,0.0740081464872808,0.0286940002926094,0.0136448890961324,0.030192431937561,0.00890435558425234,0,0.0037854817815895,0.00200630278751669,0.00205318014648616,0.00332282473774885,0,0.0362599589149451,0.000825381223052437,0,0,0,0.0629683165868872,0,0.0157204586869218,0.0048080569765917,0,0,0.00227393056614436,0,0.00069367133337333,0.00745332636785608,0.00139572795829799,0.0115585496694893,0,0.0109546689440226,0.00162255340206219,0,0.00170021926353316,0.001317673455598,0.0037854817815895,0,0,0,0,0,0,0,0,0,0,0.00446954897872978,0,0.00290738830280826,0,0,0,0.000426978646758249,0,0.00238794400608451,0.00163773611898964,0.000982790619933932,0.000353863
470303088,0,0.0029037685144904,0.000796293407460614,0,0.0035915871730527,0.00293812570319453,0.00200630278751669,0,0,0,0,0,0.00242203357602801,0,0,0,0,0,0,0.00204721409750219,0,0,0,0,0,0.00318089919616704,0.0016967787834815,0.00293344790952753,0.00076815774196397,0.00110754850087255,0.0097456603196083,0.0152189605562937,0,0.0103953670438899,0.00100705387745603,0.00205318014648616,0,0,0,0,0,0,0,0,0,0,0.00130130560919781,0,0.0066660597536692,0,0.000533984764913786,0,0,0,0.00257739680406785,0.00415922240390994,0,0.000411893930438181,0.0059387867914857,0.0106950265164466,0.00140240737901115,0,0.00847263303009105,0.00172797583449605,0.00332282473774885,0,0,0,0,0,0.0067661412697656,0,0,0,0,0.000893149689932518,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.00146983692656043,0.00132778351057615,0,0.000556117721703494,0,0.00391127970262102,0.0115056343503632,0.00305785953344081,0.0116758537988399,0.000590685254912491,0.0136772098440261,0.00583198412961861,0,0.0164244775340738,0.00214835513152548,0.0362599589149451,0,0.00242203357602801,0,0.0067661412697656,0,0,0,0,0,0,0.00284548653446618,0,0.00296265671567724,0,0,0,0.00153699451492292,0,0.000607789433545311,0,0,0.00431677311656683,0,0.0052944734084333,0.000332338134424545,0.0145119234867097,0.00383070577018204,0.00148440245850577,0.000825381223052437,0,0,0,0,0,0,0,0,0,0,0.000671345844627587,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.00528711614835667,0.0122224248155727,0.00161823020797657,0.00275895883952282,0.000657627208553685,0.0166926609405317,0.0383085677705769,0.0143787176515509,0.0215605382378359,0.00800188840926637,0.0532146365532304,0.00864877636554844,0.00351753667643504,0.00748535534607467,0.00188320769575039,0.0629683165868872,0.00446954897872978,0,0.00130130560919781,0.000893149689932518,0,0.00284548653446618,0.000671345844627587,0,0,0,0,0,0.00614971765927358,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.00524557795202013,0.00215108199463338,0.00134789197843045,0.00245473823674855,0.00431891912016351,0.00652347286308901,0.0129303643892581,0.00392705511859761,0.00470363107342849,0.00807160160248371,0.0338206273789281,0.0108075468631893,0.00118350809726861,0.0211657834794219,0.00847414632249922,0.0157204586869218,0.00290738830280826,0.00204721409750219,0.0066660597536692,0,0,0.00296265671567724,0,0,0,0,0.00614971765927358,0,0), nrow = 29, ncol = 29, byrow = TRUE)
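# Editor's note (assumption): the original script never defines the graph object 'g' that is plotted
# below; one plausible reconstruction builds it from the weighted adjacency matrix, with
# mode = "undirected" assumed from the file name (sample_DegreeUndirected.R).
g <- graph_from_adjacency_matrix(mmatrix, mode = "undirected", weighted = TRUE)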
plot.igraph(g, main = 'He was and independent and proud', sub = 'bright',
vertex.shape = 'rectangle', vertex.size = 45, vertex.color = 'yellow', edge.width = 0.2, edge.arrow.size = 0.2,
edge.color = 'black', vertex.label = c('brilliant','sunny','vivid','smart','dazzling','light','clear','intense','intelligent','quick','lively','dark','clever','happy','cheerful','positive','unintelligent','perky','gloomy','upbeat','sharp-witted','optimistic','brainy','He','was','and','independent','and','proud'), edge.curved = TRUE)
cs <- colSums(mmatrix) # column sums of the weighted adjacency matrix (weighted degree per vertex)
rs <- rowSums(mmatrix) # row sums of the weighted adjacency matrix (weighted degree per vertex)
sink(file = "4.out")
print(cs)
print(rs)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_traffic.R
\name{get_traffic}
\alias{get_traffic}
\title{Fetch Wikipedia 'Traffic' table}
\format{
Data frame with columns:
\itemize{
\item{pageid: Wikipedia page ID identifying a legislator's Wikipedia biography (of class \sQuote{integer}).}
\item{date: Date for which user traffic is recorded, from 2015-07-01 to 2018-12-31 UTC (of class \sQuote{POSIXct}).}
\item{traffic: Daily non-unique user visits (of class \sQuote{numeric}).}
}
}
\source{
Wikimedia API, \url{https://wikimedia.org/api/rest_v1/} \cr
\url{http://petermeissner.de:8880/}
}
\usage{
get_traffic(legislature)
}
\arguments{
\item{legislature}{A character string specifying the three-letter country code of the legislature for which data shall be fetched. Currently one of \sQuote{aut}, \sQuote{can}, \sQuote{cze}, \sQuote{esp}, \sQuote{fra}, \sQuote{deu}, \sQuote{irl}, \sQuote{sco}, \sQuote{gbr}, \sQuote{usa_house}, or \sQuote{usa_senate}.}
}
\value{
A data frame with columns as specified above.
}
\description{
Fetches daily user traffic on legislators' Wikipedia biographies for the specified legislature. Requires a working Internet connection.
}
\examples{
\donttest{# Get entire 'Traffic' table for the Scottish Parliament
sco_traffic <- get_traffic(legislature = "sco")
tibble::glimpse(sco_traffic)
# Add Wikidataid to 'Traffic' table for the Scottish Parliament
sco_traffic_subset <- dplyr::inner_join(x = dplyr::select(get_core(legislature = "sco"),
pageid, wikidataid),
y = sco_traffic,
by = "pageid")
tibble::glimpse(sco_traffic_subset)
}
}
|
/man/get_traffic.Rd
|
no_license
|
cran/legislatoR
|
R
| false | true | 1,784 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_traffic.R
\name{get_traffic}
\alias{get_traffic}
\title{Fetch Wikipedia 'Traffic' table}
\format{
Data frame with columns:
\itemize{
\item{pageid: Wikipedia page ID identifying a legislator's Wikipedia biography (of class \sQuote{integer}).}
\item{date: Date for which user traffic is recorded, from 2015-07-01 to 2018-12-31 UTC (of class \sQuote{POSIXct}).}
\item{traffic: Daily non-unique user visits (of class \sQuote{numeric}).}
}
}
\source{
Wikimedia API, \url{https://wikimedia.org/api/rest_v1/} \cr
\url{http://petermeissner.de:8880/}
}
\usage{
get_traffic(legislature)
}
\arguments{
\item{legislature}{A character string specifying the three-letter country code of the legislature for which data shall be fetched. Currently one of \sQuote{aut}, \sQuote{can}, \sQuote{cze}, \sQuote{esp}, \sQuote{fra}, \sQuote{deu}, \sQuote{irl}, \sQuote{sco}, \sQuote{gbr}, \sQuote{usa_house}, or \sQuote{usa_senate}.}
}
\value{
A data frame with columns as specified above.
}
\description{
Fetches daily user traffic on legislators' Wikipedia biographies for the specified legislature. Requires a working Internet connection.
}
\examples{
\donttest{# Get entire 'Traffic' table for the Scottish Parliament
sco_traffic <- get_traffic(legislature = "sco")
tibble::glimpse(sco_traffic)
# Add Wikidataid to 'Traffic' table for the Scottish Parliament
sco_traffic_subset <- dplyr::inner_join(x = dplyr::select(get_core(legislature = "sco"),
pageid, wikidataid),
y = sco_traffic,
by = "pageid")
tibble::glimpse(sco_traffic_subset)
}
}
|
#' Compute model quality for a given dataset
#'
#' \code{rmse} is the root-mean-squared-error, \code{mae} is the mean
#' absolute error, \code{qae} is quantiles of absolute error. These can all
#' be interpreted on the scale of the response; \code{mae} is less sensitive
#' to outliers. \code{mse} is the mean-squared-error and can be interpreted on
#' the squared scale of the response. \code{rsquare} is the variance of the
#' predictions divided by the variance of the response.
#'
#' @param model A model
#' @param data The dataset
#' @name model-quality
#' @examples
#' mod <- lm(mpg ~ wt, data = mtcars)
#' mse(mod, mtcars)
#' rmse(mod, mtcars)
#' rsquare(mod, mtcars)
#' mae(mod, mtcars)
#' qae(mod, mtcars)
NULL
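# Worked definitions (editor's note, not part of the original documentation), where
# residuals = residuals(model, data):
#   mse     = mean(residuals^2)                    rmse = sqrt(mse)
#   mae     = mean(abs(residuals))                 qae  = quantile(abs(residuals), probs)
#   rsquare = var(predict(model, data)) / var(response(model, data))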
#' @export
#' @rdname model-quality
mse <- function (model, data){
x <- residuals(model, data)
mean(x ^ 2, na.rm = TRUE)
}
#' @export
#' @rdname model-quality
rmse <- function(model, data) {
x <- residuals(model, data)
sqrt(mean(x ^ 2, na.rm = TRUE))
}
#' @export
#' @rdname model-quality
mae <- function(model, data) {
x <- residuals(model, data)
mean(abs(x), na.rm = TRUE)
}
#' @export
#' @rdname model-quality
rsquare <- function(model, data) {
stats::var(stats::predict(model, data), na.rm = TRUE) / stats::var(response(model, data), na.rm = TRUE)
}
#' @export
#' @rdname model-quality
#' @param probs Numeric vector of probabilities
qae <- function(model, data, probs = c(0.05, 0.25, 0.5, 0.75, 0.95)) {
x <- residuals(model, data)
stats::quantile(abs(x), probs, na.rm = TRUE)
}
|
/R/quality.R
|
no_license
|
jrnold/modelr
|
R
| false | false | 1,527 |
r
|
#' Compute model quality for a given dataset
#'
#' \code{rmse} is the root-mean-squared-error, \code{mae} is the mean
#' absolute error, \code{qae} is quantiles of absolute error. These can all
#' be interpreted on the scale of the response; \code{mae} is less sensitive
#' to outliers. \code{mse} is the mean-squared-error and can be interpreted on
#' the squared scale of the response. \code{rsquare} is the variance of the
#' predictions divided by the variance of the response.
#'
#' @param model A model
#' @param data The dataset
#' @name model-quality
#' @examples
#' mod <- lm(mpg ~ wt, data = mtcars)
#' mse(mod, mtcars)
#' rmse(mod, mtcars)
#' rsquare(mod, mtcars)
#' mae(mod, mtcars)
#' qae(mod, mtcars)
NULL
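# Worked definitions (editor's note, not part of the original documentation), where
# residuals = residuals(model, data):
#   mse     = mean(residuals^2)                    rmse = sqrt(mse)
#   mae     = mean(abs(residuals))                 qae  = quantile(abs(residuals), probs)
#   rsquare = var(predict(model, data)) / var(response(model, data))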
#' @export
#' @rdname model-quality
mse <- function (model, data){
x <- residuals(model, data)
mean(x ^ 2, na.rm = TRUE)
}
#' @export
#' @rdname model-quality
rmse <- function(model, data) {
x <- residuals(model, data)
sqrt(mean(x ^ 2, na.rm = TRUE))
}
#' @export
#' @rdname model-quality
mae <- function(model, data) {
x <- residuals(model, data)
mean(abs(x), na.rm = TRUE)
}
#' @export
#' @rdname model-quality
rsquare <- function(model, data) {
stats::var(stats::predict(model, data), na.rm = TRUE) / stats::var(response(model, data), na.rm = TRUE)
}
#' @export
#' @rdname model-quality
#' @param probs Numeric vector of probabilities
qae <- function(model, data, probs = c(0.05, 0.25, 0.5, 0.75, 0.95)) {
x <- residuals(model, data)
stats::quantile(abs(x), probs, na.rm = TRUE)
}
|
#!/usr/bin/env Rscript
library(ggplot2)
library(grid)
csv_fileI='results/clustering_evaluation/homogeneity_results_merge/merge_Viruses_evalue1e-50_I_all_coverage20_homogeneityonly_poly.csv'
csv_fileC='results/clustering_evaluation/homogeneity_results_merge/merge_Viruses_evalue1e-50_I2_coverage_all_homogeneityonly_poly.csv'
csv_fileE="results/clustering_evaluation/homogeneity_results_merge/merge_Viruses_evalue_all_I2_coverage20_homogeneityonly_poly.csv"
evalue = 1e-50
inflation = 2
coverage = 20
output_dir = 'results/clustering_evaluation/homogeneity_results_merge/'
#EVALUE variation
capation_text = '\nClustering parameters:\n'
data = read.csv(file = csv_fileE, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
data = data[data$unclassified_cluster == "False",]
data = data[data$inflation == inflation,]
output_dir = paste(output_dir, '_I', inflation, sep="")
capation_text = paste(capation_text, "Inflation=", inflation, sep='')
data = data[data$coverage == coverage,]
output_dir = paste(output_dir, '_coverage_all', sep="")
capation_text = paste(capation_text, " Coverage=", coverage, '%', sep='')
#Filter out some of the evalues to display less data
data = data[data$evalue != 1e-100,]
data = data[data$evalue != 1e-120,]
data = data[data$evalue != 1e-70,]
data = data[data$evalue != 1e-40,]
data = data[data$evalue != 1e-20,]
data = data[data$evalue != 1e-60,]
data = data[data$evalue != 1e-10,]
data$evalue_string = paste('Evalue=',data$evalue)
data = data[data$nb_polyprotein != 0,] # take only polyprotein
p = ggplot(data, aes(x=homogeneity)) +
geom_density(aes(colour=reorder(evalue, evalue), group=reorder(evalue, evalue), fill=reorder(evalue, evalue)), alpha=0.1) +
theme_minimal()+
theme(axis.text.x = element_text(size=20),
axis.text.y = element_text(size=20), axis.title=element_text(size=22,face="bold"),
legend.text = element_text( size=20), legend.title =element_text(size=22,face="bold"),
plot.caption = element_text(size=22, hjust=0))+
labs(caption = capation_text) +
scale_fill_discrete(name="Evalue") +
scale_color_discrete(name="Evalue")
p
output_file = paste(output_dir, '_polyprotein_only.png', sep ='')
png(filename=output_file, width = 1500, height = 750)
p
dev.off()
#COVERAGE variation
capation_text = '\nClustering parameters:\n'
data = read.csv(file = csv_fileC, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
data = data[data$unclassified_cluster == "False",]
data = data[data$inflation == inflation,]
output_dir = paste(output_dir, '_I', inflation, sep="")
capation_text = paste(capation_text, "Inflation=", inflation, sep='')
data = data[data$evalue == evalue,]
output_dir = paste(output_dir, '_evalue', evalue, sep="")
capation_text = paste(capation_text, " Evalue =", evalue, sep='')
#Filter out some of the coverage values to display less data
data = data[data$coverage != 10,]
data$coverage = paste(data$coverage, '%')
data = data[data$nb_polyprotein != 0,] # take only polyprotein
p = ggplot(data, aes(x=homogeneity)) +
geom_density(aes(colour=coverage, group=coverage, fill=coverage), alpha=0.1) +
theme_minimal()+
theme(axis.text.x = element_text(size=20),
axis.text.y = element_text(size=20), axis.title=element_text(size=22,face="bold"),
legend.text = element_text( size=20), legend.title =element_text(size=22,face="bold"),
plot.caption = element_text(hjust=0, size=22))+
labs(caption = capation_text) +
scale_fill_discrete(name="Coverage\nthreshold") +
scale_color_discrete(name="Coverage\nthreshold")
p
output_file = paste(output_dir, '_polyprotein_only.png', sep ='')
png(filename=output_file, width = 1500, height = 750)
p
dev.off()
#INFLATION variation
capation_text = '\nClustering parameters:\n'
data = read.csv(file = csv_fileI, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
data = data[data$unclassified_cluster == "False",]
data = data[data$coverage == coverage,]
output_dir = paste(output_dir, '_coverage_all', sep="")
capation_text = paste(capation_text, " Coverage=", coverage, '%', sep='')
data = data[data$evalue == evalue,]
output_dir = paste(output_dir, '_evalue', evalue, sep="")
capation_text = paste(capation_text, " Evalue =", evalue, sep='')
data$inflation = gsub("_", ".", data$inflation)
#Filter out some of the inflation values to display less data
data = data[data$inflation != '1.8',]
data$inflation = paste('I=',data$inflation)
data = data[data$nb_polyprotein != 0,] # take only polyprotein
p = ggplot(data, aes(x=homogeneity)) +
geom_density(aes(colour=inflation, group=inflation, fill=inflation), alpha=0.1) +
theme_minimal()+
theme(axis.text.x = element_text(size=20),
axis.text.y = element_text(size=20), axis.title=element_text(size=22,face="bold"),
legend.text = element_text( size=20), legend.title =element_text(size=22,face="bold"),
plot.caption = element_text(hjust=0, size=22))+
labs(caption = capation_text) +
scale_fill_discrete(name="Inflation") +
scale_color_discrete(name="Inflation")
p
output_file = paste(output_dir, '_polyprotein_only.png', sep ='')
print(output_file)
png(filename=output_file, width = 1500, height = 750)
p
dev.off()
|
/scripts/R_scripts/density_plot_only_poly_rstudio.r
|
no_license
|
rnaimehaom/viral_polyprotein_annotation
|
R
| false | false | 5,137 |
r
|
#!/usr/bin/env Rscript
library(ggplot2)
library(grid)
csv_fileI='results/clustering_evaluation/homogeneity_results_merge/merge_Viruses_evalue1e-50_I_all_coverage20_homogeneityonly_poly.csv'
csv_fileC='results/clustering_evaluation/homogeneity_results_merge/merge_Viruses_evalue1e-50_I2_coverage_all_homogeneityonly_poly.csv'
csv_fileE="results/clustering_evaluation/homogeneity_results_merge/merge_Viruses_evalue_all_I2_coverage20_homogeneityonly_poly.csv"
evalue = 1e-50
inflation = 2
coverage = 20
output_dir = 'results/clustering_evaluation/homogeneity_results_merge/'
#EVALUE variation
capation_text = '\nClustering parameters:\n'
data = read.csv(file = csv_fileE, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
data = data[data$unclassified_cluster == "False",]
data = data[data$inflation == inflation,]
output_dir = paste(output_dir, '_I', inflation, sep="")
capation_text = paste(capation_text, "Inflation=", inflation, sep='')
data = data[data$coverage == coverage,]
output_dir = paste(output_dir, '_coverage_all', sep="")
capation_text = paste(capation_text, " Coverage=", coverage, '%', sep='')
#Filter out some of the evalues to display less data
data = data[data$evalue != 1e-100,]
data = data[data$evalue != 1e-120,]
data = data[data$evalue != 1e-70,]
data = data[data$evalue != 1e-40,]
data = data[data$evalue != 1e-20,]
data = data[data$evalue != 1e-60,]
data = data[data$evalue != 1e-10,]
data$evalue_string = paste('Evalue=',data$evalue)
data = data[data$nb_polyprotein != 0,] # take only polyprotein
p = ggplot(data, aes(x=homogeneity)) +
geom_density(aes(colour=reorder(evalue, evalue), group=reorder(evalue, evalue), fill=reorder(evalue, evalue)), alpha=0.1) +
theme_minimal()+
theme(axis.text.x = element_text(size=20),
axis.text.y = element_text(size=20), axis.title=element_text(size=22,face="bold"),
legend.text = element_text( size=20), legend.title =element_text(size=22,face="bold"),
plot.caption = element_text(size=22, hjust=0))+
labs(caption = capation_text) +
scale_fill_discrete(name="Evalue") +
scale_color_discrete(name="Evalue")
p
output_file = paste(output_dir, '_polyprotein_only.png', sep ='')
png(filename=output_file, width = 1500, height = 750)
p
dev.off()
#COVERAGE variation
capation_text = '\nClustering parameters:\n'
data = read.csv(file = csv_fileC, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
data = data[data$unclassified_cluster == "False",]
data = data[data$inflation == inflation,]
output_dir = paste(output_dir, '_I', inflation, sep="")
capation_text = paste(capation_text, "Inflation=", inflation, sep='')
data = data[data$evalue == evalue,]
output_dir = paste(output_dir, '_evalue', evalue, sep="")
capation_text = paste(capation_text, " Evalue =", evalue, sep='')
#Filter out some of the coverage values to display less data
data = data[data$coverage != 10,]
data$coverage = paste(data$coverage, '%')
data = data[data$nb_polyprotein != 0,] # take only polyprotein
p = ggplot(data, aes(x=homogeneity)) +
geom_density(aes(colour=coverage, group=coverage, fill=coverage), alpha=0.1) +
theme_minimal()+
theme(axis.text.x = element_text(size=20),
axis.text.y = element_text(size=20), axis.title=element_text(size=22,face="bold"),
legend.text = element_text( size=20), legend.title =element_text(size=22,face="bold"),
plot.caption = element_text(hjust=0, size=22))+
labs(caption = capation_text) +
scale_fill_discrete(name="Coverage\nthreshold") +
scale_color_discrete(name="Coverage\nthreshold")
p
output_file = paste(output_dir, '_polyprotein_only.png', sep ='')
png(filename=output_file, width = 1500, height = 750)
p
dev.off()
#INFLATION variation
capation_text = '\nClustering parameters:\n'
data = read.csv(file = csv_fileI, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
data = data[data$unclassified_cluster == "False",]
data = data[data$coverage == coverage,]
output_dir = paste(output_dir, '_coverage_all', sep="")
capation_text = paste(capation_text, " Coverage=", coverage, '%', sep='')
data = data[data$evalue == evalue,]
output_dir = paste(output_dir, '_evalue', evalue, sep="")
capation_text = paste(capation_text, " Evalue =", evalue, sep='')
data$inflation = gsub("_", ".", data$inflation)
#Filter out some of the inflation values to display less data
data = data[data$inflation != '1.8',]
data$inflation = paste('I=',data$inflation)
data = data[data$nb_polyprotein != 0,] # take only polyprotein
p = ggplot(data, aes(x=homogeneity)) +
geom_density(aes(colour=inflation, group=inflation, fill=inflation), alpha=0.1) +
theme_minimal()+
theme(axis.text.x = element_text(size=20),
axis.text.y = element_text(size=20), axis.title=element_text(size=22,face="bold"),
legend.text = element_text( size=20), legend.title =element_text(size=22,face="bold"),
plot.caption = element_text(hjust=0, size=22))+
labs(caption = capation_text) +
scale_fill_discrete(name="Inflation") +
scale_color_discrete(name="Inflation")
p
output_file = paste(output_dir, '_polyprotein_only.png', sep ='')
print(output_file)
png(filename=output_file, width = 1500, height = 750)
p
dev.off()
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, -7.88781071482504e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615853830-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 956 |
r
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, -7.88781071482504e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
#-----------------------------------------------------------------------------
# S1 Tab
tabItem(tabName = "s1_rtc2ts",
fluidRow(
# Include the line below in ui.R so you can send messages
tags$head(tags$script(HTML('Shiny.addCustomMessageHandler("jsCode",function(message) {eval(message.value);});'))),
# for busy indicator
useShinyjs(),
tags$style(appCSS),
#----------------------------------------------------------------------------------
# Processing Panel Sentinel-1
box(
# Title
title = "Processing Panel", status = "success", solidHeader= TRUE,
tabBox(width = 700,
tabPanel("Processor",
tags$h4("Sentinel-1 RTC to time-series & timescan processor"),
span(textOutput("RAMwarning_rtc2ts"), style='color:red'),
hr(),
tags$b("Short description:"),
p("This interface allows the fully-automated generation of time-series and timescan products. It expects the folder structure
created by the data download tab and that all data has been already processed by the GRD to RTC processor, using the time-series preparation option.
If more than one track is present within the data folder, the procedure will generate
the final product for each track separately. The output can be subsequently mosaicked by using the routine in the time-series mosaics submenu. "),
hr(),
tags$b("Processing directory:"),
p("The path should point to the", tags$b(" DATA directory "),
"created by routine of the data download submenu, within your project folder, i.e. \"/path/to/project/DATA\""),
shinyDirButton("s1_rtc2ts_inputdir","Browse","Browse",FALSE),
br(),
br(),
verbatimTextOutput("s1_rtc2ts_inputdir"),
hr(),
tags$b("Select the desired multi-temporal product:"),
p("The time-series processor creates a co-registered, multi-temporal speckle-filtered stack of of all available overlapping scenes per track and polarisation,
clipped to the same extent. Timescan data aggregates the time-series data by calculating basic
descriptive statistics for each polarization in the temporal domain."),
radioButtons("s1_rtc2ts_mode", "",
c("Time-series & Timescan" = "1",
"Time-series only" = "2",
"Timescan only" = "3"),
"1"),
hr(),
tags$b("Choose the output datatype for the timeseries/timescan products."),
p("Those values represent the amount of space reserved for one pixel per band."),
radioButtons("s1_rtc2ts_dtype", "",
c("16 bit unsigned integer (recommended)" = "1",
"8 bit unsigned integer" = "2",
"32 bit floating point" = "3"),
"1"),
hr(),
withBusyIndicatorUI(
actionButton("s1_rtc2ts_process", "Start processing")
),
#"Output:",
textOutput("processS1_RTC2TS")
),
# tab panel for cleaning up files
tabPanel("Clean up files",
# Title
tags$h4("Clean up intermediate products"),
tags$b("DATA directory:"),
br(),
br(),
shinyDirButton("S1_ts_cleanupdir","Select S1 DATA folder in your project directory","Select S1 DATA folder in your project directory",FALSE),
br(),
br(),
verbatimTextOutput("S1_ts_cleanupdir"),
hr(),
tags$b("Clean up raw files"),
p("This button deletes all the raw Sentinel-1 zip files downloaded in the first place by the",tags$i(" Data Download "), " tab."),
withBusyIndicatorUI(
actionButton("s1_ts_clean_raw", "Delete files")
),
#"Output:",
textOutput("cleanS1RAW"),
hr(),
tags$b("Clean up RTC/LS files"),
p("This button deletes all the single, radiometrically terrain corrected products
                                  as well as the single layover/shadow maps generated during the GRD to RTC processing."),
withBusyIndicatorUI(
actionButton("s1_ts_clean_rtc", "Delete files")
),
#"Output:",
textOutput("cleanS1RTC"),
br(),
hr(),
tags$b("Delete Time-series data"),
p("This button deletes the Time-series folder for each track within the project directory."),
withBusyIndicatorUI(
actionButton("s1_ts_clean_timeseries", "Delete files")
),
#"Output:",
textOutput("cleanS1TS"),
hr(),
tags$b("Delete Timescan data"),
p("This button deletes the Timescan products for each track within the project directory."),
withBusyIndicatorUI(
actionButton("s1_ts_clean_timescan", "Delete files")
),
#"Output:",
textOutput("cleanS1TScan"),
hr()
)
)
), #close box
# #----------------------------------------------------------------------------------
# # Info Panel
box(
title = "Info Panel", status = "success", solidHeader= TRUE,
tabBox(width = 700,
tabPanel("General Info",
tags$h4("Processing chain"),
hr(),
p("The ", tags$b("RTC to time-series/timescan processor"), " (Figure 1) applies the necessary preprocessing steps for preparing multi-temporal stacks of Sentinel-1 RTC products
generated by the ", tags$b("GRD to RTC processor."), " Those stacks are ready-to-use for time-series analysis, or the subsequent generation of timescan products."),
p("The user has the choice to select ", tags$b("different data types"), " of the resulting stack. While loosing radiometric resolution, the necessary disk space is reduced by a factor of 2
for 16 bit unsigned integer, and by a factor of 4 for 8 bit unsigned integer products. A good trade-off between disk space and radiometric accuracy is usually
the 16 bit option, since it still holds a sensitivity of about 0.00045 dB. The conversion to 8 bit channels, instead, results in a histogram binning of 0,11 dB."),
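                            # Editor's note (illustrative arithmetic, assuming the stack is stretched over a 30 dB range,
                            # e.g. -25 to +5 dB as described in the Detailed Description tab):
                            #   16 bit unsigned integer: 30 dB / 65535 levels ~ 0.00046 dB per digital number
                            #   8 bit unsigned integer:  30 dB / 255 levels   ~ 0.118 dB per digital number
                            # which is where the quoted sensitivities of about 0.00045 dB and 0.11 dB come from.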
p("The ", tags$b("output"), " is written to a newly created folder named \"Timeseries\" within the track folder created by the data download routine. Inside this folder each scene can be found,
                               sorted by a starting number. The actual stack can be opened by using the corresponding \"Virtual Raster Table\" (VRT) file (i.e. timeseries.VV.vrt).
                               This is a simple XML text file that refers to the actual single layers of the stack. Band numbering of the stack follows the numbers set as a prefix for each acquisition,
                               which are sorted by the acquisition date."),
p("The routine will automatically detect if the ", tags$b("input imagery"), " has been acquired in ", tags$b("dual-polarised mode."), " Only if all imagery has been acquired in dual-polarised mode, the processing
routine will create the time-series stack for the VH polarisation (i.e. timeseries.VH.vrt)."),
p("For an easy", tags$b(" assessment of the quality,"), " thumbnail images and an animated gif file are created as well. This helps to identify images where the backscatter is
affected by heavy rainfall events or other disturbing factors. "),
img(src = "S1_RTC2TS/TimeseriesWorkflow.jpg", width = "100%", height = "100%"),
tags$b("Figure 1: Preprocessing chain of the RTC to time-series processor for Sentinel-1 RTC data prepared by the RTC to GRD processor.
Note that the generation of the VH-polarised time-series only applies if all imagery has been acquired in the dual-polarised mode."),
br(),
br(),
p("Using", tags$b(" multi-temporal SAR data"), " features some", tags$b("advantages"), " over single image use. First of all, the use of multiple imagery
over one area allows for more", tags$b(" effective reduction of Speckle noise."), "The combination of adaptive spatial filters and temporal statistics can
significantly improve the quality of all single scenes within the multi-temporal stack (Figure 2)."),
img(src = "MT_speckle.png", width = "100%", height = "100%"),
tags$b("Figure 1: Sentinel-1 VV-polarized image taken at 01.09.2016 over a partly forested area in Congo.
Left: Image filtered only in the spatail domain using the Refined Lee Speckle Filter.
Right: Image filtered in the spatial and temporal domain as part of a multi-temporal stack of 10 images."),
br(),
br(),
p("Another advantage of multi-temporal data is that ", tags$b("temporal dynamics"), " of the earth's surface are captured. Thus the succession of different
biogeophysical processes can potentially be tracked by various types of time-series analysis."),
p("The temporal variation of the signal can also give information on the ", tags$b("type of land cover."), " Feeding a classifier with a stack of single imagery
acquired at different dates may however result in long processing times, and the data storage demand is increased as well. This is especially true for Sentinel-1,
where the 6-24 days repeat cycle quickly leads to a massive accumulation of data. The ", tags$b("Timescan"), " approach overcomes this issue
by aggregating the temporal information for each polarisation channel separately. For every pixel of the time-series stack,
a basic set of descriptive statistics are calculated in time: "),
p(" - Average backscatter in time "),
p(" - Maximum backscatter in time "),
p(" - Minimum backscatter in time "),
p(" - Standard deviation in time "),
p(" - Coefficient of Variation (CoV) in time"),
p("For example, the", tags$b(" average backscatter"), " in time further reduces Speckle noise. The ", tags$b("minimum backscatter,"), " instead, may reduce
the influence of soil moisture, given that the dry period is covered within the time-series stack.
The ", tags$b("standard deviation"), " allows differentiating between land cover classes that have rather stable backscatter over time (e.g. urban, forests, water) from
surfaces with varying backscatter behaviour (e.g. agricultural fields, wetlands).
The ", tags$b("coefficient of variation,"), " which depicts the ratio of the standard deviation divided by the mean, represent the relative variation
of the backscatter. In this way, classes with a small standard deviation, but different levels of mean backscatter values (e.g. water and urban) become
distinguishable."),
p("By applying this approach to both polarisations, a total of 10 channels is available. Figure 3 depicts an exemplary colour composition. A set of
10 dual-polarised Sentinel-1 IW images, acquired
between the 15th of July 2016 and the 31st of October 2016, have been used. Similar to the time-series stack, all bands are actually stored
singularly, but an additional VRT-file is present to load the whole stack into your favorite GIS environment, or to use it for subsequent data fusion or
classification tasks."),
img(src = "MT_ts.png", width = "100%", height = "100%"),
tags$b("Figure 2: Left: High-resolution optical imagery taken from Google Earth over an area in Congo.
Right: Timescan RGB composite using a band combination of VH-maximum (red), VV-minimum (green) and VV-CoV (blue). ")
),
tabPanel("Detailed Description",
tags$h4("Description of the single processing steps"),
hr(),
tags$b("1.) Image stacking"),
p("The image stacking takes all available RTC scenes per track and collocates the spatially overlapping products.
                               Collocating the products implies that the pixel values of some products (the slave images) are resampled into the geographical
                               raster of a master image, which usually refers to the earliest date. Resampling of the slave imagery is then achieved by bilinear interpolation."),
hr(),
tags$b("2.) Multi-temporal Lee-Sigma Speckle Filter"),
p("SAR images have inherent salt and pepper like texturing called speckles which degrade the quality of the image and make interpretation of features more difficult.
                               The speckles are caused by random constructive and destructive interference of the de-phased but coherent return waves scattered by the elementary scatterers
within each resolution cell. Multi-temporal filtering is one of the commonly used speckle noise reduction techniques."),
p("The SNAP toolbox provides a modified approach, firstly proposed by Quegan et al 2000. The Lee-Sigma Filter is applied in the spatial domain and information
from the temporal domain are applied as stated in the original approach. The filter uses a window size of 7x7, a target window size of 3x3,
and a sigma of 0.9."),
hr(),
tags$b("3.) Linear power to decibal scale"),
p("The received power of the backscattering is usually measured in linear power. Depending on the surface, the measurement can vary across a wide range.
In order to derive a more balanced output, logarithmic scaling in decibel (dB) is applied."),
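                            # Editor's note: in R this conversion is a one-liner, e.g.
                            #   sigma0_db <- 10 * log10(sigma0_linear)   # linear power -> decibel
                            # (shown as a comment only, to illustrate the scaling described above).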
hr(),
tags$b("4.) Clip to minimum common extent"),
p("In order to assure that every pixel contains the same amount of temporal measurements, the time-series stack is clipped to the minimum common extent of the
whole set of imagery."),
hr(),
tags$b("5.) Scaling to integer value (optional)"),
p("In order to reduce disk space, values of each polarization are linearly stretched between -25 and 5 dB. This partially reduces the radiometric resolution."),
hr(),
tags$b("6.) Mask out Layover/Shadow areas"),
p("The layover/shadow masks are combined for all images, assuring that at no point in time an area affected by layover/shadow is present. Respective data points
exhibit 0 as no data value."),
hr(),
tags$b("7.) Time-series - output"),
p(""),
hr(),
tags$b("8.) Timescan calculation and output"),
p(""),
hr()
),
tabPanel("Demo",
br(),
tags$h4("Demo I: Create a time-series stack"),
br(),
p("Within this demo a time-series stack will be created. The accompanying slides contain a step-by-step guide and deepen the understanding of the user
handling and interpreting multi-temporal SAR data sets."),
#p("Here you can download a directory that contains Sentinel-1 imagery acquired over a small area in the south of Taman Nasional Sebangau area in South Kalimantan, Indonesia.
# This data will be placed in your home folder. In order to process, choose", tags$b("/home/username/S1_timeseries_demo/Demo_Jena/DATA"), "as the project directory.
# The resultant time-series data can be found in", tags$b("/home/username/S1_timeseries_demo/Demo_Jena/DATA/044A/Timeseries"), "and",
# tags$b("/home/username/S1_timeseries_demo/Demo_Jena/DATA/044A/MT_metrics",".")),
#withBusyIndicatorUI(
# actionButton("S1_ts_testdata_download", "Download")
#),
#br(),
#textOutput("download_Demo_Jena"),
hr(),
tags$h4("Demo II: Create a timescan composite"),
br(),
p("Within this demo a timescan composite will be created. The accompanying slides contain a step-by-step guide and deepen the understanding of the user
handling and interpreting timescan composites. A special focus is given for land cover applications."),
tags$b("In preparation"),
hr(),
tags$h4("Demo III: Multi-temporal data and Deforestation"),
br(),
p("Within this demo we will focus on the signatures of deforestation in time-series and timescan data. The accompanying slides contain a step-by-step guide and deepen the understanding of the user
handling and interpreting timescan composites. "),
#p("Further examples can be found here using ESA SAR data of the last 25 years. https://earth.esa.int/web/earth-watching/20-years-of-sar/-/asset_publisher/084v29WO4EeJ/content/global-deforestation-2012?redirect=https%3A%2F%2Fearth.esa.int%2Fweb%2Fearth-watching%2F20-years-of-sar%3Fp_p_id%3D101_INSTANCE_084v29WO4EeJ%26p_p_lifecycle%3D0%26p_p_state%3Dnormal%26p_p_mode%3Dview%26p_p_col_id%3Dcolumn-1%26p_p_col_pos%3D1%26p_p_col_count%3D2")
tags$b("In preparation"),
hr(),
tags$h4("Demo IV: Reprocessing of data due image artifacts caused by heavy rainfall. "),
br(),
p("While SAR is capable of seeing through clouds, heavy rainfall events during the time of data acquisition can lead to unwanted artifacts within the imagery due to
attenuation of the transmitted radiation. In such a case it is necessary to sort out the affected imagery and re-run the RTC to time-series and timescan processor.
The accompanying slides contain a step-by-step guide and help the user identify unwanted image artifacts caused by heavy rainfall."),
tags$b("In preparation")
),
tabPanel("References",
br(),
tags$b("References"),
tags$b("Scientific Articles"),
p("Quegan et al. (2000): Multitemporal ERS SAR Analysis Applied to Forest Mapping. in: IEEE Transactions on Geoscience and Remote Sensing. 38. 2.")
)
) # close tab box
) # close box
) # close fluid row
) # close tabit
|
/shiny/ui/S1_rtc2ts_tab_ui.R
|
permissive
|
ValentinLouis/opensarkit
|
R
| false | false | 21,699 |
r
|
## Moosilauke
## Visualize growth/species by environmental factors:
## - elevation
## - aspect
library(plyr)
library(lattice)
library(ggplot2)  # needed for the ggplot()/ggsave() calls further below
long <- read.csv("~/work/data/data/growth/moose-long.csv")
## See what our variables look like
hist(long$dbh)
hist(long$ht)
hist(long$dbhgrowth)
hist(long$bvgrowth)
hist(long$htgrowth)
## Singular value decomposition (principal component analysis)
## nums <- sapply(long, is.numeric)
## mat <- as.matrix(long[,nums])
comp <- long[complete.cases(long[,c("htgrowth","dbhgrowth","elev")]),
c("htgrowth","dbhgrowth","elev")]
mat <- as.matrix(scale(comp))
## look for pattern in rows/columns
par(mar = rep(0.2, 4))
image(1:3, 1:nrow(mat), t(mat)[,nrow(mat):1])
###########################################################################
##
## Clusters and heatmaps
##
## quantile classes for dbh/ht/bv growth
long$dbhcl <- as.numeric(cut(long$dbhgrowth, breaks = quantile(long$dbhgrowth,
probs = seq(0,1,0.1), na.rm = TRUE)))
long$htcl <- as.numeric(cut(long$htgrowth, breaks = quantile(long$htgrowth,
probs = seq(0,1,0.15), na.rm = TRUE)))
long$bvcl <- as.numeric(cut(long$bvgrowth, breaks = quantile(long$bvgrowth,
probs = seq(0,1,0.15), na.rm = TRUE)))
## numeric asp/elev classes
long$aspcl <- as.numeric(long$aspcl)
long$elevcl <- as.numeric(long$elevcl)
long$soilcl <- as.numeric(long$soilcl)
## by correlation
comp <- long[complete.cases(long[,c("aspcl","elevcl","soilcl","htcl","dbhcl")]),
c("aspcl","elevcl","soilcl","htcl","dbhcl")]
mat <- as.matrix(comp)
symnum( cU <- cor(mat) )
hU <- heatmap(cU, Rowv = FALSE, symm = TRUE,
distfun = function(c) as.dist(1 - c), keep.dendro = TRUE)
dev.copy2pdf(file = "~/work/growth/visuals/heatmap-correlations.pdf")
dev.off()
## htgrowth and dbhgrowth
comp <- long[complete.cases(long[,c("htgrowth","dbhgrowth","htcl","dbhcl")]),
c("htgrowth","dbhgrowth","htcl","dbhcl")]
mat <- as.matrix(comp)
heatmap(mat, labCol = c("ht grwth", "dbh grwth", "htcl","dbhcl"),
main = "Clustering height and dbh growth", cexCol = 1.5, labRow = FALSE,
scale = "column")
dev.copy2pdf(file="~/work/growth/visuals/ht_dbh-growth-heatmap.pdf")
dev.off()
##########################################################################
## ht and dbh growth by elevation
comp <- long[complete.cases(long[,c("htcl","dbhcl","elevcl")]),
c("htcl","dbhcl","elevcl")]
mat <- as.matrix(scale(comp))
heatmap(mat, labCol = c("htcl","dbhcl","elevcl"),
main = "Clustering height and dbh growth by elevation",
cexCol = 1.5, labRow = FALSE)
dev.copy2pdf(file="~/work/growth/visuals/ht_dbh-growth-heatmap-elev.pdf")
dev.off()
## plotmeans
comp <- long[complete.cases(long[,c("htgrowth","dbhgrowth","elev")]),
c("htgrowth","dbhgrowth","elev")]
byelev <- ddply(comp, .(elev), .fun = function(x) {
data.frame(dbhgr = mean(x$dbhgrowth, na.rm=TRUE),
htgr = mean(x$htgrowth, na.rm = TRUE))
})
rownames(byelev) = byelev$elev
byelev <- byelev[,2:3]
mat <- as.matrix(byelev)
rc <- rainbow(nrow(byelev), start = 0, end = .3)
heatmap(mat, labCol = c("ht","dbh"),
main = "Clustering mean height and dbh growth by elevect",
cexCol = 1.5, scale = "column", RowSideColors = rc)
dev.copy2pdf(file="~/work/growth/visuals/ht_dbh-growth-heatmap-elevmeans.pdf")
dev.off()
##########################################################################3
## ht and dbh growth by aspect
comp <- long[complete.cases(long[,c("htcl","dbhcl","aspcl")]),
c("htcl","dbhcl","aspcl")]
mat <- as.matrix(comp)
heatmap(mat, labCol = c("htcl","dbhcl","aspcl"),
main = "Clustering height and dbh growth by aspect",
cexCol = 1.5, labRow = FALSE, scale = "column")
dev.copy2pdf(file="~/work/growth/visuals/ht_dbh-growth-heatmap-aspect.pdf")
dev.off()
## plotmeans
comp <- long[complete.cases(long[,c("htgrowth","dbhgrowth","asp")]),
c("htgrowth","dbhgrowth","asp")]
byasp <- ddply(comp, .(asp), .fun = function(x) {
data.frame(dbhgr = mean(x$dbhgrowth, na.rm=TRUE),
htgr = mean(x$htgrowth, na.rm = TRUE))
})
rownames(byasp) = byasp$asp
byasp <- byasp[,2:3]
mat <- as.matrix(byasp)
rc <- rainbow(nrow(byasp), start = 0, end = .3)
heatmap(mat, labCol = c("ht","dbh"),
main = "Clustering mean height and dbh growth by aspect",
cexCol = 1.5, scale = "column", RowSideColors = rc)
dev.copy2pdf(file="~/work/growth/visuals/ht_dbh-growth-heatmap-aspmeans.pdf")
dev.off()
##########################################################################
## ht and dbh growth by plot
comp <- long[complete.cases(long[,c("htcl","dbhcl","pplot")]),
c("htcl","dbhcl","pplot")]
mat <- as.matrix(comp)
heatmap(mat, labCol = c("htcl","dbhcl","pplot"),
main = "Clustering height and dbh growth by plot",
cexCol = 1.5, labRow = FALSE, scale = "column")
dev.copy2pdf(file="~/work/growth/visuals/ht_dbh-growth-heatmap-plot.pdf")
dev.off()
## plotmeans
comp <- long[complete.cases(long[,c("htgrowth","dbhgrowth","pplot")]),
c("htgrowth","dbhgrowth","pplot")]
byplot <- ddply(comp, .(pplot), .fun = function(x) {
data.frame(dbhgr = mean(x$dbhgrowth, na.rm=TRUE),
htgr = mean(x$htgrowth, na.rm = TRUE))
})
rownames(byplot) = byplot$pplot
byplot <- byplot[,2:3]
mat <- as.matrix(byplot)
rc <- rainbow(nrow(byplot), start = 0, end = .3)
heatmap(mat, labCol = c("ht","dbh"),
main = "Clustering height and dbh growth by plot",
cexCol = 1.5, scale = "column", RowSideColors = rc)
dev.copy2pdf(file="~/work/growth/visuals/ht_dbh-growth-heatmap-plotmeans.pdf")
dev.off()
###########################################################################
##
## Correlations between ht and dbh
##
##############################################################################
##
## Ht vs DBH (NOT growth)
##
## General trends by gradients between ht and dbh (not growth)
comp <- long[complete.cases(long[,c("ht","dbh","aspcl","elevcl")]),
c("ht","dbh","aspcl","elevcl")]
ggplot(comp, aes(dbh, ht)) + geom_point() + geom_smooth() + facet_wrap(~aspcl)
ggsave(filename = "~/work/growth/visuals/ht_dbh-aspcl.pdf")
ggplot(comp, aes(dbh, ht)) + geom_point() + geom_smooth() + facet_wrap(~elevcl)
ggsave(filename = "~/work/growth/visuals/ht_dbh-elevcl.pdf")
## htgrowth correlated with aspect
comp <- long[complete.cases(long[,c("ht","htgrowth","dbh","dbhgrowth","elev","asp")]),
c("ht","htgrowth","dbh","dbhgrowth","asp","elev")]
## comp$elevcl = factor(x,levels(x)[c(4,5,1:3)])
pairs(~ht + htgrowth + dbh + dbhgrowth + asp + elev,
data = comp, lower.panel = panel.smooth,
upper.panel = panel.cor)
dev.copy2pdf(file = "~/work/growth/visuals/pairs-dbh-ht-growths-elev-asp.pdf")
dev.off()
comp$asp <- as.factor(comp$asp)
## dummy columns for aspect
A <- model.matrix(~ ht + dbh + asp - 1, comp)
pairs(~ht + dbh * 2:ncol(A), data = comp, lower.panel = panel.smooth,
upper.panel = panel.cor)
tst <- reshape(comp, timevar = "asp",varying = c("dbh","ht"), direction = "wide")
rgb.palette <- colorRampPalette(c("blue", "yellow"), space = "rgb")
levelplot(cor, main="stage 12-14 array correlation matrix", xlab="",
ylab="", col.regions=rgb.palette(120), cuts=100, at=seq(0,1,0.01))
###############################################################################
##
## levelplots
##
## ht growth by asp * elev
levelplot(htgrowth ~ elev*asp, data = long,
xlab = "elevation", ylab = "aspect",
main = "Height growth by aspect and elevation",
col.regions = terrain.colors(100)
)
dev.copy2pdf(file = "~/work/growth/visuals/levelplot-htgrowth-asp-elev.pdf")
dev.off()
## dbh growth by asp * elev
levelplot(dbhgrowth ~ elev*asp, data = long,
xlab = "elevation", ylab = "aspect",
main = "DBH growth by aspect and elevation",
col.regions = terrain.colors(100)
)
dev.copy2pdf(file = "~/work/growth/visuals/levelplot-dbhgrowth-asp-elev.pdf")
dev.off()
hh <- hclust(dist(mat))
ordered <- mat[hh$order,]
par(mfrow = c(1,3))
image(t(ordered)[,nrow(ordered):1])
plot(rowMeans(ordered), nrow(ordered):1, xlab = "Row Mean", ylab = "Row", pch = 19)
plot(colMeans(ordered), xlab = "Column", ylab = "Column Mean", pch = 19)
## NOTE: svd() needs a numeric matrix; complete.cases() only returns a logical vector,
## so the following line is flagged as broken in the original script
thing <- svd(complete.cases(tst))
plot(thing$d)
## svd
dataMatrix <- matrix(rnorm(400), nrow = 40)
image(1:10, 1:40, t(dataMatrix)[,nrow(dataMatrix):1])
|
/exploratory/growthByFactors.R
|
no_license
|
nverno/growth
|
R
| false | false | 8,499 |
r
|
source('dependencies.R')
# read all .arff files in data folder
arff_files <- list.files(path = 'data', pattern = '.arff', full.names = T)
alldat <- rbindlist(lapply(arff_files[3:5], function(i_file) {
dat <- read.arff(i_file)
dat$year <- as.numeric(gsub('year', '', gsub('data/', '', gsub('.arff', '', i_file))))
return(dat)
}))
# prepare classes for multi class classification
alldat$mclass <- NA
alldat$mclass <- ifelse(alldat$class == 1, alldat$year, 0)
# randomForest for varImp and initial accuracy
thisdat <- na.spline(alldat[, -c(65,66,67)])  # zoo::na.spline interpolates missing values (zoo assumed loaded via dependencies.R)
thisdat <- data.table(thisdat, mclass = factor(alldat$mclass))
# train
model_rf <- randomForest(mclass~., data = thisdat, ntrees = 5000, mtry = 2)
model_rf
varImpPlot(model_rf)
# variable importance viz
imp <- data.frame(model_rf$importance)
imp$x <- rownames(imp)
imp <- data.table(imp)
imp <- imp[order(-MeanDecreaseGini)]
imp$x <- factor(imp$x, levels = imp$x)
require('ggplot2')
plt <- ggplot(imp) + geom_bar(
aes(x = x, y = MeanDecreaseGini),
stat = 'identity'
) + theme(
axis.text.x = element_text(angle = 90)
) + ggtitle(label = 'Variable Importance')
ggsave('varimp_multic.pdf', plt, width = 12, height = 5, units = 'in', dpi = 600)
# missForest
require('missForest')
require('foreach')
## create an R cluster and register the parallel backend
require('doParallel')
registerDoParallel(cores=5)
model_mf <- missForest(xmis = alldat, maxiter = 3, ntree = 100, parallelize = 'variables')
model_mf$OOBerror
# randomForest classification on imputed data
imputed_data <- model_mf$ximp
model_rf_imp <- randomForest(factor(mclass)~., data = imputed_data[,-c(65,66)], ntrees = 5000, mtry = 2)
model_rf_imp
varImpPlot(model_rf_imp)
write.csv(x = imputed_data, file = 'imputed_data.csv', row.names = F)
|
/feature_selection_multi_class.R
|
permissive
|
PANDYA21/bankruptcy_prediction
|
R
| false | false | 1,775 |
r
|
## BioCro/R/weach.R by Fernando Ezequiel Miguez Copyright (C) 2007-2009
##
## This program is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by the Free
## Software Foundation; either version 2 or 3 of the License (at your option).
##
## This program is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
## FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
## more details.
##
## A copy of the GNU General Public License is available at
## http://www.r-project.org/Licenses/
weach366 <- function(X, lati, ts = 1, temp.units = c("Farenheit", "Celsius"), rh.units = c("percent",
"fraction"), ws.units = c("mph", "mps"), pp.units = c("in", "mm"), ...) {
if (missing(lati))
stop("latitude is missing")
if ((ts < 1) || (24%%ts != 0))
stop("ts should be a divisor of 24 (e.g. 1,2,3,4,6,etc.)")
if (dim(X)[2] != 11)
stop("X should have 11 columns")
MPHTOMPERSEC <- 0.447222222222222
temp.units <- match.arg(temp.units)
rh.units <- match.arg(rh.units)
ws.units <- match.arg(ws.units)
pp.units <- match.arg(pp.units)
year <- X[, 1]
DOYm <- X[, 2]
solar <- X[, 3]
maxTemp <- X[, 4]
minTemp <- X[, 5]
avgTemp <- X[, 6]
maxRH <- X[, 7]
minRH <- X[, 8]
avgRH <- X[, 9]
WindSpeed <- X[, 10]
precip <- X[, 11]
tint <- 24/ts
tseq <- seq(0, 23, ts)
## Solar radiation
solarR <- (0.12 * solar) * 2.07 * 10^6/3600
solarR <- rep(solarR, each = tint)
ltseq <- length(tseq)
resC2 <- numeric(ltseq * 366)
for (i in 1:366) {
res <- lightME(DOY = i, t.d = tseq, lat = lati, ...)
Itot <- res$I.dir + res$I.diff
indx <- 1:ltseq + (i - 1) * ltseq
resC2[indx] <- (Itot - min(Itot))/max(Itot)
}
SolarR <- solarR * resC2
## Temperature
if (temp.units == "Farenheit") {
minTemp <- (minTemp - 32) * (5/9)
minTemp <- rep(minTemp, each = tint)
maxTemp <- (maxTemp - 32) * (5/9)
maxTemp <- rep(maxTemp, each = tint)
rangeTemp <- maxTemp - minTemp
# avgTemp <- rep((avgTemp - 32)*(5/9),each=tint)
} else {
minTemp <- rep(minTemp, each = tint)
maxTemp <- rep(maxTemp, each = tint)
rangeTemp <- maxTemp - minTemp
}
xx <- rep(tseq, 366)
temp1 <- sin(2 * pi * (xx - 10)/tint)
temp1 <- (temp1 + 1)/2
Temp <- minTemp + temp1 * rangeTemp
## Relative humidity avgRH <- rep(avgRH,each=tint)
minRH <- rep(minRH, each = tint)
maxRH <- rep(maxRH, each = tint)
temp2 <- cos(2 * pi * (xx - 10)/tint)
temp2 <- (temp2 + 1)/2
if (rh.units == "percent") {
RH <- (minRH + temp2 * (maxRH - minRH))/100
} else {
RH <- (minRH + temp2 * (maxRH - minRH))
}
## Wind Speed
if (ws.units == "mph") {
WS <- rep(WindSpeed, each = tint) * MPHTOMPERSEC
} else {
WS <- rep(WindSpeed, each = tint)
}
## Precipitation
if (pp.units == "in") {
precip <- rep(I((precip * 2.54 * 10)/tint), each = tint)
} else {
precip <- rep(I(precip/tint), each = tint)
}
hour <- rep(tseq, 366)
DOY <- 1:366
doy <- rep(DOY, each = tint)
ans <- cbind(year, doy, hour, SolarR, Temp, RH, WS, precip)
ans
}
|
/R/weach366.R
|
no_license
|
ajmann4/biocro
|
R
| false | false | 3,509 |
r
|
/hydrographSCS/drawHydrograph.R
|
no_license
|
llllllllc/hydrographSCS
|
R
| false | false | 3,849 |
r
| ||
test_that("self-defined Stan functions work correctly", {
skip("expose_stan_functions doesn't work within R CMD CHECK")
rstan::expose_stan_functions(new_stan_functions)
# ARMA matrix generating functions
cov_ar1_R <- get_cov_matrix_ar1(ar = matrix(0.5), sigma = 2,
se2 = 0, nrows = 3)[1, , ]
expect_equal(cov_matrix_ar1(0.5, 2, 3), cov_ar1_R)
cov_ma1_R <- matrix(get_cov_matrix_ma1(ma = matrix(-0.3), sigma = 3,
se2 = 0, nrows = 1)[1, , ])
expect_equal(cov_matrix_ma1(-0.3, 3, 1), cov_ma1_R)
cov_arma1_R <- get_cov_matrix_arma1(ar = matrix(-0.5), ma = matrix(0.7),
sigma = 4, se2 = 0, nrows = 5)[1, , ]
expect_equal(cov_matrix_arma1(-0.5, 0.7, 4, 5), cov_arma1_R)
# log-likelihood functions for covariance models
y <- rnorm(9)
eta <- rnorm(9)
ll_stan <- normal_cov_log(y, eta = eta, se2 = 1:9, I = 2,
begin = c(1, 5), end = c(4, 9), nobs = c(4, 5),
res_cov_matrix = cov_arma1_R)
ll_R <- c(dmulti_normal(y[1:4], eta[1:4], cov_arma1_R[1:4, 1:4] + diag(1:4)),
dmulti_normal(y[5:9], eta[5:9], cov_arma1_R[1:5, 1:5] + diag(5:9)))
expect_equal(ll_stan, sum(ll_R))
ll_stan <- student_t_cov_log(y, nu = 10, eta = eta, se2 = 1:9, I = 2,
begin = c(1, 5), end = c(4, 9), nobs = c(4, 5),
res_cov_matrix = cov_arma1_R)
ll_R <- c(dmulti_student(y[1:4], df = 10, mu = eta[1:4],
Sigma = cov_arma1_R[1:4, 1:4] + diag(1:4)),
dmulti_student(y[5:9], df = 10, mu = eta[5:9],
Sigma = cov_arma1_R[1:5, 1:5] + diag(5:9)))
expect_equal(ll_stan, sum(ll_R))
# inverse gaussian functions
shape <- rgamma(1, 20, 1)
mu <- 20
y <- statmod::rinvgauss(1, mean = mu, shape = shape)
expect_equal(inv_gaussian_cdf_log(y, mu, shape, log(y), sqrt(y)),
pinvgauss(y, mean = mu, shape = shape, log = TRUE))
expect_equal(inv_gaussian_ccdf_log(y, mu, shape, log(y), sqrt(y)),
log(1 - pinvgauss(y, mean = mu, shape = shape)))
expect_equal(inv_gaussian_log(y, mu, shape, log(y), sqrt(y)),
dinvgauss(y, mean = mu, shape = shape, log = TRUE))
mu <- 18:22
y <- statmod::rinvgauss(5, mean = mu, shape = shape)
expect_equal(inv_gaussian_vector_log(y, mu, shape, sum(log(y)), sqrt(y)),
sum(dinvgauss(y, mean = mu, shape = shape, log = TRUE)))
# zero-inflated and hurdle log-densities
dat <- list(Y = c(0, 10), N_trait = 2, max_obs = 15)
dat2 <- list(Y = c(0, 0.5), N_trait = 2)
samp <- list(eta = matrix(rnorm(4), ncol = 4), shape = 2, phi = 2)
for (i in seq_along(dat$Y)) {
# zero-inflated
args <- list(y = dat$Y[i], eta = samp$eta[i], eta_zi = samp$eta[i+2])
expect_equal(do.call(zero_inflated_poisson_log, args),
loglik_zero_inflated_poisson(i, dat, samp))
expect_equal(do.call(zero_inflated_neg_binomial_2_log,
c(args, shape = samp$shape)),
loglik_zero_inflated_negbinomial(i, dat, samp))
expect_equal(do.call(zero_inflated_binomial_log,
c(args, trials = dat$max_obs)),
loglik_zero_inflated_binomial(i, dat, samp))
# zero_inflated_beta requires Y to be in (0,1)
args <- list(y = dat2$Y[i], eta = samp$eta[i], eta_zi = samp$eta[i+2])
expect_equal(do.call(zero_inflated_beta_log, c(args, phi = samp$phi)),
loglik_zero_inflated_beta(i, dat2, samp))
# hurdle
args <- list(y = dat$Y[i], eta = samp$eta[i], eta_hu = samp$eta[i+2])
expect_equal(do.call(hurdle_poisson_log, args),
loglik_hurdle_poisson(i, dat, samp))
expect_equal(do.call(hurdle_neg_binomial_2_log,
c(args, shape = samp$shape)),
loglik_hurdle_negbinomial(i, dat, samp))
expect_equal(do.call(hurdle_gamma_log,
c(args, shape = samp$shape)),
loglik_hurdle_gamma(i, dat, samp))
}
# ordinal log-densities
dat <- list(Y = 2, max_obs = 4)
eta <- rnorm(1)
etap <- array(rnorm(6), dim = c(2, 1, 3))
thres <- sort(rnorm(3))
# cumulative and sratio require thres - eta
samp <- list(eta = rep(thres, each = 2) - array(eta, dim = c(2, 1, 3)))
expect_equal(cumulative_log(dat$Y, eta, thres),
loglik_cumulative(1, dat, samp, link = "probit")[1])
expect_equal(sratio_log(dat$Y, eta, thres),
loglik_sratio(1, dat, samp, link = "logit")[1])
# acat and cratio require eta - thres
# also category specific effects are included here
samp <- list(eta = eta + etap - rep(thres, each = 2))
expect_equal(cratio_log(dat$Y, eta, etap[1, , ], thres),
loglik_cratio(1, dat, samp, link = "cloglog")[1])
expect_equal(acat_log(dat$Y, eta, etap[1, , ], thres),
loglik_acat(1, dat, samp, link = "cauchit")[1])
# kronecker product
A <- matrix(c(3, 2, 1, 2, 4, 1, 1, 1, 5), nrow = 3)
B <- matrix(c(3, 2, 2, 4), nrow = 2)
sd <- c(2, 7)
expect_equal(t(chol(base::kronecker(A, diag(sd) %*% B %*% diag(sd)))),
kronecker(t(chol(A)), diag(sd) %*% t(chol(B))))
# as_matrix
expect_equal(as_matrix(1:28, 4, 7), rbind(1:7, 8:14, 15:21, 22:28))
expect_equal(as_matrix(1:28, 3, 4), rbind(1:4, 5:8, 9:12))
# cauchit link
expect_equal(inv_cauchit(1.5), pcauchy(1.5))
# monotonous
expect_equal(monotonous(1:10, 4), sum(1:4))
expect_equal(monotonous(rnorm(5), 0), 0)
})
|
/brms/tests/testthat/tests.stan_functions.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 5,725 |
r
|
test_that("self-defined Stan functions work correctly", {
skip("expose_stan_functions doesn't work within R CMD CHECK")
rstan::expose_stan_functions(new_stan_functions)
# ARMA matrix generating functions
cov_ar1_R <- get_cov_matrix_ar1(ar = matrix(0.5), sigma = 2,
se2 = 0, nrows = 3)[1, , ]
expect_equal(cov_matrix_ar1(0.5, 2, 3), cov_ar1_R)
cov_ma1_R <- matrix(get_cov_matrix_ma1(ma = matrix(-0.3), sigma = 3,
se2 = 0, nrows = 1)[1, , ])
expect_equal(cov_matrix_ma1(-0.3, 3, 1), cov_ma1_R)
cov_arma1_R <- get_cov_matrix_arma1(ar = matrix(-0.5), ma = matrix(0.7),
sigma = 4, se2 = 0, nrows = 5)[1, , ]
expect_equal(cov_matrix_arma1(-0.5, 0.7, 4, 5), cov_arma1_R)
# log-likelihood functions for covariance models
y <- rnorm(9)
eta <- rnorm(9)
ll_stan <- normal_cov_log(y, eta = eta, se2 = 1:9, I = 2,
begin = c(1, 5), end = c(4, 9), nobs = c(4, 5),
res_cov_matrix = cov_arma1_R)
ll_R <- c(dmulti_normal(y[1:4], eta[1:4], cov_arma1_R[1:4, 1:4] + diag(1:4)),
dmulti_normal(y[5:9], eta[5:9], cov_arma1_R[1:5, 1:5] + diag(5:9)))
expect_equal(ll_stan, sum(ll_R))
ll_stan <- student_t_cov_log(y, nu = 10, eta = eta, se2 = 1:9, I = 2,
begin = c(1, 5), end = c(4, 9), nobs = c(4, 5),
res_cov_matrix = cov_arma1_R)
ll_R <- c(dmulti_student(y[1:4], df = 10, mu = eta[1:4],
Sigma = cov_arma1_R[1:4, 1:4] + diag(1:4)),
dmulti_student(y[5:9], df = 10, mu = eta[5:9],
Sigma = cov_arma1_R[1:5, 1:5] + diag(5:9)))
expect_equal(ll_stan, sum(ll_R))
# inverse gaussian functions
shape <- rgamma(1, 20, 1)
mu <- 20
y <- statmod::rinvgauss(1, mean = mu, shape = shape)
expect_equal(inv_gaussian_cdf_log(y, mu, shape, log(y), sqrt(y)),
pinvgauss(y, mean = mu, shape = shape, log = TRUE))
expect_equal(inv_gaussian_ccdf_log(y, mu, shape, log(y), sqrt(y)),
log(1 - pinvgauss(y, mean = mu, shape = shape)))
expect_equal(inv_gaussian_log(y, mu, shape, log(y), sqrt(y)),
dinvgauss(y, mean = mu, shape = shape, log = TRUE))
mu <- 18:22
y <- statmod::rinvgauss(5, mean = mu, shape = shape)
expect_equal(inv_gaussian_vector_log(y, mu, shape, sum(log(y)), sqrt(y)),
sum(dinvgauss(y, mean = mu, shape = shape, log = TRUE)))
# zero-inflated and hurdle log-densities
dat <- list(Y = c(0, 10), N_trait = 2, max_obs = 15)
dat2 <- list(Y = c(0, 0.5), N_trait = 2)
samp <- list(eta = matrix(rnorm(4), ncol = 4), shape = 2, phi = 2)
for (i in seq_along(dat$Y)) {
# zero-inflated
args <- list(y = dat$Y[i], eta = samp$eta[i], eta_zi = samp$eta[i+2])
expect_equal(do.call(zero_inflated_poisson_log, args),
loglik_zero_inflated_poisson(i, dat, samp))
expect_equal(do.call(zero_inflated_neg_binomial_2_log,
c(args, shape = samp$shape)),
loglik_zero_inflated_negbinomial(i, dat, samp))
expect_equal(do.call(zero_inflated_binomial_log,
c(args, trials = dat$max_obs)),
loglik_zero_inflated_binomial(i, dat, samp))
# zero_inflated_beta requires Y to be in (0,1)
args <- list(y = dat2$Y[i], eta = samp$eta[i], eta_zi = samp$eta[i+2])
expect_equal(do.call(zero_inflated_beta_log, c(args, phi = samp$phi)),
loglik_zero_inflated_beta(i, dat2, samp))
# hurdle
args <- list(y = dat$Y[i], eta = samp$eta[i], eta_hu = samp$eta[i+2])
expect_equal(do.call(hurdle_poisson_log, args),
loglik_hurdle_poisson(i, dat, samp))
expect_equal(do.call(hurdle_neg_binomial_2_log,
c(args, shape = samp$shape)),
loglik_hurdle_negbinomial(i, dat, samp))
expect_equal(do.call(hurdle_gamma_log,
c(args, shape = samp$shape)),
loglik_hurdle_gamma(i, dat, samp))
}
# ordinal log-densities
dat <- list(Y = 2, max_obs = 4)
eta <- rnorm(1)
etap <- array(rnorm(6), dim = c(2, 1, 3))
thres <- sort(rnorm(3))
# cumulative and sratio require thres - eta
samp <- list(eta = rep(thres, each = 2) - array(eta, dim = c(2, 1, 3)))
expect_equal(cumulative_log(dat$Y, eta, thres),
loglik_cumulative(1, dat, samp, link = "probit")[1])
expect_equal(sratio_log(dat$Y, eta, thres),
loglik_sratio(1, dat, samp, link = "logit")[1])
# acat and cratio require eta - thres
# also category specific effects are included here
samp <- list(eta = eta + etap - rep(thres, each = 2))
expect_equal(cratio_log(dat$Y, eta, etap[1, , ], thres),
loglik_cratio(1, dat, samp, link = "cloglog")[1])
expect_equal(acat_log(dat$Y, eta, etap[1, , ], thres),
loglik_acat(1, dat, samp, link = "cauchit")[1])
# kronecker product
A <- matrix(c(3, 2, 1, 2, 4, 1, 1, 1, 5), nrow = 3)
B <- matrix(c(3, 2, 2, 4), nrow = 2)
sd <- c(2, 7)
expect_equal(t(chol(base::kronecker(A, diag(sd) %*% B %*% diag(sd)))),
kronecker(t(chol(A)), diag(sd) %*% t(chol(B))))
# as_matrix
expect_equal(as_matrix(1:28, 4, 7), rbind(1:7, 8:14, 15:21, 22:28))
expect_equal(as_matrix(1:28, 3, 4), rbind(1:4, 5:8, 9:12))
# cauchit link
expect_equal(inv_cauchit(1.5), pcauchy(1.5))
# monotonous
expect_equal(monotonous(1:10, 4), sum(1:4))
expect_equal(monotonous(rnorm(5), 0), 0)
})
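## Editor's note: a small self-contained sketch, not part of the original test
## file, showing the covariance structure the ARMA checks above exercise. It
## assumes the standard AR(1) parameterisation,
## Sigma[i, j] = sigma^2 / (1 - ar^2) * ar^|i - j|,
## which is what cov_matrix_ar1() / get_cov_matrix_ar1() are expected to give.
ar1_cov <- function(ar, sigma, n) {
  lags <- abs(outer(seq_len(n), seq_len(n), "-"))  # |i - j| lag matrix
  sigma^2 / (1 - ar^2) * ar^lags
}
ar1_cov(0.5, 2, 3)  # compare with cov_matrix_ar1(0.5, 2, 3) used in the test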
|
drv <- dbDriver("PostgreSQL")
conn <- dbConnect(drv, dbname="db_name",host="localhost",port=5432,user="user",password="password" )
# Handy function to kill connections
kill_db_connections <- function (x) {
all_cons <- dbListConnections(PostgreSQL())
print(all_cons)
for(con in all_cons)
{
dbDisconnect(con)
}
print(paste(length(all_cons), " connections killed."))
}
|
/_connections/connections.R
|
no_license
|
mvaneggermond/read-write-R
|
R
| false | false | 399 |
r
|
drv <- dbDriver("PostgreSQL")
conn <- dbConnect(drv, dbname="db_name",host="localhost",port=5432,user="user",password="password" )
# Handy function to kill connections
kill_db_connections <- function (x) {
all_cons <- dbListConnections(PostgreSQL())
print(all_cons)
for(con in all_cons)
{
dbDisconnect(con)
}
print(paste(length(all_cons), " connections killed."))
}
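# Editor's note: hedged usage sketch, not part of the original file. It assumes
# the RPostgreSQL package and a reachable database matching the hypothetical
# credentials above, so the lines are left commented out. Note that
# kill_db_connections() never uses its `x` argument; it simply closes every
# open PostgreSQL connection.
# library(RPostgreSQL)
# drv <- dbDriver("PostgreSQL")
# conn <- dbConnect(drv, dbname = "db_name", host = "localhost",
#                   port = 5432, user = "user", password = "password")
# dbGetQuery(conn, "SELECT 1 AS ok")
# kill_db_connections()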
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-report.R
\name{format_report_row}
\alias{format_report_row}
\title{Format a single "rows" element from a report fact map}
\usage{
format_report_row(
x,
labels = TRUE,
guess_types = TRUE,
bind_using_character_cols = deprecated()
)
}
\arguments{
\item{x}{\code{list}; a single element from the \code{rows} element of a fact
map. When the data is in a tabular format, this element usually has the same
length as the number of columns with each element having a label and value
element.}
\item{labels}{\code{logical}; an indicator of whether the returned data should
be the label (i.e. formatted value) or the actual value. By default, the labels
are returned because these are what appear in the Salesforce dashboard and
more closely align with the column names. For example, "Account.Name" label
may be \code{"Account B"} and the value \code{0016A0000035mJEQAY}. The former
(label) more accurately reflects the "Account.Name".}
\item{guess_types}{\code{logical}; indicating whether or not to use \code{col_guess()}
to try and cast the data returned in the recordset. If \code{TRUE} then
\code{col_guess()} is used, if \code{FALSE} then all fields will be returned
as character. This is helpful when \code{col_guess()} will mangle field values
in Salesforce that you'd like to preserve during translation into a \code{tbl_df},
like numeric looking values that must be preserved as strings ("48.0").}
\item{bind_using_character_cols}{\code{logical}; an indicator of whether to
cast the data to all character columns to ensure that \code{\link[dplyr:bind]{bind_rows}}
does not fail because two paginated recordsets have differing datatypes for the
same column. Set this to \code{TRUE} rarely, typically only when having this
set to \code{FALSE} returns an error or you want all columns in the data to be
character.}
}
\value{
\code{tbl_df}; a single row data frame with the data for the row that
the supplied list represented in the report's fact map.
}
\description{
This function accepts a list representing a single row from a report and
selects either the value or label for the report columns to turn into a one
row \code{tbl_df} that will usually be bound to the other rows in the report
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\keyword{internal}
|
/man/format_report_row.Rd
|
permissive
|
StevenMMortimer/salesforcer
|
R
| false | true | 2,385 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-report.R
\name{format_report_row}
\alias{format_report_row}
\title{Format a single "rows" element from a report fact map}
\usage{
format_report_row(
x,
labels = TRUE,
guess_types = TRUE,
bind_using_character_cols = deprecated()
)
}
\arguments{
\item{x}{\code{list}; a single element from the \code{rows} element of a fact
map. When the data is in a tabular format, this element usually has the same
length as the number of columns with each element having a label and value
element.}
\item{labels}{\code{logical}; an indicator of whether the returned data should
be the label (i.e. formatted value) or the actual value. By default, the labels
are returned because these are what appear in the Salesforce dashboard and
more closely align with the column names. For example, "Account.Name" label
may be \code{"Account B"} and the value \code{0016A0000035mJEQAY}. The former
(label) more accurately reflects the "Account.Name".}
\item{guess_types}{\code{logical}; indicating whether or not to use \code{col_guess()}
to try and cast the data returned in the recordset. If \code{TRUE} then
\code{col_guess()} is used, if \code{FALSE} then all fields will be returned
as character. This is helpful when \code{col_guess()} will mangle field values
in Salesforce that you'd like to preserve during translation into a \code{tbl_df},
like numeric looking values that must be preserved as strings ("48.0").}
\item{bind_using_character_cols}{\code{logical}; an indicator of whether to
cast the data to all character columns to ensure that \code{\link[dplyr:bind]{bind_rows}}
does not fail because two paginated recordsets have differing datatypes for the
same column. Set this to \code{TRUE} rarely, typically only when having this
set to \code{FALSE} returns an error or you want all columns in the data to be
character.}
}
\value{
\code{tbl_df}; a single row data frame with the data for the row that
the supplied list represented in the report's fact map.
}
\description{
This function accepts a list representing a single row from a report and
selects either the value or label for the report columns to turn into a one
row \code{tbl_df} that will usually be bound to the other rows in the report
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\keyword{internal}
|
local.corr <-
function(s) {
# store data
plate.corr=list(plate.name=list(), local.cor=list())
#loop through different time points
#t.p is time points
for (t.p in c(1:length(s))){
#now get correlations within well
temp=list()
r.vector=list()
r.vector[[length( unique(s[[t.p]]$cw) )]]=0
names(r.vector)=unique(s[[t.p]]$cw)
#using (Demas 2009), figure 5, closer neurons are more correlated
#therefore I'm going to measure only cor. btw adjacent electrodes
#loop through wells
for (w in c(1:length(unique(s[[1]]$cw))) ){
#get indices of current well
index=which(s[[t.p]]$cw==unique(s[[1]]$cw)[w])
      #convert seconds to 1/100-second time bins (note the *10^2, despite the .ms variable names)
start.ms=round(s[[t.p]]$rec.time[1]*10^2,digits=0)
stop.ms=round(s[[t.p]]$rec.time[2]*10^2,digits=0)
total.ms=stop.ms-start.ms
#digitize spikes
dig.s=list()
dig.s[[length(index)]]=rep(0, total.ms )
#digitize spikes: loop through channels
for (i in c(1:length(index))){
dig.s[[i]]=rep(0, total.ms)
current.s.ms=round(s[[t.p]]$spikes[[index[i] ]]*10^2, digits=0)- start.ms
for (j in current.s.ms ){
        #mark the time bin containing this spike
dig.s[[i]][ j ] = 1
}
}
#get indices of current well
index=which(s[[t.p]]$cw==unique(s[[1]]$cw)[w])
      #get electrode row/column coordinates from the channel names
w.rows=as.numeric( substr(s[[t.p]]$channels[index],4,4) )
w.cols=as.numeric( substr(s[[t.p]]$channels[index],5,5) )
coords=rbind(w.rows,w.cols)
#loop through channels, getting cor between an electrode
#and electrodes above, below, to left and to right
# if those electrodes have AE
for (i in c(1:length(s[[1]]$channels[index]))){
c.c=coords[,i]
        #check for suitable coordinates to correlate with
if (sum(c.c[1]==coords[1,] & c.c[2]+1==coords[2,])>0){
ind.c.to.comp=which(c.c[1]==coords[1,] & c.c[2]+1==coords[2,])
r.vector[[w]]=c(r.vector[[w]], cor(dig.s[[i]], dig.s[[ind.c.to.comp]]) )
}
if (sum( c.c[1]+1==coords[1,] & c.c[2]-1==coords[2,])>0){
ind.c.to.comp= which(c.c[1]+1==coords[1,] & c.c[2]-1==coords[2,])
r.vector[[w]]=c(r.vector[[w]], cor(dig.s[[i]], dig.s[[ind.c.to.comp]]) )
}
if (sum(c.c[1]+1==coords[1,] & c.c[2]==coords[2,])>0){
ind.c.to.comp= which(c.c[1]+1==coords[1,] & c.c[2]==coords[2,])
r.vector[[w]]=c(r.vector[[w]], cor(dig.s[[i]], dig.s[[ind.c.to.comp]]) )
}
if (sum(c.c[1]+1==coords[1,] & c.c[2]+1==coords[2,])>0){
ind.c.to.comp=which(c.c[1]+1==coords[1,] & c.c[2]+1==coords[2,])
r.vector[[w]]=c(r.vector[[w]], cor(dig.s[[i]], dig.s[[ind.c.to.comp]]) )
}
}#end of loop through channels
}#end of loop through wells
plate.corr$plate.name[t.p]=strsplit(basename(s[[t.p]]$file),'.h5')[[1]][1]
plate.corr$local.cor[[t.p]]=r.vector
}#end of loop through time points
plate.corr # returns plate.corr
}
|
/R/local.corr.R
|
no_license
|
dianaransomhall/meadq
|
R
| false | false | 3,153 |
r
|
local.corr <-
function(s) {
# store data
plate.corr=list(plate.name=list(), local.cor=list())
#loop through different time points
#t.p is time points
for (t.p in c(1:length(s))){
#now get correlations within well
temp=list()
r.vector=list()
r.vector[[length( unique(s[[t.p]]$cw) )]]=0
names(r.vector)=unique(s[[t.p]]$cw)
#using (Demas 2009), figure 5, closer neurons are more correlated
#therefore I'm going to measure only cor. btw adjacent electrodes
#loop through wells
for (w in c(1:length(unique(s[[1]]$cw))) ){
#get indices of current well
index=which(s[[t.p]]$cw==unique(s[[1]]$cw)[w])
      #convert seconds to 1/100-second time bins (note the *10^2, despite the .ms variable names)
start.ms=round(s[[t.p]]$rec.time[1]*10^2,digits=0)
stop.ms=round(s[[t.p]]$rec.time[2]*10^2,digits=0)
total.ms=stop.ms-start.ms
#digitize spikes
dig.s=list()
dig.s[[length(index)]]=rep(0, total.ms )
#digitize spikes: loop through channels
for (i in c(1:length(index))){
dig.s[[i]]=rep(0, total.ms)
current.s.ms=round(s[[t.p]]$spikes[[index[i] ]]*10^2, digits=0)- start.ms
for (j in current.s.ms ){
        #mark the time bin containing this spike
dig.s[[i]][ j ] = 1
}
}
#get indices of current well
index=which(s[[t.p]]$cw==unique(s[[1]]$cw)[w])
      #get electrode row/column coordinates from the channel names
w.rows=as.numeric( substr(s[[t.p]]$channels[index],4,4) )
w.cols=as.numeric( substr(s[[t.p]]$channels[index],5,5) )
coords=rbind(w.rows,w.cols)
#loop through channels, getting cor between an electrode
#and electrodes above, below, to left and to right
# if those electrodes have AE
for (i in c(1:length(s[[1]]$channels[index]))){
c.c=coords[,i]
        #check for suitable coordinates to correlate with
if (sum(c.c[1]==coords[1,] & c.c[2]+1==coords[2,])>0){
ind.c.to.comp=which(c.c[1]==coords[1,] & c.c[2]+1==coords[2,])
r.vector[[w]]=c(r.vector[[w]], cor(dig.s[[i]], dig.s[[ind.c.to.comp]]) )
}
if (sum( c.c[1]+1==coords[1,] & c.c[2]-1==coords[2,])>0){
ind.c.to.comp= which(c.c[1]+1==coords[1,] & c.c[2]-1==coords[2,])
r.vector[[w]]=c(r.vector[[w]], cor(dig.s[[i]], dig.s[[ind.c.to.comp]]) )
}
if (sum(c.c[1]+1==coords[1,] & c.c[2]==coords[2,])>0){
ind.c.to.comp= which(c.c[1]+1==coords[1,] & c.c[2]==coords[2,])
r.vector[[w]]=c(r.vector[[w]], cor(dig.s[[i]], dig.s[[ind.c.to.comp]]) )
}
if (sum(c.c[1]+1==coords[1,] & c.c[2]+1==coords[2,])>0){
ind.c.to.comp=which(c.c[1]+1==coords[1,] & c.c[2]+1==coords[2,])
r.vector[[w]]=c(r.vector[[w]], cor(dig.s[[i]], dig.s[[ind.c.to.comp]]) )
}
}#end of loop through channels
}#end of loop through wells
plate.corr$plate.name[t.p]=strsplit(basename(s[[t.p]]$file),'.h5')[[1]][1]
plate.corr$local.cor[[t.p]]=r.vector
}#end of loop through time points
plate.corr # returns plate.corr
}
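## Editor's note: self-contained sketch, not part of the original file, of the
## spike "digitisation" step used above: spike times in seconds are mapped to
## 1/100-second bins and a binary vector is set to 1 wherever a spike falls.
spike_times <- c(0.013, 0.270, 0.272, 0.995)  # hypothetical channel, seconds
rec_window  <- c(0, 1)                        # recording start/stop, seconds
bins  <- round(rec_window * 10^2)             # same 10^2 scaling as local.corr()
digit <- rep(0, bins[2] - bins[1])
digit[round(spike_times * 10^2) - bins[1]] <- 1
sum(digit)  # occupied bins; spikes closer than 10 ms share a bin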
|
data <- read.table("household_power_consumption.txt", sep = ";" , dec = ".",header = TRUE, stringsAsFactors = FALSE)
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data_needed <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02",]
data_needed$Date<- paste(data_needed$Date, data_needed$Time, sep=" ")
data_needed$Date<- strptime(data_needed$Date , "%Y-%m-%d %H:%M:%S")
png("plot3.png", width=480, height=480)
plot(data_needed$Date, data_needed$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(data_needed$Date, data_needed$Sub_metering_2, type="l",col = "red")
lines(data_needed$Date, data_needed$Sub_metering_3, type="l",col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
|
/Plot3.R
|
no_license
|
akash-v/ExData_Plotting1
|
R
| false | false | 817 |
r
|
data <- read.table("household_power_consumption.txt", sep = ";" , dec = ".",header = TRUE, stringsAsFactors = FALSE)
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data_needed <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02",]
data_needed$Date<- paste(data_needed$Date, data_needed$Time, sep=" ")
data_needed$Date<- strptime(data_needed$Date , "%Y-%m-%d %H:%M:%S")
png("plot3.png", width=480, height=480)
plot(data_needed$Date, data_needed$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(data_needed$Date, data_needed$Sub_metering_2, type="l",col = "red")
lines(data_needed$Date, data_needed$Sub_metering_3, type="l",col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
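## Editor's note: small self-contained sketch, not from the original script, of
## the Date + Time handling above: a Date and an "HH:MM:SS" string are pasted
## together and parsed into POSIXlt with strptime().
d  <- as.Date("2007-02-01")
tm <- "18:30:00"
dt <- strptime(paste(d, tm, sep = " "), "%Y-%m-%d %H:%M:%S")
dt         # "2007-02-01 18:30:00"
class(dt)  # "POSIXlt" "POSIXt"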
|
library(data.table)
library(ISLR)
library(MASS)
library(boot)
library(splines)
library(gam)
library(leaps)
library(glmnet)
WineDataInput <- read.csv(file="winemag-data-130k-v2.csv", header=TRUE, sep=",")
WineData <-na.omit(WineDataInput)
WineData <- subset(WineData, select = c(country, points, price, province, title, variety))
Continent <- read.csv(file="continents.csv", header=TRUE, sep=",")
WineData = merge(x = WineDataInput, y = Continent, by = "country")
#Dummy Variables creation ends at line 219
WineData$EastEurope[WineData$sub.region == 'Eastern Europe'] <- 1
WineData$EastEurope[WineData$sub.region != 'Eastern Europe'] <- 0
WineData$LatinAmerica[WineData$sub.region == 'Latin America and the Caribbean'] <- 1
WineData$LatinAmerica[WineData$sub.region != 'Latin America and the Caribbean'] <- 0
WineData$NorthAmerica[WineData$sub.region == 'Northern America'] <- 1
WineData$NorthAmerica[WineData$sub.region != 'Northern America'] <- 0
WineData$SouthEurope[WineData$sub.region == 'Southern Europe'] <- 1
WineData$SouthEurope[WineData$sub.region != 'Southern Europe'] <- 0
WineData$SubSahara[WineData$sub.region == 'Sub-Saharan Africa'] <- 1
WineData$SubSahara[WineData$sub.region != 'Sub-Saharan Africa'] <- 0
WineData$WestEurope[WineData$sub.region == 'Western Europe'] <- 1
WineData$WestEurope[WineData$sub.region != 'Western Europe'] <- 0
WineData$Americas[WineData$Continent == 'Americas'] <- 1
WineData$Americas[WineData$Continent != 'Americas'] <- 0
WineData$Oceania[WineData$Continent == 'Oceania'] <- 1
WineData$Oceania[WineData$Continent != 'Oceania'] <- 0
WineData$Europe[WineData$Continent == 'Europe'] <- 1
WineData$Europe[WineData$Continent != 'Europe'] <- 0
WineData$Austria[WineData$country == 'Austria'] <- 1
WineData$Austria[WineData$country != 'Austria'] <- 0
WineData$Australia[WineData$country == 'Australia'] <- 1
WineData$Australia[WineData$country != 'Australia'] <- 0
WineData$Brazil[WineData$country == 'Brazil'] <- 1
WineData$Brazil[WineData$country != 'Brazil'] <- 0
WineData$Bulgaria[WineData$country == 'Bulgaria'] <- 1
WineData$Bulgaria[WineData$country != 'Bulgaria'] <- 0
WineData$Canada[WineData$country == 'Canada'] <- 1
WineData$Canada[WineData$country != 'Canada'] <- 0
WineData$Chile[WineData$country == 'Chile'] <- 1
WineData$Chile[WineData$country != 'Chile'] <- 0
WineData$Chile[WineData$country == 'Chile'] <- 1
WineData$Chile[WineData$country != 'Chile'] <- 0
WineData$Germany[WineData$country == 'Germany'] <- 1
WineData$Germany[WineData$country != 'Germany'] <- 0
WineData$Hungary[WineData$country == 'Hungary'] <- 1
WineData$Hungary[WineData$country != 'Hungary'] <- 0
WineData$Mexico[WineData$country == 'Mexico'] <- 1
WineData$Mexico[WineData$country != 'Mexico'] <- 0
WineData$Italy[WineData$country == 'Italy'] <- 1
WineData$Italy[WineData$country != 'Italy'] <- 0
WineData$India[WineData$country == 'India'] <- 1
WineData$India[WineData$country != 'India'] <- 0
WineData$Peru[WineData$country == 'Peru'] <- 1
WineData$Peru[WineData$country != 'Peru'] <- 0
WineData$Portugal[WineData$country == 'Portugal'] <- 1
WineData$Portugal[WineData$country != 'Portugal'] <- 0
WineData$Slovenia[WineData$country == 'Slovenia'] <- 1
WineData$Slovenia[WineData$country != 'Slovenia'] <- 0
WineData$SAfrica[WineData$country == 'South Africa'] <- 1
WineData$SAfrica[WineData$country != 'South Africa'] <- 0
WineData$Spain[WineData$country == 'Spain'] <- 1
WineData$Spain[WineData$country != 'Spain'] <- 0
WineData$US[WineData$country == 'US'] <- 1
WineData$US[WineData$country != 'US'] <- 0
WineData$Aidani[WineData$variety == 'Aidani'] <- 1
WineData$Aidani[WineData$variety != 'Aidani'] <- 0
WineData$Airen[WineData$variety == 'Airen'] <- 1
WineData$Airen[WineData$variety != 'Airen'] <- 0
WineData$Brachetto[WineData$variety == 'Brachetto'] <- 1
WineData$Brachetto[WineData$variety != 'Brachetto'] <- 0
WineData$Chenin[WineData$variety == 'Chenin Blanc-Sauvignon Blanc'] <- 1
WineData$Chenin[WineData$variety != 'Chenin Blanc-Sauvignon Blanc'] <- 0
WineData$Volpe[WineData$variety == 'Coda di Volpe'] <- 1
WineData$Volpe[WineData$variety != 'Coda di Volpe'] <- 0
WineData$Blanc[WineData$variety == 'Blanc'] <- 1
WineData$Blanc[WineData$variety != 'Blanc'] <- 0
WineData$Colombard[WineData$variety == 'Colombard'] <- 1
WineData$Colombard[WineData$variety != 'Colombard'] <- 0
WineData$Gragnano[WineData$variety == 'Gragnano'] <- 1
WineData$Gragnano[WineData$variety != 'Gragnano'] <- 0
WineData$Kinali[WineData$variety == 'Kinali Yapincak'] <- 1
WineData$Kinali[WineData$variety != 'Kinali Yapincak'] <- 0
WineData$Lambrusco[WineData$variety == 'Lambrusco'] <- 1
WineData$Lambrusco[WineData$variety != 'Lambrusco'] <- 0
WineData$Muskat[WineData$variety == 'Muskat Ottonel'] <- 1
WineData$Muskat[WineData$variety != 'Muskat Ottonel'] <- 0
WineData$Picapoll[WineData$variety == 'Picapoll'] <- 1
WineData$Picapoll[WineData$variety != 'Picapoll'] <- 0
WineData$Pigato[WineData$variety == 'Pigato'] <- 1
WineData$Pigato[WineData$variety != 'Pigato'] <- 0
WineData$Pignoletto[WineData$variety == 'Pignoletto'] <- 1
WineData$Pignoletto[WineData$variety != 'Pignoletto'] <- 0
WineData$Gamay[WineData$variety == 'Pinot Noir-Gamay'] <- 1
WineData$Gamay[WineData$variety != 'Pinot Noir-Gamay'] <- 0
WineData$Semillon[WineData$variety == 'Semillon-Sauvignon Blanc'] <- 1
WineData$Semillon[WineData$variety != 'Semillon-Sauvignon Blanc'] <- 0
WineData$Tempranillo[WineData$variety == 'Tempranillo Blanco'] <- 1
WineData$Tempranillo[WineData$variety != 'Tempranillo Blanco'] <- 0
WineData$Tinta[WineData$variety == 'Tinta del Pais'] <- 1
WineData$Tinta[WineData$variety != 'Tinta del Pais'] <- 0
WineData$Bual[WineData$variety == 'Bual'] <- 1
WineData$Bual[WineData$variety != 'Bual'] <- 0
WineData$CabernetShiraz[WineData$variety == 'Cabernet-Shiraz'] <- 1
WineData$CabernetShiraz[WineData$variety != 'Cabernet-Shiraz'] <- 0
WineData$Gelber[WineData$variety == 'Gelber Traminer'] <- 1
WineData$Gelber[WineData$variety != 'Gelber Traminer'] <- 0
WineData$Terrantez[WineData$variety == 'Terrantez'] <- 1
WineData$Terrantez[WineData$variety != 'Terrantez'] <- 0
WineData <-na.omit(WineData)
WineData$Michigan[WineData$province == 'Michigan'] <- 1
WineData$Michigan[WineData$province != 'Michigan'] <- 0
WineData$Virginia[WineData$province == 'Virginia'] <- 1
WineData$Virginia[WineData$province != 'Virginia'] <- 0
WineData$Illinois[WineData$province == 'Illinois'] <- 1
WineData$Illinois[WineData$province != 'Illinois'] <- 0
WineData$NewMexico[WineData$province == 'New Mexico'] <- 1
WineData$NewMexico[WineData$province != 'New Mexico'] <- 0
WineData$Jersey[WineData$province == 'New Jersey'] <- 1
WineData$Jersey[WineData$province != 'New Jersey'] <- 0
WineData$Pennsylvania[WineData$province == 'Pennsylvania'] <- 1
WineData$Pennsylvania[WineData$province != 'Pennsylvania'] <- 0
WineData$Missouri[WineData$province == 'Missouri'] <- 1
WineData$Missouri[WineData$province != 'Missouri'] <- 0
WineData$Nevada[WineData$province == 'Nevada'] <- 1
WineData$Nevada[WineData$province != 'Nevada'] <- 0
WineData$Colorado[WineData$province == 'Colorado'] <- 1
WineData$Colorado[WineData$province != 'Colorado'] <- 0
WineData$Arizona[WineData$province == 'Arizona'] <- 1
WineData$Arizona[WineData$province != 'Arizona'] <- 0
WineData$Massachusetts[WineData$province == 'Massachusetts'] <- 1
WineData$Massachusetts[WineData$province != 'Massachusetts'] <- 0
WineData$Ohio[WineData$province == 'Ohio'] <- 1
WineData$Ohio[WineData$province != 'Ohio'] <- 0
WineData$Iowa[WineData$province == 'Iowa'] <- 1
WineData$Iowa[WineData$province != 'Iowa'] <- 0
WineData$NorthCarolina[WineData$province == 'North Carolina'] <- 1
WineData$NorthCarolina[WineData$province != 'North Carolina'] <- 0
WineData$Napa[WineData$region_1 == 'Napa Valley'] <- 1
WineData$Napa[WineData$region_1 != 'Napa Valley'] <- 0
WineData$RRValley[WineData$region_1 == 'Russian River Valley'] <- 1
WineData$RRValley[WineData$region_1 != 'Russian River Valley'] <- 0
WineData$Sonoma[WineData$region_1 != 'Sonoma Valley'] <- 0
WineData$Sonoma[WineData$region_1 %like% 'Sonoma'] <- 1
#parameter selection top 20 using forward stepwise selection
length(WineData)
colnames(WineData)
drops <- c("country","X","description","designation","province","region_1","region_2","taster_name","taster_twitter_handle", "title", "variety","winery","Continent","sub.region")
newwine <- WineData[ , !(names(WineData) %in% drops)]
# creating the training and test data
set.seed(1)
train <- sample(1:nrow(WineData), 0.8*nrow(WineData))
test <- -train
trainingData <- newwine[train, ]
testData <- newwine[test, ]
colnames(newwine)
fit <- regsubsets(points ~ ., data = newwine, nvmax = 60, method = "forward")
fit.summary <- summary(fit)
plot(fit.summary$bic, xlab = "Number of variables", ylab = "BIC", type='b')
plot(fit.summary$cp, xlab = "Number of variables", ylab = "CP", type='b')
plot(fit.summary$adjr2, xlab = "Number of variables", ylab = "adjr2", type='b')
fit <- regsubsets(points ~ ., data = newwine, nvmax = 20, method = "forward")
coeffs <- coef(fit, id = 20)
names(coeffs)
#fitting different models and comparing results
#linear
fitlm = lm(points~price+ LatinAmerica + SubSahara + Brazil +
Canada + Germany + Mexico + Brachetto + Gragnano + Gamay + Pennsylvania + Missouri + Colorado +
Massachusetts + Ohio + NorthCarolina,data=trainingData)
summary(fitlm)
yhat <- predict(fitlm, testData)
mselm <- mean((testData$points - yhat)^2)
mselm
#poly
fitply= lm(points~poly(price,3) + LatinAmerica + SubSahara + Brazil +
Canada + Germany + Mexico + Brachetto + Gragnano + Gamay + Pennsylvania + Missouri + Colorado +
Massachusetts + Ohio + NorthCarolina,data=trainingData)
summary(fitply)
yhat <- predict(fitply, testData)
msepoly <- mean((testData$points - yhat)^2)
msepoly
#fitting with spline
x=trainingData$price
y=trainingData$points
ytest = testData$points
xtest = testData$price
yhatsp=predict(smooth.spline(x,y),xtest)$y
msespline=mean((ytest-yhatsp)^2)
msespline
sst <- sum((ytest - mean(ytest))^2)
sse <- sum((yhatsp - ytest)^2)
rsqspline <- 1 - sse / sst
rsqspline
plot(trainingData$price,trainingData$points,col="darkgrey")
fitSpl = smooth.spline(trainingData$price,trainingData$points,cv=TRUE)
lines(fitSpl,col="red",lwd=2)
#fitting with spline and other predictors
fitgam=gam(points~s(price,18)+ LatinAmerica + SubSahara + Brazil +
Canada + Germany + Mexico + Brachetto + Gragnano + Gamay + Pennsylvania + Missouri + Colorado +
Massachusetts + Ohio + NorthCarolina,data=trainingData)
yhatsp =predict(fitgam, testData)
msegam=mean((ytest-yhatsp)^2)
msegam
sst <- sum((ytest - mean(ytest))^2)
sse <- sum((yhatsp - ytest)^2)
rsqgam <- 1 - sse / sst
rsqgam
#fit model to ridge regression
x=model.matrix(points~poly(price,3)+ LatinAmerica + SubSahara + Brazil +
Canada + Germany + Mexico + Brachetto + Gragnano + Gamay + Pennsylvania + Missouri + Colorado +
Massachusetts + Ohio + NorthCarolina,data=newwine)[,-1]
y=newwine$points
grid = 10^seq(10,-2,length=100)
train = sample(1:nrow(x), 0.8*nrow(x))
test = (-train)
y.test = y[test]
ridge.mod = glmnet(x[train,], y[train],alpha = 0, lambda = grid, thresh=1e-12)
yhat = predict(ridge.mod,s=4,newx=x[test,])
mseridge <- mean((yhat-y.test)^2)
mseridge
#fit model to Lasso
ridge.mod = glmnet(x[train,], y[train],alpha = 1, lambda = grid, thresh=1e-12)
yhat = predict(ridge.mod,s=4,newx=x[test,])
mselasso <- mean((yhat-y.test)^2)
mselasso
|
/Final_Project_Code_042619.R
|
no_license
|
jchakko/WinePredictions
|
R
| false | false | 11,912 |
r
|
library(data.table)
library(ISLR)
library(MASS)
library(boot)
library(splines)
library(gam)
library(leaps)
library(glmnet)
WineDataInput <- read.csv(file="winemag-data-130k-v2.csv", header=TRUE, sep=",")
WineData <-na.omit(WineDataInput)
WineData <- subset(WineData, select = c(country, points, price, province, title, variety))
Continent <- read.csv(file="continents.csv", header=TRUE, sep=",")
WineData = merge(x = WineDataInput, y = Continent, by = "country")
#Dummy Variables creation ends at line 219
WineData$EastEurope[WineData$sub.region == 'Eastern Europe'] <- 1
WineData$EastEurope[WineData$sub.region != 'Eastern Europe'] <- 0
WineData$LatinAmerica[WineData$sub.region == 'Latin America and the Caribbean'] <- 1
WineData$LatinAmerica[WineData$sub.region != 'Latin America and the Caribbean'] <- 0
WineData$NorthAmerica[WineData$sub.region == 'Northern America'] <- 1
WineData$NorthAmerica[WineData$sub.region != 'Northern America'] <- 0
WineData$SouthEurope[WineData$sub.region == 'Southern Europe'] <- 1
WineData$SouthEurope[WineData$sub.region != 'Southern Europe'] <- 0
WineData$SubSahara[WineData$sub.region == 'Sub-Saharan Africa'] <- 1
WineData$SubSahara[WineData$sub.region != 'Sub-Saharan Africa'] <- 0
WineData$WestEurope[WineData$sub.region == 'Western Europe'] <- 1
WineData$WestEurope[WineData$sub.region != 'Western Europe'] <- 0
WineData$Americas[WineData$Continent == 'Americas'] <- 1
WineData$Americas[WineData$Continent != 'Americas'] <- 0
WineData$Oceania[WineData$Continent == 'Oceania'] <- 1
WineData$Oceania[WineData$Continent != 'Oceania'] <- 0
WineData$Europe[WineData$Continent == 'Europe'] <- 1
WineData$Europe[WineData$Continent != 'Europe'] <- 0
WineData$Austria[WineData$country == 'Austria'] <- 1
WineData$Austria[WineData$country != 'Austria'] <- 0
WineData$Australia[WineData$country == 'Australia'] <- 1
WineData$Australia[WineData$country != 'Australia'] <- 0
WineData$Brazil[WineData$country == 'Brazil'] <- 1
WineData$Brazil[WineData$country != 'Brazil'] <- 0
WineData$Bulgaria[WineData$country == 'Bulgaria'] <- 1
WineData$Bulgaria[WineData$country != 'Bulgaria'] <- 0
WineData$Canada[WineData$country == 'Canada'] <- 1
WineData$Canada[WineData$country != 'Canada'] <- 0
WineData$Chile[WineData$country == 'Chile'] <- 1
WineData$Chile[WineData$country != 'Chile'] <- 0
WineData$Chile[WineData$country == 'Chile'] <- 1
WineData$Chile[WineData$country != 'Chile'] <- 0
WineData$Germany[WineData$country == 'Germany'] <- 1
WineData$Germany[WineData$country != 'Germany'] <- 0
WineData$Hungary[WineData$country == 'Hungary'] <- 1
WineData$Hungary[WineData$country != 'Hungary'] <- 0
WineData$Mexico[WineData$country == 'Mexico'] <- 1
WineData$Mexico[WineData$country != 'Mexico'] <- 0
WineData$Italy[WineData$country == 'Italy'] <- 1
WineData$Italy[WineData$country != 'Italy'] <- 0
WineData$India[WineData$country == 'India'] <- 1
WineData$India[WineData$country != 'India'] <- 0
WineData$Peru[WineData$country == 'Peru'] <- 1
WineData$Peru[WineData$country != 'Peru'] <- 0
WineData$Portugal[WineData$country == 'Portugal'] <- 1
WineData$Portugal[WineData$country != 'Portugal'] <- 0
WineData$Slovenia[WineData$country == 'Slovenia'] <- 1
WineData$Slovenia[WineData$country != 'Slovenia'] <- 0
WineData$SAfrica[WineData$country == 'South Africa'] <- 1
WineData$SAfrica[WineData$country != 'South Africa'] <- 0
WineData$Spain[WineData$country == 'Spain'] <- 1
WineData$Spain[WineData$country != 'Spain'] <- 0
WineData$US[WineData$country == 'US'] <- 1
WineData$US[WineData$country != 'US'] <- 0
WineData$Aidani[WineData$variety == 'Aidani'] <- 1
WineData$Aidani[WineData$variety != 'Aidani'] <- 0
WineData$Airen[WineData$variety == 'Airen'] <- 1
WineData$Airen[WineData$variety != 'Airen'] <- 0
WineData$Brachetto[WineData$variety == 'Brachetto'] <- 1
WineData$Brachetto[WineData$variety != 'Brachetto'] <- 0
WineData$Chenin[WineData$variety == 'Chenin Blanc-Sauvignon Blanc'] <- 1
WineData$Chenin[WineData$variety != 'Chenin Blanc-Sauvignon Blanc'] <- 0
WineData$Volpe[WineData$variety == 'Coda di Volpe'] <- 1
WineData$Volpe[WineData$variety != 'Coda di Volpe'] <- 0
WineData$Blanc[WineData$variety == 'Blanc'] <- 1
WineData$Blanc[WineData$variety != 'Blanc'] <- 0
WineData$Colombard[WineData$variety == 'Colombard'] <- 1
WineData$Colombard[WineData$variety != 'Colombard'] <- 0
WineData$Gragnano[WineData$variety == 'Gragnano'] <- 1
WineData$Gragnano[WineData$variety != 'Gragnano'] <- 0
WineData$Kinali[WineData$variety == 'Kinali Yapincak'] <- 1
WineData$Kinali[WineData$variety != 'Kinali Yapincak'] <- 0
WineData$Lambrusco[WineData$variety == 'Lambrusco'] <- 1
WineData$Lambrusco[WineData$variety != 'Lambrusco'] <- 0
WineData$Muskat[WineData$variety == 'Muskat Ottonel'] <- 1
WineData$Muskat[WineData$variety != 'Muskat Ottonel'] <- 0
WineData$Picapoll[WineData$variety == 'Picapoll'] <- 1
WineData$Picapoll[WineData$variety != 'Picapoll'] <- 0
WineData$Pigato[WineData$variety == 'Pigato'] <- 1
WineData$Pigato[WineData$variety != 'Pigato'] <- 0
WineData$Pignoletto[WineData$variety == 'Pignoletto'] <- 1
WineData$Pignoletto[WineData$variety != 'Pignoletto'] <- 0
WineData$Gamay[WineData$variety == 'Pinot Noir-Gamay'] <- 1
WineData$Gamay[WineData$variety != 'Pinot Noir-Gamay'] <- 0
WineData$Semillon[WineData$variety == 'Semillon-Sauvignon Blanc'] <- 1
WineData$Semillon[WineData$variety != 'Semillon-Sauvignon Blanc'] <- 0
WineData$Tempranillo[WineData$variety == 'Tempranillo Blanco'] <- 1
WineData$Tempranillo[WineData$variety != 'Tempranillo Blanco'] <- 0
WineData$Tinta[WineData$variety == 'Tinta del Pais'] <- 1
WineData$Tinta[WineData$variety != 'Tinta del Pais'] <- 0
WineData$Bual[WineData$variety == 'Bual'] <- 1
WineData$Bual[WineData$variety != 'Bual'] <- 0
WineData$CabernetShiraz[WineData$variety == 'Cabernet-Shiraz'] <- 1
WineData$CabernetShiraz[WineData$variety != 'Cabernet-Shiraz'] <- 0
WineData$Gelber[WineData$variety == 'Gelber Traminer'] <- 1
WineData$Gelber[WineData$variety != 'Gelber Traminer'] <- 0
WineData$Terrantez[WineData$variety == 'Terrantez'] <- 1
WineData$Terrantez[WineData$variety != 'Terrantez'] <- 0
WineData <-na.omit(WineData)
WineData$Michigan[WineData$province == 'Michigan'] <- 1
WineData$Michigan[WineData$province != 'Michigan'] <- 0
WineData$Virginia[WineData$province == 'Virginia'] <- 1
WineData$Virginia[WineData$province != 'Virginia'] <- 0
WineData$Illinois[WineData$province == 'Illinois'] <- 1
WineData$Illinois[WineData$province != 'Illinois'] <- 0
WineData$NewMexico[WineData$province == 'New Mexico'] <- 1
WineData$NewMexico[WineData$province != 'New Mexico'] <- 0
WineData$Jersey[WineData$province == 'New Jersey'] <- 1
WineData$Jersey[WineData$province != 'New Jersey'] <- 0
WineData$Pennsylvania[WineData$province == 'Pennsylvania'] <- 1
WineData$Pennsylvania[WineData$province != 'Pennsylvania'] <- 0
WineData$Missouri[WineData$province == 'Missouri'] <- 1
WineData$Missouri[WineData$province != 'Missouri'] <- 0
WineData$Nevada[WineData$province == 'Nevada'] <- 1
WineData$Nevada[WineData$province != 'Nevada'] <- 0
WineData$Colorado[WineData$province == 'Colorado'] <- 1
WineData$Colorado[WineData$province != 'Colorado'] <- 0
WineData$Arizona[WineData$province == 'Arizona'] <- 1
WineData$Arizona[WineData$province != 'Arizona'] <- 0
WineData$Massachusetts[WineData$province == 'Massachusetts'] <- 1
WineData$Massachusetts[WineData$province != 'Massachusetts'] <- 0
WineData$Ohio[WineData$province == 'Ohio'] <- 1
WineData$Ohio[WineData$province != 'Ohio'] <- 0
WineData$Iowa[WineData$province == 'Iowa'] <- 1
WineData$Iowa[WineData$province != 'Iowa'] <- 0
WineData$NorthCarolina[WineData$province == 'North Carolina'] <- 1
WineData$NorthCarolina[WineData$province != 'North Carolina'] <- 0
WineData$Napa[WineData$region_1 == 'Napa Valley'] <- 1
WineData$Napa[WineData$region_1 != 'Napa Valley'] <- 0
WineData$RRValley[WineData$region_1 == 'Russian River Valley'] <- 1
WineData$RRValley[WineData$region_1 != 'Russian River Valley'] <- 0
WineData$Sonoma[WineData$region_1 != 'Sonoma Valley'] <- 0
WineData$Sonoma[WineData$region_1 %like% 'Sonoma'] <- 1
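# Editor's note: hedged alternative sketch, not part of the original script.
# Each 0/1 indicator pair above can be written in one line with ifelse(), or
# all country dummies can be generated at once with model.matrix(); the lines
# are commented out so the analysis below is unchanged.
# WineData$Sonoma <- ifelse(grepl("Sonoma", WineData$region_1), 1, 0)
# country_dummies <- model.matrix(~ country - 1, data = WineData)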
#parameter selection top 20 using forward stepwise selection
length(WineData)
colnames(WineData)
drops <- c("country","X","description","designation","province","region_1","region_2","taster_name","taster_twitter_handle", "title", "variety","winery","Continent","sub.region")
newwine <- WineData[ , !(names(WineData) %in% drops)]
# creating the training and test data
set.seed(1)
train <- sample(1:nrow(WineData), 0.8*nrow(WineData))
test <- -train
trainingData <- newwine[train, ]
testData <- newwine[test, ]
colnames(newwine)
fit <- regsubsets(points ~ ., data = newwine, nvmax = 60, method = "forward")
fit.summary <- summary(fit)
plot(fit.summary$bic, xlab = "Number of variables", ylab = "BIC", type='b')
plot(fit.summary$cp, xlab = "Number of variables", ylab = "CP", type='b')
plot(fit.summary$adjr2, xlab = "Number of variables", ylab = "adjr2", type='b')
fit <- regsubsets(points ~ ., data = newwine, nvmax = 20, method = "forward")
coeffs <- coef(fit, id = 20)
names(coeffs)
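# Editor's note: hedged sketch, not part of the original script, showing how
# the selected variable names could be turned into a formula instead of typing
# them out by hand in the lm()/gam() calls below.
# sel_vars    <- setdiff(names(coeffs), "(Intercept)")
# fwd_formula <- reformulate(sel_vars, response = "points")
# fitlm_auto  <- lm(fwd_formula, data = trainingData)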
#fitting different models and comparing results
#linear
fitlm = lm(points~price+ LatinAmerica + SubSahara + Brazil +
Canada + Germany + Mexico + Brachetto + Gragnano + Gamay + Pennsylvania + Missouri + Colorado +
Massachusetts + Ohio + NorthCarolina,data=trainingData)
summary(fitlm)
yhat <- predict(fitlm, testData)
mselm <- mean((testData$points - yhat)^2)
mselm
#poly
fitply= lm(points~poly(price,3) + LatinAmerica + SubSahara + Brazil +
Canada + Germany + Mexico + Brachetto + Gragnano + Gamay + Pennsylvania + Missouri + Colorado +
Massachusetts + Ohio + NorthCarolina,data=trainingData)
summary(fitply)
yhat <- predict(fitply, testData)
msepoly <- mean((testData$points - yhat)^2)
msepoly
#fitting with spline
x=trainingData$price
y=trainingData$points
ytest = testData$points
xtest = testData$price
yhatsp=predict(smooth.spline(x,y),xtest)$y
msespline=mean((ytest-yhatsp)^2)
msespline
sst <- sum((ytest - mean(ytest))^2)
sse <- sum((yhatsp - ytest)^2)
rsqspline <- 1 - sse / sst
rsqspline
plot(trainingData$price,trainingData$points,col="darkgrey")
fitSpl = smooth.spline(trainingData$price,trainingData$points,cv=TRUE)
lines(fitSpl,col="red",lwd=2)
#fitting with spline and other predictors
fitgam=gam(points~s(price,18)+ LatinAmerica + SubSahara + Brazil +
Canada + Germany + Mexico + Brachetto + Gragnano + Gamay + Pennsylvania + Missouri + Colorado +
Massachusetts + Ohio + NorthCarolina,data=trainingData)
yhatsp =predict(fitgam, testData)
msegam=mean((ytest-yhatsp)^2)
msegam
sst <- sum((ytest - mean(ytest))^2)
sse <- sum((yhatsp - ytest)^2)
rsqgam <- 1 - sse / sst
rsqgam
#fit model to ridge regression
x=model.matrix(points~poly(price,3)+ LatinAmerica + SubSahara + Brazil +
Canada + Germany + Mexico + Brachetto + Gragnano + Gamay + Pennsylvania + Missouri + Colorado +
Massachusetts + Ohio + NorthCarolina,data=newwine)[,-1]
y=newwine$points
grid = 10^seq(10,-2,length=100)
train = sample(1:nrow(x), 0.8*nrow(x))
test = (-train)
y.test = y[test]
ridge.mod = glmnet(x[train,], y[train],alpha = 0, lambda = grid, thresh=1e-12)
yhat = predict(ridge.mod,s=4,newx=x[test,])
mseridge <- mean((yhat-y.test)^2)
mseridge
#fit model to Lasso
ridge.mod = glmnet(x[train,], y[train],alpha = 1, lambda = grid, thresh=1e-12)
yhat = predict(ridge.mod,s=4,newx=x[test,])
mselasso <- mean((yhat-y.test)^2)
mselasso
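# Editor's note: hedged follow-up sketch, not part of the original script. The
# ridge and lasso fits above evaluate a single, arbitrary penalty (s = 4);
# cv.glmnet() chooses lambda by cross-validation instead.
set.seed(1)
cv.ridge <- cv.glmnet(x[train, ], y[train], alpha = 0)
cv.lasso <- cv.glmnet(x[train, ], y[train], alpha = 1)
yhat.ridge <- predict(cv.ridge, s = "lambda.min", newx = x[test, ])
yhat.lasso <- predict(cv.lasso, s = "lambda.min", newx = x[test, ])
mean((yhat.ridge - y.test)^2)
mean((yhat.lasso - y.test)^2)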
|
InnerVariableDifferences = function (Variable){
# all inner differences di = xi-yi for all xi,yi in Variable
#
# INPUT
  # Variable(1:n)    numeric vector of length n (no NaNs); pairwise differences are taken between its elements
#
# OUTPUT
# Di matrix of all inner differences
# mt
  # convert back to unsquared (signed) differences
AnzData = length(Variable)
Di =fastPdist(as.matrix(Variable))
for(i in 1:AnzData){
for(j in 1:AnzData){
if(Variable[i] < Variable[j]){
Di[i,j] = -Di[i,j]
} # if
} #for j
} #for i
return(Di)
}
|
/DbtTools/Distances/R/InnerVariableDifferences.R
|
no_license
|
markus-flicke/KD_Projekt_1
|
R
| false | false | 519 |
r
|
InnerVariableDifferences = function (Variable){
# all inner differences di = xi-yi for all xi,yi in Variable
#
# INPUT
  # Variable(1:n)    numeric vector of length n (no NaNs); pairwise differences are taken between its elements
#
# OUTPUT
# Di matrix of all inner differences
# mt
  # convert back to unsquared (signed) differences
AnzData = length(Variable)
Di =fastPdist(as.matrix(Variable))
for(i in 1:AnzData){
for(j in 1:AnzData){
if(Variable[i] < Variable[j]){
Di[i,j] = -Di[i,j]
} # if
} #for j
} #for i
return(Di)
}
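## Editor's note: self-contained sketch, not part of the original file.
## fastPdist() is assumed to return pairwise Euclidean distances; for a single
## numeric vector the signed differences d[i, j] = v[i] - v[j] can be obtained
## directly with outer(), avoiding the double loop above.
inner_diffs <- function(v) outer(v, v, "-")
v <- c(3, 1, 4)
inner_diffs(v)  # same matrix the loop-based version produces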
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calibration.R
\name{price_with_intensity_link}
\alias{price_with_intensity_link}
\title{Helper function (instrument pricing) for calibration of equity-linked default intensity}
\usage{
price_with_intensity_link(p, s, h, variance_instruments,
variance_instrument_prices, variance_instrument_spreads, fit_instruments,
S0, num_time_steps = 30, ..., relative_spread_tolerance = 0.15,
num_variance_time_steps = 30)
}
\arguments{
\item{p}{Power of default intensity}
\item{s}{Proportion of constant default intensity}
\item{h}{Base default intensity}
\item{variance_instruments}{A list of instruments in strictly increasing order
of maturity, from which the volatility term structure will be inferred. Once the
calibration is finished, the chosen parameters will reproduce the prices of
these instruments with fairly high precision.}
\item{variance_instrument_prices}{Central price targets for the variance instruments}
\item{variance_instrument_spreads}{Bid-offer spreads used to normalize errors
in variance instrument prices during term structure fitting}
\item{fit_instruments}{A list of instruments in any order, from which the
mispricing penalties used for judging fit quality will be computed}
\item{S0}{Current underlying price}
\item{num_time_steps}{Time step count passed on to \code{\link{find_present_value}}
while fitting instrument values}
\item{...}{Further arguments passed to both
\code{\link{fit_variance_cumulation}} and to
\code{\link{find_present_value}}}
\item{relative_spread_tolerance}{Tolerance to apply in
calling \code{\link{fit_variance_cumulation}}}
\item{num_variance_time_steps}{Number of time steps to use in
calling \code{\link{fit_variance_cumulation}}}
}
\description{
Given derivative instruments (subclasses of
GridPricedInstrument, though typically either \code{\link{AmericanOption}}
or \code{\link{EuropeanOption}} objects), along with their prices and spreads, calibrate
variance cumulation (the
at-the-money volatility of the continuous process) and then price the instruments via equity linked default
intensity of the form $h(s + (1-s)(S0/S_t)^p)$.
}
|
/man/price_with_intensity_link.Rd
|
no_license
|
whjieee/ragtop
|
R
| false | true | 2,193 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calibration.R
\name{price_with_intensity_link}
\alias{price_with_intensity_link}
\title{Helper function (instrument pricing) for calibration of equity-linked default intensity}
\usage{
price_with_intensity_link(p, s, h, variance_instruments,
variance_instrument_prices, variance_instrument_spreads, fit_instruments,
S0, num_time_steps = 30, ..., relative_spread_tolerance = 0.15,
num_variance_time_steps = 30)
}
\arguments{
\item{p}{Power of default intensity}
\item{s}{Proportion of constant default intensity}
\item{h}{Base default intensity}
\item{variance_instruments}{A list of instruments in strictly increasing order
of maturity, from which the volatility term structure will be inferred. Once the
calibration is finished, the chosen parameters will reproduce the prices of
these instruments with fairly high precision.}
\item{variance_instrument_prices}{Central price targets for the variance instruments}
\item{variance_instrument_spreads}{Bid-offer spreads used to normalize errors
in variance instrument prices during term structure fitting}
\item{fit_instruments}{A list of instruments in any order, from which the
mispricing penalties used for judging fit quality will be computed}
\item{S0}{Current underlying price}
\item{num_time_steps}{Time step count passed on to \code{\link{find_present_value}}
while fitting instrument values}
\item{...}{Further arguments passed to both
\code{\link{fit_variance_cumulation}} and to
\code{\link{find_present_value}}}
\item{relative_spread_tolerance}{Tolerance to apply in
calling \code{\link{fit_variance_cumulation}}}
\item{num_variance_time_steps}{Number of time steps to use in
calling \code{\link{fit_variance_cumulation}}}
}
\description{
Given derivative instruments (subclasses of
GridPricedInstrument, though typically either \code{\link{AmericanOption}}
or \code{\link{EuropeanOption}} objects), along with their prices and spreads, calibrate
variance cumulation (the
at-the-money volatility of the continuous process) and then price the instruments via equity linked default
intensity of the form $h(s + (1-s)(S0/S_t)^p)$.
}
|
# file to run lots of alpha impute runs
library(tidyverse)
library(readr)
library(data.table)
# args captures chromosome number
args <- commandArgs(trailingOnly=TRUE)
chr_num <- args[[1]]
if (purrr::is_empty(chr_num)) stop("Provide a chromosome number as argument")
# folder with all the files
main_files <- paste0("/exports/eddie/scratch/v1mstoff/all_chr_cv/chr_",
chr_num, "/AI_main_files/")
# chromosome folder
output_masked_run_to <- paste0("/exports/eddie/scratch/v1mstoff/all_chr_cv/chr_",
chr_num, "/")
# read full genotypes
geno_org <- fread(paste0(main_files, 'Genotypes.txt'))
# load SNPs to mask
to_be_imputed <- read_lines(paste0(main_files, 'to_be_imputed.txt'))
# cross validation for the following individuals:
# some were also in my and Bilals old runs, 2 are new.
inds <- c(2069, 6106, 6593, 1881, 1258, 2322, 1097, 4741, 1101, 2167)
create_cv_folder <- function(ind_num) {
# directory for the AlphaImpute run with masked SNPs for one individual
run_dir <- paste0(output_masked_run_to, "run_", ind_num)
if (!dir.exists(run_dir)) dir.create(run_dir)
# copy AI, pedigree and spec file over
file.copy(from = paste0(main_files, "AlphaImputeLinux"), to = run_dir, overwrite = TRUE)
file.copy(from = paste0(main_files, "AlphaImputeSpec.txt"), to = run_dir,
overwrite = TRUE)
file.copy(from = paste0(main_files, "Pedigree.txt"), to = run_dir, overwrite = TRUE)
    # mask the genotypes of this individual at the SNPs selected for imputation
geno <- geno_org
# impute by index, maybe change to name again at some point
geno[ID == ind_num, to_be_imputed] <- 9
#write_delim(geno, paste0("cv_", ind_num, "/Genotypes.txt"), delim = " ", col_names = FALSE)
fwrite(geno, file = paste0(run_dir, "/Genotypes.txt"), sep = " ", col.names = FALSE)
write_lines(x = ind_num, paste0(run_dir, "/masked_ind.txt"))
}
sapply(inds, create_cv_folder)
|
/old/loo_cv_full_split_heuristic/prep_masked_cv_runs.R
|
no_license
|
mastoffel/imputation_eddie
|
R
| false | false | 1,943 |
r
|
# file to run lots of alpha impute runs
library(tidyverse)
library(readr)
library(data.table)
# args captures chromosome number
args <- commandArgs(trailingOnly=TRUE)
chr_num <- args[[1]]
if (purrr::is_empty(chr_num)) stop("Provide a chromosome number as argument")
# folder with all the files
main_files <- paste0("/exports/eddie/scratch/v1mstoff/all_chr_cv/chr_",
chr_num, "/AI_main_files/")
# chromosome folder
output_masked_run_to <- paste0("/exports/eddie/scratch/v1mstoff/all_chr_cv/chr_",
chr_num, "/")
# read full genotypes
geno_org <- fread(paste0(main_files, 'Genotypes.txt'))
# load SNPs to mask
to_be_imputed <- read_lines(paste0(main_files, 'to_be_imputed.txt'))
# cross validation for the following individuals:
# some were also in my and Bilals old runs, 2 are new.
inds <- c(2069, 6106, 6593, 1881, 1258, 2322, 1097, 4741, 1101, 2167)
create_cv_folder <- function(ind_num) {
# directory for the AlphaImpute run with masked SNPs for one individual
run_dir <- paste0(output_masked_run_to, "run_", ind_num)
if (!dir.exists(run_dir)) dir.create(run_dir)
# copy AI, pedigree and spec file over
file.copy(from = paste0(main_files, "AlphaImputeLinux"), to = run_dir, overwrite = TRUE)
file.copy(from = paste0(main_files, "AlphaImputeSpec.txt"), to = run_dir,
overwrite = TRUE)
file.copy(from = paste0(main_files, "Pedigree.txt"), to = run_dir, overwrite = TRUE)
    # mask the genotypes of this individual at the SNPs selected for imputation
geno <- geno_org
# impute by index, maybe change to name again at some point
geno[ID == ind_num, to_be_imputed] <- 9
#write_delim(geno, paste0("cv_", ind_num, "/Genotypes.txt"), delim = " ", col_names = FALSE)
fwrite(geno, file = paste0(run_dir, "/Genotypes.txt"), sep = " ", col.names = FALSE)
write_lines(x = ind_num, paste0(run_dir, "/masked_ind.txt"))
}
sapply(inds, create_cv_folder)
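## Editor's note: self-contained sketch, not part of the original script, of the
## masking step inside create_cv_folder(): one individual's genotypes at the
## to-be-imputed SNPs are set to 9 (the missing-genotype code). Column names
## here are hypothetical.
library(data.table)
geno_demo <- data.table(ID = 1:3, snp1 = c(0, 1, 2), snp2 = c(2, 2, 0), snp3 = c(1, 0, 1))
mask_cols <- c("snp2", "snp3")         # stands in for to_be_imputed
geno_demo[ID == 2, (mask_cols) := 9]   # data.table in-place assignment
geno_demo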
|
library(BayesDA)
### Name: stratified
### Title: Results of CBS News Survey of 1447 Adults in the United States
### Aliases: stratified
### Keywords: datasets
### ** Examples
data(stratified)
str(stratified)
|
/data/genthat_extracted_code/BayesDA/examples/stratified.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 216 |
r
|
library(BayesDA)
### Name: stratified
### Title: Results of CBS News Survey of 1447 Adults in the United States
### Aliases: stratified
### Keywords: datasets
### ** Examples
data(stratified)
str(stratified)
|
#Examine this github page for how to compare multiple models in hSDM using
#the Deviance Information Criterion
#https://github.com/adammwilson/SpatialAnalysisTutorials/blob/master/SDM_intro2/hSDM_intro.md
#== Load libraries
library(hSDM)
library(lubridate)
####SPATIAL MIXTURE MODEL EXAMPLE####
kirbypoints4shp<-read.csv("0_data/processed/birdsXY.hab.csv", header=TRUE)
#just the points with both bird and habitat data, sorted by SS
#the order of the stations in "SS" must be the same in the
#individual visits (birddata), the habitat data (kirbypoints4shp),
#and the points used to create the graph file accounting for spatial
#correlation later (kirbypoints4shp again)
birddata<-read.csv(file="0_data/raw/allvisitsBG15.csv", header=TRUE)
str(birddata)
#make sure the data used doesn't include any stations missing habitat data
#and is sorted by station the same way as the habitat data and the station
#location data was when creating the spatial matrix
#Since we are using mixture models, there should be multiple
#rows (observations) for each station. In contrast to the unmarked
#package, observation and detection covariate data are in long
#format rather than wide format, when modelling with the HSDM package
alldata<-merge(kirbypoints4shp, birddata, by=c("SS"))
write.csv(alldata, file="0_data/processed/alldata.csv")
alldata<-read.csv("0_data/processed/alldata.csv", header=TRUE)
SS<-alldata$X
alldata$ID<-alldata$X
Y<-alldata$COYE
alldata$MY.DATE1 <- do.call(paste, list(alldata$Month, alldata$Day, alldata$Year))
alldata$MY.DATE1 <- as.Date(alldata$MY.DATE1, format=c("%m %d %Y"))
alldata$Julian <- as.numeric(format(alldata$MY.DATE1, "%j"))
w1<-scale(alldata$Julian, center=TRUE, scale=TRUE)
alldata$mean_ht_50.s<-scale(alldata$mean_MeanHeight_50, scale=TRUE, center=FALSE)
alldata$mean_ht_150.s<-scale(alldata$mean_MeanHeight_150, scale=TRUE, center=FALSE)
alldata$mean_ht_500.s<-scale(alldata$mean_MeanHeight_500, scale=TRUE, center=FALSE)
alldata$sd_ht_50.s<-scale(alldata$sd_MeanHeight_50, scale=TRUE, center=FALSE)
alldata$sd_ht_150.s<-scale(alldata$sd_MeanHeight_150, scale=TRUE, center=FALSE)
alldata$sd_ht_500.s<-scale(alldata$sd_MeanHeight_500, scale=TRUE, center=FALSE)
alldata$mean_canopypc_50.s<-scale(alldata$mean_PercCanopyDensity_50, scale=TRUE, center=FALSE)
alldata$mean_canopypc_150.s<-scale(alldata$mean_PercCanopyDensity_150, scale=TRUE, center=FALSE)
alldata$mean_canopypc_500.s<-scale(alldata$mean_PercCanopyDensity_500, scale=TRUE, center=FALSE)
alldata$sd_canopypc_50.s<-scale(alldata$sd_PercCanopyDensity_50, scale=TRUE, center=FALSE)
alldata$sd_canopypc_150.s<-scale(alldata$sd_PercCanopyDensity_150, scale=TRUE, center=FALSE)
alldata$sd_canopypc_500.s<-scale(alldata$sd_PercCanopyDensity_500, scale=TRUE, center=FALSE)
alldata$mean_shrub.50.s<-scale((alldata$strata_00_to_02_returnProportion_50+alldata$strata_02_to_04_returnProportion_50), scale=TRUE, center=FALSE)
alldata$mean_shrub.150.s<-scale((alldata$strata_00_to_02_returnProportion_150+alldata$strata_02_to_04_returnProportion_150), scale=TRUE, center=FALSE)
alldata$mean_shrub.500.s<-scale((alldata$strata_00_to_02_returnProportion_500+alldata$strata_02_to_04_returnProportion_500), scale=TRUE, center=FALSE)
#alldata$sd_shrub.50.s<-scale(alldata$sd_PercCanopyCoverCut1.0m_50, scale=TRUE, center=FALSE)
#alldata$sd_shrub.150.s<-scale(alldata$sd_PercCanopyCoverCut1.0m_150, scale=TRUE, center=FALSE)
#alldata$sd_shrub.500.s<-scale(alldata$sd_PercCanopyCoverCut1.0m_500, scale=TRUE, center=FALSE)
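#Note on scale(): with center = FALSE the columns are divided by their root-mean-square,
#not their standard deviation, so these covariates are rescaled but not centred on zero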
stationdata<-unique(alldata[,c("ID",
"SS",
"mean_ht_150.s",
"sd_ht_150.s",
"mean_canopypc_150.s",
"sd_canopypc_150.s",
"mean_shrub.150.s")])
str(stationdata)
mean_ht_150.s<-stationdata$mean_ht_150.s
sd_ht_150.s<-stationdata$sd_ht_150.s
mean_canopypc_150.s<-stationdata$mean_canopypc_150.s
sd_canopypc_150.s<-stationdata$sd_canopypc_150.s
mean_shrub.150.s<-stationdata$mean_shrub.150.s
#= Data-sets
data.obs <- data.frame(Y,w1,site=SS)#90 stations, up to 4 visits per station
data.suit <- data.frame(mean_ht_150.s,
sd_ht_150.s,
mean_canopypc_150.s,
sd_canopypc_150.s,
mean_shrub.150.s)#98 obs
#================================
#== Parameter inference with hSDM
Start <- Sys.time() # Start the clock
mod.hSDM.Nmixture <- hSDM.Nmixture(# Observations
counts=data.obs$Y,
observability=~w1,
site=data.obs$site,
data.observability=data.obs,
# Habitat
suitability=~mean_ht_150.s+sd_ht_150.s+mean_canopypc_150.s+sd_canopypc_150.s+mean_shrub.150.s,
data.suitability=data.suit,
# Predictions
suitability.pred=NULL,
# Chains
burnin = 140000, mcmc = 115000, thin = 25,
# Starting values
beta.start=0,
gamma.start=0,
# Priors
mubeta=0, Vbeta=1.0E6,
mugamma=0, Vgamma=1.0E6,
# Various
seed=1234, verbose=1,
save.p=0, save.N=1)
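# Sketch of the N-mixture structure assumed by hSDM.Nmixture (useful for reading the output):
#   N_i  ~ Poisson(lambda_i)        with log(lambda_i)   = X_i beta    (habitat/suitability)
#   y_it ~ Binomial(N_i, delta_it)  with logit(delta_it) = W_it gamma  (observation/detection)
# so the betas act on latent station abundance and the gammas (here just Julian date w1)
# act on per-visit detection probability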
Time.hSDM <- difftime(Sys.time(),Start,units="sec") # Time difference
#= Computation time
Time.hSDM
#Time difference of ~176 secs
#== Outputs
#= Parameter estimates
summary(mod.hSDM.Nmixture$mcmc)
#Iterations = 140001:289976
# Thinning interval = 25
# Number of chains = 1
# Sample size per chain = 6000
#
# 1. Empirical mean and standard deviation for each variable,
# plus standard error of the mean:
#
# Mean SD Naive SE Time-series SE
#beta.(Intercept) 28.432 7.911 0.11664 2.2455
# beta.mean_ht_150.s -33.158 6.549 0.09656 1.4143
# beta.sd_ht_150.s 8.507 2.316 0.03415 0.1768
# beta.mean_canopypc_150.s 9.565 3.128 0.04612 0.4357
# beta.sd_canopypc_150.s -2.374 2.545 0.03753 0.2943
# beta.mean_shrub.150.s -22.486 5.398 0.07959 1.4413
# gamma.(Intercept) 1085.163 582.060 8.58201 10.6479
# gamma.w1 -323.368 589.610 8.69332 10.4010
# Deviance 138.691 7.555 0.11140 0.2324
#
# 2. Quantiles for each variable:
#
# 2.5% 25% 50% 75% 97.5%
# beta.(Intercept) 14.113 22.756 28.631 33.7398 44.027
# beta.mean_ht_150.s -44.553 -38.343 -33.348 -28.1383 -20.607
# beta.sd_ht_150.s 4.084 6.954 8.413 10.0934 13.260
# beta.mean_canopypc_150.s 4.315 7.353 9.307 11.4341 16.706
# beta.sd_canopypc_150.s -7.191 -4.230 -2.344 -0.6031 2.567
# beta.mean_shrub.150.s -31.839 -26.681 -22.658 -18.0304 -11.961
# gamma.(Intercept) 170.975 648.005 1004.648 1431.6789 2426.997
# gamma.w1 -1507.433 -714.717 -300.730 70.3699 806.546
# Deviance 130.835 133.590 136.279 141.1723 160.314
# COYE abundance (+) related to increasing canopy cover and variation in canopy height and
#(-) related to increasing canopy height and increasing shrub cover
## check if posterior distributions of betas and gammas are normally distributed
g = mod.hSDM.Nmixture$mcmc[,2]#e.g. canopy height mean
m<-mean(g)
std<-sqrt(var(g))
hist(g, density=30, breaks=20, prob=TRUE,
xlab="veg cover",
main="normal curve over histogram")
curve(dnorm(x, mean=m, sd=std),
col="darkblue", lwd=2, add=TRUE, yaxt="n")
g = mod.hSDM.Nmixture$mcmc[,3]#e.g. canopy height sd
m<-mean(g)
std<-sqrt(var(g))
hist(g, density=30, breaks=20, prob=TRUE,
xlab="black spruce cover",
main="normal curve over histogram")
curve(dnorm(x, mean=m, sd=std),
col="darkblue", lwd=2, add=TRUE, yaxt="n")
g = mod.hSDM.Nmixture$mcmc[,4]#e.g. canopy cover mean
m<-mean(g)
std<-sqrt(var(g))
hist(g, density=30, breaks=20, prob=TRUE,
xlab="stand age",
main="normal curve over histogram")
curve(dnorm(x, mean=m, sd=std),
col="darkblue", lwd=2, add=TRUE, yaxt="n")
g = mod.hSDM.Nmixture$mcmc[,5]#canopy cover sd
m<-mean(g)
std<-sqrt(var(g))
hist(g, density=30, breaks=20, prob=TRUE,
xlab="stand volume",
main="normal curve over histogram")
curve(dnorm(x, mean=m, sd=std),
col="darkblue", lwd=2, add=TRUE, yaxt="n")
g = mod.hSDM.Nmixture$mcmc[,6]#shrub cover
m<-mean(g)
std<-sqrt(var(g))
hist(g, density=30, breaks=20, prob=TRUE,
xlab="crown closure",
main="normal curve over histogram")
curve(dnorm(x, mean=m, sd=std),
col="darkblue", lwd=2, add=TRUE, yaxt="n")
library(coda)
params= data.frame(parameter=colnames(mod.hSDM.Nmixture$mcmc),
mean=summary(mod.hSDM.Nmixture$mcmc)$statistics[,"Mean"],
sd=summary(mod.hSDM.Nmixture$mcmc)$statistics[,"SD"],
median=summary(mod.hSDM.Nmixture$mcmc)$quantiles[,"50%"],
HPDinterval(mcmc(as.matrix(mod.hSDM.Nmixture$mcmc))),
RejectionRate=rejectionRate(mod.hSDM.Nmixture$mcmc),
ModelName= "Lidar habitat")
# save model parameters
params1= params
#assign probability of betas being < 0
params1$probabilitymin <- pnorm(0, mean= params1$mean, sd=params1$sd, lower.tail=T)
#intercept:1.627027e-04
#mean_ht:0.99
#sd_ht:1.198863e-04
#mean_canopypc:1.115080e-03
#sd_canopypc:8.244920e-01
#shrubcover:0.99
params1$probabilitymax <- pnorm(0, mean= params1$mean, sd=params1$sd, lower.tail=F)
#intercept:0.99
#mean_ht:2.061562e-07
#sd_ht:0.99
#mean_canopypc:0.99
#sd_canopypc:1.755080e-01
#shrubcover:1.554064e-05
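#These are normal-approximation tail probabilities computed from the posterior mean and sd,
#e.g. for the intercept pnorm(0, mean = 28.43, sd = 7.91) is about 1.6e-04, matching the value above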
##############################################################################################################
##############################################################################################################
##############################################################################################################
#= Predictions
#Distribution of predicted abundances
summary(mod.hSDM.Nmixture$lambda.latent)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.0000000 0.0002369 0.0235314 0.0584983 0.0682517 0.6168624
#half of the stations are predicted to hold fewer than ~0.024 COYE (the median of lambda);
#the distribution is strongly right-skewed (mean 0.058, max 0.62)
#Distribution of predicted detectability
summary(mod.hSDM.Nmixture$delta.latent)
#estimated COYE detection probability is essentially 1 at every station
#(delta.latent is ~1 throughout, so the detection correction adds little here)
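#Note: the posterior mean of gamma.(Intercept) (~1085 on the logit scale) gives
#plogis(1085) == 1 to machine precision, which is why predicted detectability is ~1
#everywhere and suggests the detection sub-model is only weakly identified by these data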
pdf(file="3_outputs/pdfs/COYEPosteriors_hSDM.Nmixture.pdf")
plot(mod.hSDM.Nmixture$mcmc)
dev.off()
#= Predictions
plot(stationdata$mean_canopypc_150.s,
     mod.hSDM.Nmixture$lambda.pred,
     xlab="Mean canopy cover (scaled)",
     ylab="Predicted Common Yellowthroat Abundance",
     pch=20, col="blue")#possible nonlinear effect
#Generate prediction surface: add predicted values to spdf "points"
stationdata$lambda.pred<-mod.hSDM.Nmixture$lambda.pred
stationdata$id<-as.numeric(stationdata$SS)
library(sp)      # needed below for CRS(), coordinates(), proj4string() and SpatialPolygons()
library(raster)  # needed below to rasterize the IDW surface with raster()
BG15<-read.csv("0_data/processed/birdsXY.hab.csv", header=TRUE)
LCC <- CRS("+proj=utm +zone=12 +ellps=GRS80 +units=m +no_defs")
coordinates(BG15) <- BG15[,c("EASTING", "NORTHING")]
proj4string(BG15) <- LCC
stationdata_sp <- merge(BG15, stationdata, by.x = "SS", by.y = "SS")
str(stationdata_sp)
plot(stationdata_sp, col=stationdata$lambda.pred*10)
stationdata_sp@bbox
stationdata_sp@coords
library(tmap)
# kirby <- raster("E:/BERA Mentoring/Kirby Oct 2019/Kirby_CWD_Vol.tif")
# plot(kirby)
#Create a function to generate a continuous color palette
rbPal <- colorRampPalette(c('red','green'))
plot(stationdata_sp, add=TRUE, pch=20, col=rbPal(10)[as.numeric(cut(stationdata$lambda.pred, breaks = 10))])
# Replace point boundary extent with that of Kirby grid
coords = matrix(c(488000, 6131150,
488000, 6140000,
496000, 6140000,
496000, 6131150,
488000, 6131150),
ncol = 2, byrow = TRUE)
P1 = Polygon(coords)
Ps1 = SpatialPolygons(list(Polygons(list(P1), ID = "a")), proj4string=CRS("+proj=utm +zone=12 +ellps=GRS80 +units=m +no_defs"))
plot(Ps1, axes = TRUE)
plot(stationdata_sp, add=TRUE, pch=20, col=rbPal(10)[as.numeric(cut(stationdata$lambda.pred, breaks = 10))])
stationdata_sp@bbox<-Ps1@bbox
tm_shape(Ps1) + tm_polygons() +
tm_shape(stationdata_sp) +
tm_dots(col="lambda.pred", palette = "RdBu", auto.palette.mapping = FALSE,
title="Predicted COYE abund \n(per station)", size=0.7) +
tm_legend(legend.outside=TRUE)
#IDW Interpolation - Need to work the bugs out
library(gstat) # Use gstat's idw routine
library(sp) # Used for the spsample function
# Create an empty grid where n is the total number of cells
#stationdata_sp built above (a SpatialPointsDataFrame with lambda.pred) is reused here;
#re-merging with the plain data.frame kirbypoints4shp would drop the Spatial class and break spsample()
#stationdata_sp <- merge(kirbypoints4shp, stationdata, by.x = c("SS","ID"), by.y = c("SS","ID"))
grd <- as.data.frame(spsample(stationdata_sp, "regular", n=15000))
names(grd) <- c("EASTING", "NORTHING")
coordinates(grd) <- c("EASTING", "NORTHING")
gridded(grd) <- TRUE # Create SpatialPixel object
fullgrid(grd) <- TRUE # Create SpatialGrid object
# Add P's projection information to the empty grid
proj4string(grd) <- proj4string(stationdata_sp)
# Interpolate the grid cells using a power value of 2 (idp=2.0)
P.idw <- gstat::idw(lambda.pred ~ 1, stationdata_sp, newdata=grd, idp=2.0)
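# idw() computes an inverse-distance-weighted average of lambda.pred over the grid;
# idp = 2 means weights fall off with squared distance, and larger idp values make
# the interpolated surface more local (peakier around the stations)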
# Convert to raster object
r <- raster(P.idw)
plot(r)
# Plot
tm_shape(r) +
tm_raster(n=10,palette = "RdBu", auto.palette.mapping = FALSE,
title="Predicted COYE \n abundance") +
tm_shape(stationdata_sp) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
summary(mod.hSDM.Nmixture$lambda.latent)
summary(mod.hSDM.Nmixture$delta.latent)
summary(mod.hSDM.Nmixture$lambda.pred)
pdf(file="Pred-Init.iCAR.pdf")
plot(ymax.station$WTSPmax,
mod.hSDM.Nmixture$lambda.pred,
xlab="Maximum WTSP count per station",
ylab="Predicted WTSP abundance per station",
pch=20, col="blue")
#replaced lambda with ymax.station$WTSPmax
abline(a=0,b=1,col="red")
dev.off()
#= MCMC for latent variable N
pdf(file="MCMC_N.iCAR.pdf")
plot(mod.hSDM.Nmixture.iCAR$N.pred)
dev.off()
#= Check that Ns are correctly estimated
M <- as.matrix(mod.hSDM.Nmixture.iCAR$N.pred)
N.est <- apply(M,2,mean)
Y.by.site <- tapply(Y,siteID,mean) # Mean by site
pdf(file="Check_N.pdf",width=10,height=5)
plot(Y.by.site, N.est) ## More individuals are expected (N > Y) due to detection process
#Error: x and y differ
abline(a=0,b=1,col="red")
dev.off()
## End(Not run)
|
/Kirby April 2020/Lidar analyses/1_scripts/old/3_LIDAR regular nonspatial mixture model COYE.R
|
no_license
|
LionelLeston/Kirby-grid
|
R
| false | false | 14,479 |
r
|
library(tidyverse)
data <- read.csv("input-2020-12", header = F)
# Part 1
data <- data %>% mutate(dir=substr(V1,1,1), n=as.numeric(substr(V1,2,4)))
data <- data %>% select(dir, n)
solve <- function(data){
pos <- c(x=0,y=0); dir <- 2
dirs <- c('N','E','S','W')
for (i in 1:nrow(data)){
if (data[i,1]=='R') {dir=1+((dir-1+data[i,2]/90) %% 4)}
if (data[i,1]=='L') {dir=1+((dir-1-data[i,2]/90) %% 4)}
if (data[i,1]=='S') {pos['y']=pos['y']-data[i,2]}
if (data[i,1]=='N') {pos['y']=pos['y']+data[i,2]}
if (data[i,1]=='W') {pos['x']=pos['x']-data[i,2]}
if (data[i,1]=='E') {pos['x']=pos['x']+data[i,2]}
if (data[i,1]=='F') {
if (dir==1) {pos['y']=pos['y']+data[i,2]}
if (dir==2) {pos['x']=pos['x']+data[i,2]}
if (dir==3) {pos['y']=pos['y']-data[i,2]}
if (dir==4) {pos['x']=pos['x']-data[i,2]}
} }
unname(abs(pos['x']) + abs(pos['y'])) }
solve(data)
# Part 2
solve <- function(data,iter=nrow(data)){
pos <- c(x=0, y=0); way <- c(x=10, y=1)
for (i in 1:iter){
if (data[i,1]=='R') {
times=data[i,2]/90
for (k in 1:times) {
x2=way['y']; y2=-way['x']
way['x']<-x2
way['y']<-y2 }}
if (data[i,1]=='L') {
times=data[i,2]/90
for (k in 1:times) {
x2=-way['y']; y2=way['x']
way['x']<-x2
way['y']<-y2 } }
if (data[i,1]=='S') {way['y']=way['y']-data[i,2]}
if (data[i,1]=='N') {way['y']=way['y']+data[i,2]}
if (data[i,1]=='W') {way['x']=way['x']-data[i,2]}
if (data[i,1]=='E') {way['x']=way['x']+data[i,2]}
if (data[i,1]=='F') {
pos['y']=pos['y'] + (data[i,2]*way['y'])
pos['x']=pos['x'] + (data[i,2]*way['x'])
}
}
unname(abs(pos['x'])+abs(pos['y'])) }
solve(data)
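# Quick sanity check with the small sample from the puzzle statement (assumed input
# F10, N3, F7, R90, F11): Part 1 should return 25 and Part 2 (the solve() defined above) 286
# sample <- data.frame(dir = c('F','N','F','R','F'), n = c(10, 3, 7, 90, 11))
# solve(sample)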
|
/day12/day12.R
|
no_license
|
cortinah/aoc2020
|
R
| false | false | 1,805 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshift_operations.R
\name{redshift_create_snapshot_copy_grant}
\alias{redshift_create_snapshot_copy_grant}
\title{Creates a snapshot copy grant that permits Amazon Redshift to use an
encrypted symmetric key from Key Management Service (KMS) to encrypt
copied snapshots in a destination region}
\usage{
redshift_create_snapshot_copy_grant(
SnapshotCopyGrantName,
KmsKeyId = NULL,
Tags = NULL
)
}
\arguments{
\item{SnapshotCopyGrantName}{[required] The name of the snapshot copy grant. This name must be unique in the
region for the Amazon Web Services account.
Constraints:
\itemize{
\item Must contain from 1 to 63 alphanumeric characters or hyphens.
\item Alphabetic characters must be lowercase.
\item First character must be a letter.
\item Cannot end with a hyphen or contain two consecutive hyphens.
\item Must be unique for all clusters within an Amazon Web Services
account.
}}
\item{KmsKeyId}{The unique identifier of the encrypted symmetric key to which to grant
Amazon Redshift permission. If no key is specified, the default key is
used.}
\item{Tags}{A list of tag instances.}
}
\description{
Creates a snapshot copy grant that permits Amazon Redshift to use an encrypted symmetric key from Key Management Service (KMS) to encrypt copied snapshots in a destination region.
See \url{https://www.paws-r-sdk.com/docs/redshift_create_snapshot_copy_grant/} for full documentation.
}
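\examples{
\dontrun{
# Hedged usage sketch (not run): assumes AWS credentials are configured for paws.
svc <- paws::redshift()
svc$create_snapshot_copy_grant(
  SnapshotCopyGrantName = "my-snapshot-copy-grant"
)
}
}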
\keyword{internal}
|
/cran/paws.database/man/redshift_create_snapshot_copy_grant.Rd
|
permissive
|
paws-r/paws
|
R
| false | true | 1,498 |
rd
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1896
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1896
c
c Input Parameter (command line, file):
c input filename QBFLIB/Letombe/Abduction/aim-200-2_0-yes1-3-50.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 998
c no.of clauses 1896
c no.of taut cls 1
c
c Output Parameters:
c remaining no.of clauses 1896
c
c QBFLIB/Letombe/Abduction/aim-200-2_0-yes1-3-50.qdimacs 998 1896 E1 [] 1 200 798 1896 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Letombe/Abduction/aim-200-2_0-yes1-3-50/aim-200-2_0-yes1-3-50.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 637 |
r
|
# These helpers assume sf, dplyr, ggplot2, leaflet, units and USAboundaries are loaded by the calling scripts
map_nearest_state = function(name, rows = 70) {
states = USAboundaries::us_states() %>%
st_transform(5070)
state.of.interest = name
soi = filter(states, state_name == state.of.interest)
adjoining = st_filter(states, soi, .predicate = st_touches)
closest = st_make_grid(soi, n = rows, square = FALSE) %>%
st_centroid() %>%
st_sf() %>%
st_join(adjoining, join = st_nearest_feature)
vor = closest %>%
st_union() %>%
st_voronoi() %>%
st_cast() %>%
st_sf() %>%
st_join(closest) %>%
group_by(state_name) %>%
summarise() %>%
st_intersection(soi)
leaflet() %>%
addProviderTiles(providers$CartoDB) %>%
addPolygons(data = st_transform(vor, 4326),
fillColor = ~colorFactor("YlOrRd", state_name)(state_name), color = NA) %>%
addPolygons(data = st_transform(soi, 4326),
fillColor = "transparent", color = "black",
group = "SOI") %>%
addPolygons(data = st_transform(adjoining, 4326),
fillColor = ~colorFactor("YlOrRd", state_name)(state_name), color = NA) %>%
addLayersControl(overlayGroups = c("SOI"))
}
county_plot = function(plot_data, plot_title) {
  # id is assumed to index the features 1..n, so its maximum is the feature count
  feat = max(plot_data$id)
  ggplot() +
    geom_sf(data = plot_data, color = "navy", fill = "white", size = 0.2) +
    labs(title = plot_title,
         caption = paste("Number of Features:", feat)) +
    theme_void()
}
sum_tess = function(sf, desc){
area = st_area(sf) %>%
units::set_units("km^2") %>%
units::drop_units()
df = data.frame(description = desc,
count = nrow(sf),
mean_area = mean(area),
std_area = sd(area),
tot_area = sum(area))
return(df)
}
point_in_polygon = function(points, polygon, id){
st_join(polygon, points) %>%
st_drop_geometry() %>%
count(.data[[id]]) %>%
setNames(c(id, "n")) %>%
left_join(polygon, by = id) %>%
st_as_sf()
}
pip_plot = function(plot_data, plot_title){
ggplot() +
geom_sf(data = plot_data, aes(fill = log(n)), size = .2, col = NA) +
scale_fill_viridis_c() +
labs(title = plot_title,
caption = paste(sum(plot_data$n))) +
theme_void()
}
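# Example use of the point-in-polygon helpers (hypothetical objects):
# wells_per_county = point_in_polygon(points = wells_sf, polygon = counties_sf, id = "geoid")
# pip_plot(wells_per_county, "Wells per county")
# (sum_tess2 below is identical to sum_tess.)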
sum_tess2 = function(sf, desc) {
area = st_area(sf) %>%
units::set_units("km^2") %>%
units::drop_units()
df = data.frame(description = desc,
count = nrow(sf),
mean_area = mean(area),
std_area = sd(area),
tot_area = sum(area))
return(df)
}
|
/R/utils.R
|
no_license
|
brynmcgowan22/geog-176A-labs
|
R
| false | false | 2,571 |
r
|
context("copula")
test_that("copula behaves as it should", {
fun <- function(d) apply(d,2,function(x)(1:n)[rank(x)])/(1+n)
n <- 200
u2 <- cbind(sample(n),sample(n))
d2 <- fun(u2)
u3 <- cbind(sample(n),sample(n),sample(n))
d3 <- fun(u3)
expect_equal(d2, copula(u2)$copula, label="copula:2dimensional")
expect_equal(d3, copula(u3)$copula, label="copula:3dimensional")
})
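# The reference value above is the empirical copula transform: each column is replaced by
# rank(x) / (n + 1), which is what copula()$copula should reproduce in 2 and 3 dimensions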
test_that("copula throws errors", {
expect_error(copula(TRUE), label="copula:exception")
expect_error(copula("text"), label="copula:exception")
})
test_that("copula fails for data frames without numerics", {
dat <- data.frame(x=letters, stringsAsFactors=FALSE)
expect_error(copula(dat))
})
test_that("copula warns if it drops variables", {
dat <- data.frame(x=letters,
y=seq_along(letters),
stringsAsFactors=FALSE)
expect_warning(copula(dat))
})
|
/data/genthat_extracted_code/texmex/tests/test-copula.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 936 |
r
|
options(scipen = 100)
fp <- file("stdin", "r")
input <- scan(file=fp, what=numeric(0), n=2000000)
# input <- scan(what=numeric(0), n=2000000)
index <- input[1:2]
input <- input[-(1:2)]
mat1 <- matrix(input, nrow=index[1], byrow=T)
mat9 <- strsplit(as.character(mat1), "")
for(i in 1:length(mat1))
{
mat1[i] <- sum(mat9[[i]] == "9")
}
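# mat1 now holds, for every cell, how many digit-9s appear in that entry; below we find
# the single row or column whose removal eliminates the most 9s and report the 9s left over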
max9 <- sum(mat1)
maxi9 <- maxj9 <- 0
for(i in 1:index[1])
{
cur9 <- sum(mat1[i, ])
if(cur9 > maxi9)
{
maxi9 <- cur9
}
}
for(j in 1:index[2])
{
cur9 <- sum(mat1[, j])
if(cur9 > maxj9)
{
maxj9 <- cur9
}
}
if(maxi9 > maxj9)
{
result <- max9-maxi9
} else
{
result <- max9-maxj9
}
cat(result)
|
/q14647.r
|
no_license
|
taehyunkim2/practice
|
R
| false | false | 658 |
r
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90323563887713e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615835726-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 2,048 |
r
|
library(tcltk2)
### Name: setLanguage
### Title: Change or get the language used in R and Tcl/Tk
### Aliases: setLanguage getLanguage
### Keywords: utilities
### ** Examples
## Determine which language is currently in use in R
oldlang <- getLanguage()
if (oldlang != "") {
## Switch to English and test a command that issues a warning
if (setLanguage("en_US")) 1:3 + 1:2
## Switch to French and test a command that issues a warning
if (setLanguage("fr_FR")) 1:3 + 1:2
## Switch to German and test a command that issues a warning
if (setLanguage("de_DE")) 1:3 + 1:2
## Switch to Italian and test a command that issues a warning
if (setLanguage("it_IT")) 1:3 + 1:2
## Etc..
## Restore previous language
setLanguage(oldlang)
}
|
/data/genthat_extracted_code/tcltk2/examples/setLanguage.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 776 |
r
|
#===================================================================================================
#' Metacoder
#'
#' A package for planning and analysis of amplicon metagenomics research projects.
#'
#' The goal of the \code{metacoder} package is to provide a set of tools for:
#'
#' \itemize{
#' \item Standardized parsing of taxonomic information from diverse resources.
#' \item Visualization of statistics distributed over taxonomic classifications.
#' \item Evaluating potential metabarcoding primers for taxonomic specificity.
#' \item Providing flexible functions for analyzing taxonomic and abundance data.
#' }
#'
#' To accomplish these goals, \code{metacoder} leverages resources from other R packages, interfaces with
#' external programs, and provides novel functions where needed to allow for entire analyses within R.
#'
#' @section Documentation:
#'
#' The full documentation can be found online at \url{http://grunwaldlab.github.io/metacoder_documentation}.
#'
#' There is also a short vignette included for offline use that can be accessed by the following code:
#'
#' \code{browseVignettes(package = "metacoder")}
#'
#' \strong{Plotting:}
#'
#' \itemize{
#' \item \code{\link{heat_tree}}
#' \item \code{\link{heat_tree_matrix}}
#' }
#'
#' \strong{In silico PCR:}
#'
#' \itemize{
#' \item \code{\link{primersearch}}
#' }
#'
#' \strong{Analysis:}
#'
#' \itemize{
#' \item \code{\link{calc_taxon_abund}}
#' \item \code{\link{calc_obs_props}}
#' \item \code{\link{rarefy_obs}}
#' \item \code{\link{compare_groups}}
#' \item \code{\link{zero_low_counts}}
#' \item \code{\link{calc_n_samples}}
#' \item \code{\link{filter_ambiguous_taxa}}
#' }
#'
#' \strong{Parsers:}
#'
#' \itemize{
#' \item \code{\link{parse_greengenes}}
#' \item \code{\link{parse_mothur_tax_summary}}
#' \item \code{\link{parse_mothur_taxonomy}}
#' \item \code{\link{parse_newick}}
#' \item \code{\link{parse_phyloseq}}
#' \item \code{\link{parse_phylo}}
#' \item \code{\link{parse_qiime_biom}}
#' \item \code{\link{parse_rdp}}
#' \item \code{\link{parse_silva_fasta}}
#' \item \code{\link{parse_unite_general}}
#' }
#'
#' \strong{Writers:}
#'
#' \itemize{
#' \item \code{\link{write_greengenes}}
#' \item \code{\link{write_mothur_taxonomy}}
#' \item \code{\link{write_rdp}}
#' \item \code{\link{write_silva_fasta}}
#' \item \code{\link{write_unite_general}}
#' }
#'
#' \strong{Database querying:}
#'
#' \itemize{
#' \item \code{\link{ncbi_taxon_sample}}
#' }
#'
#' @author Zachary Foster and Niklaus Grunwald
#' @name metacoder
#' @docType package
#' @useDynLib metacoder
#' @importFrom Rcpp sourceCpp
NULL
|
/R/metacoder-package.R
|
permissive
|
grabear/metacoder
|
R
| false | false | 2,674 |
r
|
[
{
"title": "Faster, easier, and more reliable character string processing with stringi 0.3-1",
"href": "http://www.rexamine.com/2014/11/stringi-0-3-1-released/"
},
{
"title": "Modelling Occurence of Events, with some Exposure",
"href": "http://freakonometrics.hypotheses.org/20133"
},
{
"title": "Call for papeRs in the near future",
"href": "http://blog.rapporter.net/2014/02/call-for-papers-in-near-future.html?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rapporter-r+%28R+stories+by+Rapporter%29"
},
{
"title": "Reporting Standard Errors for USL Coefficients",
"href": "http://perfdynamics.blogspot.com/2010/11/reporting-standard-errors-for-usl.html"
},
{
"title": "Overlaying species occurrence data with climate data",
"href": "http://ropensci.org/blog/2014/04/22/rwbclimate-sp/"
},
{
"title": "How-to: Construct petridish plots in R",
"href": "http://www.scipirate.com/"
},
{
"title": "Installing RStudio Shiny Server on AWS",
"href": "http://ipub.com/shiny-aws/"
},
{
"title": "Stat Computing Visions from the Past",
"href": "http://www.theusrus.de/blog/stat-computing-visions-from-the-past/"
},
{
"title": "Creating Calendars for Future’s Expiration",
"href": "http://www.quintuitive.com/2016/01/17/creating-calendars-for-futures-expiration/"
},
{
"title": "Truly random?!",
"href": "https://xianblog.wordpress.com/2010/09/07/truly-random/"
},
{
"title": "New website design",
"href": "http://ggobi.blogspot.com/2006/02/new-website-design.html"
},
{
"title": "Successful Two Day Workshop at UNC-Chapel Hill",
"href": "http://www.rtexttools.com/blog/successful-two-day-workshop-at-unc-chapel-hill"
},
{
"title": "Using Azure as an R data source, Part 1",
"href": "http://blog.revolutionanalytics.com/2015/05/using-azure-as-an-r-datasource.html"
},
{
"title": "Creating a custom soil attribute plot using ggmap",
"href": "http://blog.revolutionanalytics.com/2015/01/creating-a-custom-soil-attribute-plot-using-ggmap.html"
},
{
"title": "PK calculations for infusion at constant rate",
"href": "http://wiekvoet.blogspot.com/2014/03/pk-calculations-for-infusion-at.html"
},
{
"title": "Example 7.8: Plot two empirical cumulative density functions using available tools",
"href": "https://feedproxy.google.com/~r/SASandR/~3/-mqMZMbtxJw/example-78-plot-two-empirical.html"
},
{
"title": "Adding labels within lattice panels by group",
"href": "http://www.magesblog.com/2014/02/adding-labels-within-lattice-panels-by.html"
},
{
"title": "Videos from Coursera’s four week course in R",
"href": "http://blog.revolutionanalytics.com/2012/12/coursera-videos.html"
},
{
"title": "Teaching R course? Use analogsea to run your customized RStudio in Digital Ocean!",
"href": "http://applyr.blogspot.com/2015/05/teaching-r-course-use-analogsea-to-run.html"
},
{
"title": "GigaOm article on R, Big Data and Data Science",
"href": "http://blog.revolutionanalytics.com/2011/07/gigaom-article-on-r-big-data-and-data-science.html"
},
{
"title": "R-bloggers.com just went through a (forced) maintenance – and is back online! (I hope)",
"href": "https://www.r-bloggers.com/r-bloggers-com-just-went-through-a-forced-maintenance-and-is-back-online-i-hope/"
},
{
"title": "Finally! A practical R book on Data Mining: \"Data Mining With R, Learning with Case Studies,\" by Luis Torgo",
"href": "http://intelligenttradingtech.blogspot.com/2010/11/finally-practical-r-book-on-data-mining.html"
},
{
"title": "R 3.0.2 and RStudio 0.9.8 are released!",
"href": "https://www.r-statistics.com/2013/09/r-3-0-2-and-rstudio-0-9-8-are-released/"
},
{
"title": "Porting Stata-like Marginal Effects to LaTeX",
"href": "https://feedproxy.google.com/~r/CoffeeAndEconometricsInTheMorning/~3/bq02pGLouVA/porting-stata-like-marginal-effects-to.html"
},
{
"title": "RTutor: CO2 Trading and Risk of Firm Relocation",
"href": "http://feedproxy.google.com/~r/skranz_R/~3/0Mv8KIs_XzU/RTutor_EmmissionTradingReallocation.html"
},
{
"title": "More about Aggregation by Group in R",
"href": "https://statcompute.wordpress.com/2012/12/24/aggregation-by-group-in-r-2-more/"
},
{
"title": "Stone Flakes V, networks again",
"href": "http://wiekvoet.blogspot.com/2014/07/stone-flakes-v-networks-again.html"
},
{
"title": "Applying multiple functions to data frame",
"href": "http://rsnippets.blogspot.com/2011/11/applying-multiple-functions-to-data.html"
},
{
"title": "Lattice Explore Bonds",
"href": "http://timelyportfolio.blogspot.com/2011/12/lattice-explore-bonds.html"
},
{
"title": "Using geom_step",
"href": "http://www.win-vector.com/blog/2016/06/using-geom_step/"
},
{
"title": "R unfolds the history of the Afghanistan war",
"href": "http://blog.revolutionanalytics.com/2010/08/r-unfolds-the-history-of-the-afghanistan-war.html"
},
{
"title": "“R for Beginners” Course | May 3-4 Milan",
"href": "http://www.milanor.net/blog/r-for-beginners-course/"
},
{
"title": "New R package: packagedocs",
"href": "http://ryanhafen.com/blog/packagedocs"
},
{
"title": "Hygge at UseR! 2015, Aalborg",
"href": "http://www.sumsar.net/blog/2015/07/hygge-at-user-2015/"
},
{
"title": "Delete a List Component in R",
"href": "http://yusung.blogspot.com/2009/06/delete-list-component-in-r.html"
},
{
"title": "R News From JSM 2015",
"href": "http://blog.revolutionanalytics.com/2015/08/r-news-from-jsm-2015.html"
},
{
"title": "Veterinary Epidemiologic Research: Modelling Survival Data – Parametric and Frailty Models",
"href": "https://denishaine.wordpress.com/2013/07/05/veterinary-epidemiologic-research-modelling-survival-data-parametric-and-frailty-models/"
},
{
"title": "Illinois long-term selection experiment for oil and protein in corn",
"href": "http://ggorjan.blogspot.com/2009/02/illinois-long-term-selection-experiment.html"
},
{
"title": "An example of MapReduce with rmr2",
"href": "http://www.milanor.net/blog/an-example-of-mapreduce-with-rmr2/"
},
{
"title": "Rcpp 0.12.2: More refinements",
"href": "http://dirk.eddelbuettel.com/blog/2015/11/15/"
},
{
"title": "Data Mining the California Solar Statistics with R: Part III",
"href": "http://www.beyondmaxwell.com/?p=182"
},
{
"title": "Computing AIC on a Validation Sample",
"href": "http://freakonometrics.hypotheses.org/20158"
},
{
"title": "Comparing all quantiles of two distributions simultaneously",
"href": "https://rbresearch.wordpress.com/2012/04/12/low-volatility-with-r/"
},
{
"title": "Read sas7bdat files in R with GGASoftware Parso library",
"href": "http://biostatmatt.com/archives/2618"
},
{
"title": "Using R to Compare Hurricane Sandy and Hurricane Irene",
"href": "http://statistical-research.com/using-r-to-compare-hurricane-sandy-and-hurricane-irene/?utm_source=rss&utm_medium=rss&utm_campaign=using-r-to-compare-hurricane-sandy-and-hurricane-irene"
},
{
"title": "Report from the useR! meeting in Ames",
"href": "https://web.archive.org/web/http://pineda-krch.com/2007/08/10/report-from-the-user-meeting-in-ames/"
},
{
"title": "Sentiment Analysis and Topic Detection in R using Microsoft Cognitive Services",
"href": "http://philferriere.blogspot.com/2016/06/sentiment-analysis-and-topic-detection.html"
},
{
"title": "Bounding sums of random variables, part 1",
"href": "http://freakonometrics.hypotheses.org/2450"
},
{
"title": "The Wiener takes it all? A review of the 2014 Eurovision results",
"href": "http://www.magesblog.com/2014/05/the-wiener-takes-it-all-reviewing.html"
},
{
"title": "What Time Is It?",
"href": "http://statistical-research.com/what-time-is-it/?utm_source=rss&utm_medium=rss&utm_campaign=what-time-is-it"
}
]
| /json/208.r | no_license | rweekly/rweekly.org | R | false | false | 8,101 | r |
####
## Bayesian binomial estimation
####
## Frequentist unbiased estimator of p^k from succ successes in tot trials
binmom <- function(succ, tot, k) {
  choose(succ, k) / choose(tot, k)
}
## Bayesian estimator: posterior mean of p^k under a Beta(a, b) prior
bbinom <- function(succ, tot, k, a = 1, b = 1) {
  a2 <- a + succ
  b2 <- b + tot - succ
  beta(a2 + k, b2) / beta(a2, b2)
}
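## Sketch of the reasoning behind these estimators (assuming the notation above):
## - binmom: for X ~ Binomial(tot, p), E[choose(X, k)] = choose(tot, k) * p^k,
##   so choose(succ, k) / choose(tot, k) is unbiased for p^k.
## - bbinom: with a Beta(a, b) prior, the posterior is Beta(a + succ, b + tot - succ),
##   and E[p^k | data] = B(a + succ + k, b + tot - succ) / B(a + succ, b + tot - succ),
##   which is the ratio of beta() values returned above.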
## test it
# a <- 2
# b <- 3
# ps <- rbeta(10000, a, b)
# ps <- sort(ps)
# c(mean(ps), a/(a + b))
#
# k <- 10
# Ys <- rbinom(length(ps), k, prob = ps)
# K <- 7
# ests1 <- sapply(Ys, function(v) binmom(v, k, K))
# plot(ps^K, ests1, pch = ".")
# psK <- ps^K
# lm(ests1 ~ psK)
# ests2 <- sapply(Ys, function(v) bbinom(v, k, K, a, b))
# plot(ps^K, ests2, pch = ".")
# sum((ests1 - psK)^2)
# sum((ests2 - psK)^2)
#
# K <- 20
# psK <- ps^K
# ests2 <- sapply(Ys, function(v) bbinom(v, k, K, a, b))
# sum((ests2 - ps^K)^2)
# res <- ksmooth(psK, ests2, bandwidth = 0.1)
# plot(res, type = "l")
# points(ps^K, ests2, pch = ".")
# abline(0, 1)
| /extrapolation/bayes_binom.R | no_license | snarles/fmri | R | false | false | 908 | r |
\name{make.heldout}
\alias{make.heldout}
\alias{eval.heldout}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Heldout Likelihood by Document Completion}
\description{
Tools for making and evaluating heldout datasets.
}
\usage{
make.heldout(documents, vocab,
N=floor(.1*length(documents)),
proportion=.5, seed=NULL)
eval.heldout(model, missing)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{documents}{the documents to be modeled.
}
\item{vocab}{
the vocabulary (a character vector of words)
}
\item{N}{
number of docs to be partially held out
}
\item{proportion}{
proportion of docs to be held out.
}
\item{seed}{
the seed, set for replicability
}
\item{model}{
an stm model
}
\item{missing}{
a missing object created by make.heldout
}
}
\details{
These functions are used to create and evaluate heldout likelihood using the document completion method. The basic idea is to hold out some fraction of the words in a set of documents, train the model and use the document-level latent variables to evaluate the probability of the heldout portion. See the example for the basic workflow.
}
\examples{
\dontrun{
prep <- prepDocuments(poliblog5k.docs, poliblog5k.voc,
poliblog5k.meta,subsample=500,
lower.thresh=20,upper.thresh=200)
heldout <- make.heldout(prep$documents, prep$vocab)
documents <- heldout$documents
vocab <- heldout$vocab
meta <- prep$meta
stm1 <- stm(documents, vocab, 5,
prevalence =~ rating+ s(day),
init.type="Random",
data=meta, max.em.its=5)
eval.heldout(stm1, heldout$missing)
}
}
| /man/heldout.Rd | no_license | JohnNay/stm | R | false | false | 1,677 | rd |
library(tidyverse)
cases <- read_csv("dengue_labels_train.csv")
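# Boxplots of weekly case counts by calendar week, one facet per city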
cases %>%
ggplot() +
geom_boxplot(aes(weekofyear, total_cases, group = weekofyear)) +
  facet_grid(city ~ ., scales = "free") +
ggtitle("Total number of cases per week-of-the-year in each city")
ggsave("cases_per_week_of_year.png", device = "png")
| /plot_cases_per_week_of_year.R | no_license | caromimo/dengAI_R | R | false | false | 319 | r |