| content (large_string, lengths 0-6.46M) | path (large_string, lengths 3-331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5-125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.46M) | extension (large_string, 75 classes) |
|---|---|---|---|---|---|---|---|---|
# worker script designed to mimic a process that can't be guaranteed to complete
# successfully every time. this script runs only those jobs assigned to this
# particular process.
# find the jobs assigned to this process
jobsTable <- read.csv('status.csv', header=TRUE, stringsAsFactors=FALSE)
# filter to just those jobs assigned to this process
procid <- as.numeric(Sys.getenv('SLURM_PROCID')) # or for array mode: procid = as.numeric(Sys.getenv('SLURM_ARRAY_TASK_ID', 'NA'))
if(!is.na(procid)) { # if() only necessary if we're ever not using Slurm
jobsToRun <- jobsTable[jobsTable$procID == procid, 'jobID']
} else {
jobsToRun <- jobsTable$jobID
}
# get the job configuration info
jobsConfig <- read.csv('jobs.csv', header=TRUE, stringsAsFactors=FALSE)
# run the jobs assigned to this process (with our artificial unreliability to
# mimic truly unreliable jobs, e.g., models or HTTP transfers that sometimes
# fail)
for(jobid in jobsToRun) {
configrow <- jobsConfig[jobsConfig$jobID==jobid, ]
success <- runif(1) > 0.3
if(success) {
output <- c(
paste0('Successfully ran model #', jobid),
paste0('arg1 = ', configrow$arg1),
paste0('arg2 = ', configrow$arg2)
)
writeLines(output, con=sprintf('jobs_out/job_%03d.txt', jobid))
}
}
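# (Illustrative sketch, not from the original repo:) a follow-up pass could
# retry only the jobs that never produced output, e.g.:
# done <- file.exists(sprintf('jobs_out/job_%03d.txt', jobsToRun))
# jobsStillToRun <- jobsToRun[!done]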
|
/slurm_make_example/run_jobs_unreliably.R
|
permissive
|
govtmirror/slurm-examples
|
R
| false | false | 1,273 |
r
|
\name{ni}
\alias{ni}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Converts column indices to names or names to indices
}
\description{
Converts between the column indices and the column names of a data frame:
with \code{what = "i"} the result is column indices, and with
\code{what = "n"} the result is column names.
}
\usage{
ni(df, Vector, what)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{
%% ~~Describe \code{df} here~~
}
\item{Vector}{
%% ~~Describe \code{Vector} here~~
}
\item{what}{
%% ~~Describe \code{what} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (df, Vector, what)
{
if (what == "i") {
if (is.numeric(Vector)) {
return(Vector)
}
else {
return(n2i(df, Vector))
}
}
if (what == "n") {
if (is.numeric(Vector)) {
return(i2n(df, Vector))
}
else {
return(Vector)
}
}
}
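## Hypothetical usage (assumes the n2i()/i2n() helpers are available):
## ni(mtcars, c("mpg", "hp"), "i")  # names -> indices
## ni(mtcars, c(1, 4), "n")         # indices -> names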
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/jap/man/ni.Rd
|
no_license
|
jakosz/jap
|
R
| false | false | 1,708 |
rd
|
#check for file in current directory.
if (!"load_data.R" %in% list.files()) {
setwd("~/Desktop/Exploratory Data Analysis")
}
source("load_data.R")
png(file = "plot2.png",
width = 480, height = 480,
units = "px", bg = "transparent")
plot(DateTime, Global_active_power,
type = "l",
xlab = "",
ylab = "Global Active Power (kilowatts)")
dev.off()
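# For context (assumption, not shown in this snippet): load_data.R presumably
# creates the objects used above, along the lines of:
# hpc <- read.table("household_power_consumption.txt", sep = ";", na.strings = "?",
#                   header = TRUE, stringsAsFactors = FALSE)
# hpc <- subset(hpc, Date %in% c("1/2/2007", "2/2/2007"))
# DateTime <- as.POSIXct(strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S"))
# Global_active_power <- as.numeric(hpc$Global_active_power)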
|
/plot2.R
|
no_license
|
sehough/ExData_Plotting1
|
R
| false | false | 374 |
r
|
7387cc9fa685b882f66584c613e51122 p20-1.pddl_planlen=26.qdimacs 2814 10896
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Kronegger-Pfandler-Pichler/bomb/p20-1.pddl_planlen=26/p20-1.pddl_planlen=26.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 73 |
r
|
# Map walks
library(dplyr)
library(purrr)
library(furrr)
source("utils/mapping.R")
source("utils/config.R")
future::plan(future::multiprocess())
here <- here::here
km_per_mile <- 1.60934
all_flat <- get_all_activities() %>%
filter(`Activity Name` == "Pasadena Running",
`Distance (km)` %>%
between(4.5 * km_per_mile,
6 * km_per_mile)) %>%
mutate(data = future_map(`Activity ID`, get_activity_data, .progress = TRUE)) %>%
flatten_activities()
smaller_area <- all_flat %>%
group_by(`Activity ID`) %>%
filter(min(lon) > -118.0807)
library(ggmap)
basemap <- make_basemap(smaller_area, zoom=15)
pl_dat <- all_flat %>%
group_by(`Activity ID`) %>%
mutate(ele_norm = (ele - min(ele))/(max(ele)-min(ele)))
run_plot <- basemap +
geom_path(data=pl_dat, aes(x=lon,y=lat, group=`Activity ID`, color=ele_norm)) +
scale_colour_gradient(low="green", high="red")
ggsave(run_plot,
file="runs.png",
height=2.5, width=7)
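# Note (version-dependent assumption): in current releases of the future
# package, multiprocess is defunct; an equivalent plan would be
# future::plan(future::multisession)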
|
/1_utility_scripts/map_runs.R
|
no_license
|
snowdj/clean_and_dry
|
R
| false | false | 982 |
r
|
#loading in the data and converting to time series
#the directory needs to be set before running the program so that the data can be loaded.
install.packages("car")
library(car)
#installing SSA packages
install.packages("Rssa")
library(Rssa)
install.packages("nortest")
library(nortest)
WH.unemp <- read.csv("WAWHAT5URN.csv")
WH.unemp <- ts(WH.unemp$WAWHAT5URN, start = 1990, frequency = 12)
#using the Box-Cox procedure to see if a transformation is necessary
lambda <- boxCox(WH.unemp ~ 1, family = "bcPower")
#because the 95% CI for lambda includes zero, a logistic-type
#transformation is used
#transforming the time series
mi <- min(WH.unemp); ma <- max(WH.unemp)
log.WH <- log((WH.unemp - mi + .1) / (ma - WH.unemp + .1))
log.WH <- ts(log.WH, start = 1990, frequency = 12)
#checking for normality
shapiro.test(log.WH) #at a significance level of alpha=.005 the data are not normal.
n <- length(WH.unemp)
ssa.log.WH <- ssa(log.WH, kind = "1d-ssa")
plot(ssa.log.WH) #eigenvalues
plot(ssa.log.WH, type = "vectors", idx = 1:25) #eigenvectors
plot(ssa.log.WH, type = "paired", idx = 1:25) #pairs of eigenvectors
plot(wcor(ssa.log.WH)) #w-correlation matrix
#pairs are c(8,9) and c(14,15)
#below is a table of the periods, frequencies and yearly period and the
#closest matching whole year as well.
estimates.ssa.column <- matrix(0, nrow = 15, ncol = 3)
for ( i in 1 : 15 ) {
#period
estimates.ssa.column[i,1] <- parestimate(ssa.log.WH, list(c(i,(i+1))), method = "pairs",
subspace = "column")$period
#frequency
frequency <- as.numeric(parestimate(ssa.log.WH, list(c(i,(i+1))), method = "pairs",
subspace = "column")[3])
estimates.ssa.column[i,2] <- frequency * 2 * pi
#period with respect to a year
estimates.ssa.column[i,3] <- parestimate(ssa.log.WH, list(c(i,(i+1))), method = "pairs",
subspace = "column")$period / 12
}
colnames(estimates.ssa.column) <- c("period", "Arg", "yearly")
rownames(estimates.ssa.column) <- c("1,2","2,3","3,4","4,5","5,6","6,7","7,8","8,9","9,10","10,11",
"11,12","12,13","13,14","14,15","15,16")
pairs.cycle <- matrix(c("5 6","8 9","14 15"), nrow = 3, ncol = 1)
colnames(pairs.cycle) <- c("Pairs")
rownames(pairs.cycle) <- c("12 Months", "6 Months", "18 Months")
#creating the SSA grouping
group.log.WH <- list(c(1),c(2),c(3),c(4),c(7),c(12),c(13),
c(5,6),c(8,9),c(10,11),c(14,15))
num.comp.log <- length(group.log.WH)
recon.log.WH <- reconstruct(ssa.log.WH, groups = group.log.WH)
#fully reconstructed time series
dlog.WH <- rep(0,n)
for ( i in 1 : num.comp.log )
{
dlog.WH <- dlog.WH + recon.log.WH[[i]]
}
plot(cbind(log.WH, dlog.WH), plot.type = "single", col = c("black", "red"))
#########################################
#transforming data back
#########################################
trans.WH <- (ma * exp(log.WH) + .2 * exp(log.WH) + mi - .2) / (1 + exp(log.WH))
trans.ssa.WH <- (ma * exp(dlog.WH) + .2 * exp(dlog.WH) + mi - .2) / (1 + exp(dlog.WH))
plot(cbind(trans.WH, trans.ssa.WH), plot.type = "single", col = c("black", "red"))
#residuals for reconstruction
res <- residuals(recon.log.WH)
pvalues <- double(10)
for ( lag in 1 : 10 )
{
pvalues[lag] <- Box.test(res, lag = lag)$p.value
}
plot(pvalues)
#conclusion is that there is autocorrelation present in the residuals
#residual reconstruction
library(forecast)
ar.aicc <- auto.arima(res, d = 0, D = 0, max.p = 5, max.q = 5, max.P = 0, max.Q = 0,
stationary = TRUE, seasonal = FALSE, ic = "aicc", allowmean = FALSE)
ar1 <- arima.sim(n = (length(res)-1), list(ar = 0.2599), sd = sqrt(.08262))
ad.test(ar1)
#conclusion is that ar1 is normally distributed
#forecasting 44 months ahead (to the end of 2021)
n.ahead <- 44
for.ssa.log.WH <- rforecast(ssa.log.WH, groups = group.log.WH,
len = n.ahead, only.new = FALSE)
for.log.WH <- rep(0, (n+n.ahead))
for ( i in 1 : num.comp.log )
{
for.log.WH <- for.log.WH + for.ssa.log.WH[[i]]
}
#forecasted points using the AR1
forecasts <- predict(ar.aicc, n.ahead = n.ahead, interval = "prediction")
log.point.forecast <- c(forecasts$pred) + for.log.WH[(n+1):(n+n.ahead)]
#degrees of freedom for t-dist using kurtosis
install.packages("fBasics")
library(fBasics)
kurt <- kurtosis(res)
df <- (6 + kurt * 4) / kurt
alpha <- .05
quantile.t <- qt(1-alpha/2, df = df)
log.up <- c(forecasts$pred + quantile.t * forecasts$se) + for.log.WH[(n+1):(n+n.ahead)]
log.lo <- c(forecasts$pred - quantile.t * forecasts$se) + for.log.WH[(n+1):(n+n.ahead)]
upper.lim <- ts((ma * exp(log.up) + .2 * exp(log.up) + mi - .2) /
(1 + exp(log.up)), start = 2018+4/12, frequency = 12)
lower.lim <- ts((ma * exp(log.lo) + .2 * exp(log.lo) + mi - .2) /
(1 + exp(log.lo)), start = 2018+4/12, frequency = 12)
#transforming prediction model
for.trans.WH <- ts((ma * exp(for.log.WH) + .2 * exp(for.log.WH) + mi - .2) /
(1 + exp(for.log.WH)), start = 1990, frequency = 12)
par(cex.lab = 2, cex.axis = 2, cex.main = 4)
plot(for.trans.WH, lwd = 2, xlab = "Year", ylab = "Percent Unemployed",
main = "Forecasted U-6 Unemployment in Whatcom County to 2022")
abline(v = c(1990,1995,2000,2005,2010,2015,2020), h = c(5,6,7,8,9,10), lty = "dashed")
t.pred <- seq(2018+4/12,2022-1/12,1/12)
lines(upper.lim ~ t.pred)
lines(lower.lim ~ t.pred)
mycol.grey <- rgb(190,190,190, max = 255, alpha = 150, "orange")
polygon(c(t.pred,rev(t.pred)), c(lower.lim,rev(upper.lim)),col = mycol.grey, border = NA)
lines(for.trans.WH[(n+1):(n+n.ahead)] ~ t.pred, col = "red", lwd = 2)
#RMSE: quality of prediction
num.window <- 10
log.error <- matrix(0, ncol = n.ahead, nrow = num.window)
for ( w in 1 : num.window )
{
log.WH.win <- for.log.WH[w:(w+n-1)]
log.WH.win.ssa <- ssa(log.WH.win, kind = "1d-ssa")
recon.log.win <- reconstruct(log.WH.win.ssa, groups = group.log.WH)
d.log.win <- rep(0,n)
for ( i in 1 : num.comp.log )
{
d.log.win <- d.log.win + recon.log.win[[i]]
}
for.log.win.ssa <- rforecast(log.WH.win.ssa, groups = group.log.WH,
len = n.ahead, only.new = FALSE)
for.log.win <- rep(0,(n+n.ahead))
for ( i in 1 : num.comp.log)
{
for.log.win <- for.log.win + for.log.win.ssa[[i]]
}
res.det.win <- log.WH.win - d.log.win
ar.aicc.win <- Arima(res.det.win, order = c(1,0,0), seasonal = c(0,0,0),
include.mean = FALSE, method = "CSS", lambda = NULL)
ar.res.win <- ar.aicc.win$residuals
for.win <- predict(ar.aicc.win, n.ahead = n.ahead)
log.point.for.win <- c(for.win$pred) + for.log.win[(n+1):(n+n.ahead)]
log.actual.win <- for.log.WH[(w+n):(w+n-1+n.ahead)]
log.error[w,] <- log.point.for.win - log.actual.win
print(w)
}
rmse <- sqrt(colMeans(log.error^2, na.rm = TRUE))
plot(rmse, main = "RMSE Values for each Predicted Month", xlab = "Predicted Month",
ylab = "RMSE", col = "black", pch = 16, cex = 2)
abline(h = c(.2,.4,.6,.8,1), v = c(10,20,30,40), lty = "dashed")
##############
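# (Suggested refactor, not in the original:) the logistic back-transform above
# is written out three times; a helper would keep it in one place. Note the
# forward transform used an offset of 0.1 while the back-transforms use 0.2,
# which looks inconsistent (assumption: one value was intended throughout).
# inv_logistic <- function(z, mi, ma, eps = 0.1) {
#   (ma * exp(z) + eps * exp(z) + mi - eps) / (1 + exp(z))
# }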
|
/whatcom_unemployment/WH_unemp_code.R
|
no_license
|
olsonre/whatcom_unemployed
|
R
| false | false | 6,928 |
r
|
install.packages("C:/Users/bloh356/Downloads/gdxrrw_0.4.0.zip", repos = NULL)
require(gdxrrw)
# The code below generates a node-branch incidence matrix for a complete network with n vertices. For more information see Appendix C of
# Complementarity Modelling of Energy Markets
# The number of vertices
n = 3
# The number of edges in a complete graph (one edge for each direction) with n nodes is:
#edges = n*(n - 1)
edges = 3
incidence.matrix = matrix(rep(0, n*edges), nrow = n, ncol = edges)
z = seq(from = 1, to = edges, by = (n-1))
for (i in 1:n) {
if (i == 1) {incidence.matrix[i, z[i]:(n-1)] = 1} else {incidence.matrix[i, z[i]:(z[i] + n - 2)] = 1}
if (i != 1) { a = incidence.matrix
incidence.matrix[1, ] = a[i, ]
incidence.matrix[i, ] = a[1, ]
incidence.matrix[2:n, z[i]:(z[i] + n - 2)] = diag(-1, (n-1))} else {incidence.matrix[2:n, 1:(n-1)] = diag(-1, (n-1))}
}
# Set the reference node (i.e., phase angle equals zero) and remove that node from the node-incidence matrix
reference.node = n
incidence.matrix = incidence.matrix[which(1:n != reference.node), ]
# Susceptance vector
susceptance = rep(0.5, edges)
# Susceptance matrix
X = diag(susceptance)
# Power flows p610
C.11 = incidence.matrix%*%X%*%t(incidence.matrix)
# Sensitivity matrix, p610 (C.11 above is the reduced susceptance matrix, so invert it)
C.8 = X%*%t(incidence.matrix)%*%solve(C.11)
# The above is probably not that useful since all we care about is the net flow (I think).
# Need to figure out how to send this data back to GAMS
# Might be cool to figure out how to make a node-incidence matrix for not complete graphs (using information on the edges)
# 1. Generate a 2 column matrix with all possible combinations of the number of nodes
# 2. If column 1 is greater than column 2 then entry A[column 1, column 2] is +1.
# 3. If column 1 is less than column 2 then entry A[column 1, column 2] is -1.
# 4. All other entries are zero.
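# A minimal sketch of steps 1-4 above (illustrative; uses every node pair as an edge):
edge_list <- t(combn(n, 2))  # step 1: all node pairs
A <- matrix(0, nrow = n, ncol = nrow(edge_list))
for (e in seq_len(nrow(edge_list))) {
  A[edge_list[e, 1], e] <- +1  # steps 2-3: +1 at the lower-index node
  A[edge_list[e, 2], e] <- -1  #            -1 at the higher-index node
}  # step 4: all other entries remain zero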
|
/src/R/node_incidence.R
|
no_license
|
andymd26/vigilant-enigma
|
R
| false | false | 1,914 |
r
|
test_that("ifelse. works with true = NA", {
df <- tidytable(x = 1:4)
df <- df %>%
mutate.(new_col = ifelse.(x > 2L, NA, x - 1L))
expect_equal(df$new_col, c(0,1,NA,NA))
})
test_that("ifelse. works with false = NA", {
df <- tidytable(x = 1:4)
df <- df %>%
mutate.(new_col = ifelse.(x > 2L, x - 1L, NA))
expect_equal(df$new_col, c(NA,NA,2,3))
})
|
/tests/testthat/test-ifelse.R
|
permissive
|
mjkarlsen/tidytable
|
R
| false | false | 370 |
r
|
library(caTools)
library(ggplot2)
dataset = read.csv('Salary_Data.csv')
set.seed(123)
split = sample.split(dataset$Salary, SplitRatio = 0.2)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
regressor = lm(formula = Salary ~ YearsExperience,
data = training_set)
y_pred = predict(regressor, newdata = test_set)
ggplot() +
geom_point(aes(x = training_set$YearsExperience, y = training_set$Salary),
colour = 'red') +
geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
colour = 'blue') +
ggtitle('Salary vs Experience (Training set)') +
xlab('Years of experience') +
ylab('Salary')
ggplot() +
geom_point(aes(x = test_set$YearsExperience, y = test_set$Salary),
colour = 'red') +
geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
colour = 'blue') +
ggtitle('Salary vs Experience (Test set)') +
xlab('Years of experience') +
ylab('Salary')
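# (Illustrative addition:) quantify test-set fit of the predictions made above
rmse <- sqrt(mean((test_set$Salary - y_pred)^2))
rmse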
|
/linear_regression/r.R
|
no_license
|
alinagrishchuk/machine_learning
|
R
| false | false | 1,055 |
r
|
library(shiny)
# Define server logic required to draw the margin-of-error plot
shinyServer(function(input, output) {
# Expression that generates the plot. The expression is
# wrapped in a call to renderPlot to indicate that:
#
# 1) It is "reactive" and therefore should re-execute automatically
# when inputs change
# 2) Its output type is a plot
output$distPlot <- renderPlot({
x <- 1:10000
zstar = qnorm(0.975)
p = 0.50
# margen de error 3%
E = 3/100
# n = 1067 personas
y <- c()
for( i in x) {
E = zstar*sqrt(p*(1-p)/i)
y <- c(y, E*100)
}
# plot the margin of error against the number of people surveyed
plot(x, y, col = 'skyblue', log="x", axes=FALSE, xlab="Personas encuestadas (escala logarítmica)",
ylab = "Margen de error en porcentaje", type="b")
axis(2, las=1)
axis(1, at=c(0,10,100,1000,10000))
abline(h=input$bins[1])
n = zstar^2*p*(1-p)/(input$bins[1]/100)^2
abline(v=n)
})
})
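# For context (assumption, not from this repo): a minimal ui.R pairing would
# look something like:
# shinyUI(fluidPage(
#   sidebarLayout(
#     sidebarPanel(sliderInput("bins", "Margen de error (%):",
#                              min = 1, max = 10, value = 3)),
#     mainPanel(plotOutput("distPlot"))
#   )
# ))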
|
/app1/server.R
|
no_license
|
aniversarioperu/encuestas
|
R
| false | false | 1,012 |
r
|
library(GA)
f <- function(x) abs(x) + cos(x)
min <- -20; max <- +20
curve(f, min, max)
fitness <- function(x) - f(x)
#-----------------------------------Observe iterations--------------------------------------
monitor <- function(obj) {
curve(f, min, max, main = paste("iteration =", obj@iter), font.main = 1)
points(obj@population, -obj@fitness, pch = 20, col = 2)
rug(obj@population, col = 2)
Sys.sleep(0.2)
}
#----------------------------------Run model-------------------------------------------------
GA <- ga(type = "real-valued", fitness = fitness, min = min, max = max, monitor = monitor)
plot(GA)
summary(GA)
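# Sanity check (illustrative): f has its global minimum at x = 0 with f(0) = 1,
# so the solution should be near 0 and the best fitness near -1:
GA@solution
GA@fitnessValue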
|
/GA_MinimizeMonitor.R
|
no_license
|
dalilareis/R-genetic-alg
|
R
| false | false | 632 |
r
|
#' Fit an integer adjusted exponential or gamma distribution
#'
#'
#' @param delays Numeric vector of reporting delays
#' @param samples Numeric, number of samples to take
#' @param dist Character string, which distribution to fit. Defaults to exponential (`"exp"`) but
#' gamma is also supported (`"gamma"`).
#' @return A `stanfit` object containing the posterior samples of the fitted
#' distribution parameters.
#' @export
#' @import Rcpp
#' @import methods
#' @importFrom rstan sampling extract
#' @useDynLib EpiNow, .registration=TRUE
#' @examples
#'
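#' \dontrun{
#' ## Illustrative only (assumes the packaged Stan models are compiled):
#' delays <- rexp(100, rate = 0.2)
#' dist_fit(delays, samples = 1000, dist = "exp")
#' }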
dist_fit <- function(delays = NULL, samples = NULL, dist = "exp") {
if (is.null(samples)) {
samples <- 1000
}
if (samples < 1000) {
samples <- 1000
}
## Model parameters
lows <- delays - 1
lows <- ifelse(lows <=0, 1e-6, lows)
ups <- delays + 1
data <- list(N = length(delays),
low = lows,
up = ups,
iter = samples + 2000,
warmup = 2000)
if (dist %in% "exp") {
model <- stanmodels$exp_fit
data <- c(data, lam_mean = mean(delays))
}else if (dist %in% "gamma") {
model <- stanmodels$gamma_fit
}
## Fit model
fit <- rstan::sampling(
model,
data = data,
control = list(adapt_delta = 0.999),
chains = 4,
refresh = 0)
return(fit)
}
|
/R/dist_fit.R
|
permissive
|
laasousa/EpiNow
|
R
| false | false | 1,226 |
r
|
library(gdistance)
library(dplyr)
#----Load data----
teldata <- readRDS("output/model_data/teldata_raw.RData") # colnames? # fine
spatdata_old <- readRDS("output/model_data/cost_data.RData") # colnames? # fine
landscape <- readRDS("output/model_data/landscape.RData") # colnames?
y <- readRDS("output/model_data/y.RData")
traps <- readRDS("output/model_data/traps.RData") %>% as.matrix()
colnames(traps) <- c("X", "Y")
ss <- readRDS("output/model_data/ss.RData") # colnames?
K <- 90
# Make SCR state-space
# This is just the 10,000 cells from 100x100, so it's NOT aggregated at fact = 4,
# and the likelihood does not split up the state-space and the cost landscape
scr_ss <- list()
for(i in 1:length(landscape)){
scr_ss[[i]] <- crop(landscape[[i]], extent(ss)) %>%
raster::aggregate(fact = 4)
}
# Re-construct spatdata
spatdata <- list()
for(sim in 1:length(spatdata_old)){
spatdata[[sim]] <- list()
for(ind in 1:length(spatdata_old[[sim]])){
tmp_df <- spatdata_old[[sim]][[ind]] %>%
as.data.frame()
sbar <- tmp_df %>%
select(x, y) %>%
colMeans() %>%
as.numeric() %>%
matrix(ncol = 2)
tmp_r <- raster::rasterFromXYZ(tmp_df)
sbar_indx <- raster::extract(x = tmp_r, y = sbar, cellnumbers=T)[,1]
sbar_on_r <- tmp_df[sbar_indx,c("x", "y")]
tmp_result <- tmp_df %>%
select(x,y) %>%
mutate(sbar = ifelse(
(x == sbar_on_r[,1]) & (y == sbar_on_r[,2]),
1,0)) %>%
as.matrix()
spatdata[[sim]][[ind]] <- tmp_result
}
}
# par(mfrow=c(2,4))
# for(i in 1:8){
# plot((spatdata[[1]][inds][[i]])[,1:2],
# pch = 16, col = "gray80", cex = 0.5, asp = 1)
# lines((teldata[[1]][inds][[i]]))
# }
# par(mfrow=c(1,1))
#----1 sim, first position----
sim <- 1
inds <- sample(1:length(teldata[[1]]), size = 8)
#----Fit movement model----
source("R/main/models/scr_move_cost_like.R")
t1 <- Sys.time()
# NLM likelihood evaluation
mmscreco <- nlm(
scr_move_cost_like,
c(2, # alpha2
log(1), # ups
qlogis(0.9), # psi
log(4), # sig
qlogis(0.1), # p0
log(50/ncell(scr_ss[[1]])) # d0
),
mod = "gauss",
hessian = T, print.level = 2,
teldata = teldata[[sim]][inds],
spatdata = spatdata[[sim]][inds],
landscape = landscape[[sim]],
scr_ss = scr_ss[[sim]],
K = K, scr_y = y[[sim]], trap_locs = traps,
dist = "lcp", popcost=T, popmove=T, fixcost=F, use.sbar=T, prj=NULL)
t2 <- Sys.time()
t_mmscreco <- t2-t1
beepr::beep()
#----Fit model w/o movement----
source("R/main/models/scr_cost_like.R")
t3 <- Sys.time()
# NLM likelihood evaluation
screco <- nlm(
scr_cost_like, mod = "gauss",
c(2, # alpha2
log(4), # sigma
qlogis(0.1), # p0
log(50/ncell(scr_ss[[1]])) # d0
),
hessian = T,
landscape = landscape[[sim]],
scr_ss = scr_ss[[sim]],
K = K, scr_y = y[[sim]], trap_locs = traps,
dist = "lcp")
t4 <- Sys.time()
t_screco <- t4-t3
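# (Illustrative, assumes convergence:) back-transform the nlm estimates for the
# no-movement model above, e.g.:
# est <- screco$estimate
# c(alpha2 = est[1], sigma = exp(est[2]), p0 = plogis(est[3]), d0 = exp(est[4]))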
|
/R/archive/1_fit.R
|
no_license
|
chrissuthy/telemetry-informed-cost
|
R
| false | false | 3,134 |
r
|
library(tidyverse) # for tibble, the dplyr verbs, %>%, and as_factor
region <- c('north', 'south')
metro <- c('urban', 'suburban', 'rural')
sample_size <- 1e3
intercept <- 0
b1 <- 1.5
tbl <- tibble(
x1 = base::sample(1:10, size = sample_size, replace = TRUE)
, a1 = base::sample(region, size = sample_size, replace = TRUE) %>% as_factor()
, a2 = base::sample(metro, size = sample_size, replace = TRUE) %>% as_factor()
) %>%
mutate(
e = rnorm(sample_size, 3)
, y = intercept + x1 * b1 + e
)
fit <- tbl %>%
lm(formula = y ~ 0 + x1:a1:a2)
summary(fit)
fit_north_suburban <- tbl %>%
filter(
a1 == 'north'
, a2 == 'suburban') %>%
lm(formula = y ~ 0 + x1)
summary(fit_north_suburban)
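# (Illustrative check:) the interaction fit estimates one slope per
# region x metro cell, so the subset fit should roughly match the
# corresponding coefficient (the name depends on factor-level order):
# coef(fit)[["x1:a1north:a2suburban"]]
# coef(fit_north_suburban)[["x1"]]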
|
/grouped_data.R
|
no_license
|
PirateGrunt/sparsity_blues
|
R
| false | false | 648 |
r
|
y6 <- subset(agg_2016, Yearly.Patient.Spend < 25000)
y7 <- subset(agg_2017, Yearly.Patient.Spend < 25000)
l <- list(y6$Yearly.Patient.Spend, y7$Yearly.Patient.Spend)
names(l) <- c("2016", "2017")
boxplot(l, horizontal = T, col = c("light blue", "grey"))
|
/playpen.R
|
no_license
|
curryhilton/aco
|
R
| false | false | 255 |
r
|
library(tidyverse)
library(rvest)
library(stringr)
library(readr)
topdir <- getwd()
datadir <- paste0(topdir, "/data/")
## set contest caption limit and daily cartoon page limit
captionNumbers <- 1:572
dailyPageNums <- 1:88
## Use rvest to scrape pages for captions -------------------
getContestCaps <- function(contestNumber) {
cap <- read_html(paste0("http://contest.newyorker.com/CaptionContest.aspx?id=", contestNumber))
caps <- html_nodes(cap, ".cap em")
caps <- as.character(caps)
# clean up strings
caps <- str_replace_all(caps, "“", "")
caps <- str_replace_all(caps, "”", "")
caps <- str_replace_all(caps, "<.*?>", "")
caps <- str_replace_all(caps, "\"", "")
caps <- str_replace_all(caps, "’", "'")
caps <- str_replace_all(caps, "‘", "'")
caps <- str_replace_all(caps, "—", ", ")
caps <- str_replace_all(caps, "…", "")
caps <- str_replace_all(caps, "\U2011", "-")
caps <- str_replace_all(caps, "\U00A0", "")
caps <- str_replace_all(caps, "\U00E9", "e")
caps <- str_replace_all(caps, "(?<=\\b[A-Z])[.](?=[A-Z]|[ a-z]|[,])", "")
caps <- str_replace_all(caps, " . . .", "...")
caps <- str_replace_all(caps, " ", "")
caps <- str_trim(caps)
return(caps)
}
getIssueCaptions <- function(pageNumber) {
cap <- read_html(paste0("http://www.newyorker.com/cartoons/daily-cartoon/page/", pageNumber))
caps <- html_nodes(cap, ".River__dek___CayIg")
caps <- as.character(caps)
# clean up strings
caps <- str_replace_all(caps, "“", "")
caps <- str_replace_all(caps, "”", "")
caps <- str_replace_all(caps, "<.*?>", "")
caps <- str_replace_all(caps, "\"", "")
caps <- str_replace_all(caps, "’", "'")
caps <- str_replace_all(caps, "‘", "'")
caps <- str_replace_all(caps, "—", ", ")
caps <- str_replace_all(caps, "…", "")
caps <- str_replace_all(caps, "\U2011", "-")
caps <- str_replace_all(caps, "\U00A0", "")
caps <- str_replace_all(caps, "\U00E9", "e")
caps <- str_replace_all(caps, "(?<=\\b[A-Z])[.](?=[A-Z]|[ a-z]|[,])", "")
caps <- str_replace_all(caps, " . . .", "...")
caps <- str_replace_all(caps, " ", "")
caps <- str_trim(caps)
sess <- html_session(paste0("http://www.newyorker.com/cartoons/daily-cartoon/page/", pageNumber))
imgsrc <- sess %>%
read_html() %>%
html_nodes("img")
imgsrc <- unlist(str_split(imgsrc, "src\\=\""))
imgsrc <- imgsrc[str_detect(imgsrc, "https\\://media.newyorker.com/photos")]
imgs <- unlist(str_split(imgsrc, "\""))
imgs <- imgs[str_detect(imgs, "https\\://media.newyorker.com/photos")]
# imgs <- imgs[str_sub(imgs, start = -3) == "jpg" & str_sub(imgs, start = 1, end = 4) == "http"]
return(list(caps, imgs))
}
# -----------------------------------------------------------------
# run loops to scrape captions ------------------------------------
# -----------------------------------------------------------------
contestCaptions <- NULL
# scrape contest caption loop -------------------------
for(i in captionNumbers) {
temp <- getContestCaps(i)
df <- data.frame(temp, 1:3, stringsAsFactors = FALSE)
contestCaptions <- bind_rows(contestCaptions, df)
}
# clean up caption dataset
names(contestCaptions) <- c("Caption", "Rank")
# Save scraped data ---------------------------
write_csv(contestCaptions, paste0(datadir, "contestcaptions.csv"))
# -----------------------------------------------------------------
# scrape regular captions from daily caption archive
# -----------------------------------------------------------------
issueCaptions <- NULL
# scrape issue captions loop --------------------------
for(i in dailyPageNums) {
temp <- getIssueCaptions(i)
issueCaptions <- if(i == 1) {
temp
} else {
Map(c, issueCaptions, temp)
}
}
names(issueCaptions) <- c("Caption", "Image")
# Save issue caption dataset --------------------------
issuecaps <- data.frame(issueCaptions$Caption, stringsAsFactors = FALSE)
names(issuecaps) <- c("Caption")
issueimgs <- data.frame(issueCaptions$Image, stringsAsFactors = FALSE)
names(issueimgs) <- c("Image")
write_csv(issuecaps, paste0(datadir, "issuecaptions.csv"))
write_csv(issueimgs, paste0(datadir, "issueimages.csv"))
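# (Suggested refactor, not in the original:) the identical string-cleanup
# pipeline in both scraper functions could be factored into one helper:
clean_caps <- function(x) {
  x %>%
    str_replace_all("<.*?>", "") %>%
    str_replace_all("[“”\"]", "") %>%
    str_replace_all("[‘’]", "'") %>%
    str_trim()
}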
|
/get_captions.R
|
no_license
|
ngbb/nycc
|
R
| false | false | 4,187 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phasingImpute4.R
\name{.imputedByImpute4}
\alias{.imputedByImpute4}
\title{Impute genotypes using IMPUTE4}
\usage{
.imputedByImpute4(
impute4,
chrs,
prefixChunk,
phaseDIR,
referencePanel,
impRefDIR,
imputedDIR,
prefix4eachChr,
nCore,
effectiveSize = 20000
)
}
\arguments{
\item{impute4}{an executable program in either the current
working directory or somewhere in the command path.}
\item{chrs}{specify the chromosome codes for imputation.}
\item{prefixChunk}{the prefix of the chunk files for each chromosome,
along with the proper location directory.}
\item{phaseDIR}{the directory where prephased haplotypes are located.}
\item{referencePanel}{a string indicating the type of imputation
reference panel used: c("1000Gphase1v3_macGT1", "1000Gphase3").}
\item{impRefDIR}{the directory where the imputation reference files
are located.}
\item{imputedDIR}{the directory where imputed files will be located.}
\item{prefix4eachChr}{the prefix of IMPUTE2 files for each chunk.}
\item{nCore}{the number of cores used for computation.}
\item{effectiveSize}{this parameter controls the effective population size.
Commonly denoted as Ne. A universal -Ne value of 20000 is suggested.}
}
\value{
The imputed files for all chunks from given chromosomes, except
sex chromosomes.
}
\description{
Perform imputation by IMPUTE4 for the autosomal prephased known haplotypes
with a reference panel.
}
\seealso{
\code{\link{phaseImpute4}}.
}
\author{
Junfang Chen
}
|
/man/dot-imputedByImpute4.Rd
|
no_license
|
transbioZI/Gimpute
|
R
| false | true | 1,569 |
rd
|
# Mini case study --------------------------------------------------------------
# Data Import -------------------------------------------------------------
library(readr)
cjb_url <-
"https://github.com/byaxb/RDataAnalytics/raw/master/data/cjb.csv"
cjb <- read_csv(cjb_url,
locale = locale(encoding = "CP936"))
View(cjb)
# Data Exploration --------------------------------------------------------
library(tidyverse)
cjb %>%
select(sx, wlfk) %>%
ggplot(aes(x = wlfk,
y = sx,
fill = wlfk)) +
geom_boxplot(width = 0.5)
# Data Preparation --------------------------------------------------------
as_five_grade_scores <- function(x) {
cut(
x,
breaks = c(0, seq(60, 100, by = 10)),
include.lowest = TRUE,
right = FALSE,
ordered_result = TRUE,
labels = c("不及格", "及格", "中", "良", "优")
)
}
cjb <- cjb %>%
mutate(zcj = rowSums(.[4:12])) %>%
filter(zcj != 0) %>% # drop dirty records
mutate_at(vars(xb, wlfk), factor) %>% # type conversion
mutate_at(vars(yw:sw), as_five_grade_scores) # bin scores into five grades
View(cjb)
# Model -------------------------------------------------------------------
library(arulesViz)
my_model <- cjb %>%
select(xb:wlfk) %>%
apriori(parameter = list(supp = 0.06, conf = 0.8),
appearance = list(rhs = paste0("wlfk=", c("文科", "理科"))))
# Visualization -----------------------------------------------------------
inspectDT(my_model)
plot(my_model, method = "graph")
# alternatively, render the graph interactively
plot(my_model,
method = "graph",
engine = "htmlwidget")
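# (Illustrative addition:) inspect the top rules by lift
inspect(head(sort(my_model, by = "lift"), 5))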
# The End ^-^ -------------------------------------------------------------
|
/00_迷你小案例.R
|
permissive
|
zeji0923/RDataAnalytics
|
R
| false | false | 1,739 |
r
|
setwd("~/Desktop/R")
subdata <- file("household_power_consumption.txt")
# grep keeps only the 1/2/2007 and 2/2/2007 data rows, so there is no header line
plot1 <- read.table(text = grep("^[12]/2/2007", readLines(subdata), value = TRUE), col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), header = FALSE, sep = ";", na.strings = "?", nrows = 2075259, check.names = FALSE, stringsAsFactors = FALSE, comment.char = "", quote = '\"')
# build the datetime variable used below (it was referenced but never created)
plot1$datetime <- as.POSIXct(strptime(paste(plot1$Date, plot1$Time), "%d/%m/%Y %H:%M:%S"))
png(filename='plot3.png',width=480,height=480,units='px')
with(plot1, {plot(Sub_metering_1 ~ datetime, type = "l", ylab = "Energy sub metering", xlab = "")
lines(Sub_metering_2 ~ datetime, col = 'Red')
lines(Sub_metering_3 ~ datetime, col = 'Blue') })
dev.off()
|
/plot3.R
|
no_license
|
Muhsuan/ExData_Plotting1
|
R
| false | false | 871 |
r
|
## import dataset
blood_test <- read.table(file="/mnt/c/Users/Nnamdi/Desktop/Bioinformatics/Univariate_data_modelling/Exercises/DATASETS/BLOOD.txt", header = T, sep=",")
head(blood_test)
### Exploratory data analysis
summary(blood_test)
hist(blood_test$testost)
# remove the missing data
blood.df <- subset(blood_test, testost != 999) # 999 codes a missing value
summary(blood.df$testost)
hist(blood.df$testost)
#Simple logistic regression
blood.glm <- glm(case ~ testost, family = binomial(link=logit), data=blood.df)
summary(blood.glm)
# Odds ratio
exp(blood.glm$coefficients)
### Model diagnostics
dev <- (blood.glm$null.deviance - blood.glm$deviance)/blood.glm$null.deviance
dev
### ROC curve
library("ROCR")
predict <- fitted(blood.glm)
pred <- prediction(predict, blood.df$case)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, main="sensitivity vs false positive rate",colorize=TRUE)
## Multiple logistic regression
blood.glm2 <- glm(case ~ testost +age, family = binomial(link=logit), data=blood.df)
summary(blood.glm2)
# Odds ratio
exp(blood.glm2$coefficients)
## Model diagnostics - deviance of the model
dev2 <- (blood.glm2$null.deviance - blood.glm2$deviance)/blood.glm2$null.deviance
dev2
# ROC curve
predict <- fitted(blood.glm2)
pred <- prediction(predict, blood.df$case)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, main="sensitivity vs false positive rate", colorize=TRUE)
### Comparison of model 1 and model 2
diff.dev <- blood.glm$deviance - blood.glm2$deviance
1-pchisq(diff.dev,1)
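### AUC (a minimal follow-up sketch; reuses the ROCR 'pred' object for model 2 above)
auc <- performance(pred, measure = "auc")
unlist(slot(auc, "y.values"))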
|
/logistic_regression/breast_cancer.R
|
no_license
|
asouzujoseph/Univariate-statistical-data-analysis
|
R
| false | false | 1,607 |
r
|
#' Dataset merger function
#'
#' This function allows you to merge two data.frames by their overlapping rownames.
#' @param DF1 the first data.frame
#' @param DF2 the second data.frame
#' @param main which data.frame should be used as the main? Defaults to 1 (use the first); 2 uses the second, and 0 builds a fresh combined data.frame. Choose the larger one if working with large datasets.
#' @param time (logical) if TRUE, print the elapsed time.
#' @param join (character scalar) Which data.frames to use cases from. Defaults to "both". Can be: both, left, right.
#' @export
#' @examples
#' merge_datasets(mtcars[1:16, ], mtcars[17:32, ])
merge_datasets = function (DF1, DF2, main=1, time=F, join = "both"){
#time if desired
if (time) {time1 = proc.time()} #start timer
#checks
if (!main %in% 0:2){ #check for valid input
stop("Invalid input to main parameter provided!")
}
if (!join %in% c("both", "left", "right")) stop("Invalid join parameter!")
#main setting decides how to combine
if (join == "left") {
DF2 = DF2[intersect(rownames(DF1), rownames(DF2)), , drop = F] #subset to overlap with DF1
}
if (join == "right") {
DF1 = DF1[intersect(rownames(DF1), rownames(DF2)), , drop = F] #subset to overlap with DF2
}
#if nothing to join
if (nrow(DF1) == 0) {
message("Warning, nothing joined! No case in DF1 matches any in DF2!")
return(DF2)
}
if (nrow(DF2) == 0) {
message("Warning, nothing joined! No case in DF2 matches any in DF1!")
return(DF1)
}
#combined dataset
if (main==0){ #create a combined dataset
#colnames, remove duplicates
total.colnames = c(colnames(DF1), colnames(DF2))
total.colnames.unique = unique(total.colnames)
#rownames, remove duplicates
total.rownames = c(rownames(DF1), rownames(DF2))
total.rownames.unique = unique(total.rownames)
#make DF3
DF3 = as.data.frame(matrix(nrow = length(total.rownames.unique),
ncol = length(total.colnames.unique)))
rownames(DF3) = sort(total.rownames.unique)
colnames(DF3) = total.colnames.unique
}
if (main==1){ #use first DF as main
DF3 = DF1
}
if (main==2){ #use second DF as main
DF3 = DF2
}
if (main!=2){
#loop over input dataset 2
for (variable in 1:length(colnames(DF2))){ #loop over variables/cols
for (case in 1:length(rownames(DF2))){ #loop over cases/rows
if (is.na(DF2[case, variable])){ #skip if datapoint is missing
next
}
DF3[rownames(DF2)[case], colnames(DF2)[variable]] = DF2[case, variable]
#print(DF3[rownames(DF2)[case], colnames(DF2)[variable]]) #used for debugging
}
}
}
if (main!=1){ #if DF2 is main
#loop over input dataset 1
for (variable in 1:length(colnames(DF1))){ #loop over variables/cols
for (case in 1:length(rownames(DF1))){ #loop over cases/rows
if (is.na(DF1[case, variable])){ #skip if datapoint is missing
next
}
DF3[rownames(DF1)[case], colnames(DF1)[variable]] = DF1[case, variable]
#print(DF3[rownames(DF1)[case], colnames(DF1)[variable]]) #used for debugging
}
}
}
#output time
if (time) {
time2 = proc.time() - time1 #end timer
message(time2) #print time
}
return(DF3)
}
#' Dataset merger function for multiple data.frames.
#'
#' This is a wrapper for merge_datasets().
#' @param ... (data.frames) Two or more data.frames to merge.
#' @keywords merging, combining, datasets, data.frame, multi, wrapper
#' @export
#' @examples
#' merge_datasets_multi(iris[1:50, ], iris[51:100, ], iris[101:150, ]) #merge three-part iris
merge_datasets_multi = function(...) {
#wrap with Reduce
Reduce(function(x, y) merge_datasets(x, y), list(...))
}
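# Design note: Reduce() folds merge_datasets() left to right, so with the
# default main = 1 the running result is always treated as the main data.frame.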
#' Jensen method (method of correlated vectors) plot
#'
#' Returns a ggplot2 scatter plot with numerical results in a corner. Also supports reversing for dealing with factors that have negative indicators.
#' @param loadings a vector of factor loadings.
#' @param cors a vector of correlations of the indicators with the criteria variable.
#' @param reverse whether to reverse indicators with negative loadings. Defaults to TRUE.
#' @param text_pos which corner to write the numerical results in. Options are "tl", "tr", "bl", "br". Defaults to NULL, which auto-detects a corner from the sign of the correlation.
#' @param var_names whether to label the points with variable names. Defaults to TRUE.
#' @param check_overlap whether to drop overlapping point labels. Defaults to TRUE.
#' @export
Jensen_plot = function(loadings, cors, reverse = TRUE, text_pos = NULL, var_names = TRUE, check_overlap = TRUE){
#initial
  temp_loadings = as.numeric(loadings) #convert to a plain vector
names(temp_loadings) = rownames(loadings) #set names again
loadings = temp_loadings #back to normal name
DF = data.frame(loadings, cors) #DF
#reverse
if (reverse) {
for (idx in 1:nrow(DF)) {
if (DF[idx, 1] < 0){ #if loading <0
DF[idx, ] = DF[idx, ] * -1 #reverse
rownames(DF)[idx] = paste0(rownames(DF)[idx], "_r")
}
}
}
#method text
if (reverse) {method_text = "Jensen's method with reversing\n"} else {method_text = "Jensen's method without reversing\n"}
#correlation
cor = round(cor(DF)[1, 2], 2) #get correlation, rounded
#auto detect text position
if (is.null(text_pos)) {
if (cor>0) text_pos = "tl" else text_pos = "tr"
}
#text object location
if (text_pos == "tl") {
x = .02
y = .98
hjust = 0
vjust = 1
}
if (text_pos == "tr") {
x = .98
y = .98
hjust = 1
vjust = 1
}
if (text_pos == "bl") {
x = .02
y = .02
hjust = 0
vjust = -.1
}
if (text_pos == "br") {
x = .98
y = .02
hjust = 1
vjust = -.1
}
#text
text = paste0(method_text,
"r=", cor, " (orange line)",
"\nn=", nrow(DF))
#text object
text_object = grid::grobTree(grid::textGrob(text, x = x, y = y, hjust = hjust, vjust = vjust),
gp = grid::gpar(fontsize = 11))
#regression line
model = lm(cors ~ loadings, DF)
coefs = coef(model)
#plot
DF$rnames = rownames(DF)
g = ggplot2::ggplot(data = DF, aes(x = loadings, y = cors)) +
geom_point() +
xlab("Loadings") +
ylab("Correlation with criteria variable") +
annotation_custom(text_object) +
geom_abline(intercept = coefs[1], slope = coefs[2], color = "darkorange")
#add var_names if desired
if (var_names) g = g + geom_text(aes(label = rnames), alpha = .7, size = 3, vjust = 1.5, check_overlap = check_overlap)
return(g)
}
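# Hypothetical usage sketch (names below are illustrative, not from the package):
# loadings from a one-factor psych::fa() fit, correlations with an external criterion.
# fit <- psych::fa(mtcars[, c("mpg", "disp", "hp", "wt")], nfactors = 1)
# crit_cors <- cor(mtcars[, c("mpg", "disp", "hp", "wt")], mtcars$qsec)
# Jensen_plot(fit$loadings, crit_cors)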
# Correlates all variables, finds the pair with the highest correlation, and removes one of them using the specified method.
#' Remove the n most redundant variables from a data.frame.
#'
#' Removes the n top variables that highly correlated with another variable so as to avoid problems in analysis.
#' @param df a data.frame.
#' @param num.to.remove the number of variables to remove.
#' @param remove.method the method to use to remove variables. Methods are "f", "s" and "r": remove the first, the second, or a random one of the most correlated pair.
#' @export
remove_redundant_vars = function(df, num.to.remove = 1, remove.method = "s") {
if (!is.data.frame(df)) {
stop(paste0("First parameter is not a data frame. Instead it is ", class(df)))
}
if (!is.numeric(num.to.remove)) {
stop(paste0("Second parameter is not numeric. Instead is ", class(num.to.remove)))
}
  remove.method.1 = substr(remove.method, 1,1) #get first char
  if (!remove.method.1 %in% c("f", "s", "r")) { #first, second or random
    stop(paste0("Third parameter was not identifiable as first, second or random. It was: ", remove.method))
  }
old.names = colnames(df) #save old variable names
for (drop.num in 1:num.to.remove) {
message(paste0("Dropping variable number ", drop.num))
names = colnames(df) #current names
#correlations
cors = as.data.frame(cor(df, use="pair"))
    #remove diagonal 1's
for (idx in 1:nrow(cors)) {
cors[idx, idx] = NA
}
#absolute values because we don't care if cor is .99 or -.99
cors.abs = abs(cors)
#dropping
max.idx = which_max2(cors.abs) #indexes of max value (first one if multiple identical)
topvars = paste(rownames(cors)[max.idx[2]], "and", rownames(cors)[max.idx[1]]) #names of top correlated variables
r = round(cors[max.idx[1], max.idx[2]], 3)
message(paste0("Most correlated vars are ", topvars, " r=", r)) #info
#first
if (remove.method.1 == "f") {
      df[, max.idx[2]] = NULL #remove the first-listed var of the pair
}
#second
if (remove.method.1 == "s") {
df[, max.idx[1]] = NULL #remove the second var
}
#random
if (remove.method.1 == "r") {
if (rnorm(1) > 0){
df[, max.idx[1]] = NULL #remove the second var
}
else {
df[, max.idx[2]] = NULL #remove the first var
}
}
}
#Which variables were dropped?
new.names = colnames(df)
dropped.names = setdiff(old.names, new.names)
message("Dropped the following variables:")
message(dropped.names)
#return reduced df
return(df)
}
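# Hypothetical usage sketch: drop the two most redundant mtcars columns.
# Assumes which_max2() (defined elsewhere in this package) is available.
# mtcars_reduced <- remove_redundant_vars(mtcars, num.to.remove = 2, remove.method = "s")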
|
/old/old.R
|
permissive
|
Deleetdk/kirkegaard
|
R
| false | false | 8,834 |
r
|
#' Explanation Level Uncertainty of Sequential Variable Attribution
#'
#' The `break_down_uncertainty()` function calls the break down algorithm `B` times, once for each random ordering.
#' Then it calculates the distribution of attributions across these different orderings.
#' Note that the `shap()` function is just a simplified interface to the `break_down_uncertainty()` function
#' with `B = 25` random draws by default.
#'
#' @param x a model to be explained, or an explainer created with function `DALEX::explain()`.
#' @param data validation dataset, will be extracted from `x` if it is an explainer.
#' @param predict_function predict function, will be extracted from `x` if it is an explainer.
#' @param new_observation a new observation with columns that correspond to variables used in the model.
#' @param ... other parameters.
#' @param B number of random paths
#' @param path if specified, then this path will be highlighted on the plot. Use `average` in order to show an average effect
#' @param label name of the model. By default it's extracted from the 'class' attribute of the model.
#'
#' @return an object of the `break_down_uncertainty` class.
#' @importFrom utils head
#'
#' @seealso \code{\link{break_down}}, \code{\link{local_attributions}}
#'
#' @references Predictive Models: Visual Exploration, Explanation and Debugging \url{https://pbiecek.github.io/PM_VEE}
#'
#' @examples
#' library("DALEX")
#' library("iBreakDown")
#' # Toy examples, because CRAN angels ask for them
#' titanic <- na.omit(titanic)
#' set.seed(1313)
#' titanic_small <- titanic[sample(1:nrow(titanic), 500), c(1,2,6,9)]
#' model_titanic_glm <- glm(survived == "yes" ~ gender + age + fare,
#' data = titanic_small, family = "binomial")
#' explain_titanic_glm <- explain(model_titanic_glm,
#' data = titanic_small[,-9],
#' y = titanic_small$survived == "yes")
#'
#' # there is no explanation level uncertainty linked with additive models
#' bd_rf <- break_down_uncertainty(explain_titanic_glm, titanic_small[1, ])
#' bd_rf
#' plot(bd_rf)
#'
#' \donttest{
#' library("randomForest")
#' set.seed(1313)
#' model <- randomForest(status ~ . , data = HR)
#' new_observation <- HR_test[1,]
#'
#' explainer_rf <- explain(model,
#' data = HR[1:1000, 1:5])
#'
#' bd_rf <- break_down_uncertainty(explainer_rf,
#' new_observation)
#' bd_rf
#' plot(bd_rf)
#'
#' # example for regression - apartment prices
#' # here we do not have interactions
#' model <- randomForest(m2.price ~ . , data = apartments)
#' explainer_rf <- explain(model,
#' data = apartments_test[1:1000, 2:6],
#' y = apartments_test$m2.price[1:1000])
#'
#' bd_rf <- break_down_uncertainty(explainer_rf, apartments_test[1,])
#' bd_rf
#' plot(bd_rf)
#'
#' bd_rf <- break_down_uncertainty(explainer_rf, apartments_test[1,], path = 1:5)
#' plot(bd_rf)
#'
#' bd_rf <- break_down_uncertainty(explainer_rf,
#' apartments_test[1,],
#' path = c("floor", "no.rooms", "district",
#' "construction.year", "surface"))
#' plot(bd_rf)
#'
#' bd_rf <- shap(explainer_rf,
#' apartments_test[1,])
#' bd_rf
#' plot(bd_rf)
#' plot(bd_rf, show_boxplots = FALSE)
#' }
#' @export
#' @rdname break_down_uncertainty
break_down_uncertainty <- function(x, ..., B = 10)
UseMethod("break_down_uncertainty")
#' @export
#' @rdname break_down_uncertainty
break_down_uncertainty.explainer <- function(x, new_observation,
..., B = 10) {
# extracts model, data and predict function from the explainer
model <- x$model
data <- x$data
predict_function <- x$predict_function
label <- x$label
break_down_uncertainty.default(model, data, predict_function,
new_observation = new_observation,
label = label,
..., B = B)
}
#' @export
#' @rdname break_down_uncertainty
break_down_uncertainty.default <- function(x, data, predict_function = predict,
new_observation,
label = class(x)[1],
...,
path = NULL,
B = 10) {
# here one can add model and data and new observation
# just in case only some variables are specified
# this will work only for data.frames
if ("data.frame" %in% class(data)) {
common_variables <- intersect(colnames(new_observation), colnames(data))
new_observation <- new_observation[, common_variables, drop = FALSE]
data <- data[,common_variables, drop = FALSE]
}
# Now we know the path, so we can calculate contributions
# set variable indicators
# start random path
p <- ncol(data)
result <- lapply(1:B, function(b) {
random_path <- sample(1:p)
tmp <- get_single_random_path(x, data, predict_function, new_observation, label, random_path)
tmp$B <- b
tmp
})
# should we add a specific path?
if (!is.null(path)) {
# average or selected path
if (head(path, 1) == "average") {
# let's calculate an average attribution
extracted_contributions <- sapply(result, function(chunk) {
chunk[order(chunk$variable), "contribution"]
})
result_average <- result[[1]]
result_average$contribution <- rowMeans(extracted_contributions)
result_average$variable <- result_average$variable[order(result_average$variable)]
result_average$B <- 0
result <- c(result, list(result_average))
} else {
# path is a selected ordering
tmp <- get_single_random_path(x, data, predict_function, new_observation, label, path)
tmp$B <- 0
result <- c(result, list(tmp))
}
}
result <- do.call(rbind, result)
class(result) <- c("break_down_uncertainty", "data.frame")
result
}
get_single_random_path <- function(x, data, predict_function, new_observation, label, random_path) {
  # if predict_function returns a single vector, convert it to a data frame
if (length(unlist(predict_function(x, new_observation))) > 1) {
predict_function_df <- predict_function
} else {
predict_function_df <- function(...) {
tmp <- as.data.frame(predict_function(...))
colnames(tmp) = label
tmp
}
}
vnames <- colnames(data)
names(vnames) <- vnames
current_data <- data
yhats <- list()
yhats[[1]] <- colMeans(predict_function_df(x, current_data))
for (i in seq_along(random_path)) {
candidate <- random_path[i]
current_data[,candidate] <- new_observation[,candidate]
yhats[[i + 1]] <- colMeans(predict_function_df(x, current_data))
}
diffs <- apply(do.call(rbind, yhats), 2, diff)
single_cols <- lapply(1:ncol(diffs), function(col) {
data.frame(contribution = diffs[,col],
label = ifelse(ncol(diffs) == 1, label, paste(label,colnames(diffs)[col], sep = ".")),
variable = vnames[random_path])
})
do.call(rbind,single_cols)
}
#' @export
#' @rdname break_down_uncertainty
shap <- function(x, ..., B = 25) {
break_down_uncertainty(x, ..., B = B, path = "average")
}
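# Minimal post-processing sketch (not part of the package API): given a result
# 'bd' from break_down_uncertainty(), average the absolute contribution per variable.
# aggregate(abs(contribution) ~ variable, data = bd, FUN = mean)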
|
/R/break_down_uncertainty.R
|
no_license
|
sztach/iBreakDown
|
R
| false | false | 7,257 |
r
|
#' rstore
#'
#' A persistent storage framework for R
#'
#' @name rstore
#' @docType package
#' @import stringr
NULL
|
/R/rstore.R
|
no_license
|
hskksk/rstore
|
R
| false | false | 116 |
r
|
#detach(package:plyr)
library(dplyr)
library(ggplot2)
library(tidyverse)
#library(plyr)
library(readr)
library(tikzDevice)
library(ggrepel)
library(directlabels)
#detach(package:plyr)
data_all <- list.files(path = ".", # identify the *temp.csv result files in this folder
                       pattern = "temp.csv", full.names = TRUE) %>%
  lapply(read_csv) %>% # read each file into a list
  bind_rows
print(data_all)
uniqModels <- unique(data_all[,2])
uniqFuncs <- unlist(as.list(unique(data_all[,1])))
data_all = filter (data_all, calculator != "CPUDCTnopad")
data_all = filter (data_all, N != 8)
data_double = filter(data_all, bytes == 8)
data_float = filter(data_all, bytes == 4)
HzOnly <- data_all %>%
filter (calculator != "CPUDCTnopad") %>%
filter (calculator != "CPUBicubicQuad") %>%
filter (calculator != "CPULinearQuad") %>%
group_by (N, calculator, bytes) %>%
summarize(speed = median(calcHz,na.rm=TRUE))
HzOnlyWide <- pivot_wider(HzOnly, names_from = bytes, values_from=speed)
HzOnlyWide <- mutate(HzOnlyWide, ratio = `4` / `8`)
HzOnlyWide2 = HzOnly %>%
filter(bytes == 8) %>%
filter(N<290) %>%
pivot_wider(names_from=calculator, values_from=speed)
RatioChart <-
HzOnlyWide2 %>%
mutate(LinearSpeedup = GPULinearSparse/CPULinearSparse) %>%
mutate(BicubicSpeedup = GPUBicubicSparse/CPUBicubicSparse) %>%
mutate(ChebSpeedup = GPUChebDense/CPUChebDense) %>%
  select(N, LinearSpeedup, BicubicSpeedup, ChebSpeedup) %>% # keep N explicitly so pivot_longer(!N) works
pivot_longer(!N,names_to="calculator", values_to="Speedup")
print(RatioChart)
tikzDevice::tikz( file="DoubleFloatRatio.tex",
standAlone=F,
width=9,
height=6.5)
myPlot <- ggplot(data=HzOnlyWide,
aes( N,ratio, color=calculator,linetype=calculator))+
geom_line()+geom_point() +
scale_y_continuous(trans="log10") +#, breaks = c(1.0,.1,.01,.001,1e-4,1e-5,1e-6,1e-7,1e-8,1e-9,1e-10,1e-11)) +
#scale_x_continuous(trans="log2")+#,breaks=c(2,4,8 ,16,32,64,128,256,512,1024)) +
  ggtitle("Float vs. double speed ratio")+geom_dl(aes(label=calculator), method=list("maxvar.points", "bumpup",cex=0.8))+ # funcName is not defined until the loop below
scale_colour_discrete(guide="none")+
scale_linetype_discrete(guide="none")+
coord_cartesian(clip="off")
print (myPlot)
dev.off()
tikzDevice::tikz( file="GPUImpact.tex",
standAlone=F,
width=9,
height=6.5)
myPlot <- ggplot(data=RatioChart,
aes( N,Speedup, color=calculator,linetype=calculator))+
geom_line()+geom_point() +
#scale_y_continuous(trans="log10") +#, breaks = c(1.0,.1,.01,.001,1e-4,1e-5,1e-6,1e-7,1e-8,1e-9,1e-10,1e-11)) +
#scale_x_continuous(trans="log2")+#,breaks=c(2,4,8 ,16,32,64,128,256,512,1024)) +
  ggtitle("GPU vs. CPU speedup")+geom_dl(aes(label=calculator), method=list("maxvar.points", "bumpup",cex=0.8))+ # funcName is not defined until the loop below
scale_colour_discrete(guide="none")+
scale_linetype_discrete(guide="none")+
coord_cartesian(clip="off")+
facet_wrap( . ~ calculator, scales="free", ncol=1)
print (myPlot)
dev.off()
print(typeof(uniqFuncs))
print(uniqFuncs)
for (funcName in uniqFuncs){
fileTarget <- paste0({{funcName}}, ".tex")
print(fileTarget)
  tikzDevice::tikz( file=fileTarget, # all_of() is a tidyselect helper and does not belong here
standAlone=F,
width=9,
height=6.5)
filteredData = data_double[data_double$functionName == funcName,]
myPlot <- ggplot(data=filteredData,
aes( N,maxError, color=calculator,linetype=calculator))+
geom_line()+geom_point() +
scale_y_continuous(trans="log10") +#, breaks = c(1.0,.1,.01,.001,1e-4,1e-5,1e-6,1e-7,1e-8,1e-9,1e-10,1e-11)) +
scale_x_continuous(trans="log2")+#,breaks=c(2,4,8 ,16,32,64,128,256,512,1024)) +
ggtitle(funcName)+geom_dl(aes(label=calculator), method=list("maxvar.points", "bumpup",cex=0.8))+
scale_colour_discrete(guide="none")+
scale_linetype_discrete(guide="none")+
coord_cartesian(clip="off")
print (myPlot)
dev.off()
fileTarget <- paste0({{funcName}}, "S.tex")
print(fileTarget)
  tikzDevice::tikz( file=fileTarget, # all_of() is a tidyselect helper and does not belong here
standAlone=F,
width=9,
height=6.5)
filteredData = data_double[data_double$functionName == funcName,]
myPlot2 <- ggplot(data=filteredData,
aes( calcHz,maxError, color=calculator,linetype=calculator))+
geom_line()+geom_point() +
scale_y_continuous(trans="log10")+#, breaks = c(1.0,.1,.01,.001,1e-4,1e-5,1e-6,1e-7,1e-8,1e-9,1e-10,1e-11)) +
scale_x_continuous(trans="log10")+#,breaks=c(.1,1,10,100,1000,10000,100000,1000000)) +
ggtitle(funcName)+geom_dl(aes(label=calculator), method=list("maxvar.points", "bumpup",cex=0.8))+
scale_colour_discrete(guide="none")+
scale_linetype_discrete(guide="none")+
coord_cartesian(clip="off")
print (myPlot2)
dev.off()
}
|
/NYUThesisThisDirIsGarbage/PlotAssembly.R
|
no_license
|
orebas/GyroAveraging
|
R
| false | false | 4,879 |
r
|
function(input, output, session){
# Interactive map ---------------------------------------------------------
zips <- reactive({
zipdata[zipdata$state == input$State,]
})
  zip_state <- reactive({
    # switch() replaces the original eleven-branch if/else chain
    switch(input$State,
           "AB" = zipdata_AB, "AZ" = zipdata_AZ, "IL" = zipdata_IL,
           "NC" = zipdata_NC, "NV" = zipdata_NV, "OH" = zipdata_OH,
           "ON" = zipdata_ON, "PA" = zipdata_PA, "QC" = zipdata_QC,
           "SC" = zipdata_SC, "WI" = zipdata_WI)
  })
  weight_state <- reactive({
    switch(input$State2,
           "AB" = AB_weight, "AZ" = AZ_weight, "IL" = IL_weight,
           "NC" = NC_weight, "NV" = NV_weight, "OH" = OH_weight,
           "ON" = ON_weight, "PA" = PA_weight, "QC" = QC_weight,
           "SC" = SC_weight, "WI" = WI_weight)
  })
# Create the map ----------------------------------------------------------
output$map <- renderLeaflet({
leaflet() %>%
addTiles(
urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
) %>%
      setView(lng = -93.85, lat = 37.45, zoom = 4) # initial view shown when the app opens
})
# Restaurant markers ------------------------------------------------------
observe({
req(input$tabs == "Map")
leafletProxy("map") %>% clearPopups()
leafletProxy("map", data = zips()) %>%
clearMarkers() %>%
addMarkers(~longitude, ~latitude) %>%
flyTo(median(as.numeric(zips()$longitude)), median(as.numeric(zips()$latitude)), zoom = 10)
})
# Add textinput ------------------------------------------------------------
search_bar <- eventReactive(input$go, {
zipdata[zipdata$key == input$search, ]
})
  observe({
    req(input$tabs == "Map")
    req(nrow(search_bar()) > 0) # guard against searches that match no restaurant
    leafletProxy("map") %>%
      clearMarkers() %>%
      addMarkers(search_bar()$longitude, search_bar()$latitude) %>%
      flyTo(search_bar()$longitude, search_bar()$latitude, zoom = 15)
  })
# Show a popup at the given location --------------------------------------
  # the three near-identical showZipcodePopup{3,4,5} helpers collapse into one
  # parameterised builder; n_topics controls how many topic ranks are shown
  showZipcodePopup <- function(lat, lon, zipstate, n_topics) {
    selectedZip <- zipstate[(zipstate$latitude == lat) & (zipstate$longitude == lon),]
    topic_lines <- lapply(seq_len(n_topics), function(i) {
      rank <- as.integer(selectedZip[[paste0("topic_", i, "_rank")]])
      tagList(tags$br(), tags$em(paste0("Topic ", i, ": Top"), rank, "%"))
    })
    content <- as.character(tagList(
      tags$h3(as.character(selectedZip$name)),
      tags$strong(selectedZip$city, selectedZip$state),
      tags$br(),
      tags$em("Stars:", selectedZip$stars),
      tags$br(),
      tags$em("Review count:", selectedZip$review_count),
      topic_lines
    ))
    leafletProxy("map") %>% addPopups(lon, lat, content)
  }
# click: shows the information of restaurant ------------------------------
  observe({
    leafletProxy("map") %>% clearPopups()
    event <- input$map_marker_click
    if (is.null(event))
      return()
    isolate({
      n_topics <- if (input$State == "QC") 3
                  else if (input$State %in% c("IL", "NC", "OH", "PA", "WI")) 4
                  else 5 # AB, AZ, NV, ON, SC
      showZipcodePopup(event$lat, event$lng, zip_state(), n_topics)
    })
  })
# Exploration -------------------------------------------------------------------
# dynamic ui --------------------------------------------------------------
  output$ui <- renderUI({
    if (is.na(input$State2)){
      return()
    }
    # per-state topic descriptions; a single radioButtons call replaces the
    # eleven near-identical branches of the original switch
    topic_names <- switch(input$State2,
      "AB" = c("Table Availability", "Food Freshness", "Food Flavor", "Drinks and Environment", "Service and Order Time"),
      "AZ" = c("Table Availability", "Food Flavor", "Bartender Proficiency", "Service and Order Time", "Service Etiquette"),
      "IL" = c("Service and Order Time", "Food Flavor", "Bartender Proficiency", "Table Availability"),
      "NC" = c("Table Availability", "Food Flavor", "Bartender Proficiency", "Service and Order Time"),
      "NV" = c("Table Availability", "Customer Care", "Bartender Proficiency", "Food Flavor", "Shows"),
      "OH" = c("Table Availability", "Food Freshness", "Bartender Proficiency", "Service and Order Time"),
      "ON" = c("Table Availability", "Food Flavor", "Bartender Proficiency", "Service and Order Time", "Service Etiquette"),
      "PA" = c("Table Availability", "Food Flavor", "Bartender Proficiency", "Service and Order Time"),
      "QC" = c("Table Availability", "Bartender Proficiency", "Food Flavor"),
      "SC" = c("Table Availability", "Burger", "Bartender Proficiency", "Service Quality", "Food Flavor"),
      "WI" = c("Table Availability", "Food Flavor", "Bartender Proficiency", "Service Quality"))
    radioButtons("topic",
                 label = em("Topic", style = "text-align:center;color:#FFFFFF;font-size:150%"),
                 choiceNames = lapply(seq_along(topic_names), function(i)
                   HTML(sprintf("<font color='white'>Topic %d: %s</font>", i, topic_names[[i]]))),
                 choiceValues = paste("Topic", seq_along(topic_names)),
                 selected = NULL)
  })
# Suggestions -------------------------------------------------------------
  sug_bars <- eventReactive(input$submit,{
    a <- zipdata[zipdata$key == input$Input2, ]
    # same state dispatch as above; the original repeated the subset for all
    # eleven states, with WI as the fall-through default
    zipstate <- switch(as.character(a$state),
                       "AB" = zipdata_AB, "AZ" = zipdata_AZ, "IL" = zipdata_IL,
                       "NC" = zipdata_NC, "NV" = zipdata_NV, "OH" = zipdata_OH,
                       "ON" = zipdata_ON, "PA" = zipdata_PA, "QC" = zipdata_QC,
                       "SC" = zipdata_SC, zipdata_WI)
    zipstate[zipstate$name == as.character(a$name) & zipstate$postal_code == a$postal_code, ]
  })
output$sug_gen <- renderUI({
strtitle1 <- paste("<font size='6' font color=white>General Suggestions</font>")
    str00 <- paste("<font size='4' font color=white>1. Make the target customer clearer and price accordingly.</font>")
    str01 <- paste("<font size='4' font color=white>2. Add booths to provide more private room for customers.</font>")
    str02 <- paste("<font size='4' font color=white>3. Increase the diversity of music and make the atmosphere more attractive.</font>")
strtitle01 <- paste("<font size='5' font color=#FFFF33;>Menu</font>")
strtitle02 <- paste("<font size='5' font color=#FFFF33;>Service</font>")
strtitle03 <- paste("<font size='5' font color=#FFFF33;>Environment</font>")
    str04 <- paste("<font size='4' font color=white>Provide more craft beer.
                   Add your own specialty, such as signature drinks.
                   Add more special sauces for wings to improve their taste.
                   Make the target customer clearer and price accordingly.
                   Change the fryer oil regularly.
                   </font>")
    str05 <- paste("<font size='4' font color=white>Improve customer service: for example, train the waitstaff and managers to be politer and serve more carefully. Also, hire more professional bartenders.
                   Speed up customer service.
                   </font>")
    str06 <- paste("<font size='4' font color=white>Add tables to decrease waiting time.
                   Add booths to provide more private room for customers.
                   Increase the diversity of music and make the atmosphere more attractive.
                   </font>")
HTML(paste(strtitle1, str00, str01, str02, sep = '<br/>'))
})
  output$sug_spe <- renderUI({
    # an output slot needs a render function, not an eventReactive();
    # the dependency on input$submit already flows through sug_bars()
    strtitle2 <- paste("<font size='6' font color=white>Specific Suggestions</font>")
if (sug_bars()$state == "ON"){
if (sug_bars()$topic_1_rank > 50){
        str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time.
                       Provide some entertainment and seats for waiting customers. Increase table turnover rate.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
        str12 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>Change the fryer oil regularly. Add more special sauces for wings to improve their taste. Add your own specialty, such as signature
                       drinks. Serve fresher food.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hiring more professional bartenders. Provide more craft beer.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Service and Order Time:</font> <font size='4' font color='white'>Hire more staff to speed up service. Offer some gifts or discounts for customers who wait too long.</font>")
}else{str14 <- paste("")}
if (sug_bars()$topic_5_rank > 50){
str15 <- paste("<font size='5' font color=#FFFF33;>Service Etiquette:</font> <font size='4' font color='white'>Improve customer service: such as training the waitress and manager to be politer and serve more carefully. Pay more attention to customers feedback.</font>")
}else{str15 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, str15, sep = '<br/>'))
}else if (sug_bars()$state == "AZ"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Provide some entertainment and seats for waiting customers. Increase table turnover rate.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>The fryer oil needs to be changed regularly. Add more special sauce for wings to improve their taste. Add your own specialty, such as special
drinks. Make fresher food.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders. Provide more craft beer.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Service and Order Time:</font> <font size='4' font color='white'>Hire more staff to speed up service. Offer some gifts or discounts for customers who wait too long.</font>")
}else{str14 <- paste("")}
if (sug_bars()$topic_5_rank > 50){
str15 <- paste("<font size='5' font color=#FFFF33;>Service Etiquette:</font> <font size='4' font color='white'>Improve customer service: train the waitresses and managers to be more polite and to serve more carefully. Pay more attention to customers' feedback.</font>")
}else{str15 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, str15, sep = '<br/>'))
}else if(sug_bars()$state == "IL"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Service and Order Time:</font> <font size='4' font color='white'>Hire more staff to speed up service. Offer some gifts or discounts for customers who wait too long. Improve customer service,
such as training the waitresses and managers to be more polite and serve more carefully. Pay more attention to customers' feedback.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>Add more special sauce for wings to improve their taste. Add your own specialty, such as special drinks. Make fresher food. Make sure the steak is cooked well.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders. Provide more craft beer.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Provide some entertainment and seats for waiting customers.</font>")
}else{str14 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, sep = '<br/>'))
}else if(sug_bars()$state == "NC"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Provide some entertainment and seats for waiting customers.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>The fryer oil needs to be changed regularly. Add more special sauce for wings to improve their taste. Add your own specialty, such as special drinks. Make fresher food.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders. Provide more craft beer.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Service and Order Time:</font> <font size='4' font color='white'>Hire more staff to speed up service. Offer some gifts or discounts for customers who wait too long. Improve customer service, such as training the waitresses and managers to be more polite and serve more carefully. Pay more attention to customers' feedback.</font>")
}else{str14 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, sep = '<br/>'))
}else if(sug_bars()$state == "NV"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Provide some entertainment and seats for waiting customers.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Customer Care:</font> <font size='4' font color='white'>The fryer oil needs to be changed regularly. Add more special sauce for wings to improve their taste. Add your own specialty, such as special drinks. Make fresher food.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders. Provide more craft beer.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>Hire more staff to speed up service.
Offer some gifts or discounts for customers who wait too long.
Improve customer service: train the waitresses and managers to be more polite and serve more carefully.
Pay more attention to customers' feedback.
</font>")
}else{str14 <- paste("")}
if (sug_bars()$topic_5_rank > 50){
str15 <- paste("<font size='5' font color=#FFFF33;>Shows:</font><font size='4' font color='white'>Add more shows in the club at night.</font>")
}else{str15 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, str15, sep = '<br/>'))
}else if(sug_bars()$state == "OH"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Provide some entertainment and seats for waiting customers.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>Make customers feel at home because most of them are tourists.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders. Provide more craft beer.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Service and Order Time:</font> <font size='4' font color='white'>Hire more staff to speed up service.
Offer some gifts or discounts for customers who wait too long.
Improve customer service: such as training the waitress and manager to be politer and serve more carefully.
Pay more attention to customers’ feedback.
</font>")
}else{str14 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, sep = '<br/>'))
}else if(sug_bars()$state == "AB"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Provide some entertainment and seats for waiting customers.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Food Freshness:</font> <font size='4' font color='white'>The fryer oil needs to be changed regularly. Add your own specialty, such as special drinks. Make fresher food. Make sure the steak is cooked well.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>Add more special sauce for wings to improve their taste.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Drinks and Environment:</font> <font size='4' font color='white'>Increase the diversity of music and make the atmosphere more attractive. Control the music volume. Provide more craft beer.</font>")
}else{str14 <- paste("")}
if (sug_bars()$topic_5_rank > 50){
str15 <- paste("<font size='5' font color=#FFFF33;>Service and Order Time:</font> <font size='4' font color='white'>Hire more staff to speed up service. Offer some gifts or discounts for customers who wait too long. Improve customer service, such as training the
waitresses and managers to be more polite and serve more carefully. Pay more attention to customers' feedback.</font>")
}else{str15 <- paste("")}
HTML(paste( strtitle2, str11, str12, str13, str14, str15, sep = '<br/>'))
}else if(sug_bars()$state == "PA"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Provide some entertainment and seats for waiting customers.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>Make customers feel at home because most of them are tourists.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders. Provide more craft beer.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Service and Order Time:</font> <font size='4' font color='white'>The fryer oil needs to be changed regularly. Add more special sauce for wings to improve their taste.
Add your own specialty, such as special drinks. Make fresher food.</font>")
}else{str14 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, sep = '<br/>'))
}else if(sug_bars()$state == "QC"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Provide some entertainment and seats for waiting customers.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders. Provide more craft beer.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>The fryer oil needs to be changed regularly. Add your own specialty,
such as special drinks. Make fresher food. Add more special sauce for wings to improve their taste.</font>")
}else{str13 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, sep = '<br/>'))
}else if(sug_bars()$state == "SC"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Make the tables cleaner.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Burger:</font> <font size='4' font color='white'>Control the quality of the burgers.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Service Quality:</font> <font size='4' font color='white'>Hire more staff to speed up service. Offer some gifts or discounts for customers who wait too long. Improve customer service,
such as training the waitresses and managers to be more polite and serve more carefully. Pay more attention to customers' feedback.</font>")
}else{str14 <- paste("")}
if (sug_bars()$topic_5_rank > 50){
str15 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>The fryer oil needs to be changed regularly. Add your own specialty, such as special drinks. Make fresher food. Add more special sauce for wings to improve their taste.</font>")
}else{str15 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, str15, sep = '<br/>'))
}else if(sug_bars()$state == "WI"){
if (sug_bars()$topic_1_rank > 50){
str11 <- paste("<font size='5' font color=#FFFF33;>Table Availability:</font> <font size='4' font color='white'>Add tables to decrease waiting time. Increase table turnover rate. Provide some entertainment and seats for waiting customers.</font>")
}else{str11 <- paste("")}
if (sug_bars()$topic_2_rank > 50){
str12 <- paste("<font size='5' font color=#FFFF33;>Food Flavor:</font> <font size='4' font color='white'>The fryer oil needs to be changed regularly. Add your own specialty, such as special drinks. Make fresher food. Add more special sauce for wings to improve their taste.</font>")
}else{str12 <- paste("")}
if (sug_bars()$topic_3_rank > 50){
str13 <- paste("<font size='5' font color=#FFFF33;>Bartender Proficiency:</font> <font size='4' font color='white'>Hire more professional bartenders. Provide more craft beer.</font>")
}else{str13 <- paste("")}
if (sug_bars()$topic_4_rank > 50){
str14 <- paste("<font size='5' font color=#FFFF33;>Service Quality:</font> <font size='4' font color='white'>Hire more staff to speed up service. Offer some gifts or discounts for customers who wait too long. Improve customer service, such as training the waitresses and managers
to be more polite and serve more carefully. Pay more attention to customers' feedback.</font>")
}else{str14 <- paste("")}
HTML(paste(strtitle2, str11, str12, str13, str14, sep = '<br/>'))
}
})
# Plot --------------------------------------------------------------------
observe({
event <- input$topic
if(is.null(event)){
return()
}
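# Bar chart of the selected topic's top words and their weights for the chosen state.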
output$weights <- renderPlot({
# Pull the word/weight columns for the selected topic
# (columns are named topic_<k>words / topic_<k>weight).
idx <- as.integer(sub("Topic ", "", input$topic))
df <- data.frame(words = weight_state()[[paste0("topic_", idx, "words")]],
weights = weight_state()[[paste0("topic_", idx, "weight")]])
ggplot(df, aes(x = words, y = weights)) +
geom_bar(stat = "identity", color = "steelblue", fill = "steelblue", width = 0.5) +
theme(panel.background = element_rect(fill = 'black', colour = "white"),
axis.text.x = element_text(size = 18, color = "white"),
axis.text.y = element_text(size = 18, color = "white"),
axis.title.y = element_text(size = 22, color = "white"),
axis.title.x = element_text(size = 22, color = "white"),
plot.background = element_rect(colour = "black", fill = "black")) +
xlab('Words') + ylab('Weights')
},
height = 500, width = 700)
})
}
|
/Shiny/Yelp_data_analysis/server.R
|
no_license
|
zli873/STAT-628-Module3
|
R
| false | false | 43,233 |
r
|
#SVM
#0.31
rm(list = ls())
setwd('E:\\Batch46 - R\\Cute03\\Again')
library(tidyverse)
library(caret)
library(DMwR)
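# DMwR supplies centralImputation(); caret provides the partitioning,
# dummy encoding, and confusion-matrix helpers used below.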
test.id <- readRDS('test_id.rds')
bank <- read.csv('train_new.csv', na.strings = '')
test <- read.csv('test_new.csv', na.strings = '')
str(bank)
bank$transaction_id <- NULL
test$transaction_id <- NULL
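# transaction_id is only a row identifier; the test ids were saved separately
# (test.id) so the submission file can still be assembled at the end.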
# Convert the categorical columns (cat19 through cat42, i.e. columns 26 onward)
# and the target to factors
factor_cols <- names(bank)[26:ncol(bank)]
df <- bank %>%
dplyr::select(dplyr::all_of(factor_cols)) %>%
lapply(factor) %>%
as.data.frame()
bankdata <- bank %>% dplyr::select(-dplyr::all_of(factor_cols)) %>% cbind(df)
#str(bankdata)
factor_cols_test <- names(test)[26:ncol(test)]
df_test <- test %>%
dplyr::select(dplyr::all_of(factor_cols_test)) %>%
lapply(factor) %>%
as.data.frame()
test <- test %>% dplyr::select(-dplyr::all_of(factor_cols_test)) %>% cbind(df_test)
#str(test)
#--------------------------------------------------------------#
#decreasing bank data rows
set.seed(125)
random.rows <- createDataPartition(bankdata$target, p = 0.05, list = F)
bank.new <- bankdata[random.rows,]
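# A 5% stratified sample (createDataPartition stratifies on target) keeps the
# class ratio while making SVM training tractable.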
#--------------------------------------------------------------#
#Train-Val split
set.seed(125)
trainrows <- createDataPartition(bank.new$target, p = 0.7, list = F)
train <- bank.new[trainrows, ]
val <- bank.new[-trainrows,]
#Imputation
train <- centralImputation(train)
val <- centralImputation(val)
test <- centralImputation(test)
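# centralImputation() fills NAs with each column's median (numeric) or mode (factor);
# note that val and test are imputed with their own statistics here, not train's.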
#NearZeroVar cols
zeroCols <- nearZeroVar(train)
train <- train[,-zeroCols]
val <- val[,-zeroCols]
test <- test[,-zeroCols]
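# Near-zero-variance columns are identified on train only, and the same columns
# are dropped from val/test so all three sets keep an identical schema.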
# Superseded below: x.val/y.val are rebuilt from the dummy-encoded data.
# x.val <- val %>% select(-target)
# y.val <- val %>% select(target)
#---------------------------------------------------------------#
### create dummies for factor variables
dummies <- dummyVars(target ~ ., data = train)
x.train <- predict(dummies, newdata = train)
y.train <- train$target
x.val <- predict(dummies, newdata = val)
y.val <- val$target
# dummyVars() was fit with `target` on the formula's LHS, so predict() expects
# that column to exist: add a placeholder, encode, then drop it again.
test$target <- 0
test.dummy <- predict(dummies, newdata = test)
test$target <- NULL
#---------------------------------------------------------------#
library(e1071)
model.svm <- svm(x = x.train, y = y.train, type = "C-classification",
kernel = "radial", cost = 10, gamma = 0.1)
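# Optional sketch (not part of the original run): e1071's tune.svm() could
# replace the hand-picked cost/gamma above; the ranges below are illustrative
# assumptions, not tuned values.
# tuned <- tune.svm(x.train, y.train, gamma = 10^(-2:0), cost = 10^(0:2))
# summary(tuned)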
summary(model.svm)
pred_train <- predict(model.svm, x.train) # x.train holds all the input variables
pred_val <- predict(model.svm, x.val)
# Build confusion matrices (caret expects predictions first, then the reference labels)
confusionMatrix(pred_train, y.train, positive = "1")
confusionMatrix(pred_val, y.val, positive = "1")
pred.test <- predict(model.svm, test.dummy)
final.svm <- cbind.data.frame('transaction_id'=test.id, 'target'=pred.test)
write.csv(final.svm, file = 'final_svm.csv', row.names = F)
|
/Predict_Fradulent_Transactions_Classification/Code/Smoat with 9_1/SVM.R
|
no_license
|
pranakum/DSProjects
|
R
| false | false | 2,555 |
r
|
# plot3.R
# Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad)
# variable, which of these four sources have seen decreases in emissions from 1999-2008 for
# Baltimore City? Which have seen increases in emissions from 1999-2008? Use the ggplot2
# plotting system to make a plot answer this question.
# set working directory
setwd("D:/Data Science John Hopkins/Exploratory Data Analysis/Project2")
# Source and Load data sets for independent run of the code
# activity monitoring data
get.data.project <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(get.data.project, destfile="exdata_data_NEI_data.zip", method="auto")
# make sure the site is live; if it is not, stop() terminates the program
check.url <- file(get.data.project,"r")
if (!isOpen(check.url)) {
  stop(paste("There's a problem with the data:",geterrmessage()))
}
close(check.url) # release the connection once the check has passed
# zipfile.data is the variable to keep the *.zip file
zipfile.data = "exdata_data_NEI_data.zip"
# unzip the downloaded archive when it is present in the working directory
if(file.exists(zipfile.data)) {
  unzip(zipfile=zipfile.data)
}
path_rf <- file.path("D:/Data Science John Hopkins/Exploratory Data Analysis/Project2" , "exdata_data_NEI_data")
files<-list.files(path_rf, recursive=TRUE)
files
# Read data files
# read national emissions data
NEI <- readRDS("summarySCC_PM25.rds")
#read source code classification data
SCC <- readRDS("Source_Classification_Code.rds")
# Store plot as .PNG file
png("plot3.png", width=number.add.width, height=number.add.height)
require(ggplot2)
require(dplyr)
# Group total NEI emissions per year:
baltcitymary.emissions.byyear<-summarise(group_by(filter(NEI, fips == "24510"), year,type), Emissions=sum(Emissions))
# clrs <- c("red", "green", "blue", "yellow")
ggplot(baltcitymary.emissions.byyear, aes(x=factor(year), y=Emissions, fill=type,label = round(Emissions,2))) +
geom_bar(stat="identity") +
#geom_bar(position = 'dodge')+
facet_grid(. ~ type) +
xlab("year") +
ylab(expression("total PM"[2.5]*" emission in tons")) +
ggtitle(expression("PM"[2.5]*paste(" emissions in Baltimore ",
"City by various source types", sep="")))+
geom_label(aes(fill = type), colour = "white", fontface = "bold")
dev.off()
|
/plot3.R
|
no_license
|
paragsengupta/Exploratory_Analysis_Week4_Project2
|
R
| false | false | 2,513 |
r
|
context("slowrake")
data("dog_pubs")
test_that("slowrake works for non-atomic, non-empty char vectors", {
out <- slowrake(dog_pubs$abstract[1:10])
expect_equal(length(out), 10)
})
test_that("slowrake returns a score when there is only one word in txt", {
out <- slowrake("dog")
expect_true(is.numeric(out[[1]]$score))
})
test_that("slowrake works for txt without alpha chars", {
out <- slowrake("")
expect_true(is.na(unlist(out)))
})
test_that("slowrake works when all txt is removed based on POS tags", {
out <- slowrake("walking")
expect_true(is.na(unlist(out)))
})
test_that("slowrake removes stop words", {
out <- slowrake("dogs", stop_words = "dogs")
expect_true(is.na(unlist(out)))
})
test_that("pos stopping works as expected", {
out1 <- slowrake("dogs are awesome", stop_pos = "NNS")
out2 <- slowrake("dogs found food", stop_pos = NULL)
expect_true(
nrow(out1[[1]]) == 1 && nrow(out2[[1]]) == 1
)
})
test_that("word_min_char filtering works as expected", {
out <- slowrake("dogs", word_min_char = 5)
expect_true(is.na(unlist(out)))
})
test_that("phrase_delims works as expected", {
out <- slowrake(
"dogs are great, arn't they? at least i think they are.",
stop_words = NULL, stop_pos = NULL, phrase_delims = "\\?"
)
expect_true(grepl(",", out[[1]][1]))
})
|
/tests/testthat/test-slowrake.R
|
no_license
|
Rmadillo/slowraker
|
R
| false | false | 1,323 |
r
|
context("slowrake")
data("dog_pubs")
test_that("slowrake works for non-atomic, non-empty char vectors", {
out <- slowrake(dog_pubs$abstract[1:10])
expect_equal(length(out), 10)
})
test_that("slowrake returns a score when there is only one word in txt", {
out <- slowrake("dog")
expect_true(is.numeric(out[[1]]$score))
})
test_that("slowrake works for txt without alpha chars", {
out <- slowrake("")
expect_true(is.na(unlist(out)))
})
test_that("slowrake works when all txt is removed based on POS tags", {
out <- slowrake("walking")
expect_true(is.na(unlist(out)))
})
test_that("slowrake removes stop words", {
out <- slowrake("dogs", stop_words = "dogs")
expect_true(is.na(unlist(out)))
})
test_that("pos stopping works as expected", {
out1 <- slowrake("dogs are awesome", stop_pos = "NNS")
out2 <- slowrake("dogs found food", stop_pos = NULL)
expect_true(
nrow(out2[[1]]) == 1 && nrow(out2[[1]]) == 1
)
})
test_that("word_min_char filtering works as expected", {
out <- slowrake("dogs", word_min_char = 5)
expect_true(is.na(unlist(out)))
})
test_that("phrase_delims works as expected", {
out <- slowrake(
"dogs are great, arn't they? at least i think they are.",
stop_words = NULL, stop_pos = NULL, phrase_delims = "\\?"
)
expect_true(grepl(",", out[[1]][1]))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tictoc.R
\name{tic}
\alias{tic}
\alias{toc}
\alias{resettictoc}
\title{Stopwatch to measure R Timings}
\usage{
tic(id = 1, quiet = TRUE)
toc(id = 1, msg = "Elapsed time:", units = TRUE, signif = 3, quiet = FALSE)
resettictoc(which = "both")
}
\arguments{
\item{id}{Define ID if multiple tic() & toc() are being used}
\item{quiet}{Boolean. Quiet messages?}
\item{msg}{Character. Custom message shown}
\item{units}{Boolean. Do you want a nicely formatted time unit?
If not, the elapsed seconds are returned as numerical values}
\item{signif}{Integer. Significant digits}
\item{which}{Character. Select: both, tic, toc}
}
\value{
\code{toc} returns an (invisible) list containing the timestamps
\code{tic} and \code{toc}, \code{time} in seconds and the message \code{msg}.
}
\description{
Start a stopwatch.
Stop a stopwatch.
Reset all tic and toc values in your environment.
}
\examples{
# Basic use (global stopwatch)
tic()
Sys.sleep(0.1)
toc()
# Multiple tic tocs
tic(id = "two", quiet = FALSE)
Sys.sleep(0.2)
toc(id = "two")
# Global is still working (id = 1)
toc(msg = "The function finished its work in")
}
\seealso{
Other Tools:
\code{\link{autoline}()},
\code{\link{bindfiles}()},
\code{\link{bring_api}()},
\code{\link{db_download}()},
\code{\link{db_upload}()},
\code{\link{export_plot}()},
\code{\link{export_results}()},
\code{\link{get_credentials}()},
\code{\link{h2o_predict_API}()},
\code{\link{h2o_predict_MOJO}()},
\code{\link{h2o_predict_binary}()},
\code{\link{h2o_predict_model}()},
\code{\link{h2o_selectmodel}()},
\code{\link{h2o_update}()},
\code{\link{haveInternet}()},
\code{\link{image_metadata}()},
\code{\link{importxlsx}()},
\code{\link{ip_country}()},
\code{\link{json2vector}()},
\code{\link{listfiles}()},
\code{\link{mailSend}()},
\code{\link{msplit}()},
\code{\link{myip}()},
\code{\link{pass}()},
\code{\link{quiet}()},
\code{\link{read.file}()},
\code{\link{statusbar}()},
\code{\link{try_require}()},
\code{\link{updateLares}()},
\code{\link{zerovar}()}
}
\concept{Time}
\concept{Tools}
|
/man/tic.Rd
|
no_license
|
vahidnouri/lares
|
R
| false | true | 2,098 |
rd
|
# DreamTK App
#v0.8.3
# BER updates
# GenTox updates
# Preliminary loading -----------------------------------------------------
source("./_setup_.R");
#source("./__test_environment.R");
# Main App - R Shiny ------------------------------------------------------
# Support functions -------------------------------------------------------
source("./app/app_support.R");
# Web UI ------------------------------------------------------------------
source("./app/app_ui.R");
# Server function ---------------------------------------------------------
source("./app/app_server.R");
# Start app ---------------------------------------------------------------
app <- shinyApp(ui = ui, server = server);
|
/app.R
|
no_license
|
NongCT230/DREAMTK.0.8.3
|
R
| false | false | 703 |
r
|
library(tidyverse)
library(lme4)
library(lattice)
library(emmeans)
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# # BiocManager::install("ComplexHeatmap")
library(ComplexHeatmap)
# BiocManager::install("optimx")
library(optimx)
# Set output_dir as ~working_dir/outputs/
output_dir = "output/"
# Make table with counts (assumes `celldata` is already loaded in the session)
# columns needed: MouseID, ROI_name, domain2, clustername2, count
counts_table = data.frame()
n = 1
for (r in unique(celldata$ROI_name3)){
m = unique(celldata[which(celldata$ROI_name3 == r), "MouseID"])
for (d in unique(celldata$domain2)){
for (cl in unique(celldata$clustername2)){
cd = celldata[which( celldata$ROI_name3 == r &
celldata$domain2 == d &
celldata$clustername2 == cl),]
counts_table[n, c("MouseID", "ROI_name", "domain2", "clustername2")] = c(m,r,d,cl)
counts_table[n, "count"] = nrow(cd)
n = n+1
}
}
}
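# Equivalent sketch of the loops above (assuming dplyr + tidyr; illustrative
# only, not run here):
# counts_table2 <- celldata %>%
#   count(MouseID, ROI_name3, domain2, clustername2, name = "count") %>%
#   tidyr::complete(tidyr::nesting(MouseID, ROI_name3), domain2, clustername2,
#                   fill = list(count = 0))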
counts_table[1:10, 1:5]
dim(counts_table)
sum(counts_table$count)
# clean up table (remove "n/a" assignments for domain and cell type "Unclassified"):
counts_table = counts_table[which(counts_table$domain2 != "n/a"),]
counts_table = counts_table[which(counts_table$clustername2 != "Unclassified"),]
dim(counts_table)
sum(counts_table$count)
filenm = paste(output_dir, "counts_table", ".csv", sep = "")
write.csv(counts_table, filenm)
# dat <- read_csv(filenm)[,-1]
dat <- counts_table
names(dat) <- c("mouse","roi", "domain", "celltype", "count")
dat$treatment <- ifelse(grepl("_MRTX", dat$roi), "MRTX","Vehicle")
################################################################
## fig 3c
################################################################
# Simplest approach - pool counts for rois within mice and mice within treatment.
# Does the way counts split differently between domains for a celltype depend on the treatment
# Exploratory just to get a feel for things. Don't use.
fit0 <- glm(count ~ (treatment+celltype+domain)^2 , data=dat, family="poisson")
fit <- glm(count ~ (treatment+celltype+domain)^2 + treatment:celltype:domain, data=dat, family="poisson")
anova(fit, fit0, test="Chisq")
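# Formula note: (treatment+celltype+domain)^2 expands to the three main
# effects plus all pairwise interactions, i.e. treatment + celltype + domain +
# treatment:celltype + treatment:domain + celltype:domain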
# Fit mouse and domain-within-mouse as having a random celltype effect.
# Not used - sanity check for convergence
if (FALSE) {
fit0 <- glm(count ~ (treatment+celltype+domain)^2 +mouse:celltype+mouse:domain, data=dat, family="poisson")
fit <- glm(count ~ (treatment+celltype+domain)^2 + treatment:celltype:domain + mouse + mouse:celltype + mouse:domain, data=dat, family="poisson")
anova(fit, fit0, test="Chisq")
fit0 <- glm(count ~ (treatment+celltype+domain)^2 +mouse + mouse:roi + mouse:roi:domain + mouse:roi:celltype, data=dat, family="poisson")
fit <- glm(count ~ (treatment+celltype+domain)^2 + treatment:celltype:domain + mouse + mouse:roi + mouse:roi:domain + mouse:roi:celltype, data=dat, family="poisson")
anova(fit, fit0, test="Chisq")
}
## Turn counts into proportions and pretend normal
## Again, not used; sometimes quicker and gives more rigorous p-values than the poisson/multinomial case
if (FALSE) {
mouse_dat <- dat %>%
group_by(mouse) %>%
mutate(freq=count/sum(count))
fit0 <- lmer(count ~ (treatment+celltype+domain)^2 +(celltype+domain|mouse)+(celltype+domain|mouse:roi),
dat=mouse_dat)
fit <- lmer(count ~ (treatment+celltype+domain)^2 + treatment:celltype:domain + (celltype+domain|mouse)+(celltype+domain|mouse:roi),
dat=mouse_dat)
}
# More complicated - There's a treatment effect, around which
# mouse-within-treatment varies, and around that mouse's expected value
# the ROIs will vary
if (file.exists(fname <- "Stats_input/glmer_null_3c.rds")) {
fit0 <- readRDS(file=fname)
} else {
fit0 <- glmer(count ~ (treatment+celltype+domain)^2 +(celltype+domain|mouse)+(celltype+domain|mouse:roi),
data=dat,
family="poisson",
control = glmerControl(optimizer = "nloptwrap")
)
saveRDS(fit0, file=fname)
}
################################################################
#### The main approach for the whole dataset
#### METHODS:
#### We use the `lme4` [@lme4] package within R [@R] to fit a mixed-effects
#### model to account for fixed effects of domain, celltype and treatment,
#### whilst allowing for per-mouse and ROI-within-mouse variation in the distribution
#### of cells between celltypes. We use a poisson model as a surrogate to fit the
#### multinomial distribution of cell counts across the celltypes. Individual
#### comparisons are carried out using a Wald test.
#### [lme4] Douglas Bates, Martin Maechler, Ben Bolker, Steve Walker (2015). Fitting Linear Mixed-Effects Models Using lme4. Journal of Statistical Software, 67(1), 1-48. doi:10.18637/jss.v067.i01.
#### [R] R Core Team (2020). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
################################################################
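# Minimal sketch of the poisson-surrogate idea on toy data (illustrative only;
# the data frame and its columns are made up):
# toy <- data.frame(celltype = gl(3, 4), treatment = gl(2, 2, 12),
#                   count = rpois(12, lambda = 5))
# fit_toy <- glm(count ~ treatment * celltype, family = "poisson", data = toy)
# # the treatment:celltype terms test whether the split of counts across
# # celltypes differs by treatment, mimicking a multinomial comparison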
## Supplementary Figure 4f
################################################################
# Takes a while to run, loading of .rds file is recommended.
if (file.exists(fname <- "Stats_input/glmer_full_3c.rds")) {
fit <- readRDS(file=fname)
} else{
fit <- glmer(count ~ (treatment+celltype+domain)^2 +treatment:celltype:domain+(celltype+domain|mouse)+(celltype+domain|mouse:roi),
data=dat,
family="poisson",
control = glmerControl(optimizer = "nloptwrap")
)
saveRDS(fit, file=fname)
}
em <- emmeans(fit, pairwise~treatment|domain+celltype)
df <- as.data.frame(em$contrasts)
pdf(file="output/treatment_per_type_by_domain.pdf", width=9, height=6)
ggplot(df, aes(x=celltype, y=estimate, fill=p.value<0.05)) +
geom_col() +
coord_flip() +
facet_wrap(~domain) + labs(fill="P<0.05",y="Log(MRTX/Vehicle)" , x="") +
theme_bw(base_size=18)
dev.off()
################################################################
## Supplementary Figure 6a
################################################################
# p-values for Supplementary Figure 6a, top row plots, stats for T cells in whole tissue
em <- emmeans(fit, pairwise~treatment|celltype)
df <- as.data.frame(em$contrasts)
df[which(df$celltype == "CD4 T cells"),]
df[which(df$celltype == "Regulatory T cells"),]
df[which(df$celltype == "CD8 T cells"),]
# p-values for Supplementary Figure 6a, bottom row plots, stats for T cells in the tumour domain
em <- emmeans(fit, pairwise~treatment|domain+celltype)
df <- as.data.frame(em$contrasts)
df[which(df$domain == "Tumour" & df$celltype == "CD4 T cells"),]
df[which(df$domain == "Tumour" & df$celltype == "Regulatory T cells"),]
df[which(df$domain == "Tumour" & df$celltype == "CD8 T cells"),]
################################################################
## Stats related to Figure 3d
################################################################
f3d <- subset(dat, domain=="Tumour")
## The 'correct' way is to say that mice vary around their expected treatment group:
if (file.exists(fname <- "Stats_input/glmer_null_3d.rds")) {
fit0 <- readRDS(file=fname)
} else {
fit0 <- glmer(count ~ treatment+celltype + (celltype|mouse) + (celltype|mouse:roi),
data=f3d,
family="poisson",
control = glmerControl(optimizer = "nloptwrap")
# control = glmerControl(optimizer ='optimx', optCtrl=list(method='nlminb'))
)
saveRDS(fit0, file=fname)
}
if (file.exists(fname <- "Stats_input/glmer_full_3d.rds")) {
fit <- readRDS(file=fname)
} else {
fit <- glmer(count ~ treatment+celltype +treatment:celltype+(celltype|mouse)+(celltype|mouse:roi),
data=f3d,
family="poisson",
control = glmerControl(optimizer ='optimx', optCtrl=list(method='nlminb'))
)
saveRDS(fit, file=fname)
}
em <- emmeans(fit, pairwise~treatment|celltype)
df <- as.data.frame(em$contrasts)
ggplot(df, aes(x=celltype, y=estimate, fill=p.value<0.05)) +
geom_col() +
coord_flip()
##
|
/lme_counts.R
|
permissive
|
SongXiaoYi/vanMaldegem_Valand_2021
|
R
| false | false | 8,141 |
r
|
# barplot of nations that have won the world cup
library(dplyr)
library(htmltab)
url2<-"https://en.wikipedia.org/wiki/List_of_FIFA_World_Cup_winners"
champs<-htmltab(url2, which=1, rm_nodata_cols=F)
champs
barplot(as.numeric(champs$Titles), main = "FIFA Winners",xlab = "Country", ylab = "No. of Wins", names = champs$Team, col = "darkred", las=1)
|
/DataScraping/fifa.r
|
no_license
|
rebeccasoren/R
|
R
| false | false | 352 |
r
|
#' Deprecated functions
#'
#' These functions have been renamed and deprecated in \pkg{afex}:
#' \code{aov.car()} (use \code{\link{aov_car}()}),
#' \code{ez.glm()} (use \code{\link{aov_ez}()}),
#' \code{aov4()} (use \code{\link{aov_4}()}).
#' @rdname deprecated
#' @keywords internal
#' @aliases afex-deprecated
#' @param ... arguments passed from the old functions of the style
#' \code{foo.bar()} to the new functions \code{foo_bar()}
#' @export
aov.car <- function(...) {
.Deprecated("aov_car", "afex", "aov.car was renamed to aov_car and is now deprecated.")
aov_car(...)
}
#' @rdname deprecated
#' @export
ez.glm <- function(...) {
.Deprecated("aov_ez", "afex", "ez.glm was renamed to aov_ez and is now deprecated.")
aov_ez(...)
}
#' @rdname deprecated
#' @export
aov4 <- function(...) {
.Deprecated("aov_4", "afex", "aov4 was renamed to aov_4 and is now deprecated.")
aov_4(...)
}
warn_deprecated_arg <- function(name, instead) {
warning(gettextf("'%s' is deprecated; use '%s' instead", name, instead),
call.=FALSE, domain=NA)
}
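# Illustrative use inside a function with a renamed argument (the argument
# names below are hypothetical, not part of afex):
# foo_bar <- function(check_args = TRUE, check.args) {
#   if (!missing(check.args)) {
#     warn_deprecated_arg("check.args", "check_args")
#     check_args <- check.args
#   }
#   check_args
# }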
|
/R/deprecated.R
|
no_license
|
jonathon-love/afex
|
R
| false | false | 1,072 |
r
|
#' Deprecated functions
#'
#' These functions have been renamed and deprecated in \pkg{afex}:
#' \code{aov.car()} (use \code{\link{aov_car}()}),
#' \code{ez.glm()} (use \code{\link{aov_ez}()}),
#' \code{aov4()} (use \code{\link{aov_4}()}).
#' @rdname deprecated
#' @keywords internal
#' @aliases afex-deprecated
#' @param ... arguments passed from the old functions of the style
#' \code{foo.bar()} to the new functions \code{foo_bar()}
#' @export
aov.car <- function(...) {
.Deprecated("aov_car", "afex", "aov.car was renamed to aov_car and is now deprecated.")
aov_car(...)
}
#' @rdname deprecated
#' @export
ez.glm <- function(...) {
.Deprecated("aov_ez", "afex", "ez.glm was renamed to aov_ez and is now deprecated.")
aov_ez(...)
}
#' @rdname deprecated
#' @export
aov4 <- function(...) {
.Deprecated("aov_4", "afex", "aov4 was renamed to aov_4 and is now deprecated.")
aov_4(...)
}
warn_deprecated_arg <- function(name, instead) {
warning(gettextf("'%s' is deprecated; use '%s' instead", name, instead),
call.=FALSE, domain=NA)
}
|
context("mungepiece reference class")
test_that("it correctly initializes without prediction arguments", {
mb <- mungebit(function(x) x)
expect_equal(as.character(class(mungepiece(mb, list()))), "mungepiece")
})
test_that("it correctly initializes with prediction arguments", {
mb <- mungebit(function(x) x)
expect_equal(as.character(class(mungepiece(mb, list(), list()))), "mungepiece")
})
test_that("run methods correctly executes on a trivial case", {
mb <- mungebit(column_transformation(function(x) 2 * x))
mp <- mungepiece(mb, 1)
plane <- mungeplane(iris)
lapply(seq_len(2), function(x) mp$run(plane))
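  # two runs (one to train, one to predict), each doubling column 1, hence * 4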
expect_equal(plane$data[[1]], iris[[1]] * 4)
})
test_that("it does nothing if a mungepiece is already given", {
mp <- mungepiece(mungebit(function(x) x))
expect_equal(parse_mungepiece(mp), mp)
})
|
/tests/testthat/test-mungepiece.r
|
permissive
|
robertzk/mungebits
|
R
| false | false | 831 |
r
|
context("mungepiece reference class")
test_that("it correctly initializes without prediction arguments", {
mb <- mungebit(function(x) x)
expect_equal(as.character(class(mungepiece(mb, list()))), "mungepiece")
})
test_that("it correctly initializes with prediction arguments", {
mb <- mungebit(function(x) x)
expect_equal(as.character(class(mungepiece(mb, list(), list()))), "mungepiece")
})
test_that("run methods correctly executes on a trivial case", {
mb <- mungebit(column_transformation(function(x) 2 * x))
mp <- mungepiece(mb, 1)
plane <- mungeplane(iris)
lapply(seq_len(2), function(x) mp$run(plane))
expect_equal(plane$data[[1]], iris[[1]] * 4)
})
test_that("it does nothing if a mungepiece is already given", {
mp <- mungepiece(mungebit(function(x) x))
expect_equal(parse_mungepiece(mp), mp)
})
|
#shortest path
ShortestPath <- function(traffic,N,target_x,target_y){
shortest_path = matrix(N*N*10,N,N)
shortest_path[target_x,target_y]=0
identical_count = 100
while(identical_count>0){
shortest_path_temp = shortest_path
for(x in 1:N){
for(y in 1:N){
if(x==target_x & y==target_y){
shortest_path[x,y] = 0
}
else{
pos_paths = c()
if(x!=1){
pos_paths = append(pos_paths,traffic[x-1,y] + shortest_path_temp[x-1,y])
}
if(x!=N){
pos_paths = append(pos_paths,traffic[x+1,y] + shortest_path_temp[x+1,y])
}
if(y!=1){
pos_paths = append(pos_paths,traffic[x,y-1] + shortest_path_temp[x,y-1])
}
if(y!=N){
pos_paths = append(pos_paths,traffic[x,y+1] + shortest_path_temp[x,y+1])
}
shortest_path[x,y] = min(pos_paths)
}
}
}
#check if shortest path has not changed for last 100 iterations
if(identical(shortest_path,shortest_path_temp)){
identical_count = identical_count - 1
}
else identical_count = 100
}
return(shortest_path)
}
#potential actions
PotentialMoves <- function(x,y){
potential_moves = c()
#check boundaries of the map to determine potential moves
if(x!=1){
potential_moves = append(potential_moves,1)
}
if(x!=N){
potential_moves = append(potential_moves,2)
}
if(y!=1){
potential_moves = append(potential_moves,3)
}
if(y!=N){
potential_moves = append(potential_moves,4)
}
return(potential_moves)
}
#update position
UpdateLoc <- function(x,y,move){
#update position based on the given move
#1: up
#2: down
#3: left
#4: right
if(move==1){
x = x-1
}
else if(move==2){
x = x+1
}
else if(move==3){
y = y-1
}
else if(move==4){
y=y+1
}
return(c(x,y))
}
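# e.g. UpdateLoc(5, 5, 1) returns c(4, 5): moving up decreases the row index;
# UpdateLoc(5, 5, 4) returns c(5, 6): moving right increases the column index.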
#maximum potential Q value
MaxPotentialQ <- function(Q,potential_moves,x,y){
#select maximizing Q value action
max_q = max(Q[x,y,potential_moves])
max_q_loc = Reduce(intersect,list(which(Q[x,y,] == max_q),potential_moves))[1]
Q_val_action = c(max_q_loc,max_q)
return(Q_val_action)
}
#if only right turn is allowed, update potential moves
UpdatePotentialMoves <-function(potential_moves,move){
#if previous move was going up, then going left is forbidden (if 1, forbid 3)
#if previous move was going down, then going right is forbidden (if 2, forbid 4)
#if previous move was going left, then going down is forbidden (if 3, forbid 2)
#if previous move was going right, then going up is forbidden (if 4, forbid 1)
if(move==1){
return(potential_moves[potential_moves!=3])
} else if(move==2){
return(potential_moves[potential_moves!=4])
} else if(move==3){
return(potential_moves[potential_moves!=2])
} else if(move==4){
return(potential_moves[potential_moves!=1])
} else return(potential_moves)
}
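# e.g. UpdatePotentialMoves(c(1, 2, 3, 4), 1) returns c(1, 2, 4):
# after moving up, the left turn (3) is no longer allowed.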
#Q-learning
QLearning <- function(Q,epsilon,demand,shortest_path,only_right){
length_ratio = c()
for(i in 1:demand_size){
#epsilon decay
eps = epsilon*((1/i)^(0.15))
x=demand[i,1]
y=demand[i,2]
old_move = 0
shortest_way = shortest_path[x,y]
total_traffic = 0
#loop until destination
while(x!=target_x | y!=target_y){
Q_temp = Q
#identify potential moves
potential_moves = PotentialMoves(x,y)
#update if only right
if(only_right){
potential_moves = UpdatePotentialMoves(potential_moves,old_move)
}
#explore
if(eps > runif(1)){
      #pick random action; index through length() because sample(x, 1) on a
      #length-one numeric x would draw from 1:x rather than return x
      new_move = potential_moves[sample(length(potential_moves), 1)]
}
#exploit
else{
Q_val_action = MaxPotentialQ(Q_temp,potential_moves,x,y)
new_move = Q_val_action[1]
}
new_loc = UpdateLoc(x,y,new_move)
new_x = new_loc[1]
new_y = new_loc[2]
potential_moves_next = PotentialMoves(new_x,new_y)
if(only_right){
potential_moves_next = UpdatePotentialMoves(potential_moves_next,new_move)
}
Q_val_action_next = MaxPotentialQ(Q_temp,potential_moves_next,new_x,new_y)
      #learning-rate decay
learning_rate = (1/i)^(0.15)
Q[x,y,new_move] = (1-learning_rate)*Q_temp[x,y,new_move] + learning_rate*(-traffic[new_x,new_y]+Q_val_action_next[2])
total_traffic = total_traffic + traffic[new_x,new_y]
x = new_x
y = new_y
old_move = new_move
}
length_ratio = append(length_ratio,total_traffic/shortest_way)
}
return(length_ratio)
}
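# Decay sketch: with epsilon = 0.2, eps = 0.2 * (1/i)^0.15 is roughly 0.20 at
# i = 1, 0.10 at i = 100, and 0.05 at i = 10000, so exploration fades slowly;
# learning_rate = (1/i)^0.15 decays at the same rate.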
GenerateOptimisticQ <- function(q_opt){
for(i in 1:N){
for(j in 1:N){
if(i>target_x){
Q[i,j,1]=q_opt
}
if(i<target_x){
Q[i,j,2]=q_opt
}
if(j>target_y){
Q[i,j,3]=q_opt
}
if(j<target_y){
Q[i,j,4]=q_opt
}
if(i==target_x & j==target_y){
Q[i,j,]=q_opt
}
}
}
return(Q)
}
PickBestMoveHeuristic<-function(x,y,potential_moves,revealed_traffic,prev_move){
  #find actions that take the car closer to the target
  if(x>=target_x & y>=target_y){
    smart_moves = intersect(potential_moves, c(1,3))
  } else if(x>=target_x & y<=target_y){
    smart_moves = intersect(potential_moves, c(1,4))
  } else if(x<=target_x & y>=target_y){
    smart_moves = intersect(potential_moves, c(2,3))
  } else if(x<=target_x & y<=target_y){
    smart_moves = intersect(potential_moves, c(2,4))
  }
#remove going back actions
if(prev_move==1){
smart_moves = smart_moves[smart_moves!=2]
} else if(prev_move==2){
smart_moves = smart_moves[smart_moves!=1]
} else if(prev_move==3){
smart_moves = smart_moves[smart_moves!=4]
} else if(prev_move==4){
smart_moves = smart_moves[smart_moves!=3]
}
#find traffics
trafs = c()
if(is.element(1,smart_moves)){
trafs = append(trafs,revealed_traffic[x-1,y])
}
if(is.element(2,smart_moves)){
trafs = append(trafs,revealed_traffic[x+1,y])
}
if(is.element(3,smart_moves)){
trafs = append(trafs,revealed_traffic[x,y-1])
}
if(is.element(4,smart_moves)){
trafs = append(trafs,revealed_traffic[x,y+1])
}
#return smallest
return(smart_moves[which.min(trafs)])
}
Heuristic <- function(demand, shortest_path){
length_ratio = c()
revealed_traffic = array(0,dim=c(N,N))
for(i in 1:demand_size){
x=demand[i,1]
y=demand[i,2]
prev_move = 0
shortest_way = shortest_path[x,y]
total_traffic = 0
while(x!=target_x | y!=target_y){
potential_moves = PotentialMoves(x,y)
new_move = PickBestMoveHeuristic(x,y,potential_moves,revealed_traffic,prev_move)
new_loc = UpdateLoc(x,y,new_move)
new_x = new_loc[1]
new_y = new_loc[2]
total_traffic = total_traffic + traffic[new_x,new_y]
x = new_x
y = new_y
prev_move = new_move
revealed_traffic[x,y] = traffic[x,y]
}
length_ratio = append(length_ratio,total_traffic/shortest_way)
}
return(length_ratio)
}
PlotLengths<- function(length_eps_greedy,length_opt_greedy,length_heuristic,length_opt_greedy_right,r){
roll_mean_eps = c()
roll_mean_opt = c()
roll_mean_heur =c()
roll_mean_opt_right = c()
  # parentheses matter below: length_eps_greedy[i-r:i] would index i - (r:i)
  for(i in r:length(length_eps_greedy)){
    roll_mean_eps[i-r] = mean(length_eps_greedy[(i-r):i],na.rm=TRUE)
    roll_mean_opt[i-r] = mean(length_opt_greedy[(i-r):i],na.rm=TRUE)
    roll_mean_heur[i-r] = mean(length_heuristic[(i-r):i],na.rm=TRUE)
    roll_mean_opt_right[i-r] = mean(length_opt_greedy_right[(i-r):i],na.rm=TRUE)
  }
x = 1:length(roll_mean_eps)
plot(x,log(roll_mean_eps),type = 'l',col='red',ylab='log length',xlab='demand')
legend('topright',legend=c('eps-greedy'),col=c('red'),lty=1)
x = 1:length(roll_mean_eps)
plot(x[5000:14990],roll_mean_eps[5000:14990],type = 'l',col='red',ylab='length',xlab='demand')
legend('topright',legend=c('eps-greedy'),col=c('red'),lty=1)
plot(x,(roll_mean_opt),type = 'l',col='green',ylab='length',xlab='demand')
lines(x,(roll_mean_heur),col='red')
lines(x,(roll_mean_opt_right),col='blue')
legend('topright',legend=c('opt-greedy','heuristic','Right-only'),col=c('green','red','blue'),lty=1:1)
}
#initialize setup
N=100
demand_size = 15000
traffic= matrix(sample(1:10,N*N,replace=TRUE),N,N)
demand = matrix(sample(1:N,demand_size*2,replace=TRUE),demand_size,2)
target_x = 1
target_y = 20
shortest_path = ShortestPath(traffic,N,target_x,target_y)
#e-greedy with 0 initials
Q = array(0,dim=c(N,N,4))
length_eps_greedy = QLearning(Q,epsilon=0.2,demand,shortest_path,FALSE)
#greedy with smart optimistic values
Q = GenerateOptimisticQ(500)
length_opt_greedy = QLearning(Q,epsilon=0,demand,shortest_path,FALSE)
#heuristics
length_heuristic = Heuristic(demand,shortest_path)
#only-right turn with optimistic q values
Q = GenerateOptimisticQ(500)
length_opt_greedy_right = QLearning(Q,epsilon=0,demand,shortest_path,TRUE)
#plot
PlotLengths(length_eps_greedy,length_opt_greedy,length_heuristic,length_opt_greedy_right,r=10)
|
/q_learning.R
|
no_license
|
aytek-mutlu/q_learning
|
R
| false | false | 9,128 |
r
|
#shortest path
ShortestPath <- function(traffic,N,target_x,target_y){
shortest_path = matrix(N*N*10,N,N)
shortest_path[target_x,target_y]=0
identical_count = 100
while(identical_count>0){
shortest_path_temp = shortest_path
for(x in 1:N){
for(y in 1:N){
if(x==target_x & y==target_y){
shortest_path[x,y] = 0
}
else{
pos_paths = c()
if(x!=1){
pos_paths = append(pos_paths,traffic[x-1,y] + shortest_path_temp[x-1,y])
}
if(x!=N){
pos_paths = append(pos_paths,traffic[x+1,y] + shortest_path_temp[x+1,y])
}
if(y!=1){
pos_paths = append(pos_paths,traffic[x,y-1] + shortest_path_temp[x,y-1])
}
if(y!=N){
pos_paths = append(pos_paths,traffic[x,y+1] + shortest_path_temp[x,y+1])
}
shortest_path[x,y] = min(pos_paths)
}
}
}
#check if shortest path has not changed for last 100 iterations
if(identical(shortest_path,shortest_path_temp)){
identical_count = identical_count - 1
}
else identical_count = 100
}
return(shortest_path)
}
#potential actions
PotentialMoves <- function(x,y){
potential_moves = c()
#check boundaries of the map to determine potential moves
if(x!=1){
potential_moves = append(potential_moves,1)
}
if(x!=N){
potential_moves = append(potential_moves,2)
}
if(y!=1){
potential_moves = append(potential_moves,3)
}
if(y!=N){
potential_moves = append(potential_moves,4)
}
return(potential_moves)
}
#update position
UpdateLoc <- function(x,y,move){
#update position based on the given move
#1: up
#2: down
#3: left
#4: right
if(move==1){
x = x-1
}
else if(move==2){
x = x+1
}
else if(move==3){
y = y-1
}
else if(move==4){
y=y+1
}
return(c(x,y))
}
#maximum potential Q value
MaxPotentialQ <- function(Q,potential_moves,x,y){
#select maximizing Q value action
max_q = max(Q[x,y,potential_moves])
max_q_loc = Reduce(intersect,list(which(Q[x,y,] == max_q),potential_moves))[1]
Q_val_action = c(max_q_loc,max_q)
return(Q_val_action)
}
#if only right turn is allowed, update potential moves
UpdatePotentialMoves <-function(potential_moves,move){
#if previous move was going up, then going left is forbidden (if 1, forbid 3)
#if previous move was going down, then going right is forbidden (if 2, forbid 4)
#if previous move was going left, then going down is forbidden (if 3, forbid 2)
#if previous move was going right, then going up is forbidden (if 4, forbid 1)
if(move==1){
return(potential_moves[potential_moves!=3])
} else if(move==2){
return(potential_moves[potential_moves!=4])
} else if(move==3){
return(potential_moves[potential_moves!=2])
} else if(move==4){
return(potential_moves[potential_moves!=1])
} else return(potential_moves)
}
#Q-learning
QLearning <- function(Q,epsilon,demand,shortest_path,only_right){
length_ratio = c()
for(i in 1:demand_size){
#epsilon decay
eps = epsilon*((1/i)^(0.15))
x=demand[i,1]
y=demand[i,2]
old_move = 0
shortest_way = shortest_path[x,y]
total_traffic = 0
#loop until destination
while(x!=target_x | y!=target_y){
Q_temp = Q
#identify potential moves
potential_moves = PotentialMoves(x,y)
#update if only right
if(only_right){
potential_moves = UpdatePotentialMoves(potential_moves,old_move)
}
#explore
if(eps > runif(1)){
#pick random action
new_move = sample(potential_moves,1)
}
#exploit
else{
Q_val_action = MaxPotentialQ(Q_temp,potential_moves,x,y)
new_move = Q_val_action[1]
}
new_loc = UpdateLoc(x,y,new_move)
new_x = new_loc[1]
new_y = new_loc[2]
potential_moves_next = PotentialMoves(new_x,new_y)
if(only_right){
potential_moves_next = UpdatePotentialMoves(potential_moves_next,new_move)
}
Q_val_action_next = MaxPotentialQ(Q_temp,potential_moves_next,new_x,new_y)
#learnin rate decay
learning_rate = (1/i)^(0.15)
Q[x,y,new_move] = (1-learning_rate)*Q_temp[x,y,new_move] + learning_rate*(-traffic[new_x,new_y]+Q_val_action_next[2])
total_traffic = total_traffic + traffic[new_x,new_y]
x = new_x
y = new_y
old_move = new_move
}
length_ratio = append(length_ratio,total_traffic/shortest_way)
}
return(length_ratio)
}
GenerateOptimisticQ <- function(q_opt){
for(i in 1:N){
for(j in 1:N){
if(i>target_x){
Q[i,j,1]=q_opt
}
if(i<target_x){
Q[i,j,2]=q_opt
}
if(j>target_y){
Q[i,j,3]=q_opt
}
if(j<target_y){
Q[i,j,4]=q_opt
}
if(i==target_x & j==target_y){
Q[i,j,]=q_opt
}
}
}
return(Q)
}
PickBestMoveHeuristic<-function(x,y,potential_moves,revealed_traffic,prev_move){
#find actions that takes car to closer
if(x>=target_x & y>=target_y){
smart_moves = Reduce(intersect,list(c(1,3)),potential_moves)
} else if(x>=target_x & y<=target_y){
smart_moves = Reduce(intersect,list(c(1,4)),potential_moves)
} else if(x<=target_x & y>=target_y){
smart_moves = Reduce(intersect,list(c(2,3)),potential_moves)
} else if(x<=target_x & y<=target_y){
smart_moves = Reduce(intersect,list(c(2,4)),potential_moves)
}
#remove going back actions
if(prev_move==1){
smart_moves = smart_moves[smart_moves!=2]
} else if(prev_move==2){
smart_moves = smart_moves[smart_moves!=1]
} else if(prev_move==3){
smart_moves = smart_moves[smart_moves!=4]
} else if(prev_move==4){
smart_moves = smart_moves[smart_moves!=3]
}
#find traffics
trafs = c()
if(is.element(1,smart_moves)){
trafs = append(trafs,revealed_traffic[x-1,y])
}
if(is.element(2,smart_moves)){
trafs = append(trafs,revealed_traffic[x+1,y])
}
if(is.element(3,smart_moves)){
trafs = append(trafs,revealed_traffic[x,y-1])
}
if(is.element(4,smart_moves)){
trafs = append(trafs,revealed_traffic[x,y+1])
}
#return smallest
return(smart_moves[which.min(trafs)])
}
Heuristic <- function(demand, shortest_path){
length_ratio = c()
revealed_traffic = array(0,dim=c(N,N))
for(i in 1:demand_size){
x=demand[i,1]
y=demand[i,2]
prev_move = 0
shortest_way = shortest_path[x,y]
total_traffic = 0
while(x!=target_x | y!=target_y){
potential_moves = PotentialMoves(x,y)
new_move = PickBestMoveHeuristic(x,y,potential_moves,revealed_traffic,prev_move)
new_loc = UpdateLoc(x,y,new_move)
new_x = new_loc[1]
new_y = new_loc[2]
total_traffic = total_traffic + traffic[new_x,new_y]
x = new_x
y = new_y
prev_move = new_move
revealed_traffic[x,y] = traffic[x,y]
}
length_ratio = append(length_ratio,total_traffic/shortest_way)
}
return(length_ratio)
}
PlotLengths<- function(length_eps_greedy,length_opt_greedy,length_heuristic,length_opt_greedy_right,r){
roll_mean_eps = c()
roll_mean_opt = c()
roll_mean_heur =c()
roll_mean_opt_right = c()
for(i in r:length(length_eps_greedy)){
roll_mean_eps[i-r] = mean(length_eps_greedy[i-r:i],na.rm=TRUE)
roll_mean_opt[i-r] = mean(length_opt_greedy[i-r:i],na.rm=TRUE)
roll_mean_heur[i-r] = mean(length_heuristic[i-r:i],na.rm=TRUE)
roll_mean_opt_right[i-r] = mean(length_opt_greedy_right[i-r:i],na.rm=TRUE)
}
x = 1:length(roll_mean_eps)
plot(x,log(roll_mean_eps),type = 'l',col='red',ylab='log length',xlab='demand')
legend('topright',legend=c('eps-greedy'),col=c('red'),lty=1)
x = 1:length(roll_mean_eps)
plot(x[5000:14990],roll_mean_eps[5000:14990],type = 'l',col='red',ylab='length',xlab='demand')
legend('topright',legend=c('eps-greedy'),col=c('red'),lty=1)
plot(x,(roll_mean_opt),type = 'l',col='green',ylab='length',xlab='demand')
lines(x,(roll_mean_heur),col='red')
lines(x,(roll_mean_opt_right),col='blue')
legend('topright',legend=c('opt-greedy','heuristic','Right-only'),col=c('green','red','blue'),lty=1:1)
}
#initialize setup
N=100
demand_size = 15000
traffic= matrix(sample(1:10,N*N,replace=TRUE),N,N)
demand = matrix(sample(1:N,demand_size*2,replace=TRUE),demand_size,2)
target_x = 1
target_y = 20
shortest_path = ShortestPath(traffic,N,target_x,target_y)
#e-greedy with 0 initials
Q = array(0,dim=c(N,N,4))
length_eps_greedy = QLearning(Q,epsilon=0.2,demand,shortest_path,FALSE)
#greedy with smart optimistic values
Q = GenerateOptimisticQ(500)
length_opt_greedy = QLearning(Q,epsilon=0,demand,shortest_path,FALSE)
#heuristics
length_heuristic = Heuristic(demand,shortest_path)
#only-right turn with optimistic q values
Q = GenerateOptimisticQ(500)
length_opt_greedy_right = QLearning(Q,epsilon=0,demand,shortest_path,TRUE)
#plot
PlotLengths(length_eps_greedy,length_opt_greedy,length_heuristic,length_opt_greedy_right,r=10)
|
LoadDataSet <- function(path) {
wd <- getwd()
dataPath <- paste(path, "coeffgrabber New/Data", sep="")
setwd(dataPath)
  files = list.files(path = dataPath, pattern="\\.csv$") # match the .csv extension as a proper regex
egbPath = paste(dataPath, "!EGB.csv", sep="/")
EGB <- read.csv(egbPath, header = FALSE, stringsAsFactors=FALSE)
for (i in 1:(length(files)-1))
{
EGB_temp <- read.csv(files[i], header = FALSE, stringsAsFactors=FALSE)
EGB <- merge(EGB, EGB_temp, all=TRUE)
}
EGB <- EGB[EGB$V12 != "draw",]
current_date_file <- read.csv(files[length(files)], header = FALSE, stringsAsFactors=FALSE)
EGB <- merge(EGB, current_date_file, all=TRUE)
EGB <- EGB[,c(2,1,11,3,6,7,12,9,4,5,8,10)]
colnames(EGB) <- c("game", "bet", "series", "date", "team1","team2", "win", "league", "coeff1", "coeff2", "bet_type", "map")
EGB$date <- as.POSIXct(EGB$date)
EGB <- EGB[(EGB$date>"2016/03/01"),]
EGB[EGB$team1 == "!Rebels!","team1"] <- "Rebels"
EGB[EGB$team2 == "!Rebels!","team2"] <- "Rebels"
EGB[EGB$team1 == "BrooDMotherS","team1"] <- "BroodMothers"
EGB[EGB$team2 == "BrooDMotherS","team2"] <- "BroodMothers"
EGB[EGB$team1 == "The Mongolz","team1"] <- "TheMongolz"
EGB[EGB$team2 == "The Mongolz","team2"] <- "TheMongolz"
setwd(wd)
return(EGB)
}
|
/R-BradleyT/helpers/LoadData.R
|
no_license
|
Michael-karn-ivanov/dota
|
R
| false | false | 1,259 |
r
|
LoadDataSet <- function(path) {
wd <- getwd()
dataPath <- paste(path, "coeffgrabber New/Data", sep="")
setwd(dataPath)
files = list.files(path = dataPath, pattern="*.csv")
egbPath = paste(dataPath, "!EGB.csv", sep="/")
EGB <- read.csv(egbPath, header = FALSE, stringsAsFactors=FALSE)
for (i in 1:(length(files)-1))
{
EGB_temp <- read.csv(files[i], header = FALSE, stringsAsFactors=FALSE)
EGB <- merge(EGB, EGB_temp, all=TRUE)
}
EGB <- EGB[EGB$V12 != "draw",]
current_date_file <- read.csv(files[length(files)], header = FALSE, stringsAsFactors=FALSE)
EGB <- merge(EGB, current_date_file, all=TRUE)
EGB <- EGB[,c(2,1,11,3,6,7,12,9,4,5,8,10)]
colnames(EGB) <- c("game", "bet", "series", "date", "team1","team2", "win", "league", "coeff1", "coeff2", "bet_type", "map")
EGB$date <- as.POSIXct(EGB$date)
EGB <- EGB[(EGB$date>"2016/03/01"),]
EGB[EGB$team1 == "!Rebels!","team1"] <- "Rebels"
EGB[EGB$team2 == "!Rebels!","team2"] <- "Rebels"
EGB[EGB$team1 == "BrooDMotherS","team1"] <- "BroodMothers"
EGB[EGB$team2 == "BrooDMotherS","team2"] <- "BroodMothers"
EGB[EGB$team1 == "The Mongolz","team1"] <- "TheMongolz"
EGB[EGB$team2 == "The Mongolz","team2"] <- "TheMongolz"
setwd(wd)
return(EGB)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rticles-package.R
\docType{package}
\name{rticles-package}
\alias{rticles}
\alias{rticles-package}
\title{rticles: Article Formats for R Markdown}
\description{
\if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}}
A suite of custom R Markdown formats and templates for authoring journal articles and conference submissions.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/rstudio/rticles}
\item Report bugs at \url{https://github.com/rstudio/rticles/issues}
}
}
\author{
\strong{Maintainer}: Christophe Dervieux \email{cderv@posit.co} (\href{https://orcid.org/0000-0003-4474-2498}{ORCID})
Authors:
\itemize{
\item JJ Allaire \email{jj@posit.co}
\item Yihui Xie \email{xie@yihui.name} (\href{https://orcid.org/0000-0003-0645-5666}{ORCID})
\item R Foundation [copyright holder]
\item Hadley Wickham \email{hadley@posit.co}
\item Journal of Statistical Software [copyright holder]
\item Ramnath Vaidyanathan \email{ramnath.vaidya@gmail.com} [copyright holder]
\item Association for Computing Machinery [copyright holder]
\item Carl Boettiger \email{cboettig@gmail.com} [copyright holder]
\item Elsevier [copyright holder]
\item Karl Broman \email{kbroman@gmail.com} [copyright holder]
\item Kirill Mueller \email{kirill.mueller@ivt.baug.ethz.ch} [copyright holder]
\item Bastiaan Quast \email{bquast@gmail.com} [copyright holder]
\item Randall Pruim \email{rpruim@gmail.com} [copyright holder]
\item Ben Marwick \email{bmarwick@uw.edu} [copyright holder]
\item Charlotte Wickham \email{cwickham@gmail.com} [copyright holder]
\item Oliver Keyes \email{okeyes@wikimedia.org} [copyright holder]
\item Miao Yu \email{yufreecas@gmail.com} [copyright holder]
\item Daniel Emaasit \email{daniel.emaasit@gmail.com} [copyright holder]
\item Thierry Onkelinx \email{thierry.onkelinx@inbo.be} [copyright holder]
\item Alessandro Gasparini \email{ellessenne@gmail.com} (\href{https://orcid.org/0000-0002-8319-7624}{ORCID}) [copyright holder]
\item Marc-Andre Desautels \email{marc-andre.desautels@cstjean.qc.ca} [copyright holder]
\item Dominik Leutnant \email{leutnant@fh-muenster.de} (\href{https://orcid.org/0000-0003-3293-2315}{ORCID}) [copyright holder]
\item MDPI [copyright holder]
\item Taylor and Francis [copyright holder]
\item Oğuzhan Öğreden (\href{https://orcid.org/0000-0002-9949-3348}{ORCID})
\item Dalton Hance \email{dhance@usgs.gov}
\item Daniel Nüst \email{daniel.nuest@uni-muenster.de} (\href{https://orcid.org/0000-0002-0024-5046}{ORCID}) [copyright holder]
\item Petter Uvesten \email{petter.uvesten.7362@student.lu.se} [copyright holder]
\item Elio Campitelli \email{elio.campitelli@cima.fcen.uba.ar} (\href{https://orcid.org/0000-0002-7742-9230}{ORCID}) [copyright holder]
\item John Muschelli \email{muschellij2@gmail.com} (\href{https://orcid.org/0000-0001-6469-1750}{ORCID}) [copyright holder]
\item Alex Hayes \email{alexpghayes@gmail.com} (\href{https://orcid.org/0000-0002-4985-5160}{ORCID})
\item Zhian N. Kamvar \email{zkamvar@gmail.com} (\href{https://orcid.org/0000-0003-1458-7108}{ORCID}) [copyright holder]
\item Noam Ross \email{noam.ross@gmail.com} (\href{https://orcid.org/0000-0002-2136-0000}{ORCID}) [copyright holder]
\item Robrecht Cannoodt \email{rcannood@gmail.com} (\href{https://orcid.org/0000-0003-3641-729X}{ORCID}) (rcannood) [copyright holder]
\item Duncan Luguern \email{duncan.luguern@gmail.com}
\item David M. Kaplan \email{dmkaplan2000@gmail.com} (\href{https://orcid.org/0000-0001-6087-359X}{ORCID}) (dmkaplan2000) [contributor]
\item Sebastian Kreutzer \email{sebastian.kreutzer@aber.ac.uk} (\href{https://orcid.org/0000-0002-0734-2199}{ORCID})
\item Shixiang Wang \email{w_shixiang@163.com} (\href{https://orcid.org/0000-0001-9855-7357}{ORCID}) [contributor]
\item Jay Hesselberth \email{jay.hesselberth@gmail.com} (\href{https://orcid.org/0000-0002-6299-179X}{ORCID}) [contributor]
\item Rob Hyndman \email{Rob.Hyndman@monash.edu} (\href{https://orcid.org/0000-0002-2140-5352}{ORCID})
}
Other contributors:
\itemize{
\item Posit Software, PBC [copyright holder, funder]
\item Alfredo Hernández \email{aldomann.designs@gmail.com} (\href{https://orcid.org/0000-0002-2660-4545}{ORCID}) [contributor]
\item Stefano Coretta \email{stefano.coretta@gmail.com} (\href{https://orcid.org/0000-0001-9627-5532}{ORCID}) (stefanocoretta) [contributor]
\item Greg Macfarlane \email{gregmacfarlane@gmail.com} (gregmacfarlane) [contributor]
\item Matthias Templ \email{matthias.templ@gmail.com} (\href{https://orcid.org/0000-0002-8638-5276}{ORCID}) (matthias-da) [contributor]
\item Alvaro Uzaheta \email{alvaro.uzaheta@gess.ethz.ch} (auzaheta) [contributor]
\item JooYoung Seo \email{jseo1005@illinois.edu} (\href{https://orcid.org/0000-0002-4064-6012}{ORCID}) [contributor]
\item Callum Arnold \email{cal.rk.arnold@gmail.com} (arnold-c) [contributor]
\item Dmytro Perepolkin \email{dperepolkin@gmail.com} (\href{https://orcid.org/0000-0001-8558-6183}{ORCID}) (dmi3kno) [contributor]
\item Tom Palmer \email{remlapmot@hotmail.com} (\href{https://orcid.org/0000-0003-4655-4511}{ORCID}) (remlapmot) [contributor]
}
}
\keyword{internal}
|
/man/rticles-package.Rd
|
no_license
|
cran/rticles
|
R
| false | true | 5,394 |
rd
|
#####
# Verify that the biglasso outputs are identical when using
# multithreading (via 'ncores' argument)
#
#
####
#=======================================#
#====== DATA GENERATING MECHANISM ======#
#=======================================#
sim.surv.weib <- function(n, lambda=0.01, rho=1, beta, rate.cens=0.001) {
# generate survival data (Weibull baseline hazard), adapted from
# https://stats.stackexchange.com/questions/135124/how-to-create-a-toy-survival-time-to-event-data-with-right-censoring
p <- length(beta)
X <- matrix(rnorm(n * p), nrow = n)
# Weibull latent event times
v <- runif(n = n)
latent.time <- (-log(v)/(lambda * exp(X %*% beta)))^(1/rho)
# censoring times
cens.time <- rexp(n = n, rate = rate.cens)
# follow-up times and event indicators
time <- round(pmin(latent.time, cens.time)) + 1
status <- as.numeric(latent.time < cens.time)
y <- cbind(time, status)
colnames(y) <- c("time", "status")
# data set
return (list(X = X, y = y))
}
sim.surv <- function(n, p, p_nz, seed, ...) {
# n = number of obs.
# p = number of covariates.
# p_nz = proportion of non-zero coefficients (induce sparsity)
if (!missing(seed)) set.seed(seed)
beta <- rnorm(p, 0, 1) * rbinom(p, 1, p_nz)
dat <- sim.surv.weib(n = n, beta = beta, ...)
list("beta" = beta, "y" = dat$y, "X" = dat$X)
}
#=======================================#
#================ SETUP ================#
#=======================================#
library(glmnet)
library(biglasso)
set.seed(124)
### data parameters
n <- 1000
p <- 5
p_nz <- 1
### biglasso parameters
penalty <- "enet"
alpha <- 0.5 # elastic net penalty (alpha = 0 is ridge and alpha = 1 is lasso)
lambda <- exp(seq(0, -6, length.out = 100))
nfolds <- 5
grouped <- T
foldid <- sample(cut(1:n, breaks = nfolds, labels = F))
ncores <- 4
### generate data
dat <- sim.surv(n = n, p = p, p_nz = p_nz, rho = 10, rate.cens = 0.1)
y <- dat$y
X <- dat$X
Xbig <- as.big.matrix(X)
table(y[,1], y[,2])
table(y[,2])
pt <- proc.time()
cv.bl0 <- cv.biglasso(X = Xbig,
y = y,
family = "cox",
penalty = penalty,
alpha = alpha,
lambda = lambda,
nfolds = nfolds,
grouped = grouped,
cv.ind = foldid,
ncores = 1,
trace = T)
proc.time() - pt
pt <- proc.time()
cv.bl1 <- cv.biglasso(X = Xbig,
y = y,
family = "cox",
penalty = penalty,
alpha = alpha,
lambda = lambda,
nfolds = nfolds,
grouped = grouped,
cv.ind = foldid,
ncores = ncores,
trace = T)
proc.time() - pt
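# Hypothetical check (not in the original script): compare the single-core and
# multi-core fits directly, since that is the stated purpose of this test.
# Element names (cve) and coef() follow biglasso's cv object structure.
stopifnot(isTRUE(all.equal(cv.bl0$cve, cv.bl1$cve)))
stopifnot(isTRUE(all.equal(as.matrix(coef(cv.bl0)), as.matrix(coef(cv.bl1)))))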
doMC::registerDoMC(cores = ncores)
pt <- proc.time()
cv.gn <- cv.glmnet(x = X,
y = y,
family = "cox",
alpha = alpha,
nfolds = nfolds,
lambda = lambda,
grouped = grouped,
parallel = T,
foldid = foldid,
trace.it = 1)
proc.time() - pt
#lapply(list.files("~/projects/cv-biglasso-cox/R/", full.names = T), source)
#plot.compare.cv2(cv.bl1, cv.gn)
plot.compare.cv2(cv.bl0, cv.gn)
|
/tests/test-cv/compare-parallelism.R
|
no_license
|
dfleis/biglasso
|
R
| false | false | 3,503 |
r
|
x <- matrix(nrow=3,ncol=3)
x[2:3,2:3] <- cbind(4:5,2:3)
x
|
/Matrix-1.r
|
no_license
|
RKViswanadha/Datasciencecoursera
|
R
| false | false | 57 |
r
|
primo <- function(n) {
  if (n < 2) {
    return(FALSE)
  }
  if (n == 2 || n == 3) {
    return(TRUE)
  }
  if (n %% 2 == 0) {
    return(FALSE)
  }
  for (i in seq(3, ceiling(sqrt(n)), 2)) {
    if ((n %% i) == 0) {
      return(FALSE)
    }
  }
  return(TRUE)
}
desde <- 10
hasta <- 30
original <- desde:hasta
invertido <- hasta:desde
replicas <- 10
suppressMessages(library(doParallel))
cl <- makeCluster(detectCores() - 1)
registerDoParallel(cl)
ot <- numeric()
it <- numeric()
at <- numeric()
for (r in 1:replicas) {
ot <- c(ot, system.time(foreach(n = original, .combine=c) %dopar% primo(n))[3]) # de menor a mayor
it <- c(it, system.time(foreach(n = invertido, .combine=c) %dopar% primo(n))[3]) # de mayor a menor
at <- c(at, system.time(foreach(n = sample(original), .combine=c) %dopar% primo(n))[3]) # orden aleatorio
}
stopCluster(cl)
summary(ot)
summary(it)
summary(at)
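# Hypothetical visual summary (uses only the three timing vectors gathered
# above): a side-by-side boxplot makes the effect of evaluation order visible.
boxplot(data.frame(original = ot, invertido = it, aleatorio = at),
        ylab = "tiempo (s)")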
|
/p3/primo4.R
|
no_license
|
PabloChavez94/Simulacion
|
R
| false | false | 934 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/RF/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.6,family="gaussian",standardize=TRUE)
sink('./Model/EN/Classifier/NSCLC/NSCLC_065.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
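# Hypothetical follow-up (not in the original script): glm is a cv.glmnet
# object, so the cross-validated optimum can be recorded alongside the path.
cat("lambda.min:", glm$lambda.min, "\n")
coef.min <- coef(glm, s = "lambda.min")  # coefficients at the CV-optimal lambda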
|
/Model/EN/Classifier/NSCLC/NSCLC_065.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 346 |
r
|
library( "ape" )
library( "geiger" )
library( "expm" )
library( "nloptr" )
source( "masternegloglikeeps1.R" )
source( "Qmatrixwoodherb2.R" )
source("Pruning2.R")
sim.tree<-read.tree("tree50time63.txt")
sim.chrom<-read.table("chrom50time63.txt", header=FALSE)
last.state=50
x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,11)
my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000)
mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0)
print(mle)
results[1:10]<-mle$solution
results[11]<-mle$objective
write.table(results,file="globalmax50tree63.csv",sep=",")
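# Hypothetical post-processing (not in the original script): x.0, and hence
# mle$solution, is on the log scale, so back-transform for interpretable rates.
print(exp(mle$solution))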
|
/Simulations tree height/50 my/optim50tree63.R
|
no_license
|
roszenil/Bichromdryad
|
R
| false | false | 821 |
r
|
library( "ape" )
library( "geiger" )
library( "expm" )
library( "nloptr" )
source( "masternegloglikeeps1.R" )
source( "Qmatrixwoodherb2.R" )
source("Pruning2.R")
sim.tree<-read.tree("tree50time63.txt")
sim.chrom<-read.table("chrom50time63.txt", header=FALSE)
last.state=50
x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,11)
my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000)
mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0)
print(mle)
results[1:10]<-mle$solution
results[11]<-mle$objective
write.table(results,file="globalmax50tree63.csv",sep=",")
|
# -------------------------
# 01_Temperature_data_B_TS
# -------------------------
# A script for analysis of temperature data for the bleaching thermal stress experiment:
# a) Read in rda files for each tank
# b) Calculate averages
# c) Plot data
# d) Calculate degree heating week (DHW) accumulation for each tank
# Load required packages
library(lubridate)
library(plyr)
library(dplyr)
library(lattice)
library(openair)
library(ggplot2)
library(tidyr)
# --------------------------------------------------------------------------------------------------------------------
# .rda file format allows the user to save the data as a dataframe - specifically in this case with the date in POSIXct format.
# Code is repeated for each temperature treatment.
# --------------------------------------------------------------------------------------------------------------------
load(file = "Data/B_TS_Heat_Tank_1.rda")
load(file = "Data/B_TS_Heat_Tank_2.rda")
load(file = "Data/B_TS_Control_Tank_1.rda")
load(file = "Data/B_TS_Control_Tank_2.rda")
# --------------------------------------------------------------------------------------------------------------------
# b) Calculate average temperature per hour for each tank
# --------------------------------------------------------------------------------------------------------------------
#################
# Control tanks
#################
str(AMB_1)
str(AMB_2)
# Hourly average for control tank 1 using the timeAverage function in the package openair
CT1_ave <- timeAverage(AMB_1,
avg.time = "hour",
data.thresh = 0,
statistic = "mean",
start.date = "2019-03-04 00:00:00",
end.date = "2019-03-24 00:00:00")
# Plot the hourly average for control tank 1 data
ggplot(CT1_ave, aes(date, Temp)) +
geom_line() +
scale_x_datetime(limits = c(as.POSIXct("2019-03-04 00:00:00"), as.POSIXct("2019-03-24 00:00:00")), name = "Date")+
scale_y_continuous(name = "Temperature", limits = c(24,35))+
theme_classic()
# Hourly average for control tank 2 using the timeAverage function in the package openair
CT2_ave <- timeAverage(AMB_2,
avg.time = "hour",
data.thresh = 0,
statistic = "mean",
start.date = "2019-03-04 00:00:00",
end.date = "2019-03-24 00:00:00")
ggplot(CT2_ave, aes(date, Temp)) +
geom_line() +
scale_x_datetime(limits = c(as.POSIXct("2019-03-04 00:00:00"), as.POSIXct("2019-03-24 00:00:00")), name = "Date")+
scale_y_continuous(name = "Temperature", limits = c(24,35))+
theme_classic()
# Filter the data for our dates of interest for tanks 1 and 2
CT1_ave <- CT1_ave %>%
  filter( date > ymd_hms("2019-03-04 00:00:00")) %>%
  filter( date < ymd_hms("2019-03-24 00:00:00"))
CT2_ave <- CT2_ave %>%
  filter( date > ymd_hms("2019-03-04 00:00:00")) %>%
  filter( date < ymd_hms("2019-03-24 00:00:00"))
# Add specifiers for each tank so that tanks can be distinguished on merging
CT1_ave$CT1 <- CT1_ave$Temp
CT1_ave$Timepoint <- CT1_ave$date
CT2_ave$CT2 <- CT2_ave$Temp
# Bind the two data frames together and reorder
timeaveCT <- cbind(CT1_ave, CT2_ave$CT2)
timeaveCT<- data.frame(timeaveCT$Timepoint, timeaveCT$CT1, timeaveCT$CT2)
# Find the average temperature across the two tanks for each hour
timeaveCT<- mutate(timeaveCT, daily_mean = rowMeans(cbind(timeaveCT.CT1, timeaveCT.CT2)))
# Rename columns
colnames(timeaveCT) <- c("Timepoint", "CT_1", "CT_2", "daily_mean")
ggplot(timeaveCT, aes(Timepoint,daily_mean)) +
geom_line() +
scale_x_datetime(limits = c(as.POSIXct("2019-03-04 00:00:00"), as.POSIXct("2019-03-24 00:00:00")), name = "Date")+
scale_y_continuous(name = "Temperature")+
theme_classic()
###############
# Heat tank (B)
###############
str(BT_1)
# Rename the timepoint as date for the function timeAverage to work
BT_1$date <- BT_1$Timepoint
# Hourly average for bleaching tank 1 using the timeAverage function in the package openair
BT1_ave <- timeAverage(BT_1,
avg.time = "hour",
data.thresh = 0,
statistic = "mean",
start.date = "2019-03-04 00:00:00",
end.date = "2019-03-24 00:00:00")
# Plot the hourly average for bleaching tank 1 data
ggplot(BT1_ave, aes(date, Temp)) +
geom_line() +
scale_x_datetime(limits = c(as.POSIXct("2019-03-04 00:00:00"), as.POSIXct("2019-03-24 00:00:00")), name = "Date")+
scale_y_continuous(name = "Temperature", limits = c(24,35))+
theme_classic()
# Rename the timepoint as date for the function timeAverage to work
BT_2$date <- BT_2$Timepoint
# Hourly average for bleaching tank 2 using the timeAverage function in the package openair
BT2_ave <- timeAverage(BT_2,
avg.time = "hour",
data.thresh = 0,
statistic = "mean",
start.date = "2019-03-04 00:00:00",
end.date = "2019-03-24 00:00:00")
ggplot(BT2_ave, aes(date, Temp)) +
geom_line() +
scale_x_datetime(limits = c(as.POSIXct("2019-03-04 00:00:00"), as.POSIXct("2019-03-24 00:00:00")), name = "Date")+
scale_y_continuous(name = "Temperature", limits = c(24,37))+
theme_classic()
# Filter the data for our dates of interest for tanks 1 and 2
BT1_ave <- BT1_ave %>%
filter( date > ymd_hms("2019-03-04 00:00:00")) %>%
filter( date < ymd_hms("2019-03-24 00:00:00"))
BT2_ave <- BT2_ave %>%
filter( date > ymd_hms("2019-03-04 00:00:00")) %>%
filter( date < ymd_hms("2019-03-24 00:00:00"))
# Add specifiers for each tank so that tanks can be distinguished on merging
BT1_ave$BT1 <- BT1_ave$Temp
BT1_ave$Timepoint <- BT1_ave$date
BT2_ave$BT2 <- BT2_ave$Temp
# Bind the two data frames together and reorder
timeaveBT <- cbind(BT1_ave, BT2_ave$BT2)
timeaveBT<- data.frame(timeaveBT$Timepoint, timeaveBT$BT1, timeaveBT$BT2)
# Find the average temperature across the two tanks for each hour
timeaveBT<- mutate(timeaveBT, daily_mean = rowMeans(cbind(timeaveBT.BT1, timeaveBT.BT2)))
# Rename columns
colnames(timeaveBT) <- c("Timepoint", "BT_1", "BT_2", "daily_mean")
ggplot(timeaveBT, aes(Timepoint,daily_mean)) +
geom_line() +
scale_x_datetime(limits = c(as.POSIXct("2019-03-04 00:00:00"), as.POSIXct("2019-03-24 00:00:00")), name = "Date")+
scale_y_continuous(name = "Temperature")+
theme_classic()
# --------------------------------------------------------------------------------------------------------------------
# c) Plot data
# --------------------------------------------------------------------------------------------------------------------
ggplot() +
  geom_line(data = timeaveCT, aes(x = Timepoint, y = daily_mean, colour = "Ambient"), size = 0.4) +
  geom_line(data = timeaveBT, aes(x = Timepoint, y = daily_mean, colour = "B TS"), size = 0.4) +
  scale_x_datetime(limits = c(as.POSIXct("2019-03-04 00:00:00"), as.POSIXct("2019-03-24 00:00:00")), name = "Date")+
  scale_y_continuous(breaks = seq(24,35,1), name = "Temperature (°C)") +
  scale_colour_manual(name = "Treatment", values = c("dodgerblue4", "firebrick4"))+
theme_classic() +
theme(axis.text.x = element_text(size = "15"),
axis.text.y = element_text(size = "15"),
axis.title.x = element_text(size = "15"),
axis.title.y = element_text(size = "15"))
# --------------------------------------------------------------------------------------------------------------------
# d) Calculate degree heating week (DHW) accumulation for each tank
# --------------------------------------------------------------------------------------------------------------------
# Change the file names so that they are specific to the treatment
timeaveCT$daily_mean_CT <- timeaveCT$daily_mean
timeaveBT$daily_mean_BT <- timeaveBT$daily_mean
# Split Timepoint by space deliminator into two columns
CT <- separate(timeaveCT, Timepoint, c("date","time"), sep = " ", remove = TRUE,
convert = FALSE, extra = "warn", fill = "warn")
BT <- separate(timeaveBT, Timepoint, c("date","time"), sep = " ", remove = TRUE,
convert = FALSE, extra = "warn", fill = "warn")
# Calculate the daily mean temperature
aggdata1 <- ddply(CT, ~date, summarize, daily_mean_temp = mean(daily_mean_CT, na.rm = TRUE))
aggdata2 <- ddply(BT, ~date, summarize, daily_mean_temp = mean(daily_mean_BT, na.rm = TRUE))
# Calculate degree heating days by calculating the anomaly on days above the maximum monthly mean for Heron
# of 27.3 °C
aggdata1$DHD <- ifelse(aggdata1$daily_mean_temp > 27.3, (aggdata1$daily_mean_temp - 27.3), 0)
str(aggdata1)
aggdata2$DHD <- ifelse(aggdata2$daily_mean_temp > 27.3, (aggdata2$daily_mean_temp - 27.3), 0)
str(aggdata2)
# Create DHWs by dividing DHDs by 7
aggdata1$DHW <- aggdata1$DHD/7
str(aggdata1)
aggdata2$DHW <- aggdata2$DHD/7
str(aggdata2)
#Creating cumulative variables to track heat load
aggdata1$totalDHD <- cumsum(aggdata1$DHD)
aggdata1$totalDHW <- cumsum(aggdata1$DHW)
aggdata2$totalDHD <- cumsum(aggdata2$DHD)
aggdata2$totalDHW <- cumsum(aggdata2$DHW)
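# Hypothetical refactor (not in the original script): the DHD/DHW block above
# is written out twice, once per treatment, so a small helper would keep the
# logic in one place. The MMM of 27.3 degrees C is taken from the comment above.
add_dhw <- function(df, mmm = 27.3) {
  df$DHD <- pmax(df$daily_mean_temp - mmm, 0)  # degree heating days above MMM
  df$DHW <- df$DHD / 7                         # convert days to weeks
  df$totalDHD <- cumsum(df$DHD)
  df$totalDHW <- cumsum(df$DHW)
  df
}
# e.g. aggdata1 <- add_dhw(aggdata1); aggdata2 <- add_dhw(aggdata2)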
# Add a variable that is Day
aggdata1 <- mutate(aggdata1, Day = c(1:20))
aggdata2 <- mutate(aggdata2, Day = c(1:20))
# Create a master plot of DHW data
ggplot() +
#geom_line(data = aggdata1, aes(x =Day, y = totalDHW, colour = "Ambient")) +
geom_line(data = aggdata2, aes(x =Day, y = totalDHW, colour = "Bleaching")) +
#geom_point(data = aggdata1, aes(x =Day, y = totalDHW, colour = "Ambient")) +
geom_point(data = aggdata2, aes(x =Day, y = totalDHW, colour = "Bleaching")) +
scale_x_continuous(breaks = seq(0,26,1), name = "Day")+
scale_y_continuous(breaks = seq(0,5,0.5), name = "Total Degree Heating Weeks")+
scale_colour_manual(name = "Treatment", values = c ("dodgerblue4", "indianred4"), labels = c("Ambient", "Bleaching")) +
theme_classic() +
theme(axis.text.x = element_text(size = "10"),
axis.text.y = element_text(size = "10"),
axis.title.x = element_text(size = "10"),
axis.title.y = element_text(size = "10"))
str(aggdata2)
ggplot() +
#geom_line(data = aggdata1, aes(x =Day, y = totalDHW, colour = "Ambient")) +
geom_line(data = aggdata2, aes(x =Day, y = totalDHW), size = 0.2) +
#geom_point(data = aggdata1, aes(x =Day, y = totalDHW, colour = "Ambient")) +
geom_point(data = aggdata2, aes(x =Day, y = totalDHW)) +
scale_x_continuous(limits = c(0,20),breaks = seq(0,20,1), name = "Day")+
scale_y_continuous(limits = c(0,5), breaks = seq(0,5,0.5), name = "eDHW", position = "right")+
#scale_colour_manual(name = "Treatment", values = c ("dodgerblue4", "indianred4"), labels = c("Ambient", "Bleaching")) +
theme_classic() +
theme(axis.text.x = element_text(size = "15"),
axis.text.y = element_text(size = "15"),
axis.title.x = element_text(size = "15"),
axis.title.y = element_text(size = "15"))
aggdata2
|
/01_Temperature_data_B_TS.R
|
no_license
|
CharlotteEPage/Flow_effects_thermal_stress_A.aspera
|
R
| false | false | 11,067 |
r
|
setwd("C:/Users//Ruicheng//Desktop//dmc-2015/")
build <- read.csv("SEM_DAILY_BUILD.csv", header = T)
test <- read.csv("SEM_DAILY_VALIDATION.csv", header = T)
CLICKS <- build$CLICKS
ENGN_ID.f. <- as.factor(build$ENGN_ID) # Binary
LANG_ID.f. <- as.factor(build$LANG_ID) # Binary
MTCH_TYPE_ID.f. <- as.factor(build$MTCH_TYPE_ID) # Binary
DVIC_ID.f. <- as.factor(build$DVIC_ID) # 3 levels
HEADLINE.f. <- as.factor(build$HEADLINE) # 27 levels
DESCRIPTION_1.f. <- as.factor(build$DESCRIPTION_1) # 69 levels
DESCRIPTION_2.f. <- as.factor(build$DESCRIPTION_2) # 33 levels
LANDING_PAGE.f. <- as.factor(build$LANDING_PAGE) # 27 levels
# Creating indicator variables for each keywords
build.click.only <- na.omit(subset(build, select=c(CLICKS)))
KEYWD_TXT <- build$KEYWD_TXT
KEYWD_TXT <- as.character(KEYWD_TXT)
keyword <- unlist(strsplit(KEYWD_TXT, "[+]"))
keyword <- keyword[keyword !=""]
keyword <- unique(keyword)
keyword <- sort(keyword)
KEYWD_TXT <- paste(KEYWD_TXT, "+", sep = "")
keyword <- paste(keyword, "+", sep = "")
keyword.indicator <- NULL
j = 1
while (j <= length(KEYWD_TXT)) {
count = c()
i = 1
while (i <= length(keyword)) {
    if (grepl(keyword[i], KEYWD_TXT[j], fixed = TRUE)) {  # literal match: "+" is a regex metacharacter
count <- append (count, 1)
} else {
count <- append (count, 0)}
i <- i+1
}
keyword.indicator <- rbind(keyword.indicator, count)
j = j+1
}
colnames(keyword.indicator) <- keyword
keyword.indicator <- as.data.frame.matrix(keyword.indicator) # convert Matrix into Data Table
# end of creating keyword indicators
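# Hypothetical vectorised alternative (gives the same matrix as the loops
# above, assuming literal matching is intended): one sapply over the keywords
# replaces the nested while loops and scales far better.
keyword.indicator2 <- sapply(keyword, function(k)
  as.integer(grepl(k, KEYWD_TXT, fixed = TRUE)))
keyword.indicator2 <- as.data.frame.matrix(keyword.indicator2)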
|
/DMC.R
|
no_license
|
r54liu/InfiniticsDMC2015
|
R
| false | false | 1,597 |
r
|
setwd("C:/Users//Ruicheng//Desktop//dmc-2015/")
build <- read.csv("SEM_DAILY_BUILD.csv", header = T)
test <- read.csv("SEM_DAILY_VALIDATION.csv", header = T)
CLICKS <- build$CLICKS
ENGN_ID.f. <- as.factor(build$ENGN_ID) # Binary
LANG_ID.f. <- as.factor(build$LANG_ID) # Binary
MTCH_TYPE_ID.f. <- as.factor(build$MTCH_TYPE_ID) # Binary
DVIC_ID.f. <- as.factor(build$DVIC_ID) # 3 levels
HEADLINE.f. <- as.factor(build$HEADLINE) # 27 levels
DESCRIPTION_1.f. <- as.factor(build$DESCRIPTION_1) # 69 levels
DESCRIPTION_2.f. <- as.factor(build$DESCRIPTION_2) # 33 levels
LANDING_PAGE.f. <- as.factor(build$LANDING_PAGE) # 27 levels
# Creating indicator variables for each keywords
build.click.only <- na.omit(subset(build, select=c(CLICKS)))
KEYWD_TXT <- build$KEYWD_TXT
KEYWD_TXT <- as.character(KEYWD_TXT)
keyword <- unlist(strsplit(KEYWD_TXT, "[+]"))
keyword <- keyword[keyword !=""]
keyword <- unique(keyword)
keyword <- sort(keyword)
KEYWD_TXT <- paste(KEYWD_TXT, "+", sep= "")
keyword <-paste(keyword, "+", sep="")
keyword.indicator <- NULL
j = 1
while (j <= length(KEYWD_TXT)) {
count = c()
i = 1
while (i <= length(keyword)) {
if (grepl(keyword[i], KEYWD_TXT[j])) {
count <- append (count, 1)
} else {
count <- append (count, 0)}
i <- i+1
}
keyword.indicator <- rbind(keyword.indicator, count)
j = j+1
}
colnames(keyword.indicator) <- keyword
keyword.indicator <- as.data.frame.matrix(keyword.indicator) # convert Matrix into Data Table
# end of creating keyword indicators
|
## convenience function for interfacing
## HCL colors as implemented in colorspace
hcl2hex <- function(h = 0, c = 35, l = 85, fixup = TRUE)
{
colorspace::hex(polarLUV(l, c, h), fixup = fixup)
}
## shading-generating functions should take at least the arguments
## observed, residuals, expected, df
## and return a function which takes a single argument (interpreted
## to be a vector of residuals).
shading_hsv <- function(observed, residuals = NULL, expected = NULL, df = NULL,
h = c(2/3, 0), s = c(1, 0), v = c(1, 0.5),
interpolate = c(2, 4), lty = 1, eps = NULL, line_col = "black",
p.value = NULL, level = 0.95, ...)
{
## get h/s/v and lty
my.h <- rep(h, length.out = 2) ## positive and negative hue
my.s <- rep(s, length.out = 2) ## maximum and minimum saturation
my.v <- rep(v, length.out = 2) ## significant and non-significant value
lty <- rep(lty, length.out = 2) ## positive and negative lty
## model fitting (if necessary)
if(is.null(expected) && !is.null(residuals)) stop("residuals without expected values specified")
if(!is.null(expected) && is.null(df) && is.null(p.value)) {
warning("no default inference available without degrees of freedom")
p.value <- NA
}
if(is.null(expected) && !is.null(observed)) {
expected <- loglin(observed, 1:length(dim(observed)), fit = TRUE, print = FALSE)
df <- expected$df
expected <- expected$fit
}
if(is.null(residuals) && !is.null(observed)) residuals <- (observed - expected)/sqrt(expected)
## conduct significance test (if specified)
if(is.null(p.value)) p.value <- function(observed, residuals, expected, df)
pchisq(sum(as.vector(residuals)^2), df, lower.tail = FALSE)
if(!is.function(p.value) && is.na(p.value)) {
v <- my.v[1]
p.value <- NULL
} else {
if(is.function(p.value)) p.value <- p.value(observed, residuals, expected, df)
v <- if(p.value < (1-level)) my.v[1] else my.v[2]
}
## set up function for interpolation of saturation
if(!is.function(interpolate)) {
col.bins <- sort(interpolate)
interpolate <- stepfun(col.bins, seq(my.s[2], my.s[1], length = length(col.bins) + 1))
col.bins <- sort(unique(c(col.bins, 0, -col.bins)))
} else {
col.bins <- NULL
}
## store color and lty information for legend
legend <- NULL
if(!is.null(col.bins)) {
res2 <- col.bins
res2 <- c(head(res2, 1) - 1, res2[-1] - diff(res2)/2, tail(res2, 1) + 1)
legend.col <- hsv(ifelse(res2 > 0, my.h[1], my.h[2]),
pmax(pmin(interpolate(abs(res2)), 1), 0),
v, ...)
lty.bins <- 0
legend.lty <- lty[2:1]
legend <- list(col = legend.col, col.bins = col.bins,
lty = legend.lty, lty.bins = lty.bins)
}
## set up function that computes color/lty from residuals
rval <- function(x) {
res <- as.vector(x)
fill <- hsv(ifelse(res > 0, my.h[1], my.h[2]),
pmax(pmin(interpolate(abs(res)), 1), 0),
v, ...)
dim(fill) <- dim(x)
col <- rep(line_col, length.out = length(res))
if(!is.null(eps)) {
eps <- abs(eps)
col[res > eps] <- hsv(my.h[1], 1, v, ...)
col[res < -eps] <- hsv(my.h[2], 1, v, ...)
}
dim(col) <- dim(x)
# line type should be solid if abs(resid) < eps
ltytmp <- ifelse(x > 0, lty[1], lty[2])
if(!is.null(eps))
ltytmp[abs(x) < abs(eps)] <- lty[1]
dim(ltytmp) <- dim(x)
return(structure(list(col = col, fill = fill, lty = ltytmp), class = "gpar"))
}
attr(rval, "legend") <- legend
attr(rval, "p.value") <- p.value
return(rval)
}
class(shading_hsv) <- "grapcon_generator"
shading_hcl <- function(observed, residuals = NULL, expected = NULL, df = NULL,
h = NULL, c = NULL, l = NULL,
interpolate = c(2, 4), lty = 1, eps = NULL, line_col = "black",
p.value = NULL, level = 0.95, ...)
{
## set defaults
if(is.null(h)) h <- c(260, 0)
if(is.null(c)) c <- c(100, 20)
if(is.null(l)) l <- c(90, 50)
## get h/c/l and lty
my.h <- rep(h, length.out = 2) ## positive and negative hue
my.c <- rep(c, length.out = 2) ## significant and non-significant maximum chroma
my.l <- rep(l, length.out = 2) ## maximum and minimum luminance
lty <- rep(lty, length.out = 2) ## positive and negative lty
## model fitting (if necessary)
if(is.null(expected) && !is.null(residuals)) stop("residuals without expected values specified")
if(!is.null(expected) && is.null(df) && is.null(p.value)) {
warning("no default inference available without degrees of freedom")
p.value <- NA
}
if(is.null(expected) && !is.null(observed)) {
expected <- loglin(observed, 1:length(dim(observed)), fit = TRUE, print = FALSE)
df <- expected$df
expected <- expected$fit
}
if(is.null(residuals) && !is.null(observed)) residuals <- (observed - expected)/sqrt(expected)
## conduct significance test (if specified)
if(is.null(p.value)) p.value <- function(observed, residuals, expected, df)
pchisq(sum(as.vector(residuals)^2), df, lower.tail = FALSE)
if(!is.function(p.value) && is.na(p.value)) {
max.c <- my.c[1]
p.value <- NULL
} else {
if(is.function(p.value)) p.value <- p.value(observed, residuals, expected, df)
max.c <- ifelse(p.value < (1-level), my.c[1], my.c[2])
}
## set up function for interpolation of saturation
if(!is.function(interpolate)) {
col.bins <- sort(interpolate)
interpolate <- stepfun(col.bins, seq(0, 1, length = length(col.bins) + 1))
col.bins <- sort(unique(c(col.bins, 0, -col.bins)))
} else {
col.bins <- NULL
}
## store color and lty information for legend
legend <- NULL
if(!is.null(col.bins)) {
res2 <- col.bins
res2 <- c(head(res2, 1) - 1, res2[-1] - diff(res2)/2, tail(res2, 1) + 1)
legend.col <- hcl2hex(ifelse(res2 > 0, my.h[1], my.h[2]),
max.c * pmax(pmin(interpolate(abs(res2)), 1), 0),
my.l[1] + diff(my.l) * pmax(pmin(interpolate(abs(res2)), 1), 0),
...)
lty.bins <- 0
legend.lty <- lty[2:1]
legend <- list(col = legend.col, col.bins = col.bins,
lty = legend.lty, lty.bins = lty.bins)
}
## set up function that computes color/lty from residuals
rval <- function(x) {
res <- as.vector(x)
fill <- hcl2hex(ifelse(res > 0, my.h[1], my.h[2]),
max.c * pmax(pmin(interpolate(abs(res)), 1), 0),
my.l[1] + diff(my.l) * pmax(pmin(interpolate(abs(res)), 1), 0),
...)
dim(fill) <- dim(x)
col <- rep(line_col, length.out = length(res))
if(!is.null(eps)) {
eps <- abs(eps)
col[res > eps] <- hcl2hex(my.h[1], max.c, my.l[2], ...)
col[res < -eps] <- hcl2hex(my.h[2], max.c, my.l[2], ...)
}
dim(col) <- dim(x)
ltytmp <- ifelse(x > 0, lty[1], lty[2])
if(!is.null(eps))
ltytmp[abs(x) < abs(eps)] <- lty[1]
dim(ltytmp) <- dim(x)
return(structure(list(col = col, fill = fill, lty = ltytmp), class = "gpar"))
}
attr(rval, "legend") <- legend
attr(rval, "p.value") <- p.value
return(rval)
}
class(shading_hcl) <- "grapcon_generator"
shading_Friendly <- function(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = c(2/3, 0), lty = 1:2, interpolate = c(2, 4), eps = 0.01, line_col = "black", ...)
{
shading_hsv(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = h, v = 1, lty = lty, interpolate = interpolate,
eps = eps, line_col = line_col, p.value = NA, ...)
}
class(shading_Friendly) <- "grapcon_generator"
shading_Friendly2 <- function(observed = NULL, residuals = NULL, expected = NULL, df = NULL, lty = 1:2, interpolate = c(2, 4), eps = 0.01, line_col = "black", ...)
{
shading_hcl(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
lty = lty, interpolate = interpolate,
eps = eps, line_col = line_col, p.value = NA, ...)
}
class(shading_Friendly2) <- "grapcon_generator"
shading_sieve <-
function(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = c(260, 0), lty = 1:2, interpolate = c(2, 4), eps = 0.01,
line_col = "black", ...)
{
shading_hcl(observed = NULL, residuals = NULL, expected = NULL,
df = NULL, h = h, c = 100, l = 50, lty = lty,
interpolate = interpolate,
eps = eps, line_col = line_col, p.value = NA, ...)
}
class(shading_sieve) <- "grapcon_generator"
shading_max <- function(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = NULL, c = NULL, l = NULL, lty = 1, eps = NULL, line_col = "black", level = c(0.9, 0.99), n = 1000, ...)
{
stopifnot(length(dim(observed)) == 2)
## set defaults
if(is.null(h)) h <- c(260, 0)
if(is.null(c)) c <- c(100, 20)
if(is.null(l)) l <- c(90, 50)
obs.test <- coindep_test(observed, n = n)
col.bins <- obs.test$qdist(sort(level))
rval <- shading_hcl(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = h, c = c, l = l, interpolate = col.bins, lty = lty,
eps = eps, line_col = line_col, p.value = obs.test$p.value, ...)
return(rval)
}
class(shading_max) <- "grapcon_generator"
shading_binary <- function(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
col = NULL)
{
## check col argument
if(is.null(col)) col <- hcl2hex(c(260, 0), 50, 70)
col <- rep(col, length.out = 2)
## store color information for legend
legend <- list(col = col[2:1], col.bins = 0, lty = NULL, lty.bins = NULL)
## set up function that computes color/lty from residuals
rval <- function(x)
gpar(fill = ifelse(x > 0, col[1], col[2]))
## add meta information for legend
attr(rval, "legend") <- legend
attr(rval, "p.value") <- NULL
rval
}
class(shading_binary) <- "grapcon_generator"
shading_Marimekko <-
function(x, fill = NULL, byrow = FALSE)
{
if (is.null(fill)) fill <- colorspace::rainbow_hcl
d <- dim(x)
l1 <- if (length(d) > 1L) d[2] else d
l2 <- if (length(d) > 1L) d[1] else 1
if (is.function(fill)) fill <- fill(l1)
fill <- if (byrow) rep(fill, l2) else rep(fill, each = l2)
gpar(col = NA, lty = "solid",
fill = array(fill, dim = d))
}
shading_diagonal <-
function(x, fill = NULL)
{
if (is.null(fill)) fill <- colorspace::rainbow_hcl
d <- dim(x)
if (length(d) < 1L)
stop("Need matrix or array!")
if (d[1] != d[2])
stop("First two dimensions need to be of same length!")
if (is.function(fill)) fill <- fill(d[1])
tp = toeplitz(seq_len(d[1]))
gpar(col = NA, lty = "solid",
fill = array(rep(fill[tp], d[1]), dim = d))
}
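## Hypothetical usage sketch (not part of this file): these generators are
## intended for vcd's strucplot framework, typically passed via the gp
## argument, e.g.
##   mosaic(UCBAdmissions, gp = shading_hcl, gp_args = list(interpolate = c(1, 2)))
##   mosaic(UCBAdmissions, gp = shading_max)   # permutation-based colour cut-offs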
|
/R/shadings.R
|
no_license
|
cran/vcd
|
R
| false | false | 10,584 |
r
|
## convenience function for interfacing
## HCL colors as implemented in colorspace
hcl2hex <- function(h = 0, c = 35, l = 85, fixup = TRUE)
{
colorspace::hex(polarLUV(l, c, h), fixup = fixup)
}
## shading-generating functions should take at least the arguments
## observed, residuals, expected, df
## and return a function which takes a single argument (interpreted
## to be a vector of residuals).
shading_hsv <- function(observed, residuals = NULL, expected = NULL, df = NULL,
h = c(2/3, 0), s = c(1, 0), v = c(1, 0.5),
interpolate = c(2, 4), lty = 1, eps = NULL, line_col = "black",
p.value = NULL, level = 0.95, ...)
{
## get h/s/v and lty
my.h <- rep(h, length.out = 2) ## positive and negative hue
my.s <- rep(s, length.out = 2) ## maximum and minimum saturation
my.v <- rep(v, length.out = 2) ## significant and non-significant value
lty <- rep(lty, length.out = 2) ## positive and negative lty
## model fitting (if necessary)
if(is.null(expected) && !is.null(residuals)) stop("residuals without expected values specified")
if(!is.null(expected) && is.null(df) && is.null(p.value)) {
warning("no default inference available without degrees of freedom")
p.value <- NA
}
if(is.null(expected) && !is.null(observed)) {
expected <- loglin(observed, 1:length(dim(observed)), fit = TRUE, print = FALSE)
df <- expected$df
expected <- expected$fit
}
if(is.null(residuals) && !is.null(observed)) residuals <- (observed - expected)/sqrt(expected)
## conduct significance test (if specified)
if(is.null(p.value)) p.value <- function(observed, residuals, expected, df)
pchisq(sum(as.vector(residuals)^2), df, lower.tail = FALSE)
if(!is.function(p.value) && is.na(p.value)) {
v <- my.v[1]
p.value <- NULL
} else {
if(is.function(p.value)) p.value <- p.value(observed, residuals, expected, df)
v <- if(p.value < (1-level)) my.v[1] else my.v[2]
}
## set up function for interpolation of saturation
if(!is.function(interpolate)) {
col.bins <- sort(interpolate)
interpolate <- stepfun(col.bins, seq(my.s[2], my.s[1], length = length(col.bins) + 1))
col.bins <- sort(unique(c(col.bins, 0, -col.bins)))
} else {
col.bins <- NULL
}
## store color and lty information for legend
legend <- NULL
if(!is.null(col.bins)) {
res2 <- col.bins
res2 <- c(head(res2, 1) - 1, res2[-1] - diff(res2)/2, tail(res2, 1) + 1)
legend.col <- hsv(ifelse(res2 > 0, my.h[1], my.h[2]),
pmax(pmin(interpolate(abs(res2)), 1), 0),
v, ...)
lty.bins <- 0
legend.lty <- lty[2:1]
legend <- list(col = legend.col, col.bins = col.bins,
lty = legend.lty, lty.bins = lty.bins)
}
## set up function that computes color/lty from residuals
rval <- function(x) {
res <- as.vector(x)
fill <- hsv(ifelse(res > 0, my.h[1], my.h[2]),
pmax(pmin(interpolate(abs(res)), 1), 0),
v, ...)
dim(fill) <- dim(x)
col <- rep(line_col, length.out = length(res))
if(!is.null(eps)) {
eps <- abs(eps)
col[res > eps] <- hsv(my.h[1], 1, v, ...)
col[res < -eps] <- hsv(my.h[2], 1, v, ...)
}
dim(col) <- dim(x)
# line type should be solid if abs(resid) < eps
ltytmp <- ifelse(x > 0, lty[1], lty[2])
if(!is.null(eps))
ltytmp[abs(x) < abs(eps)] <- lty[1]
dim(ltytmp) <- dim(x)
return(structure(list(col = col, fill = fill, lty = ltytmp), class = "gpar"))
}
attr(rval, "legend") <- legend
attr(rval, "p.value") <- p.value
return(rval)
}
class(shading_hsv) <- "grapcon_generator"
shading_hcl <- function(observed, residuals = NULL, expected = NULL, df = NULL,
h = NULL, c = NULL, l = NULL,
interpolate = c(2, 4), lty = 1, eps = NULL, line_col = "black",
p.value = NULL, level = 0.95, ...)
{
## set defaults
if(is.null(h)) h <- c(260, 0)
if(is.null(c)) c <- c(100, 20)
if(is.null(l)) l <- c(90, 50)
## get h/c/l and lty
my.h <- rep(h, length.out = 2) ## positive and negative hue
my.c <- rep(c, length.out = 2) ## significant and non-significant maximum chroma
my.l <- rep(l, length.out = 2) ## maximum and minimum luminance
lty <- rep(lty, length.out = 2) ## positive and negative lty
## model fitting (if necessary)
if(is.null(expected) && !is.null(residuals)) stop("residuals without expected values specified")
if(!is.null(expected) && is.null(df) && is.null(p.value)) {
warning("no default inference available without degrees of freedom")
p.value <- NA
}
if(is.null(expected) && !is.null(observed)) {
expected <- loglin(observed, 1:length(dim(observed)), fit = TRUE, print = FALSE)
df <- expected$df
expected <- expected$fit
}
if(is.null(residuals) && !is.null(observed)) residuals <- (observed - expected)/sqrt(expected)
## conduct significance test (if specified)
if(is.null(p.value)) p.value <- function(observed, residuals, expected, df)
pchisq(sum(as.vector(residuals)^2), df, lower.tail = FALSE)
if(!is.function(p.value) && is.na(p.value)) {
max.c <- my.c[1]
p.value <- NULL
} else {
if(is.function(p.value)) p.value <- p.value(observed, residuals, expected, df)
max.c <- ifelse(p.value < (1-level), my.c[1], my.c[2])
}
## set up function for interpolation of saturation
if(!is.function(interpolate)) {
col.bins <- sort(interpolate)
interpolate <- stepfun(col.bins, seq(0, 1, length = length(col.bins) + 1))
col.bins <- sort(unique(c(col.bins, 0, -col.bins)))
} else {
col.bins <- NULL
}
## store color and lty information for legend
legend <- NULL
if(!is.null(col.bins)) {
res2 <- col.bins
res2 <- c(head(res2, 1) - 1, res2[-1] - diff(res2)/2, tail(res2, 1) + 1)
legend.col <- hcl2hex(ifelse(res2 > 0, my.h[1], my.h[2]),
max.c * pmax(pmin(interpolate(abs(res2)), 1), 0),
my.l[1] + diff(my.l) * pmax(pmin(interpolate(abs(res2)), 1), 0),
...)
lty.bins <- 0
legend.lty <- lty[2:1]
legend <- list(col = legend.col, col.bins = col.bins,
lty = legend.lty, lty.bins = lty.bins)
}
## set up function that computes color/lty from residuals
rval <- function(x) {
res <- as.vector(x)
fill <- hcl2hex(ifelse(res > 0, my.h[1], my.h[2]),
max.c * pmax(pmin(interpolate(abs(res)), 1), 0),
my.l[1] + diff(my.l) * pmax(pmin(interpolate(abs(res)), 1), 0),
...)
dim(fill) <- dim(x)
col <- rep(line_col, length.out = length(res))
if(!is.null(eps)) {
eps <- abs(eps)
col[res > eps] <- hcl2hex(my.h[1], max.c, my.l[2], ...)
col[res < -eps] <- hcl2hex(my.h[2], max.c, my.l[2], ...)
}
dim(col) <- dim(x)
ltytmp <- ifelse(x > 0, lty[1], lty[2])
if(!is.null(eps))
ltytmp[abs(x) < abs(eps)] <- lty[1]
dim(ltytmp) <- dim(x)
return(structure(list(col = col, fill = fill, lty = ltytmp), class = "gpar"))
}
attr(rval, "legend") <- legend
attr(rval, "p.value") <- p.value
return(rval)
}
class(shading_hcl) <- "grapcon_generator"
shading_Friendly <- function(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = c(2/3, 0), lty = 1:2, interpolate = c(2, 4), eps = 0.01, line_col = "black", ...)
{
shading_hsv(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = h, v = 1, lty = lty, interpolate = interpolate,
eps = eps, line_col = line_col, p.value = NA, ...)
}
class(shading_Friendly) <- "grapcon_generator"
shading_Friendly2 <- function(observed = NULL, residuals = NULL, expected = NULL, df = NULL, lty = 1:2, interpolate = c(2, 4), eps = 0.01, line_col = "black", ...)
{
shading_hcl(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
lty = lty, interpolate = interpolate,
eps = eps, line_col = line_col, p.value = NA, ...)
}
class(shading_Friendly2) <- "grapcon_generator"
shading_sieve <-
function(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = c(260, 0), lty = 1:2, interpolate = c(2, 4), eps = 0.01,
line_col = "black", ...)
{
shading_hcl(observed = NULL, residuals = NULL, expected = NULL,
df = NULL, h = h, c = 100, l = 50, lty = lty,
interpolate = interpolate,
eps = eps, line_col = line_col, p.value = NA, ...)
}
class(shading_sieve) <- "grapcon_generator"
shading_max <- function(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = NULL, c = NULL, l = NULL, lty = 1, eps = NULL, line_col = "black", level = c(0.9, 0.99), n = 1000, ...)
{
stopifnot(length(dim(observed)) == 2)
## set defaults
if(is.null(h)) h <- c(260, 0)
if(is.null(c)) c <- c(100, 20)
if(is.null(l)) l <- c(90, 50)
obs.test <- coindep_test(observed, n = n)
col.bins <- obs.test$qdist(sort(level))
rval <- shading_hcl(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
h = h, c = c, l = l, interpolate = col.bins, lty = lty,
eps = eps, line_col = line_col, p.value = obs.test$p.value, ...)
return(rval)
}
class(shading_max) <- "grapcon_generator"
shading_binary <- function(observed = NULL, residuals = NULL, expected = NULL, df = NULL,
col = NULL)
{
## check col argument
if(is.null(col)) col <- hcl2hex(c(260, 0), 50, 70)
col <- rep(col, length.out = 2)
## store color information for legend
legend <- list(col = col[2:1], col.bins = 0, lty = NULL, lty.bins = NULL)
## set up function that computes color/lty from residuals
rval <- function(x)
gpar(fill = ifelse(x > 0, col[1], col[2]))
## add meta information for legend
attr(rval, "legend") <- legend
attr(rval, "p.value") <- NULL
rval
}
class(shading_binary) <- "grapcon_generator"
shading_Marimekko <-
function(x, fill = NULL, byrow = FALSE)
{
if (is.null(fill)) fill <- colorspace::rainbow_hcl
d <- dim(x)
l1 <- if (length(d) > 1L) d[2] else d
l2 <- if (length(d) > 1L) d[1] else 1
if (is.function(fill)) fill <- fill(l1)
fill <- if (byrow) rep(fill, l2) else rep(fill, each = l2)
gpar(col = NA, lty = "solid",
fill = array(fill, dim = d))
}
shading_diagonal <-
function(x, fill = NULL)
{
if (is.null(fill)) fill <- colorspace::rainbow_hcl
d <- dim(x)
if (length(d) < 1L)
stop("Need matrix or array!")
if (d[1] != d[2])
stop("First two dimensions need to be of same length!")
if (is.function(fill)) fill <- fill(d[1])
  tp <- toeplitz(seq_len(d[1]))
gpar(col = NA, lty = "solid",
fill = array(rep(fill[tp], d[1]), dim = d))
}
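# Usage sketch (assumes the vcd strucplot framework, where these generators
# are passed via 'gp'; 'Titanic' is the base R data set):
# library(vcd)
# mosaic(~ Class + Survived, data = Titanic, gp = shading_hcl)
# mosaic(~ Class + Survived, data = Titanic, gp = shading_Friendly)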
|
#' Produce fibre configuration files for a given set of DOCats
#'
#' @description This is the high-level main TAZ function for running the Tiler software
#' to generate fibre configuration files. Users must provide DOcats for targets, sky, standards and guides.
#'
#' @param configdir Directory path location of Configure software
#' @param workingDir The directory you want to do the tiling in
#' @param DOcat A target catalogue
#' @param DATAguide A guide star catalogue
#' @param DATAstspec A standard star catalogue
#' @param DATAsky A sky positions catalogue
#' @param N_D02A Number of configurations to generate in D02A
#' @param N_D02B Number of configurations to generate in D02B
#' @param N_D03 Number of configurations to generate in D03
#' @param N_D10 Number of configurations to generate in D10
#' @param D02A_startPlate Start plate number of D02A configurations (0 or 1)
#' @param D02B_startPlate Start plate number of D02B configurations (0 or 1)
#' @param D03_startPlate Start plate number of D03 configurations (0 or 1)
#' @param D10_startPlate Start plate number of D10 configurations (0 or 1)
#' @param logName log filename to write progress to
#' @param verbose tell me what's going on: 0=nothing, 1=something, 2=everything
#' @param cores number of cores to use (max four in this case) - currently redundant
#' @param makeNormal Make general configurations
#' @param makeBackUp Also make configuration files for bright sources.
#' @param BrightCut Magnitude to cut at for bright sources. Only takes effect if makeBackUp==TRUE.
#' @param FaintCut Magnitude to cut at for faint sources.
#' @return List of paths to new configuration files
#' @examples
#' runTiler(configdir='/Applications/configure-8.4-MacOsX_ElCapitan_x86_64', workingDir='.', DOcat='data/observing/run1_2017_12/2017_12_19/DOCats/DObjCat_2017_12_19.tab',
#' DATAguide='data/observing/run1_2017_12/2017_12_19/DOCats/DGuideCat_2017_12_19.tab', DATAstspec='data/observing/run1_2017_12/2017_12_19/DOCats/DStdCat_2017_12_19.tab',
#' DATAsky='data/observing/run1_2017_12/2017_12_19/DOCats/DGSkyCat_2017_12_19.tab', N_D02A=1, N_D02B=1, N_D03=2, N_D10=3, D02A_startPlate=0, D02B_startPlate=1, D03_startPlate=0,
#' D10_startPlate=1, logName='tempLog.txt', verbose=1, cores=4)
#' # will make one configuration in D02A and D02B, two in D03, and 3 in D10.
#' @export
runTiler<-function(configdir=configdir, workingDir=workingDir, DOcat=DOcat, DATAguide=DATAguide, DATAstspec=DATAstspec, DATAsky=DATAsky, N_D02A=N_D02A, N_D02B=N_D02B, N_D03=N_D03, N_D10=N_D10, D02A_startPlate=0, D02B_startPlate=0, D03_startPlate=0, D10_startPlate=0, logName=logName, verbose=verbose, cores=cores, makeNormal=TRUE, makeBackUp=FALSE,BrightCut=20, FaintCut=30){
#registerDoParallel(cores=cores)
if (verbose>1){cat(' - Setting up plate sequence....', '\n')}
write(' - Setting up plate sequence....', file=logName, append=T)
plate_D02A<-c()
if (N_D02A==1) {plate_D02A<-D02A_startPlate}
if (N_D02A>1) {
int<-D02A_startPlate
for (i in 1:N_D02A){
plate_D02A<-c(plate_D02A,int)
if (int==0){int<-1}else{int<-0}
}
}
plate_D02B<-c()
if (N_D02B==1) {plate_D02B<-D02B_startPlate}
if (N_D02B>1) {
int<-D02B_startPlate
for (i in 1:N_D02B){
plate_D02B<-c(plate_D02B,int)
if (int==0){int<-1}else{int<-0}
}
}
plate_D03<-c()
if (N_D03==1) {plate_D03<-D03_startPlate}
if (N_D03>1) {
int<-D03_startPlate
for (i in 1:N_D03){
plate_D03<-c(plate_D03,int)
if (int==0){int<-1}else{int<-0}
}
}
plate_D10<-c()
if (N_D10==1) {plate_D10<-D10_startPlate}
if (N_D10>1) {
int<-D10_startPlate
for (i in 1:N_D10){
plate_D10<-c(plate_D10,int)
if (int==0){int<-1}else{int<-0}
}
}
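  # Note: the four blocks above just alternate plate numbers 0/1 starting from
  # each field's start plate. A compact equivalent (sketch only, not wired in):
  #   altPlates <- function(start, n) if (n > 0) (seq_len(n) - 1 + start) %% 2 else c()
  #   altPlates(0, 3)  # 0 1 0, i.e. plate_D10 for D10_startPlate=0, N_D10=3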
if (verbose>1){cat(' - Running Tiling....', '\n')}
write(' - Running Tiling....', file=logName, append=T)
#oldWD<-getwd()
#setwd(workingDir)
tileplus_M<-c(N_D02A,N_D02B,N_D03,N_D10)
position_M<-c('D02A','D02B', 'D03', 'D10')
plate_M<-c(plate_D02A, plate_D02B, plate_D03, plate_D10)
DOcat=read.table(DOcat,header=T)
DOcat=DOcat[which(DOcat[,'MAG']<=FaintCut),]
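  # The guide/standard/sky catalogues are assigned with <<- below so that
  # Tiler() can pick them up from the global environment.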
DATAguide<<-read.table(DATAguide,header=T)
DATAstspec<<-read.table(DATAstspec,header=T)
DATAsky<<-read.table(DATAsky,header=T)
DOcatBright<-DOcat[which((DOcat[,'MAG']<BrightCut & DOcat[,'PRIORITY_CLASS']>1) | DOcat[,'PRIORITY_CLASS']==9 | DOcat[,'PRIORITY_CLASS']==10),]
#**** Mapping down to bump up OzDES fillers:
DOcat[which(DOcat[,'PRIORITY_CLASS']==4),'PRIORITY_CLASS']<-3
DOcat[which(DOcat[,'PRIORITY_CLASS']==5),'PRIORITY_CLASS']<-4
DOcat[which(DOcat[,'PRIORITY_CLASS']==6),'PRIORITY_CLASS']<-5
DOcat[which(DOcat[,'PRIORITY_CLASS']==7),'PRIORITY_CLASS']<-6
DOcat[which(DOcat[,'PRIORITY_CLASS']==8),'PRIORITY_CLASS']<-7
DOcat[which(DOcat[,'PRIORITY_CLASS']==9),'PRIORITY_CLASS']<-8
#**** Flip for bright:
tmp<-DOcatBright
tmp[which(DOcatBright[,'PRIORITY_CLASS']==4),'PRIORITY_CLASS']<-8
tmp[which(DOcatBright[,'PRIORITY_CLASS']==5),'PRIORITY_CLASS']<-7
tmp[which(DOcatBright[,'PRIORITY_CLASS']==6),'PRIORITY_CLASS']<-5
tmp[which(DOcatBright[,'PRIORITY_CLASS']==7),'PRIORITY_CLASS']<-5
tmp[which(DOcatBright[,'PRIORITY_CLASS']==8),'PRIORITY_CLASS']<-4
DOcatBright<-tmp
DOcatBright[which(DOcatBright[,'PRIORITY_CLASS']==9),'PRIORITY_CLASS']<-8
#configdirFiles=paste(configdir,'/data_files',sep='')
updateExtFibs(configdir=configdir)
system(paste('cp ', configdir,'/data_files/* ', configdir,'/', sep=''))
#a = foreach(i=1:length(tileplus_M)) %dopar% {
# Tiler(tileplus=tileplus_M[i], position=position_M[i], plate=plate_M[i], runfolder=TRUE, TileCat=DOcat, runoffset=1, restrict=rep('all',tileplus_M[i]), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')
#}
#if (N_D02A>0){Tiler(tileplus=N_D02A, position='D02A', plate=plate_D02A, runfolder=TRUE, TileCat=DOcat, runoffset=1, restrict=rep('all',N_D02A), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')}
#if (N_D02B>0){Tiler(tileplus=N_D02B, position='D02B', plate=plate_D02B, runfolder=TRUE, TileCat=DOcat, runoffset=1, restrict=rep('all',N_D02B), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')}
#if (N_D03>0){Tiler(tileplus=N_D03, position='D03', plate=plate_D03, runfolder=TRUE, TileCat=DOcat, runoffset=1, restrict=rep('all',N_D03), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')}
#if (N_D10>0){Tiler(tileplus=N_D10, position='D10', plate=plate_D10, runfolder=TRUE, TileCat=DOcat, runoffset=1, restrict=rep('all',N_D10), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')}
ConfigNames<-c()
if (makeNormal==TRUE){
if (N_D02A>0){
for (i in 1:N_D02A){
Tiler(tileplus=1, position='D02A', plate=plate_D02A[i], runfolder=TRUE, TileCat=DOcat, runoffset=i, restrict=rep('all',1), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')
configFile<-list.files(path=paste(workingDir,'/D02A/TargetFork',i,'-',i,'P',plate_D02A[i],sep=''), pattern='*.lis')
configFile<-paste(workingDir,'/D02A/TargetFork',i,'-',i,'P',plate_D02A[i],'/',configFile, sep='')
configFile<-configFile[1]
#cat(red('New Config File=',configFile),'\n')
#cat(red('Previous PRIORITY CLASS = 1:',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
Config<-readLines(configFile)
Config<-Config[10:409]
ID<-c()
TYPE<-c()
for (j in 1:length(Config)){
tmp<-strsplit(Config[j], ' ')[[1]]
tmp<-tmp[which(tmp!="")]
if (length(tmp)==16){
ID<-c(ID,tmp[3])
TYPE<-c(TYPE,tmp[16])
}
}
obsID<-as.numeric(substr(ID[which(substr(ID,1,2)=='D2')],2,nchar(ID[which(substr(ID,1,2)=='D2')])))
DOcat[which(DOcat[,'CATAID'] %in% obsID ==TRUE),'PRIORITY_CLASS']<-1
}
#cat(red('New PRIORITY CLASS=',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
#cat('\n')
}
if (N_D02B>0){
for (i in 1:N_D02B){
Tiler(tileplus=1, position='D02B', plate=plate_D02B[i], runfolder=TRUE, TileCat=DOcat, runoffset=i, restrict=rep('all',1), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')
configFile<-list.files(path=paste(workingDir,'/D02B/TargetFork',i,'-',i,'P',plate_D02B[i],sep=''), pattern='*.lis')
configFile<-paste(workingDir,'/D02B/TargetFork',i,'-',i,'P',plate_D02B[i],'/',configFile, sep='')
configFile<-configFile[1]
#cat(red('New Config File=',configFile),'\n')
#cat(red('Previous PRIORITY CLASS = 1:',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
Config<-readLines(configFile)
Config<-Config[10:409]
ID<-c()
TYPE<-c()
for (j in 1:length(Config)){
tmp<-strsplit(Config[j], ' ')[[1]]
tmp<-tmp[which(tmp!="")]
if (length(tmp)==16){
ID<-c(ID,tmp[3])
TYPE<-c(TYPE,tmp[16])
}
}
obsID<-as.numeric(substr(ID[which(substr(ID,1,2)=='D2')],2,nchar(ID[which(substr(ID,1,2)=='D2')])))
DOcat[which(DOcat[,'CATAID'] %in% obsID ==TRUE),'PRIORITY_CLASS']<-1
}
#cat(red('New PRIORITY CLASS=',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
#cat('\n')
}
if (N_D03>0){
for (i in 1:N_D03){
Tiler(tileplus=1, position='D03', plate=plate_D03[i], runfolder=TRUE, TileCat=DOcat, runoffset=i, restrict=rep('all',1), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')
configFile<-list.files(path=paste(workingDir,'/D03/TargetFork',i,'-',i,'P',plate_D03[i],sep=''), pattern='*.lis')
configFile<-paste(workingDir,'/D03/TargetFork',i,'-',i,'P',plate_D03[i],'/',configFile, sep='')
configFile<-configFile[1]
#cat(red('New Config File=',configFile),'\n')
#cat(red('Previous PRIORITY CLASS = 1:',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
Config<-readLines(configFile)
Config<-Config[10:409]
ID<-c()
TYPE<-c()
for (j in 1:length(Config)){
tmp<-strsplit(Config[j], ' ')[[1]]
tmp<-tmp[which(tmp!="")]
if (length(tmp)==16){
ID<-c(ID,tmp[3])
TYPE<-c(TYPE,tmp[16])
}
}
obsID<-as.numeric(substr(ID[which(substr(ID,1,2)=='D3')],2,nchar(ID[which(substr(ID,1,2)=='D3')])))
DOcat[which(DOcat[,'CATAID'] %in% obsID ==TRUE),'PRIORITY_CLASS']<-1
}
#cat(red('New PRIORITY CLASS=',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
#cat('\n')
}
if (N_D10>0){
for (i in 1:N_D10){
Tiler(tileplus=1, position='D10', plate=plate_D10[i], runfolder=TRUE, TileCat=DOcat, runoffset=i, restrict=rep('all',1), updatefib=!exists('Fibres'), basedir=workingDir, configdir=configdir, append_letter='D')
configFile<-list.files(path=paste(workingDir,'/D10/TargetFork',i,'-',i,'P',plate_D10[i],sep=''), pattern='*.lis')
configFile<-paste(workingDir,'/D10/TargetFork',i,'-',i,'P',plate_D10[i],'/',configFile, sep='')
configFile<-configFile[1]
#cat(red('New Config File=',configFile),'\n')
#cat(red('Previous PRIORITY CLASS = 1:',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
Config<-readLines(configFile)
Config<-Config[10:409]
ID<-c()
TYPE<-c()
for (j in 1:length(Config)){
tmp<-strsplit(Config[j], ' ')[[1]]
tmp<-tmp[which(tmp!="")]
if (length(tmp)==16){
ID<-c(ID,tmp[3])
TYPE<-c(TYPE,tmp[16])
}
}
obsID<-as.numeric(substr(ID[which(substr(ID,1,2)=='D1')],2,nchar(ID[which(substr(ID,1,2)=='D1')])))
DOcat[which(DOcat[,'CATAID'] %in% obsID ==TRUE),'PRIORITY_CLASS']<-1
}
#cat(red('New PRIORITY CLASS=',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
#cat('\n')
}
#setwd(oldWD)
pathD02A<-paste(workingDir,'/D02A/',list.files(path=paste(workingDir,'/D02A',sep=''), pattern='Targ*'), sep='')
pathD02B<-paste(workingDir,'/D02B/',list.files(path=paste(workingDir,'/D02B',sep=''), pattern='Targ*'), sep='')
pathD03<-paste(workingDir,'/D03/',list.files(path=paste(workingDir,'/D03',sep=''), pattern='Targ*'), sep='')
pathD10<-paste(workingDir,'/D10/',list.files(path=paste(workingDir,'/D10',sep=''), pattern='Targ*'), sep='')
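  # Busy-wait until each field has written its expected number of .lis
  # configuration files to disk before copying them into TileFiles.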
count<-c(0,0,0,0)
while (sum(count)<4){
countD02A<-0
for (i in 1:length(pathD02A)){
countD02A<-countD02A+length(list.files(path=pathD02A[i], pattern='*.lis'))
}
countD02B<-0
for (i in 1:length(pathD02B)){
countD02B<-countD02B+length(list.files(path=pathD02B[i], pattern='*.lis'))
}
countD03<-0
for (i in 1:length(pathD03)){
countD03<-countD03+length(list.files(path=pathD03[i], pattern='*.lis'))
}
countD10<-0
for (i in 1:length(pathD10)){
countD10<-countD10+length(list.files(path=pathD10[i], pattern='*.lis'))
}
if (countD02A>=N_D02A) {count[1]<-1}
if (countD02B>=N_D02B) {count[2]<-1}
if (countD03>=N_D03) {count[3]<-1}
if (countD10>=N_D10) {count[4]<-1}
}
if (verbose>1){cat(' - Tiling Complete', '\n')}
write(' - Tiling Complete', file=logName, append=T)
dateF<-strsplit(workingDir, '/')[[1]][4]
dateF2<-paste(c(strsplit(workingDir, '/')[[1]][1:4], dateF), sep='', collapse='/')
if (N_D02A>0){
for (i in 1:length(pathD02A)){
system(paste('cp ',pathD02A[i],'/D02A* ', workingDir,'/TileFiles/',sep=''))
}
}
if (N_D02B>0){
for (i in 1:length(pathD02B)){
system(paste('cp ',pathD02B[i],'/D02B* ', workingDir,'/TileFiles/',sep=''))
}
}
if (N_D03>0){
for (i in 1:length(pathD03)){
system(paste('cp ',pathD03[i],'/D03* ', workingDir,'/TileFiles/',sep=''))
}
}
if (N_D10>0){
for (i in 1:length(pathD10)){
system(paste('cp ',pathD10[i],'/D10* ', workingDir,'/TileFiles/',sep=''))
}
}
listF<-list.files(path=paste(workingDir,'/TileFiles/',sep=''),pattern='*')
listM<-list.files(path=paste(workingDir,'/TileFiles/',sep=''),pattern='*.lis')
for (j in 1:length(listF)){
system(paste('mv ',workingDir,'/TileFiles/',listF[j] ,' ', workingDir,'/TileFiles/', substr(listF[j],1,3), '_', dateF,'_',substr(listF[j],5,nchar(listF[j])), sep=''))
}
for (j in 1:length(listM)){
ConfigNames<-c(ConfigNames,paste(workingDir,'/TileFiles/', substr(listM[j],1,3), '_', dateF,'_',substr(listM[j],5,nchar(listM[j])), sep=''))
}
}
if (makeBackUp==TRUE){
workingDirBright<-paste(workingDir,'Backup', sep='')
system(paste('mkdir ',workingDirBright, sep=''))
system(paste('mkdir ',workingDirBright,'/TileFiles', sep=''))
system(paste('cp ', workingDir,'/SurveyInfo.txt ', workingDirBright,'/',sep=''))
if (N_D02A>0){
for (i in 1:N_D02A){
Tiler(tileplus=1, position='D02A', plate=plate_D02A[i], runfolder=TRUE, TileCat=DOcatBright, runoffset=i, restrict=rep('all',1), updatefib=!exists('Fibres'), basedir=workingDirBright, configdir=configdir, append_letter='D')
configFile<-list.files(path=paste(workingDirBright,'/D02A/TargetFork',i,'-',i,'P',plate_D02A[i],sep=''), pattern='*.lis')
configFile<-paste(workingDirBright,'/D02A/TargetFork',i,'-',i,'P',plate_D02A[i],'/',configFile, sep='')
configFile<-configFile[1]
#cat(red('New Config File=',configFile),'\n')
#cat(red('Previous PRIORITY CLASS = 1:',length(which(DOcatBright[,'PRIORITY_CLASS']==1))),'\n')
Config<-readLines(configFile)
Config<-Config[10:409]
ID<-c()
TYPE<-c()
for (j in 1:length(Config)){
tmp<-strsplit(Config[j], ' ')[[1]]
tmp<-tmp[which(tmp!="")]
if (length(tmp)==16){
ID<-c(ID,tmp[3])
TYPE<-c(TYPE,tmp[16])
}
}
obsID<-as.numeric(substr(ID[which(substr(ID,1,2)=='D2')],2,nchar(ID[which(substr(ID,1,2)=='D2')])))
DOcatBright[which(DOcatBright[,'CATAID'] %in% obsID ==TRUE),'PRIORITY_CLASS']<-1
}
#cat(red('New PRIORITY CLASS=',length(which(DOcatBright[,'PRIORITY_CLASS']==1))),'\n')
#cat('\n')
}
if (N_D02B>0){
for (i in 1:N_D02B){
Tiler(tileplus=1, position='D02B', plate=plate_D02B[i], runfolder=TRUE, TileCat=DOcatBright, runoffset=i, restrict=rep('all',1), updatefib=!exists('Fibres'), basedir=workingDirBright, configdir=configdir, append_letter='D')
configFile<-list.files(path=paste(workingDirBright,'/D02B/TargetFork',i,'-',i,'P',plate_D02B[i],sep=''), pattern='*.lis')
configFile<-paste(workingDirBright,'/D02B/TargetFork',i,'-',i,'P',plate_D02B[i],'/',configFile, sep='')
configFile<-configFile[1]
#cat(red('New Config File=',configFile),'\n')
#cat(red('Previous PRIORITY CLASS = 1:',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
Config<-readLines(configFile)
Config<-Config[10:409]
ID<-c()
TYPE<-c()
for (j in 1:length(Config)){
tmp<-strsplit(Config[j], ' ')[[1]]
tmp<-tmp[which(tmp!="")]
if (length(tmp)==16){
ID<-c(ID,tmp[3])
TYPE<-c(TYPE,tmp[16])
}
}
      obsID<-as.numeric(substr(ID[which(substr(ID,1,2)=='D2')],2,nchar(ID[which(substr(ID,1,2)=='D2')])))
DOcatBright[which(DOcatBright[,'CATAID'] %in% obsID ==TRUE),'PRIORITY_CLASS']<-1
}
#cat(red('New PRIORITY CLASS=',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
#cat('\n')
}
if (N_D03>0){
for (i in 1:N_D03){
Tiler(tileplus=1, position='D03', plate=plate_D03[i], runfolder=TRUE, TileCat=DOcatBright, runoffset=i, restrict=rep('all',1), updatefib=!exists('Fibres'), basedir=workingDirBright, configdir=configdir, append_letter='D')
configFile<-list.files(path=paste(workingDirBright,'/D03/TargetFork',i,'-',i,'P',plate_D03[i],sep=''), pattern='*.lis')
configFile<-paste(workingDirBright,'/D03/TargetFork',i,'-',i,'P',plate_D03[i],'/',configFile, sep='')
configFile<-configFile[1]
#cat(red('New Config File=',configFile),'\n')
#cat(red('Previous PRIORITY CLASS = 1:',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
Config<-readLines(configFile)
Config<-Config[10:409]
ID<-c()
TYPE<-c()
for (j in 1:length(Config)){
tmp<-strsplit(Config[j], ' ')[[1]]
tmp<-tmp[which(tmp!="")]
if (length(tmp)==16){
ID<-c(ID,tmp[3])
TYPE<-c(TYPE,tmp[16])
}
}
obsID<-as.numeric(substr(ID[which(substr(ID,1,2)=='D3')],2,nchar(ID[which(substr(ID,1,2)=='D3')])))
DOcatBright[which(DOcatBright[,'CATAID'] %in% obsID ==TRUE),'PRIORITY_CLASS']<-1
}
#cat(red('New PRIORITY CLASS=',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
#cat('\n')
}
if (N_D10>0){
for (i in 1:N_D10){
Tiler(tileplus=1, position='D10', plate=plate_D10[i], runfolder=TRUE, TileCat=DOcatBright, runoffset=i, restrict=rep('all',1), updatefib=!exists('Fibres'), basedir=workingDirBright, configdir=configdir, append_letter='D')
configFile<-list.files(path=paste(workingDirBright,'/D10/TargetFork',i,'-',i,'P',plate_D10[i],sep=''), pattern='*.lis')
configFile<-paste(workingDirBright,'/D10/TargetFork',i,'-',i,'P',plate_D10[i],'/',configFile, sep='')
configFile<-configFile[1]
#cat(red('New Config File=',configFile),'\n')
#cat(red('Previous PRIORITY CLASS = 1:',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
Config<-readLines(configFile)
Config<-Config[10:409]
ID<-c()
TYPE<-c()
for (j in 1:length(Config)){
tmp<-strsplit(Config[j], ' ')[[1]]
tmp<-tmp[which(tmp!="")]
if (length(tmp)==16){
ID<-c(ID,tmp[3])
TYPE<-c(TYPE,tmp[16])
}
}
obsID<-as.numeric(substr(ID[which(substr(ID,1,2)=='D1')],2,nchar(ID[which(substr(ID,1,2)=='D1')])))
DOcatBright[which(DOcatBright[,'CATAID'] %in% obsID ==TRUE),'PRIORITY_CLASS']<-1
}
#cat(red('New PRIORITY CLASS=',length(which(DOcat[,'PRIORITY_CLASS']==1))),'\n')
#cat('\n')
}
#setwd(oldWD)
pathD02ABright<-paste(workingDirBright,'/D02A/',list.files(path=paste(workingDirBright,'/D02A',sep=''), pattern='Targ*'), sep='')
pathD02BBright<-paste(workingDirBright,'/D02B/',list.files(path=paste(workingDirBright,'/D02B',sep=''), pattern='Targ*'), sep='')
pathD03Bright<-paste(workingDirBright,'/D03/',list.files(path=paste(workingDirBright,'/D03',sep=''), pattern='Targ*'), sep='')
pathD10Bright<-paste(workingDirBright,'/D10/',list.files(path=paste(workingDirBright,'/D10',sep=''), pattern='Targ*'), sep='')
count<-c(0,0,0,0)
while (sum(count)<4){
countD02A<-0
for (i in 1:length(pathD02ABright)){
countD02A<-countD02A+length(list.files(path=pathD02ABright[i], pattern='*.lis'))
}
countD02B<-0
for (i in 1:length(pathD02BBright)){
countD02B<-countD02B+length(list.files(path=pathD02BBright[i], pattern='*.lis'))
}
countD03<-0
for (i in 1:length(pathD03Bright)){
countD03<-countD03+length(list.files(path=pathD03Bright[i], pattern='*.lis'))
}
countD10<-0
for (i in 1:length(pathD10Bright)){
countD10<-countD10+length(list.files(path=pathD10Bright[i], pattern='*.lis'))
}
if (countD02A>=N_D02A) {count[1]<-1}
if (countD02B>=N_D02B) {count[2]<-1}
if (countD03>=N_D03) {count[3]<-1}
if (countD10>=N_D10) {count[4]<-1}
}
if (verbose>1){cat(' - Bright Tiling Complete', '\n')}
write(' - Bright Tiling Complete', file=logName, append=T)
dateF<-strsplit(workingDirBright, '/')[[1]][4]
dateF2<-paste(c(strsplit(workingDirBright, '/')[[1]][1:4], dateF), sep='', collapse='/')
if (N_D02A>0){
for (i in 1:length(pathD02ABright)){
system(paste('cp ',pathD02ABright[i],'/D02A* ', workingDirBright,'/TileFiles/',sep=''))
}
}
if (N_D02B>0){
for (i in 1:length(pathD02BBright)){
system(paste('cp ',pathD02BBright[i],'/D02B* ', workingDirBright,'/TileFiles/',sep=''))
}
}
if (N_D03>0){
for (i in 1:length(pathD03Bright)){
system(paste('cp ',pathD03Bright[i],'/D03* ', workingDirBright,'/TileFiles/',sep=''))
}
}
if (N_D10>0){
for (i in 1:length(pathD10Bright)){
system(paste('cp ',pathD10Bright[i],'/D10* ', workingDirBright,'/TileFiles/',sep=''))
}
}
listF<-list.files(path=paste(workingDirBright,'/TileFiles/',sep=''),pattern='*')
listM<-list.files(path=paste(workingDirBright,'/TileFiles/',sep=''),pattern='*.lis')
for (j in 1:length(listF)){
system(paste('mv ',workingDirBright,'/TileFiles/',listF[j] ,' ', workingDirBright,'/TileFiles/', substr(listF[j],1,3), '_', dateF,'_BackUp_',substr(listF[j],5,nchar(listF[j])), sep=''))
}
for (j in 1:length(listM)){
ConfigNames<-c(ConfigNames,paste(workingDirBright,'/TileFiles/', substr(listM[j],1,3), '_', dateF,'_BackUp_',substr(listM[j],5,nchar(listM[j])), sep=''))
}
}
return(ConfigNames)
}
|
/DEVILSTAZ/R/runTiler.R
|
no_license
|
ICRAR/DEVILS-TAZ
|
R
| false | false | 24,908 |
r
|
##install.packages("igraph")
library(igraph)
# your data
mat <- as.matrix(read.table(text=
"node X1 X2 X3 X4 X5 X6
1 0 3 7 4 NA NA
2 3 0 2 NA NA 9
3 7 2 0 1 3 6
4 4 NA 1 0 3 NA
5 NA NA 3 3 0 3
6 NA 9 6 NA 3 0", header=T))
# prepare data for graph functions - set NA to zero to indicate no direct edge
nms <- mat[,1]
mat <- mat[, -1]
colnames(mat) <- rownames(mat) <- nms
mat[is.na(mat)] <- 0
# create graph from adjacency matrix
g <- graph.adjacency(mat, weighted=TRUE)
plot(g)
# Shortest-path distance from node 1 to node 5 (Dijkstra)
shortest.paths(g, algorithm = "dijkstra", v = 1, to = 5)
g <- make_ring(10)
plot(g)
distances(g)
shortest_paths(g,5,1)
all_shortest_paths(g, 1, 6:8)
mean_distance(g)
## Weighted shortest paths
el <- matrix(nc=3, byrow=TRUE,
c(1,2,0, 1,3,2, 1,4,1, 2,3,0, 2,5,5, 2,6,2, 3,2,1, 3,4,1,
3,7,1, 4,3,0, 4,7,2, 5,6,2, 5,8,8, 6,3,2, 6,7,1, 6,9,1,
6,10,3, 8,6,1, 8,9,1, 9,10,4) )
g2 <- add_edges(make_empty_graph(10), t(el[,1:2]), weight=el[,3])
plot(g2)
distances(g2, mode="out")
shortest_paths(g2,from=4,to=9,algorithm="dijkstra")
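# Follow-up sketch: recover the edge path 4 -> 9 and check that its total
# weight matches the Dijkstra distance above.
sp <- shortest_paths(g2, from = 4, to = 9, output = "epath")
sum(E(g2)$weight[sp$epath[[1]]]) # should equal distances(g2, v = 4, to = 9)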
|
/dijkstra_shortest_path.R
|
no_license
|
Squiercg/recologia
|
R
| false | false | 1,124 |
r
|
keggMap <- function(pairID, fcTable){
print(paste0("pairID: ", pairID))
#Map the gene lists and fold change values to KEGG using the M. musculus kegg set from gageData
library(pathview)
library(gage)
library(gageData)
library(org.Mm.eg.db)
egMap <- as.list(org.Mm.egENSEMBL2EG)
exp.fc <- c()
for(gNameInd in 1:length(fcTable)){
egName <- egMap[[strsplit(names(fcTable)[gNameInd], ".", fixed=T)[[1]][1]]]
if(!is.null(egName)){
for(gName in egName){
if(!gName %in% names(exp.fc)){
exp.fc <- c(exp.fc, fcTable[gNameInd])
names(exp.fc)[length(exp.fc)] <- gName
}
}
}
}
#Species-specific
data(kegg.sets.mm)
fc.kegg.p <- gage(exp.fc, gsets = kegg.sets.mm, ref = NULL, samp = NULL)
sel <- fc.kegg.p$greater[, "p.val"] < 0.05 & !is.na(fc.kegg.p$greater[, "p.val"])
path.ids <- rownames(fc.kegg.p$greater)[sel]
sel.l <- fc.kegg.p$less[, "p.val"] < 0.05 & !is.na(fc.kegg.p$less[,"p.val"])
path.ids.l <- rownames(fc.kegg.p$less)[sel.l]
path.ids2 <- substr(c(path.ids, path.ids.l), 1, 8)
write.table(fc.kegg.p, file=paste0("gageOut/", pairID, ".txt"), quote=F, sep="\t")
if(length(path.ids2) == 0){
path.ids2 <- ""
}
return(path.ids2)
}
keggGraph <- function(pairID, path.ids, exp.fc){
library(pathview)
library(org.Mm.eg.db)
egMap <- as.list(org.Mm.egENSEMBL2EG)
egCounts <- data.frame(matrix(nrow=0, ncol=ncol(exp.fc)))
for(gNameInd in 1:nrow(exp.fc)){
egName <- egMap[[strsplit(rownames(exp.fc)[gNameInd], ".", fixed=T)[[1]][1]]]
if(!is.null(egName)){
for(gName in egName){
if(!gName %in% rownames(egCounts)){
egCounts <- rbind(egCounts, exp.fc[gNameInd,])
rownames(egCounts)[nrow(egCounts)] <- gName
}
}
}
}
  #Use the KEGG M. musculus pathway charts to chart our own expression patterns
pway <- paste0(pairID, "pathways")
out.suffix="edger"
dir.create(pway)
setwd(pway)
#Species-specific
pv.out.list <- sapply(path.ids, function(pid) pathview(gene.data = egCounts, pathway.id = pid,
species = "mmu", out.suffix=out.suffix, limit=list(gene=5, cpd=5), expand.node=T,
bins = list(gene = 20, cpd= 20), low = list(gene = "blue", cpd = "blue")))
setwd("..")
}
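# Hypothetical driver (sketch only; object names are illustrative, not from
# this repo): 'fc' would be a named numeric vector of log fold changes keyed
# by Ensembl gene IDs, 'counts' a data frame of per-sample fold changes with
# Ensembl IDs as row names.
# path.ids <- keggMap("pairA", fc)
# if (any(path.ids != "")) keggGraph("pairA", path.ids, counts)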
|
/counts/diffPath.R
|
no_license
|
ploverso/R-analysis-scripts
|
R
| false | false | 2,168 |
r
|
#' Bootstrap Equating Error
#'
#' These functions return bootstrap standard errors, bias, and RMSE of
#' equating. A summary method estimates mean and weighted mean errors over the
#' score scale.
#'
#' Samples are drawn of size \code{xn} and \code{yn}, with replacement, from
#' each score distribution. Form Y equivalents of each form X score are then
#' obtained using either the arguments in the equating output or those
#' provided. This process is repeated \code{reps} times. Standard errors are
#' calculated as standard deviations over replications for each score point;
#' bias is the mean equated score over replications, minus the criterion; and
#' RMSE is the square root of the squared standard error and squared bias
#' combined.
#'
#' The bootstrap method for objects of class \dQuote{\code{equate}} is designed
#' to be called from within \code{\link{equate}}. It simply extracts the
#' necessary arguments from the equating output before bootstrapping.
#'
#' When each element in \code{args} is a named list of equating arguments,
#' multiple equatings are performed at each replication in the bootstrapping.
#'
#' The summary method returns a \code{data.frame} of mean standard errors,
#' bias, and rmse, and weighted and absolute means, as applicable.
#'
#' @param x either an equating object, obtained with the \code{\link{equate}}
#' function, or a score distribution of class \dQuote{\code{\link{freqtab}}}.
#' @param xp,yp optional frequency tables replacing those equated in \code{x},
#' used for parametric bootstrapping.
#' @param y score distribution of class \dQuote{\code{\link{freqtab}}}.
#' @param xn,yn integers specifying the number of scores to sample from each
#' distribution at each replication (default is the total number observed in
#' each).
#' @param reps number of bootstrap replications.
#' @param crit vector of equated scores serving as the criterion equating
#' function when calculating bootstrap bias and RMSE, both of which are
#' returned when \code{crit} is specified.
#' @param args named list of equating arguments, passed to
#' \code{\link{equate}}, specifying, e.g., the equating type and method. See
#' below for details.
#' @param eqs logical, with default \code{FALSE}, indicating whether or not the
#' matrices of equating functions (one column per replication, per equating)
#' should be returned.
#' @param object \code{bootstrap} output to be summarized.
#' @param weights vector of weights to be used in calculating weighted average
#' errors with \code{summary}, defaulting to the frequencies in
#' \code{margin(object$x)}.
#' @param subset vector indicating a subset of the score scale for which errors
#' should be summarized.
#' @param \dots further arguments passed to or from other methods.
#' @return With \code{bootstrap}, a list is returned, containing arguments
#' supplied for \code{x}, \code{y}, \code{reps}, \code{xn}, \code{yn}, and
#' \code{args}. For a single equating, the \code{mean} equating function over
#' replications and a vector of standard errors \code{se} are included,
#' along with vectors of \code{bias} and \code{rmse}, when \code{crit} is
#' provided, and a matrix of equating functions \code{eqs} when
#' \code{eqs = TRUE}. For multiple equatings, where each element of
#' \code{args} is a list of equating arguments, matrices are returned for the
#' mean functions, standard error, bias, and RMSE, and the equating functions
#' will be returned as a list of matrices. The \code{summary} method returns a
#' data frame of mean standard errors, bias, and rmse, and weighted and
#' absolute means, as applicable.
#' @author Anthony Albano \email{tony.d.albano@@gmail.com}
#' @seealso \code{\link{plot.bootstrap}}
#' @keywords methods
#' @examples
#'
#' # Parametric bootstrapping using smoothed
#' # frequency distributions
#' set.seed(111213)
#' x <- freqtab(KBneat$x, scales = list(0:36, 0:12))
#' y <- freqtab(KBneat$y, scales = list(0:36, 0:12))
#' xp <- loglinear(x, asfreqtab = TRUE)
#' yp <- loglinear(y, asfreqtab = TRUE)
#' crit <- equate(xp, yp, "e", "c")$conc$yx
#' eqargs <- list(m.t = list(type = "m", method = "t"),
#' l.t = list(type = "l", method = "t"))
#' bootout1 <- bootstrap(x = x, y = y, xn = 20, yn = 20,
#' crit = crit, args = eqargs, reps = 30)
#' plot(bootout1, out = "rmse", legendplace = "top",
#' addident = FALSE)
#'
#' # Bootstraps for an existing equating
#' eq <- equate(x, y, type = "m", method = "t")
#' bootout2 <- bootstrap(eq, xn = 100, yn = 100,
#' crit = crit, reps = 20)
#' summary(bootout2)
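#'
#' # The same standard errors recovered directly from the replications
#' # (a sketch; re-runs the bootstrap with eqs = TRUE):
#' bootout3 <- bootstrap(eq, xn = 100, yn = 100,
#'   crit = crit, reps = 20, eqs = TRUE)
#' all.equal(apply(bootout3$eqs, 1, sd), bootout3$se)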
#'
#' @export
bootstrap <- function(x, ...) UseMethod("bootstrap")
#' @describeIn bootstrap Default boostrap method for
#' \dQuote{\code{\link{freqtab}}} objects.
#' @export
bootstrap.default <- function(x, y, ...) {
  if(!is.freqtab(x) | !is.freqtab(y))
stop("'x' and 'y' must be frequency tables")
else do.call(bootstrap.freqtab, c(list(x = x, y = y),
list(...)))
}
#----------------------------------------------------------------
# Method for equate class
#' @describeIn bootstrap Method for \dQuote{\code{\link{equate}}} objects.
#' @export
bootstrap.equate <- function(x, xp = x$x, yp = x$y, ...) {
dots <- list(...)
if(is.character(xp))
xp <- x[[xp]]
if(is.character(yp))
yp <- x[[yp]]
rmnames <- c("x", "y", "yx", "concordance",
"bootstraps", "coefficients", "synthstats",
"xsynthetic", "ysynthetic", "xsmooth", "ysmooth",
"points")
args <- x[-pmatch(rmnames, names(x), nomatch = 0)]
dots[pmatch(rmnames, names(dots), nomatch = 0)] <- NULL
mi <- pmatch(names(dots), names(args), nomatch = 0)
args[mi] <- dots[as.logical(mi)]
dots <- dots[!as.logical(mi)]
do.call(bootstrap.freqtab, c(list("x" = xp, "y" = yp),
args, dots))
}
#----------------------------------------------------------------
# Method for freqtab class
#' @describeIn bootstrap Bootstrap method for \dQuote{\code{\link{freqtab}}}
#' objects.
#' @export
bootstrap.freqtab <- function(x, y, xn = sum(x),
yn = sum(y), reps = 100, crit, args,
eqs = FALSE, ...) {
  dots <- list(...)[names(list(...)) != ""]
if(missing(args)) {
args <- list(dots)
neq <- 1
args[[1]]["verbose"] <- FALSE
}
else {
neq <- length(args)
for(i in 1:neq) {
args[[i]][names(dots)] <- dots
args[[i]]["verbose"] <- FALSE
}
}
if(missing(y)) {
yn <- xn
y <- NULL
xs <- scales(x, 1)
ys <- scales(x, 2)
xd <- as.data.frame(as.data.frame(x)[x > 0, 1:2])
xp <- x[x > 0]/sum(x)
xni <- nrow(xd)
eqmats <- lapply(rep(NA, neq), matrix,
nrow = length(xs), ncol = reps)
for(i in 1:reps) {
xi <- sample.int(xni, xn, replace = TRUE, prob = xp)
xtemp <- freqtab(xd[xi, ], scales = list(xs, ys))
for(j in 1:neq)
eqmats[[j]][, i] <- do.call("equate",
c(list(x = xtemp), args[[j]]))
}
}
else {
nx <- margins(x)
ny <- margins(y)
xs <- scales(x, 1:nx)
ys <- scales(y, 1:ny)
xd <- as.data.frame(as.data.frame(x)[x > 0, 1:nx])
yd <- as.data.frame(as.data.frame(y)[y > 0, 1:ny])
xp <- x[x > 0]/sum(x)
yp <- y[y > 0]/sum(y)
xni <- nrow(xd)
yni <- nrow(yd)
eqmats <- lapply(rep(NA, neq), matrix,
nrow = length(scales(x, 1)), ncol = reps)
for(i in 1:reps) {
xi <- sample.int(xni, xn, replace = TRUE, prob = xp)
xtemp <- freqtab(xd[xi, ], scales = xs)
yi <- sample.int(yni, yn, replace = TRUE, prob = yp)
ytemp <- freqtab(yd[yi, ], scales = ys)
for(j in 1:neq)
eqmats[[j]][, i] <- do.call("equate",
c(list(x = xtemp, y = ytemp), args[[j]]))
}
}
names(eqmats) <- names(args)
out <- list(x = x, y = y, reps = reps, xn = xn, yn = yn,
args = args, mean = sapply(eqmats, apply, 1, mean),
se = sapply(eqmats, apply, 1, sd))
if(!missing(crit)) {
out$bias <- sapply(eqmats, apply, 1, mean) - crit
out$rmse <- sqrt(out$bias^2 + out$se^2)
}
if(neq == 1)
out[-(1:6)] <- lapply(out[-(1:6)], c)
if(eqs)
out$eqs <- if(neq == 1) eqmats[[1]] else eqmats
out <- as.bootstrap(out)
return(out)
}
#----------------------------------------------------------------
# Assign bootstrap class
as.bootstrap <- function(x) {
class(x) <- "bootstrap"
return(x)
}
#----------------------------------------------------------------
# Test for bootstrap class
is.bootstrap <- function(x) {
return(class(x)[1] == "bootstrap")
}
#----------------------------------------------------------------
# Print method
#' @export
print.bootstrap <- function(x, ...) {
nf <- length(x$args)
cat("\nBootstrap Equating Error\n\n")
cat("Design:", if(is.null(x$y)) "single group"
else if(margins(x$x) == 1) "equivalent groups"
else "nonequivalent groups", "\n\n")
cat("Replications:", x$reps, "\n\n")
cat("Sample Sizes: x =", paste(x$xn, "; y =", sep = ""),
x$yn, "\n\n")
}
#----------------------------------------------------------------
# Summary method
# @describeIn bootstrap Summary method for \dQuote{\code{bootstrap}} objects.
#' @rdname bootstrap
#' @export
summary.bootstrap <- function(object, weights,
subset, ...) {
if(missing(subset))
subset <- 1:length(scales(object$x))
if(missing(weights))
weights <- c(margin(object$x))[subset]/
sum(margin(object$x)[subset])
tempse <- cbind(object$se)[subset, , drop = FALSE]
out <- data.frame(se = apply(tempse, 2, mean),
w.se = apply(tempse * weights, 2, mean))
if(!is.null(object$bias)) {
tempbias <- cbind(object$bias)[subset, , drop = FALSE]
out$bias <- apply(tempbias, 2, mean)
out$a.bias <- apply(abs(tempbias), 2, mean)
out$w.bias <- apply(tempbias * weights, 2, mean)
out$wa.bias <- apply(abs(tempbias * weights), 2, mean)
out$rmse <- apply(cbind(object$rmse)[subset, , drop = FALSE],
2, mean)
out$w.rmse <- apply(cbind(object$rmse)[subset, , drop = FALSE] *
weights, 2, mean)
}
class(out) <- c("summary.bootstrap", "data.frame")
return(out)
}
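#----------------------------------------------------------------
# A brief usage sketch (an illustration, not from the package docs):
# summarizing bootstrap error over part of the form X scale, using the
# 'bootout1' object created in the examples above; 6:31 (scores 5-30 on
# the 0:36 scale) is an arbitrary choice.
# summary(bootout1, subset = 6:31)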
|
/R/bootstrap.R
|
no_license
|
Yage66/equate
|
R
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stb_fb-data.R
\docType{data}
\name{stb_fb}
\alias{stb_fb}
\title{Stickleback Fish Body Shape}
\format{
data.frame
}
\source{
TBA
}
\usage{
data(stb_fb)
}
\description{
Example Shapes of the fishbody (fb) of a few sticklebacks (stb)
}
\examples{
data(stb_fb)
library(dplyr)
i = unique(stb_fb$ind)[1]
fb = stb_fb \%>\% filter(ind == i)
par(mfrow=c(1,2))
KRMr::shplot(x_fb = fb$x_fb, w_fb = fb$w_fb,
            z_fbU = fb$z_fbU, z_fbL = fb$z_fbL)
}
\references{
TBA
}
\keyword{datasets}
|
/man/stb_fb.Rd
|
permissive
|
SvenGastauer/KRMr
|
R
|
###############################################################################
# Bag of Words
# author: khmelkoff
###############################################################################
# Loading libraries, preparing the training set ###############################
library(tm)
library(SnowballC)
library(e1071)
library(caret)
library(randomForest)
training <- read.delim(unz("labeledTrainData.tsv.zip",
"labeledTrainData.tsv"),
header = TRUE,
sep = "\t",
quote = "",
as.is=TRUE)
# Looking at the data #########################################################
# Checking dimensions, inspecting the first review
dim(training)
r1_length <- nchar(as.character(training[1,3]))
r1 <- training[1,3]
paste(substr(r1,1,700),"...")
# Getting rid of HTML tags
cleanHTML <- function(x) {
return(gsub("<.*?>", "", x))
}
r1 <- cleanHTML(r1)
# Keeping only text, removing single-letter and zero-length words
onlyText <- function(x) {
x <- gsub("'s", "", x)
return(gsub("[^a-zA-Z]", " ", x))
}
r1 <- onlyText(r1)
# Tokenizing
tokenize <- function(x) {
x <- tolower(x)
x <- unlist(strsplit(x, split=" "))
}
r1 <- tokenize(r1)
r1 <- r1[nchar(r1)>1]
# Creating the stop-word list
stopWords <- stopwords("en")
r1 <- r1[!r1 %in% stopWords]
r1[1:20]
# Processing all 25000 records
rws <- sapply(1:nrow(training), function(x){
  # Progress indicator
if(x %% 1000 == 0) print(paste(x, "reviews processed"))
rw <- training[x,3]
rw <- cleanHTML(rw)
rw <- onlyText(rw)
rw <- tokenize(rw)
rw <- rw[nchar(rw)>1]
rw <- rw[!rw %in% stopWords]
  paste(rw, collapse=" ") # Gluing the tokens back into a single text
})
# Building the "Bag of Words" #################################################
train_vector <- VectorSource(rws) # Source vector
train_corpus <- Corpus(train_vector, # Corpus
                       readerControl = list(language = "en"))
train_bag <- DocumentTermMatrix(train_corpus, # Document/term matrix
                                control=list(stemming=TRUE))
train_bag <- removeSparseTerms(train_bag, 0.9982) # Dropping overly rare terms
dim(train_bag)
# Looking at the list of the most frequent terms
hight_freq <- findFreqTerms(train_bag, 5000, Inf)
inspect(train_bag[1:4, hight_freq[1:10]])
# Building the training data frame from the document/term matrix
train_df <- data.frame(as.matrix(train_bag[1:25000,]))
train_df <- cbind(training$sentiment, train_df)
# Reduced data frame for the article
# train_df <- data.frame(as.matrix(train_bag[1:1000,hight_freq]))
# train_df <- cbind(training$sentiment[1:1000], train_df)
names(train_df)[1] <- "sentiment"
vocab <- names(train_df)[-1] # Building the vocabulary (for the test set)
# Removing objects that are no longer needed
rm(train_bag)
rm(train_corpus)
rm(train_vector)
rm(training)
rm(rws)
# Growing the Random Forest ###################################################
t_start <- Sys.time()
set.seed(3113)
forest <- train(as.factor(sentiment) ~., data=train_df,
method="rf",
trControl=trainControl(method="cv",number=5),
prox=TRUE,
ntree=100,
do.trace=10,
allowParallel=TRUE)
t_end <- Sys.time()
# Looking at the model and the training time
t_end-t_start
print(forest)
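# Optional (a sketch, not part of the original script): caret's varImp() can
# show which stemmed terms contribute most to the random forest's predictions.
imp <- varImp(forest)
plot(imp, top = 20)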
# Loading and processing the hold-out test set ################################
testing <- read.delim(unz("testData.tsv.zip",
"testData.tsv"),
header = TRUE,
sep = "\t",
quote = "")
# Checking dimensions
dim(testing)
# Processing the test reviews
rws <- sapply(1:nrow(testing), function(x){
if(x %% 1000 == 0) print(paste(x, "reviews processed"))
rw <- testing[x,2]
rw <- cleanHTML(rw)
rw <- onlyText(rw)
rw <- tokenize(rw)
rw <- rw[nchar(rw)>1]
rw <- rw[!rw %in% stopWords]
paste(rw, collapse=" ")
})
# Building the review vector, the corpus, and the document/term matrix #######
test_vector <- VectorSource(rws)
test_corpus <- Corpus(test_vector,
readerControl = list(language = "en"))
test_bag <- DocumentTermMatrix(test_corpus,
control=list(stemming=TRUE,
dictionary = vocab))
test_df <- data.frame(as.matrix(test_bag[1:25000,]))
sentiment <- rep(0, 25000)
test_df <- cbind(testing[1:25000,1], sentiment, test_df)
names(test_df)[1] <- "id"
# Predicting sentiment ########################################################
test_df[,2] <- predict(forest, newdata = test_df)
# Saving the result to csv
write.csv(test_df[,1:2], file="Submission.csv",
quote=FALSE,
row.names=FALSE)
|
/BagofWords_1.R
|
no_license
|
Sandy4321/BagOfWords
|
R
|
library(tidyverse) # read_csv(), the dplyr verbs, etc. (used below but not loaded originally)
library(lubridate)
setwd("/Users/jacobwynne/Dropbox/sunapee_ensemble")
mantemptime <- read_csv("mantemptime.csv")
newtemp <- read_csv("new_temp.csv")
newtemp <- na.omit(newtemp)
mantemptime$source <- "manual"
newtemp$source <- "buoy"
bound_newtemp <- full_join(newtemp, mantemptime) # full_join already keeps all rows; 'all.x' is a merge() argument, not a dplyr one
dups <- bound_newtemp[c("datetime", "Depth_meter")]
filtered_buoy_manual <- bound_newtemp[!duplicated(dups),]
mantemptime <- filter(mantemptime, datetime <= "2007-08-27")
buoy_manual <- rbind(mantemptime, filtered_buoy_manual)
buoy_manual$time <- as.character("00:00:00")
buoy_manual$datetime <- as.Date(buoy_manual$datetime)
formt <- "%Y-%m-%d %H:%M:%S"
buoy_manual$datetime <- as.POSIXct(paste(buoy_manual$datetime, buoy_manual$time), format=formt, tz = 'UTC')
str(buoy_manual)
buoy_manual <- select(buoy_manual, -time, -source)
write.csv(buoy_manual, "~/Dropbox/sunapee_LER_projections/LER_inputs/calibration_inputs/buoy_manual.csv", row.names = FALSE)
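# Optional sanity check (a sketch, not part of the original script): count any
# datetime/depth pairs that are still duplicated after the merge and rbind.
sum(duplicated(buoy_manual[c("datetime", "Depth_meter")]))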
|
/scripts/wrangling_inputs/boundtemps_filtering_manual.R
|
no_license
|
jacob8776/sunapee_LER_projections
|
R
|
#'This is a general retryfromJSON function with a wrapper around try().
#'In general this function is to be used with rjson::fromJSON.
#'
#'
#'@param .FUN0 the parsing expression to be evaluated (e.g. an rjson::fromJSON call).
#'@param raw.data the raw API payload that \code{.FUN0} parses.
#'@param .FUN1 an expression that re-derives \code{raw.data} after a re-download.
#'@param url the URL of the API resource.
#'@param .FUN2 an expression that re-downloads the resource from \code{url}.
#'@param max.attempts the maximum number of parse attempts (default 5).
#'@param sleep.seconds the number of seconds to wait before retrying (default 30).
#'
#'@return it returns API content
#'@export
retryfromJSON <- function(.FUN0,raw.data,.FUN1,url,.FUN2,max.attempts=5,sleep.seconds=30)
{
#utils::setInternet2(use=TRUE)
  x0 <- NULL
  raw.data <- raw.data # force evaluation of the supplied payload
  url <- url # force evaluation of the supplied url
for (i in 1:max.attempts)
{
f0 <- substitute(.FUN0)
f1 <- substitute(.FUN1)
f2 <- substitute(.FUN2)
x0 <- try({#utils::setInternet2(use=TRUE)
eval(f0)})
    if (inherits(x0, "try-error"))
{
#print("fromJSON Error")
      Sys.sleep(sleep.seconds) # wait before retrying
x0 <- try({#utils::setInternet2(use=TRUE)
raw.data<-eval(f2)
raw.data<-eval(f1)
eval(f0)})
}
else
{
return (x0)
}
}
x0
}
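# A hypothetical usage sketch (the URL and the RCurl/rjson calls below are
# illustrative assumptions, not part of this package):
# url <- "https://www.googleapis.com/youtube/v3/videos?id=VIDEO_ID&key=API_KEY"
# raw.data <- RCurl::getURL(url)
# parsed <- retryfromJSON(rjson::fromJSON(raw.data), # .FUN0: the parse attempt
#                         raw.data,                  # the current payload
#                         raw.data,                  # .FUN1: re-derive the payload
#                         url,
#                         RCurl::getURL(url))        # .FUN2: the re-download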
|
/R/retryfromJSON.R
|
no_license
|
aritrab/YouTubeR
|
R
|
#loading libraries
library(tidyverse)
library(car)
library(MASS)
#reading the data
#as per the data dictionaries, there are only numeric variables and categorical variables.
#We can assume that all strings are categorical. So, stringsAsFactors can be TRUE.
data <- read.csv("CarPrice_Assignment.csv", stringsAsFactors = TRUE)
#checking the data
View(data)
str(data)
#We can see that a lot of variables are Categorical in the Data Dictionary but they have been read as integers or numbers.
columns <- c(2:9, 15:16, 18)
data[,columns] <- lapply(data[,columns], factor)
str(data)
#Now, our data frame reflects the data dictionary
#### DATA PREPARATION AND CLEANING
###Part 0.1 - Checking data for basic inconsitencies
##Checking for NA
sum(is.na(data))
#There are no NA values
##Checking for duplicates
length(unique(data$car_ID))
#We can see there are no duplicated rows or rows with the same ID.
##Checking for outliers
#We'll check for outliers in numerical variables
#variable: wheelbase
boxplot(data$wheelbase)
wb <- quantile(data$wheelbase, seq(0,1,0.01))
wb
plot(wb)
#Even though our boxplot shows two outliers, the quantile distribution doesn't show a huge spike in the last two values
#variable: carlength
boxplot(data$carlength)
cl <- quantile(data$carlength, seq(0,1,0.01))
cl
plot(cl)
#As with wheelbase, the outliers aren't as extreme for us to treat them.
#variable: carwidth
boxplot(data$carwidth)
cw <- quantile(data$carwidth, seq(0,1,0.01))
cw
plot(cw)
#variable: carheight
boxplot(data$carheight)
#carheight has no outliers
#variable: curbweight
boxplot(data$curbweight)
#curbweight has no outliers
#variable: enginesize
boxplot(data$enginesize)
es <- quantile(data$enginesize, seq(0,1,0.01))
es
plot(es)
#Since enginesize has six outliers, three of which are extremely out of range, we'll cap all values above 91% (183.00) to 183.00
data$enginesize[which(data$enginesize>183.00)] <- 183.00
#Checking again
boxplot(data$enginesize)
#Outliers have been capped
#variable: boreratio
boxplot(data$boreratio)
#boreratio has no outliers
#variable: stroke
boxplot(data$stroke)
s <- quantile(data$stroke, seq(0,1,0.01))
s
plot(s)
#We'll floor the values which are less than 2% (2.6400) to 2.6400
data$stroke[which(data$stroke < 2.6400)] <- 2.6400
#Checking again
boxplot(data$stroke)
#variable: compressionratio
boxplot(data$compressionratio)
cr <- quantile(data$compressionratio, seq(0,1,0.01))
cr
plot(cr)
#There are an extreme number of outliers. We'll cap all values above 90% (10.9400) to 10.9400
data$compressionratio[which(data$compressionratio > 10.9400)] <- 10.9400
#Checking again
boxplot(data$compressionratio)
#variable: price
boxplot(data$price)
p <- quantile(data$price, seq(0,1,0.01))
p
plot(p)
#Even if price has outliers, we will not treat it because it is our dependent variable.
#removing all temporary variables
rm(cl, cr, cw, es, p, s, wb, columns)
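#The capping/flooring pattern used above could be captured in small helpers
#(a sketch only; the script applies these replacements inline):
#cap_at <- function(x, upper) { x[x > upper] <- upper; x }
#floor_at <- function(x, lower) { x[x < lower] <- lower; x }
#e.g., data$enginesize <- cap_at(data$enginesize, 183.00)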
###Part 0.3 - Dropping car_ID
#Creating a backup data frame
backup <- data
#restore data, uncomment below line
#data <- backup
#We will be dropping car_ID as it is just a sequence and doesn't help us in predicting anything.
data <- data[,-1]
###Part 0.4 - Splitting CarName into Company and Model
data <- data %>% separate(col = CarName, into = c("company", "model"), sep = " ", extra = "merge")
View(data)
#Checking whether the separate command created NA values
sum(is.na(data))
#There are two NA values now, as shown by our Warning messages for separate function
#Since we cannot determine which models these are, we will fill these with the company name itself.
data[139,"model"] <- data[139,"company"]
data[142,"model"] <- data[142,"company"]
sum(is.na(data))
#There are no NA values in the data set anymore. We can proceed with creating dummy variables from the factors.
###Part 0.5 - Creating Dummy Variables
#checking structure of dataset again
str(data)
#Our separate operation made the company and model character type. We have to convert them into factors again.
columns <- c(2,3)
data[,columns] <- lapply(data[,columns], factor)
rm(columns)
str(data)
#Creating another backup to serve as a restore point just in case
backup2 <- data
#restore dataset, uncomment the next line
#data <- backup2
##Let us treat the variables with two levels first
##variable: fueltype
summary(data$fueltype)
#Our assumption is that diesel will be 0, gas (not diesel) will be 1
levels(data$fueltype) <- c(0,1)
data$fueltype <- as.integer(as.character(data$fueltype)) # via as.character so the 0/1 labels are kept (plain as.integer would give 1/2)
##variable: aspiration
summary(data$aspiration)
#Our assumption is that std will be 0, turbo (not std) will be 1
levels(data$aspiration) <- c(0,1)
data$aspiration <- as.integer(as.character(data$aspiration))
##variable: doornumber
summary(data$doornumber)
#Our assumption is that four will be 0, two (not four) will be 1
levels(data$doornumber) <- c(0,1)
data$doornumber <- as.integer(as.character(data$doornumber))
##variable: enginelocation
summary(data$enginelocation)
#Our assumption is that front will be 0, rear (not front) will be 1
levels(data$enginelocation) <- c(0,1)
data$enginelocation <- as.integer(as.character(data$enginelocation))
##This finishes the factors with two levels.
backup3 <- data
#restore backup, uncomment next line
#data <- backup3
##Let us deal with the factors with more than two levels.
##variable: symboling
summary(data$symboling)
dummy_1 <- data.frame(model.matrix( ~symboling, data = data))
dummy_1 <- dummy_1[,-1]
data_1 <- cbind(data[,-1], dummy_1)
##variable: carbody
dummy_2 <- data.frame(model.matrix( ~carbody, data = data))
dummy_2 <- dummy_2[,-1]
data_2 <- cbind(data_1[,-6], dummy_2)
##variable: drivewheel
dummy_3 <- data.frame(model.matrix( ~drivewheel, data = data))
dummy_3 <- dummy_3[,-1]
data_3 <- cbind(data_2[,-6], dummy_3)
##variable: enginetype
dummy_4 <- data.frame(model.matrix( ~enginetype, data = data))
dummy_4 <- dummy_4[,-1]
data_4 <- cbind(data_3[,-12], dummy_4)
##variable: cylindernumber
dummy_5 <- data.frame(model.matrix( ~cylindernumber, data = data))
dummy_5 <- dummy_5[,-1]
data_5 <- cbind(data_4[,-12], dummy_5)
##variable: fuelsystem
dummy_6 <- data.frame(model.matrix( ~fuelsystem, data = data))
dummy_6 <- dummy_6[,-1]
data_6 <- cbind(data_5[,-13], dummy_6)
##variable: company
#Before we treat the company, we can see that there are way too many discrepancies in the company column.
summary(data_6$company)
#We can see that there are value-pairs which should be the same. We'll treat them one-by-one.
##maxda and mazda. This could've happened because "z" and "x" are positioned adjacent on the keyboard.
data_6$company <- gsub("maxda", "mazda", data_6$company)
##Nissan and nissan. This is a simple case error.
data_6$company <- gsub("Nissan", "nissan", data_6$company)
##volkswagen, vokswagen and vw are all the same ones
data_6$company <- gsub("vw", "volkswagen", data_6$company)
data_6$company <- gsub("vokswagen", "volkswagen", data_6$company)
##toyouta and toyota is also a misspelling
data_6$company <- gsub("toyouta", "toyota", data_6$company)
##porsche and porcshce is the last misspelling.
data_6$company <- gsub("porcshce", "porsche", data_6$company)
data_6$company <- as.factor(data_6$company)
summary(data_6$company)
dummy_7 <- data.frame(model.matrix( ~company, data = data_6))
dummy_7 <- dummy_7[,-1]
data_7 <- cbind(data_6[,-1], dummy_7)
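#The repeated model.matrix pattern above could be factored into a helper
#(a sketch only, not used by this script):
#make_dummies <- function(df, col) {
#  d <- data.frame(model.matrix(reformulate(col), data = df))[, -1]
#  cbind(df[, names(df) != col], d)
#}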
###Part 0.6 - Removing model column as required by the problem
data_7 <- data_7[,-1]
###Part 0.7 - Cleaning Environment
data <- data_7
backup <- backup3
rm(backup2,backup3,data_1,data_2,data_3,data_4,data_5,data_6,data_7,dummy_1,dummy_2,dummy_3,dummy_4,dummy_5,dummy_6,dummy_7)
str(data)
###Part 0.8 - Derived Metrics
##carvolume
#We can convert the carheight, carlength, carwidth variables to carvolume. This would decrease the number of variables.
data$carvolume <- data$carlength * data$carheight * data$carwidth
columns <- c(6,7,8)
data <- data[,-columns]
rm(columns)
###Part 0.9 - Splitting Data into Train and Test
sample_size <- floor(0.75 * nrow(data))
set.seed(42)
train_indices <- sample(seq_len(nrow(data)), size = sample_size)
train <- data[train_indices, ]
test <- data[-train_indices, ]
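#Optional quick check (not in the original script): confirm the 75/25 split sizes.
#nrow(train); nrow(test)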
#### MODEL BUILDING
###Part 1.1 - First model
model_1 <- lm(price~., train)
summary(model_1)
#We'll use the first model's summary to only include significant variables in our model.
#Let us apply StepAIC
step <- stepAIC(model_1, direction = "both")
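#The stepAIC result is an lm object carrying the selected terms; model_2 below
#appears to restate those retained terms explicitly. To inspect them directly
#(optional):
#formula(step)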
###Part 1.2 - Creating A New Model Iteratively
model_2 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 +
symboling0 + symboling1 + symboling2 + symboling3 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv +
enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + fuelsystem2bbl +
fuelsystemmpfi + companyaudi + companybmw + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth +
companyporsche + companytoyota + companyvolkswagen + companyvolvo +
carvolume, train)
summary(model_2)
vif(model_2)
##symboling2 has a high VIF(19.338327) and has relatively low significance (p-value = 0.104208)
##We can remove it.
model_3 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 +
symboling0 + symboling1 + symboling3 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv +
enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + fuelsystem2bbl +
fuelsystemmpfi + companyaudi + companybmw + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth +
companyporsche + companytoyota + companyvolkswagen + companyvolvo +
carvolume, train)
summary(model_3)
#The adjusted R-squared is roughly the same. Nothing much has changed in the model.
vif(model_3)
##carvolume has relatively high VIF(15.275585) and has relatively low significance (p-value = 0.147415)
## We can remove it.
model_4 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 +
symboling0 + symboling1 + symboling3 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv +
enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + fuelsystem2bbl +
fuelsystemmpfi + companyaudi + companybmw + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth +
companyporsche + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_4)
#The adjusted R-squared is roughly the same. Nothing much has changed. So, we can proceed further.
vif(model_4)
#symboling3 has now become insignificant and its vif is also more than 2 (4.100132), so we'll remove it.
model_5 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 +
symboling0 + symboling1 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv +
enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + fuelsystem2bbl +
fuelsystemmpfi + companyaudi + companybmw + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth +
companyporsche + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_5)
vif(model_5)
#enginetypel has become insignificant also and its vif is 4.8, so we'll remove that next.
model_6 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 +
symboling0 + symboling1 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + fuelsystem2bbl +
fuelsystemmpfi + companyaudi + companybmw + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth +
companyporsche + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_6)
vif(model_6)
#companybmw has been insignificant for the last three models, and its vif is also more than 2 (5+), so we'll remove it.
model_7 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 + symboling1 + symboling0 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + fuelsystem2bbl +
fuelsystemmpfi + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth +
companyporsche + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_7)
vif(model_7)
#companyporsche has suddenly become insignificant. Let's check for correlation between companyporsche and companybmw
cor(data$companyporsche, data$companybmw)
#they're not correlated which is good to know. We'll remove companyporsche
model_8 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 + symboling1 + symboling0 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + fuelsystem2bbl +
fuelsystemmpfi + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_8)
vif(model_8)
#fuelsystemmpfi has high vif (8.536430) and low significance, we'll remove it.
model_9 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 + symboling1 + symboling0 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + fuelsystem2bbl + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_9)
vif(model_9)
#fuelsystem2bbl has suddenly become insignificant. Let us check for correlation between that and fuelsystemmpfi
cor(data$fuelsystem2bbl, data$fuelsystemmpfi)
#They have relatively significant negative correlation (-0.634). We'll remove fuelsystem2bbl as it has low significance now and high vif (4.1)
model_10 <- lm(price ~ fueltype + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 + symboling1 + symboling0 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_10)
vif(model_10)
#fueltype has suddenly become insignificant. Let us check for correlation between that and fuelsystem2bbl
cor(data$fueltype, data$fuelsystem2bbl)
#The correlation is low (0.226). fueltype is insignifant and it has vif > 2 (3.041), we'll remove it next.
model_11 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + peakrpm + citympg + symboling.1 + symboling1 + symboling0 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_11)
vif(model_11)
#peakrpm has vif (4.7) and is less significant relatively (p value: 0.062), we'll remove it.
model_12 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + citympg + symboling.1 + symboling1 + symboling0 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_12)
vif(model_12)
#next up is symboling0, which has had low significance for a lot of previous models; it also has a high vif (3.5)
model_13 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + citympg + symboling.1 + symboling1 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_13)
vif(model_13)
#symboling.1 has suddenly become insignificant. Let us check correlation.
cor(data$symboling.1, data$symboling0)
#The correlation is low. We can remove this.
model_14 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + citympg + symboling1 + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_14)
vif(model_14)
#We can remove either of symboling1 and carbodyhardtop as both have low significance and their vifs are comparable. Let us remove symboling1
model_15 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + citympg + carbodyhardtop +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_15)
vif(model_15)
#next up is carbodyhardtop
model_16 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + citympg +
carbodyhatchback + carbodysedan + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_16)
vif(model_16)
#carbodysedan has high vif (5.38) and low significance (p-value: 0.04)
model_17 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + citympg + carbodyhatchback + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_17)
vif(model_17)
#carbodyhatchback and carbodywagon are now insignificant. This could be because body type matters only when all body styles are considered; if one is removed, the others become insignificant as well.
#Still, they have vif under 2, so we'll look at citympg, which is relatively less significant than the others and has a high vif (6.19)
model_18 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + carbodyhatchback + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_18)
vif(model_18)
#now, we will remove carbodyhatchback as it is insignificant
model_19 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + carbodywagon + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_19)
vif(model_19)
#now, the lowest significance (p-value: 0.075) is of carbodywagon. Even if it has a low vif, every other factor has high significance, so we will remove this.
model_20 <- lm(price ~ + enginelocation + curbweight + enginesize +
boreratio + horsepower + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_20)
vif(model_20)
#As all our factors now have high significance, we'll eliminate on the basis of highest vif. enginesize has vif 22.146.
model_21 <- lm(price ~ + enginelocation + curbweight +
boreratio + horsepower + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_21)
vif(model_21)
#boreratio has suddenly become insignificant; we'll check for correlation.
cor(data$boreratio, data$enginesize)
#There is relatively high correlation; boreratio only matters if we consider enginesize. Let us remove it.
model_22 <- lm(price ~ + enginelocation + curbweight + horsepower + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix +
cylindernumberthree + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_22)
vif(model_22)
#We will now remove cylindernumberthree
model_23 <- lm(price ~ + enginelocation + curbweight + horsepower + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + cylindernumbertwelve + companyaudi + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_23)
vif(model_23)
#enginetypeohcf and companyaudi have similar vifs and significance values. We can remove either. We'll remove companyaudi as it has remained of low significance for a while.
model_24 <- lm(price ~ + enginelocation + curbweight + horsepower + enginetypedohcv + enginetypeohc + enginetypeohcf + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + cylindernumbertwelve + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_24)
vif(model_24)
#enginetypeohcf will go next.
model_25 <- lm(price ~ + enginelocation + curbweight + horsepower + enginetypedohcv + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + cylindernumbertwelve + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_25)
vif(model_25)
#cylindernumbertwelve has low significance, we'll remove that next.
model_26 <- lm(price ~ + enginelocation + curbweight + horsepower + enginetypedohcv + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_26)
vif(model_26)
#next we will remove enginetypedohcv
model_27 <- lm(price ~ + enginelocation + curbweight + horsepower + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_27)
vif(model_27)
#enginetypeohc has low significance and a vif of 2.7; we will remove it next.
model_28 <- lm(price ~ + enginelocation + curbweight + horsepower + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + companychevrolet +
companydodge + companyhonda + companyisuzu + companymazda +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_28)
#Removing enginetypeohc has made a lot of company variables insignificant. This could be because they were correlated. However, the R-squared and adjusted R-squared have dropped significantly.
#We'll try making another model_28 removing the other choice instead.
model_28.1 <- lm(price ~ + enginelocation + curbweight + horsepower + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + companychevrolet +
companydodge + companyhonda + companyisuzu +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_28.1)
vif(model_28.1)
model_28 <- model_28.1
rm(model_28.1)
summary(model_28)
vif(model_28)
#companyisuzu and companymercury have very similar values. Perhaps, they are correlated.
cor(data$companyisuzu, data$companymercury)
#They are not correlated.
#Most of our vifs are now near 2; however, horsepower and curbweight are at 4+. Let us check for correlation
cor(data$horsepower, data$curbweight)
#These variables have relatively high correlation. Let us remove horsepower.
model_29 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + companychevrolet +
companydodge + companyhonda + companyisuzu +
companymercury + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_29)
vif(model_29)
#we'll remove companymercury next
model_30 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + companychevrolet +
companydodge + companyhonda + companyisuzu + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_30)
vif(model_30)
#Next, we'll remove companyisuzu since it has been insignificant for the last many models.
model_31 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + cylindernumbersix + companychevrolet +
companydodge + companyhonda + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_31)
vif(model_31)
#cylindernumbersix and cylindernumberfour have awfully high vif values. cylindernumberfour however is significant beyond doubt. Perhaps, they are correlated.
cor(data$cylindernumbersix, data$cylindernumberfour)
#They are relatively highly negatively correlated. Let us try removing cylindernumbersix
model_32 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + companychevrolet +
companydodge + companyhonda + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_32)
vif(model_32)
#companychevrolet has become insignificant. Let us check for correlation
cor(data$companychevrolet, data$cylindernumbersix)
#No correlation. We can remove it.
model_33 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour +
companydodge + companyhonda + companymitsubishi + companynissan + companyplymouth + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_33)
vif(model_33)
#next, we'll remove companyplymouth
model_34 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour +
companydodge + companyhonda + companymitsubishi + companynissan + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_34)
vif(model_34)
#Let us try removing companyhonda next
model_35 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour +
companydodge + companymitsubishi + companynissan + companytoyota + companyvolkswagen + companyvolvo, train)
summary(model_35)
vif(model_35)
#Let us try removing companyvolkswagen next
model_36 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour +
companydodge + companymitsubishi + companynissan + companytoyota + companyvolvo, train)
summary(model_36)
vif(model_36)
#Next, we'll remove companydodge
model_37 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + companymitsubishi + companynissan + companytoyota + companyvolvo, train)
summary(model_37)
vif(model_37)
#Next, companyvolvo will be removed as it is insignificant.
model_38 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + companymitsubishi + companynissan + companytoyota, train)
summary(model_38)
vif(model_38)
#Next companymitsubishi will have to go as it has the least significance
model_39 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + enginetyperotor +
cylindernumberfive + cylindernumberfour + companynissan + companytoyota, train)
summary(model_39)
vif(model_39)
#Next, enginetyperotor as it has low significance (p-value: 0.58)
model_40 <- lm(price ~ + enginelocation + curbweight + enginetypeohc +
cylindernumberfive + cylindernumberfour + companynissan + companytoyota, train)
summary(model_40)
vif(model_40)
#Next, companynissan
model_41 <- lm(price ~ + enginelocation + curbweight + enginetypeohc +
cylindernumberfive + cylindernumberfour + companytoyota, train)
summary(model_41)
vif(model_41)
#Next, we will remove cylindernumberfive as it has low significance and higher vif than companytoyota
model_42 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + cylindernumberfour + companytoyota, train)
summary(model_42)
vif(model_42)
model_43 <- lm(price ~ + enginelocation + curbweight + enginetypeohc + cylindernumberfour, train)
summary(model_43)
vif(model_43)
model_44 <- lm(price ~ + enginelocation + curbweight + cylindernumberfour, train)
summary(model_44)
vif(model_44)
#### We had models above which produced extremely high adjusted R-squared values where most factors were significant.
#### We have now reached a model which says that the price of a car depends on where the engine is located, what its curbweight is and whether it has four cylinders or not.
#### Yet, its adjusted R-squared is noticeably lower.
#### TESTING MODEL
### Part 1 - Final Model (model_44)
Predict_1 <- predict(model_44, test[,-14])
test$testprice <- Predict_1
r <- cor(test$price, test$testprice)
rsquared <- r^2
### Part 2 - Model 20 which has a lot of factors but all are significant.
Predict_2 <- predict(model_20, test[,-14])
test$testprice <- Predict_2
r_2 <- cor(test$price, test$testprice)
rsquared_2 <- r_2^2
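# A hedged sketch: compare the two candidate models on the held-out test set by
# RMSE as well as by squared correlation; uses only objects created above.
rmse_1 <- sqrt(mean((test$price - Predict_1)^2))
rmse_2 <- sqrt(mean((test$price - Predict_2)^2))
c(model_44_rmse = rmse_1, model_20_rmse = rmse_2)
c(model_44_rsq = rsquared, model_20_rsq = rsquared_2)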
## We can see that model_44 outperforms model_20. Most factors in model_20 have high VIFs, so multicollinearity is present.
#### SOLUTION
## Model 44 is the final model. As per this model, the price of a car is highly dependent on the curbweight.
## The next factor that determines the price is the enginelocation.
## Whether or not the car has four cylinders also matters in determining its price.
summary(model_44)
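# Hedged sketch: standardized coefficients put the three predictors on a common
# scale, which helps compare their relative contributions; assumes the named
# columns exist in train (they are the ones used in model_44).
train_std <- as.data.frame(scale(train[, c("price", "enginelocation",
                                           "curbweight", "cylindernumberfour")]))
summary(lm(price ~ enginelocation + curbweight + cylindernumberfour, train_std))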
|
/Solutions.R
|
no_license
|
DeepanshKhurana/linear-regression-predicting-car-prices
|
R
| false | false | 39,416 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/project_methods.R
\name{mizerMort}
\alias{mizerMort}
\title{Get total mortality rate needed to project standard mizer model}
\usage{
mizerMort(params, n, n_pp, n_other, t, f_mort, pred_mort, ...)
}
\arguments{
\item{params}{A \linkS4class{MizerParams} object}
\item{n}{A matrix of species abundances (species x size).}
\item{n_pp}{A vector of the resource abundance by size}
\item{n_other}{A list of abundances for other dynamical components of the
ecosystem}
\item{t}{The time for which to do the calculation (Not used by standard
mizer rate functions but useful for extensions with time-dependent
parameters.)}
\item{f_mort}{A two dimensional array (species x size) with the fishing
mortality}
\item{pred_mort}{A two dimensional array (species x size) with the predation
mortality}
\item{...}{Unused}
}
\value{
A named two dimensional array (species x size) with the total
mortality rates.
}
\description{
Calculates the total mortality rate \eqn{\mu_i(w)} (in units 1/year) on each
species by size from predation mortality, background mortality and fishing
mortality.
You would not usually call this
function directly but instead use \code{\link[=getMort]{getMort()}}, which then calls this
function unless an alternative function has been registered, see below.
}
\details{
If your model contains additional components that you added with
\code{\link[=setComponent]{setComponent()}} and for which you specified a \code{mort_fun} function then
the mortality inflicted by these components will be included in the returned
value.
}
\section{Your own mortality function}{
By default \code{\link[=getMort]{getMort()}} calls \code{\link[=mizerMort]{mizerMort()}}. However you can
replace this with your own alternative mortality function. If
your function is called \code{"myMort"} then you register it in a MizerParams
object \code{params} with
\if{html}{\out{<div class="sourceCode">}}\preformatted{params <- setRateFunction(params, "Mort", "myMort")
}\if{html}{\out{</div>}}
Your function will then be called instead of \code{\link[=mizerMort]{mizerMort()}}, with the
same arguments.
}
\seealso{
Other mizer rate functions:
\code{\link{mizerEGrowth}()},
\code{\link{mizerEReproAndGrowth}()},
\code{\link{mizerERepro}()},
\code{\link{mizerEncounter}()},
\code{\link{mizerFMortGear}()},
\code{\link{mizerFMort}()},
\code{\link{mizerFeedingLevel}()},
\code{\link{mizerPredMort}()},
\code{\link{mizerPredRate}()},
\code{\link{mizerRDI}()},
\code{\link{mizerRates}()},
\code{\link{mizerResourceMort}()}
}
\concept{mizer rate functions}
|
/man/mizerMort.Rd
|
no_license
|
sizespectrum/mizer
|
R
| false | true | 2,623 |
rd
|
## Gather columns into rows (wide-to-long reshape).
## gather(data, key, value, columns): key names the new column that will hold
## the old column names; value names the column that will hold their values.
library(tidyr)
gather(df, date, count, 2:9) # df is the wide-format input data frame
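## A hedged, self-contained toy example of the same reshape (names made up):
wide <- data.frame(id = 1:2, d1 = c(5, 3), d2 = c(7, 1))
gather(wide, date, count, 2:3) # -> columns: id, date, count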
|
/R/reshaping/gather.r
|
no_license
|
mitchelllisle/code_snippets
|
R
| false | false | 145 |
r
|
# homedir and tempdir are assumed to be defined before this point (user-specific paths)
wdir <- homedir
# make sure there is a directory homedir
if (!dir.exists(wdir)){
dir.create(wdir)
}
# make sure there is a directory tempdir
if (!dir.exists(tempdir)){
dir.create(tempdir)
}
setwd(dir=wdir)
mydir <- "/my/dir"
# file holding the CGC (Seven Bridges) API auth token
auth.token.filename <- file.path(mydir, "api/authtoken.txt")
auth.token <- scan(file=auth.token.filename, what="character")
max.iterations <- 10000
files.per.download <- 100
library(RJSONIO)
# gets all files, NOT just one for each case; you have to do that later
# NEXT ONES HAVE disease variable, but I took that out later; not
# relevant for this function
# out.gbm <- get.files.with.tumor.or.normal(disease="Glioblastoma Multiforme", tempdir, homedir, query.template.file="gbmqueryjul19.json", auth.token, tt.files.per.download= files.per.download, sample.type="Primary Tumor", output.csv = "gbm.july19.query.tumor.csv")
# disease="Glioblastoma Multiforme"; query.template.file="gbmqueryjul19.json"; tt.files.per.download= files.per.download; sample.type="Primary Tumor"; output.csv = "gbm.july19.query.tumor.csv"
# ASSUMES template file is in tempdir
# and that output.csv should be a file in homedir
get.files.with.tumor.or.normal <- function(tempdir, homedir, query.template.file, auth.token, tt.files.per.download, sample.type, output.csv){
currentdir <- getwd()
setwd(tempdir)
# read in template file, will edit its offset value
tt.template <- scan(file= query.template.file, what="character", sep="\n")
#
# which line of template file has offset in it?
#
offset.line.number <- grep(pattern="offset", x=tt.template)
if (length(offset.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the word offset in it.\n"))
}
# which line of template file has hasSampleType in it?
#
sample.type.line.number <- grep(pattern="hasSampleType", x=tt.template)
if (length(sample.type.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the word hasSampleType in it.\n"))
}
# get count of number of files
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query/total" --data @', query.template.file,' > test35.json'))
tt.nfiles <- as.numeric(fromJSON("test35.json"))
tt.files <- vector("list", length=0)
n.loops <- ceiling(tt.nfiles/tt.files.per.download)
for (tti in 1:n.loops){
cat("Working on loop ", tti , " of ", n.loops, "\n")
this.offset <- tt.files.per.download*(tti-1)
# make file with offset in it
tt.changed.template <- tt.template
tt.changed.template[offset.line.number] <- paste0(" \"offset\": ", this.offset)
tt.changed.template[sample.type.line.number] <- paste0("\"hasSampleType\": \"", sample.type, "\"")
writeLines(tt.changed.template, con = "test58.json", sep = "\n")
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query" --data @test58.json > test63.json'))
these.files.raw <- fromJSON("test63.json")
tt.files <- append(tt.files, these.files.raw$`_embedded`$files)
}
ids.files <- sapply(tt.files, FUN= function(x){ x$id})
names.files <- sapply(tt.files, FUN= function(x){ x$label})
ids.cases <- vector("character", length=0)
# Get the cases for these file
for (ttj in 1:length(ids.files)){
if (ttj %% 10 == 0){
cat("Working on loop ", ttj, " of ", length(ids.files),"\n")
}
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/files/', ids.files[ttj], '/cases" > test35.json'))
tt.case.info <- fromJSON("test35.json")
ids.cases <- append(ids.cases, tt.case.info$`_embedded`$cases[[1]]$id)
if (length(tt.case.info$`_embedded`$cases)>1){
stop(paste0("ERROR: length(tt.case.info$`_embedded`$cases)>1 for file id\n", ids.files[ttj]))
}
}
if (length(ids.cases)!= length(ids.files)){
stop(paste0("ERROR: length(ids.cases)!= length(ids.files), i.e.", length(ids.cases), "!=", length(ids.files)))
}
files.df <- data.frame(ids.files, names.files, ids.cases)
write.table(files.df, file = file.path(homedir, output.csv), row.names = FALSE, col.names = TRUE, sep = ",", append=FALSE, quote=TRUE)
setwd(currentdir)
list(tt.files=tt.files,ids.files=ids.files, names.files=names.files, ids.cases=ids.cases)
}
## added dec 19 2016
get.files.with.tumor.or.normal.and.return.NA.if.none <- function(tempdir, homedir, query.template.file, auth.token, tt.files.per.download, sample.type, output.csv){
currentdir <- getwd()
setwd(tempdir)
# read in template file, will edit its offset value
tt.template <- scan(file= query.template.file, what="character", sep="\n")
#
# which line of template file has offset in it?
#
offset.line.number <- grep(pattern="offset", x=tt.template)
if (length(offset.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the word offset in it.\n"))
}
# which line of template file has hasSampleType in it?
#
sample.type.line.number <- grep(pattern="hasSampleType", x=tt.template)
if (length(sample.type.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the word hasSampleType in it.\n"))
}
# get count of number of files
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query/total" --data @', query.template.file,' > test35.json'))
tt.nfiles <- as.numeric(fromJSON("test35.json"))
tt.files <- vector("list", length=0)
n.loops <- ceiling(tt.nfiles/tt.files.per.download)
for (tti in 1:n.loops){
cat("Working on loop ", tti , " of ", n.loops, "\n")
this.offset <- tt.files.per.download*(tti-1)
# make file with offset in it
tt.changed.template <- tt.template
tt.changed.template[offset.line.number] <- paste0(" \"offset\": ", this.offset)
tt.changed.template[sample.type.line.number] <- paste0("\"hasSampleType\": \"", sample.type, "\"")
writeLines(tt.changed.template, con = "test58.json", sep = "\n")
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query" --data @test58.json > test63.json'))
these.files.raw <- fromJSON("test63.json")
tt.files <- append(tt.files, these.files.raw$`_embedded`$files)
}
if (length(tt.files)>0){
ids.files <- sapply(tt.files, FUN= function(x){ x$id})
names.files <- sapply(tt.files, FUN= function(x){ x$label})
ids.cases <- vector("character", length=0)
## Get the cases for these file
for (ttj in 1:length(ids.files)){
if (ttj %% 10 == 0){
cat("Working on loop ", ttj, " of ", length(ids.files),"\n")
}
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/files/', ids.files[ttj], '/cases" > test35.json'))
tt.case.info <- fromJSON("test35.json")
ids.cases <- append(ids.cases, tt.case.info$`_embedded`$cases[[1]]$id)
if (length(tt.case.info$`_embedded`$cases)>1){
stop(paste0("ERROR: length(tt.case.info$`_embedded`$cases)>1 for file id\n", ids.files[ttj]))
}
}
if (length(ids.cases)!= length(ids.files)){
stop(paste0("ERROR: length(ids.cases)!= length(ids.files), i.e.", length(ids.cases), "!=", length(ids.files)))
}
files.df <- data.frame(ids.files, names.files, ids.cases)
write.table(files.df, file = file.path(homedir, output.csv), row.names = FALSE, col.names = TRUE, sep = ",", append=FALSE, quote=TRUE)
n.files <- length(ids.files)
} else {
n.files <- 0
ids.files <- NULL
names.files <- NULL
ids.cases <- vector("character", length=0)
}
setwd(currentdir)
list(tt.files=tt.files,ids.files=ids.files, names.files=names.files, ids.cases=ids.cases, n.files=n.files)
}
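# Hedged usage sketch (argument values are illustrative, mirroring the commented
# example for get.files.with.tumor.or.normal above):
# out.normal <- get.files.with.tumor.or.normal.and.return.NA.if.none(tempdir, homedir,
#   query.template.file = "gbmqueryjul19.json", auth.token,
#   tt.files.per.download = files.per.download, sample.type = "Solid Tissue Normal",
#   output.csv = "gbm.query.normal.csv")
# out.normal$n.files is 0 (rather than an error) when the query matches no files.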
## write a tumor template file for use with get.files.with.tumor.or.normal.
## Of course, this could have been done inside that function, but that function
## is already written and we don't want to change it.
## ONLY FOR tumors, NOT FOR normals, DOESN'T work for "primary
## blood ..." or for "recurrent tumor"
## NOT for any of these:
## Primary Blood Derived Cancer - Peripheral Blood
## Additional - New Primary
## Additional Metastatic
## Blood Derived Normal
## Bone Marrow Normal
## Buccal Cell Normal
## Metastatic
## Recurrent Tumor
## Solid Tissue Normal
## Note that there are just 2 samples for Additional Metastatic, and
## both are for skin cancer
write.different.disease.to.tumor.template.file <- function(shortname, tempdir, longname, query.template.file=file.path(tempdir, "tumorquery.json")){
currentdir <- getwd()
setwd(tempdir)
out.template.file <- file.path(tempdir, paste0(shortname,".tumorquery.json"))
## read in template file and replace the placeholder zzzz with the disease name
tt.template <- scan(file= query.template.file, what="character", sep="\n")
tt.changed.template <- gsub(pattern="zzzz", replacement = longname, x=tt.template)
writeLines(tt.changed.template, con = out.template.file, sep="\n")
setwd(currentdir)
}
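# Hedged usage sketch (assumes tempdir contains tumorquery.json with the
# placeholder string "zzzz" where the disease name belongs):
# write.different.disease.to.tumor.template.file(shortname = "gbm",
#   tempdir = tempdir, longname = "Glioblastoma Multiforme")
# # writes tempdir/gbm.tumorquery.json with "zzzz" replaced by the long name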
# df.query = lung.all.tumors.df
# take data frame read in from csv outputted by
# get.files.with.tumor.or.normal
# and pick one file for each case
choose.one.file.for.each.case <- function(df.query){
tt.unique.cases <- unique(df.query$ids.cases)
n.unique.cases <- length(tt.unique.cases)
new.df.query <- data.frame(ids.files= vector("character", length=0), names.files= vector("character", length=0), ids.cases= vector("character", length=0))
for (tti in 1:n.unique.cases){
# get df of files with this case
subdf <- df.query[df.query$ids.cases== tt.unique.cases[tti],]
if (dim(subdf)[1]==0){
stop(paste("ERROR: no rows for case ", tt.unique.cases[tti]))
}
# else if there is exactly one row:
else if (dim(subdf)[1]==1){
new.df.query <- rbind(new.df.query, subdf)
}
else {
new.df.query <- rbind(new.df.query, subdf[1,])
}
}
new.df.query
}
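# Equivalent hedged one-liner: the loop above keeps the first row seen for each
# case id, which duplicated() reproduces without looping:
# df.query[!duplicated(df.query$ids.cases), ]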
# df.query = lung.all.tumors.df
# take data frame read in from csv outputted by
# get.files.with.tumor.or.normal
# and pick one file for each case
## DIFFERS from choose.one.file.for.each.case in that,
## when given a choice, it reuses the file already chosen
## (one sample per case) for the machete runs.
## ALSO checks whether choices were already made in datasetapi.R
## when a filename.mostrecent is given as an input.
choose.one.file.for.each.case.using.the.choice.already.made.when.choosing.for.machete.runs <- function(df.query, allfilenames, filename.mostrecent=NULL){
indices.tumors.downloaded.by.today <- sort(which(df.query$names.files %in% allfilenames))
print(paste0("length(indices.tumors.downloaded.by.today) is ", length(indices.tumors.downloaded.by.today)))
##
## now get the cases for the runs pre today
downloaded.by.today.cases <- df.query$ids.cases[indices.tumors.downloaded.by.today]
tt.unique.cases <- unique(df.query$ids.cases)
n.unique.cases <- length(tt.unique.cases)
new.df.query <- data.frame(ids.files= vector("character", length=0), names.files= vector("character", length=0), ids.cases= vector("character", length=0))
if (!is.null(filename.mostrecent)){
mostrecent.df <- read.table(filename.mostrecent, sep=",", header = FALSE, col.names = c("sb.id", "filename", "case"), stringsAsFactors = FALSE)
}
for (tti in 1:n.unique.cases){
pick.first.one <- TRUE
if (tt.unique.cases[tti] %in% downloaded.by.today.cases){
subdf.first.cut <- df.query[(df.query$ids.cases== tt.unique.cases[tti]),]
subdf <- subdf.first.cut[(subdf.first.cut$names.files %in% allfilenames),]
stopifnot(dim(subdf)[1]==1)
new.df.query <- rbind(new.df.query, subdf)
pick.first.one <- FALSE
}
else if (!is.null(filename.mostrecent)){
if (tt.unique.cases[tti] %in% mostrecent.df$case){
subdf.first.cut <- df.query[(df.query$ids.cases== tt.unique.cases[tti]),]
subdf <- subdf.first.cut[(subdf.first.cut$names.files %in% mostrecent.df$filename),]
stopifnot(dim(subdf)[1]==1)
new.df.query <- rbind(new.df.query, subdf)
pick.first.one <- FALSE
}
}
## Run the next test no matter what, because we cannot check membership in
## filename.mostrecent when that file was not supplied;
## only pick the first file if a choice has not been made already.
if (pick.first.one) {
## get df of files with this case
subdf <- df.query[df.query$ids.cases== tt.unique.cases[tti],]
if (dim(subdf)[1]==0){
stop(paste("ERROR: no rows for case ", tt.unique.cases[tti]))
}
## else if there is exactly one row:
else if (dim(subdf)[1]==1){
new.df.query <- rbind(new.df.query, subdf)
}
else {
new.df.query <- rbind(new.df.query, subdf[1,])
}
}
}
new.df.query
}
# get.one.filename.from.fileid(fileid="576d6a09e4b01be096f370a6", allnames=alltarnames, allids=alltarids)
get.one.filename.from.fileid <- function(fileid, allnames, allids){
tfvec <- (allids == fileid)
{ # start if/else
if (any(is.na(tfvec))){
stop(paste0("Error: fileid ", fileid, " is giving NAs\n"))
}
else if (sum(tfvec)>=2){
stop(paste0("Error: fileid ", fileid, " is giving more than two matches\n"))
}
else if (sum(tfvec)==0){
stop(paste0("Error: fileid ", fileid, " is giving 0 matches\n"))
}
}
allnames[which(tfvec==1)]
}
# get.filenames.from.fileids(fileids=c("576d6a09e4b01be096f370a6","57748dd8e4b03bb2bc269eb2"), allnames=alltarnames, allids=alltarids)
#
# vectorized version of the above
get.filenames.from.fileids <- function(fileids, allnames, allids){
sapply(seq(along=fileids),FUN =function(i) get.one.filename.from.fileid(fileids[i],allnames=allnames, allids=allids))
}
# get.one.fileid.from.filename(filename="UNCID_2179117.260fce5f-8aea-4c0b-868a-ca514b130dff.130325_UNC16-SN851_0231_BC20VNACXX_2_ACAGTG.tar.gz", allnames=allfilenames, allids=allfileids)
get.one.fileid.from.filename <- function(filename, allnames, allids){
tfvec <- (allnames == filename)
{ # start if/else
if (any(is.na(tfvec))){
stop(paste0("Error: filename ", filename, " is giving NAs\n"))
}
else if (sum(tfvec)>=2){
stop(paste0("Error: filename ", filename, " is giving more than two matches\n"))
}
else if (sum(tfvec)==0){
stop(paste0("Error: filename ", filename, " is giving 0 matches\n"))
}
}
allids[which(tfvec==1)]
}
# get.fileids.from.filenames(filenames=c("UNCID_2179117.260fce5f-8aea-4c0b-868a-ca514b130dff.130325_UNC16-SN851_0231_BC20VNACXX_2_ACAGTG.tar.gz", "UNCID_2641218.918a606c-3b19-4292-862c-f8437d00ab00.140721_UNC15-SN850_0379_AC4V28ACXX_8_TGACCA.tar.gz"), allnames = allfilenames, allids = allfileids)
# vectorized version of the above
get.fileids.from.filenames <- function(filenames, allnames, allids){
sapply(seq(along=filenames),FUN =function(i) get.one.fileid.from.filename(filenames[i],allnames=allnames, allids=allids))
}
# projname = "ericfg/mach1"
# get list of all files in a project:
# projname shouldn't have any spaces or unusual characters!!!
list.all.files.project<-function(projname, ttwdir, tempdir, auth.token, max.iterations=max.iterations){
file.increment <- 100
# initialize the outputs:
filehrefs <- vector("character", length=0)
fileids <- vector("character", length=0)
filenames <- vector("character", length=0)
# while loop
# ask for 100 files, then read off last element - link I think
# then change more.files.yn to FALSE
more.files.yn <- TRUE
i.loop <- 1 # i.loop is the number of times through the loop;
# also helps to limit your total number of
# runs through loop
while (more.files.yn) {
if (i.loop %% 3 == 1){
print(paste("Working on loop", i.loop, "; max.iterations =", max.iterations, "\n"))
}
# first time through, use your own link
# later times through, use link provided by API
# Note that this overwrites the file if already there.
filename.t <- file.path(tempdir, "tempfilelist.txt")
{ # start if/else
if (i.loop==1){
system(paste('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-api.sbgenomics.com/v2/files?offset=0&limit=',file.increment,'&project=', projname, '" > ', filename.t, sep =""))
}
else {
system(paste('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "', nextlink, '" > ', filename.t, sep =""))
}
} # end if/else
all.files.raw <- vector("list", length=0)
all.files.raw[[i.loop]] <- RJSONIO::fromJSON(filename.t)
# names(all.files.raw[[i.loop]])
# [1] "href" "items" "links"
#
# names(all.files.raw[[i.loop]]$items[1][[1]])
# [1] "href" "id" "name" "project"
# Add hrefs, file ids and file names from this step of loop
# to big list of file names:
#
filehrefs <- append(filehrefs, sapply(all.files.raw[[i.loop]]$items, FUN= function(x){ x[["href"]]}))
fileids <- append(fileids, sapply(all.files.raw[[i.loop]]$items, FUN= function(x){ x[["id"]]}))
filenames <- append(filenames, sapply(all.files.raw[[i.loop]]$items, FUN= function(x){ x[["name"]]}))
## filehrefs <- append(filehrefs, all.files.raw[[i.loop]]$items$href)
## fileids <- append(fileids, all.files.raw[[i.loop]]$items$id)
## filenames <- append(filenames, all.files.raw[[i.loop]]$items$name)
#
templink <- all.files.raw[[i.loop]]$links
{
# if there is nothing in templink, that means the loop is
# done, apparently- not documented, but that happened once
# when number of files was less than 100
if (is.list(templink) & length(templink)==0){
more.files.yn <- FALSE
}
else {
nextlink <- all.files.raw[[i.loop]]$links[[1]][1]
names(nextlink)<- NULL
# check if next link has the val "prev" for "rel", which
# means that the current request is the last one
prev.or.next <- all.files.raw[[i.loop]]$links[[1]]["rel"]
names(prev.or.next)<- NULL
i.loop <- i.loop + 1
if (! (prev.or.next %in% c("prev","next"))){
stop(paste("ERROR: prev.or.next is ", prev.or.next, " and it should be one of prev or next"))
}
if (prev.or.next=="prev"){
more.files.yn <- FALSE
}
}
} # end if/else
# if you reach the max loop size, end loop:
if (i.loop>= max.iterations){
more.files.yn <- FALSE
}
#
} # end while loop
n.files <- length(filenames)
list(filehrefs=filehrefs, fileids= fileids, filenames=filenames, n.files=n.files)
}
# out.copy <- copy.file.with.sb.id.to.project(sbid="564a57f2e4b0298dd2cb0590", proj.name="JSALZMAN/machete", auth.token=auth.token, tempdir=tempdir)
# returns new file id, i.e. id in project
# CHECK FOR SOMETHING WITH THAT NAME FIRST?
# FOR NOW, CHECK MANUALLY
copy.file.with.sb.id.to.project <- function(sbid, proj.name, auth.token, tempdir){
currentdir <- getwd()
setwd(tempdir)
system(paste0("curl --data '{\"project\": \"" , proj.name, "\"}'", ' -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-api.sbgenomics.com/v2/files/',sbid, '/actions/copy" > test93.json'))
out.file.info <- fromJSON("test93.json")
file.name <- out.file.info$name
file.id <- out.file.info$id
# for next thing, use as.list because in one case,
# when age at diagnosis was missing, it converted it to
# a character vector and then it gave an error when I asked
# for out.file.info$metadata$experimental_strategy
# problem with UNCID_2197473
# 564a5a07e4b0298dd2cbb5b0
# test93weird.json
# missing age at diagnosis for this one
# Looking here it's listed with a blank:
# https://cgc.sbgenomics.com/u/JSALZMAN/machete/files/564a5a07e4b0298dd2cbb5b0/
file.metadata <- as.list(out.file.info$metadata)
setwd(currentdir)
# list(file.id=file.id, file.name=file.name, exp.strategy = out.file.info$metadata$experimental_strategy, data.subtype=out.file.info$metadata$data_subtype, disease.type =out.file.info$metadata$disease_type)
list(file.id=file.id, file.name=file.name, exp.strategy = file.metadata$experimental_strategy, data.subtype=file.metadata$data_subtype, disease.type =file.metadata$disease_type)
}
copy.many.files.with.sb.id.to.project <- function(vec.sbids, proj.name, auth.token, tempdir){
files.ids <- vector("character", length = 0)
files.names <- vector("character", length = 0)
exp.strategies <- vector("character", length = 0)
data.subtypes <- vector("character", length = 0)
disease.types <- vector("character", length = 0)
for (tti in 1:length(vec.sbids)){
out.copy <- copy.file.with.sb.id.to.project(sbid=vec.sbids[tti], proj.name=proj.name, auth.token=auth.token, tempdir=tempdir)
files.ids <- append(files.ids, out.copy$file.id)
files.names <- append(files.names, out.copy$file.name)
exp.strategies <- append(exp.strategies, out.copy$exp.strategy)
data.subtypes <- append(data.subtypes, out.copy$data.subtype)
disease.types <- append(disease.types, out.copy$disease.type)
cat("Just copied file", out.copy$file.name, "\n")
}
list(files.ids=files.ids, files.names=files.names, exp.strategies=exp.strategies, data.subtypes= data.subtypes, disease.types=disease.types)
}
# for printing out vectors of file ids in form c("","") for easy
# copying and pasting to use them later or in other files, e.g.
# to transfer from datasetapi.R to runapi.R
print.nice.ids.vector <- function(ttvec){
noquote(paste0("c(\"", paste(ttvec, collapse = "\", \""), "\")"))
}
# for printing out vectors of file ids in form c("","") for easy
# copying and pasting to use them later or in other files, e.g.
# to transfer from datasetapi.R to runapi.R
print.nice.ids.vector.within.function <- function(ttvec){
cat(noquote(paste0("c(\"", paste(ttvec, collapse = "\", \""), "\")\n")))
}
# get.file.with.aliquot.id(aliquot.id="TCGA-CH-5739-01A-11R-1580-07", tempdir, homedir, query.template.file="aliquotquery.json", auth.token)
# aliquot.id="TCGA-CH-5739-01A-11R-1580-07"; query.template.file="aliquotquery.json"
# FOR NOW, ONLY DOES TUMOR, NOT NORMAL
# ASSUMES THERE IS AT MOST 1 FILE WITH THIS ALIQUOT ID; FAILS IF NOT
get.file.with.aliquot.id <- function(aliquot.id, tempdir, homedir, query.template.file, auth.token){
currentdir <- getwd()
setwd(tempdir)
# read in template file, will edit its offset value
tt.template <- scan(file= query.template.file, what="character", sep="\n")
#
# which line of template file has zzzz in it?
#
zzzz.line.number <- grep(pattern="zzzz", x=tt.template)
if (length(zzzz.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the string zzzz in it.\n"))
}
tt.changed.template <- tt.template
tt.changed.template[zzzz.line.number] <- gsub(pattern = "zzzz", replacement = aliquot.id, x = tt.changed.template[zzzz.line.number])
writeLines(tt.changed.template, con = "test58.json", sep = "\n")
#
## get count of number of files
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query/total" --data @test58.json > test35.json'))
tt.nfiles <- as.numeric(fromJSON("test35.json"))
stopifnot(tt.nfiles<=1)
## initialize data frames, particularly in case tt.nfiles = 0
ids.files <- vector("character", length=0)
names.files <- vector("character", length=0)
ids.cases <- vector("character", length=0)
{ # begin if/else
if (tt.nfiles >0){
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query" --data @test58.json > test63.json'))
tt.files.raw <- fromJSON("test63.json")
tt.files <- tt.files.raw$`_embedded`$files
ids.files <- sapply(tt.files, FUN= function(x){ x$id})
names.files <- sapply(tt.files, FUN= function(x){ x$label})
## Get the cases for this file
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/files/', ids.files[1], '/cases" > test35.json'))
tt.case.info <- fromJSON("test35.json")
ids.cases <- append(ids.cases, tt.case.info$`_embedded`$cases[[1]]$id)
if (length(tt.case.info$`_embedded`$cases)>1){
stop(paste0("ERROR: length(tt.case.info$`_embedded`$cases)>1 for file id\n", ids.files[1]))
}
if (length(ids.cases)!= length(ids.files)){
stop(paste0("ERROR: length(ids.cases)!= length(ids.files), i.e.", length(ids.cases), "!=", length(ids.files)))
}
}
else {
# put in empty values for all these vectors, actually should do
# it first
}
} # end if/e
files.df <- data.frame(ids.files=ids.files, names.files=names.files, ids.cases=ids.cases)
setwd(currentdir)
list(files.df=files.df, tt.files=tt.files)
}
## todaydate <- "sep9"; shortname <- "glioma"; longname <- "Brain Lower Grade Glioma"; n.use.this.many.files.now <- 22; random.seed=19651
## get list of all files
## put sb ids in random order, and then save to file
## date the file and save copy but also keep a "most recent" version
## don't worry about matched normals at all
##
## use this in datasetapi.R to get files for a particular cancer
## THAT THERE ARE NOT ALREADY FILES FOR IN THE PROJECT
## do other cases manually; it will give error if there
## are already in the project
##
## ASSUMES you've already gotten a vector allfilenames
## of all file names in the project
## Also ASSUMES that the project is machete, although that
## could easily be changed.
##
## get.new.tar.files
##
get.new.tar.files <- function(todaydate, shortname, longname, n.use.this.many.files.now, tempdir, homedir, auth.token, files.per.download, allfilenames, random.seed=19651){
## first one is the one with zzzz where the disease will go
template.for.query.template.file <- file.path(tempdir, "tumorquery.json")
query.template.file.for.particular.disease <- file.path(tempdir, paste0(shortname,".tumorquery.json"))
outcsv <- paste0(shortname, todaydate, "query.tumor.csv")
write.different.disease.to.tumor.template.file(shortname, tempdir, longname, query.template.file=template.for.query.template.file)
out.tumors <- get.files.with.tumor.or.normal(tempdir, homedir, query.template.file=query.template.file.for.particular.disease, auth.token, tt.files.per.download= files.per.download, sample.type="Primary Tumor", output.csv = outcsv)
cat("Number of files for ", shortname, " (", longname, ") is ", length(out.tumors$ids.files), "\nNote that this could include multiple files for one case, so this might be larger than the number of cases.\n", sep="")
## get indices of out.tumors which
## are in allfilenames, i.e. which we already downloaded
## This should have length 0 for this function; if not, give error
indices.tumors.downloaded.by.today <- sort(which(out.tumors$names.files %in% allfilenames))
length(indices.tumors.downloaded.by.today)
##
if (length(indices.tumors.downloaded.by.today)>0){
stop(paste0("ERROR ERROR: there are some tumors downloaded before today and there should not be for this function; an example name is \n", out.tumors$names.files[1]))
}
## READ in results, just to get them as a nice data frame
all.tumors.df <- read.table(file=file.path(homedir, outcsv), header=TRUE, sep =",", stringsAsFactors=FALSE)
dim(all.tumors.df)
##
## Now choose a unique file for each case:
unique.tumors.df <- choose.one.file.for.each.case(all.tumors.df)
n.total.cases <- length(unique.tumors.df$ids.cases)
##
print(paste0("Number of unique files- one for each case- for ", shortname, " (", longname, ") is ", length(out.tumors$ids.files)))
## Now put these in a random order
## Then can go back to get them out of a csv file
set.seed(random.seed)
random.ordering.of.indices <- sample(n.total.cases, size = n.total.cases)
sb.ids.tumors.with.random.ordering <- unique.tumors.df$ids.files[random.ordering.of.indices]
cases.tumors.with.random.ordering <- unique.tumors.df$ids.cases[random.ordering.of.indices]
names.tumors.with.random.ordering <- unique.tumors.df$names.files[random.ordering.of.indices]
df.tumors.with.random.ordering <- data.frame(sb.ids.tumors.with.random.ordering, names.tumors.with.random.ordering, cases.tumors.with.random.ordering)
print(paste0("Randomly ordering files now and then selecting the first ", n.use.this.many.files.now, "."))
use.these.now.ids <- sb.ids.tumors.with.random.ordering[1:n.use.this.many.files.now]
use.these.now.names <- names.tumors.with.random.ordering[1:n.use.this.many.files.now]
## NOT using these right now:
## write to file
## both one with current date, and one that's most recent
filename.pre.downloads.with.date = file.path(homedir,paste0(shortname, ".files.not.downloaded.before.", todaydate, ".csv"))
filename.downloads.with.date = file.path(homedir,paste0(shortname, ".files.not.yet.downloaded.as.of.", todaydate, ".csv"))
filename.mostrecent = file.path(homedir,paste0(shortname, ".files.not.yet.downloaded.most.recent.csv"))
cat("Writing files:\n", filename.pre.downloads.with.date, "\n", filename.downloads.with.date, "\n", filename.mostrecent, "\n")
n.tumors.with.random.ordering <- dim(df.tumors.with.random.ordering)[1]
write.table(df.tumors.with.random.ordering[1:n.tumors.with.random.ordering,], file = filename.pre.downloads.with.date, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
write.table(df.tumors.with.random.ordering[(n.use.this.many.files.now+1):n.tumors.with.random.ordering,], file = filename.downloads.with.date, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
write.table(df.tumors.with.random.ordering[(n.use.this.many.files.now+1):n.tumors.with.random.ordering,], file = filename.mostrecent, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
## Now copy these files to the machete project
print(paste0("About to copy ", n.use.this.many.files.now, " files to the machete project."))
out.tumors.today.first.copying.process <- copy.many.files.with.sb.id.to.project(vec.sbids=use.these.now.ids, proj.name="JSALZMAN/machete", auth.token=auth.token, tempdir=tempdir)
## AFTER COPYING, CHECK MANUALLY THAT THERE ARE NO _1_ PREFIXES
print("AFTER COPYING, CHECK MANUALLY THAT THERE ARE NO _1_ PREFIXES")
## after outputting the next thing, copying and pasting and editing, use it in runapi.R
## these are the ids in the project (so they are NOT the sb ids)
##
print.nice.ids.vector.within.function(out.tumors.today.first.copying.process$files.ids)
print.nice.ids.vector.within.function(names.tumors.with.random.ordering[1:n.use.this.many.files.now])
## also write files in case these are very long
nice.ids.file = file.path(homedir,paste0(shortname, ".nice.ids.", todaydate, ".csv"))
nice.names.file = file.path(homedir,paste0(shortname, ".nice.names.", todaydate, ".csv"))
write.table(out.tumors.today.first.copying.process$files.ids, file = nice.ids.file, row.names = FALSE, col.names = FALSE, sep = "\n", append=FALSE, quote=FALSE)
write.table(names.tumors.with.random.ordering[1:n.use.this.many.files.now], file = nice.names.file, row.names = FALSE, col.names = FALSE, sep = "\n", append=FALSE, quote=FALSE)
cat(paste0("Also writing files\n", nice.ids.file, "\n and \n", nice.names.file),"\n")
cat("Number of characters in ids output is ", nchar(print.nice.ids.vector(out.tumors.today.first.copying.process$files.ids)), " and number of characters in ids output is ", nchar(print.nice.ids.vector(names.tumors.with.random.ordering[1:n.use.this.many.files.now])), "\n", sep="")
}
## Assuming you already have a list of randomly ordered file names
## and ids in a "most recent" file,
## put n more on machete; also writes two files, the "most recent" file
## and a file with a date on it
## used in datasetapi.R
put.n.more.files.in.machete.project <- function(todaydate, shortname, longname, n.use.this.many.files.now, homedir=homedir, tempdir=tempdir, auth.token=auth.token){
filename.mostrecent = file.path(homedir,paste0(shortname, ".files.not.yet.downloaded.most.recent.csv"))
df.tumors.with.random.ordering <- read.table(filename.mostrecent, sep=",", header = FALSE, col.names = c("sb.id", "filename", "case"), stringsAsFactors = FALSE)
n.tumors.with.random.ordering <- dim(df.tumors.with.random.ordering)[1]
print(paste0("n.tumors.with.random.ordering is: ", n.tumors.with.random.ordering, "\n"))
stopifnot(n.use.this.many.files.now < n.tumors.with.random.ordering)
use.these.now.ids <- df.tumors.with.random.ordering$sb.id[1:n.use.this.many.files.now]
use.these.now.names <- df.tumors.with.random.ordering$filename[1:n.use.this.many.files.now]
## print.nice.ids.vector(use.these.now.ids)
## print.nice.ids.vector(use.these.now.names)
## filename.pre.downloads.with.date = file.path(homedir,paste0(shortname, todaydate, ".files.not.downloaded.before.csv"))
filename.downloads.with.date = file.path(homedir,paste0(shortname, ".", todaydate, ".files.not.yet.downloaded.as.of.csv"))
print(paste0("About to write files\n", filename.downloads.with.date, "\nand\n", filename.mostrecent))
write.table(df.tumors.with.random.ordering[(n.use.this.many.files.now+1):n.tumors.with.random.ordering,], file = filename.downloads.with.date, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
## NOTE: next thing overwrites previous file
write.table(df.tumors.with.random.ordering[(n.use.this.many.files.now+1):n.tumors.with.random.ordering,], file = filename.mostrecent, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
out.tumors.today.first.copying.process <- copy.many.files.with.sb.id.to.project(vec.sbids=use.these.now.ids, proj.name="JSALZMAN/machete", auth.token=auth.token, tempdir=tempdir)
## AFTER COPYING, CHECK MANUALLY THAT THERE ARE NO _1_ PREFIXES
## after outputting the next thing, copying and pasting and editing, use it in runapi.R
## these are the ids in the project (so they are NOT the sb ids)
##
print.nice.ids.vector(out.tumors.today.first.copying.process$files.ids)
print.nice.ids.vector(use.these.now.names)
## also write files in case these are very long
nice.ids.file = file.path(homedir,paste0(shortname, ".", todaydate, ".nice.ids.csv"))
nice.names.file = file.path(homedir,paste0(shortname, ".", todaydate, ".nice.names.csv"))
write.table(out.tumors.today.first.copying.process$files.ids, file = nice.ids.file, row.names = FALSE, col.names = FALSE, sep = "\n", append=FALSE, quote=FALSE)
write.table(use.these.now.names, file = nice.names.file, row.names = FALSE, col.names = FALSE, sep = "\n", append=FALSE, quote=FALSE)
print(paste0("Also writing files\n", nice.ids.file, "\n and \n", nice.names.file,"\n"))
}
## NOT SURE IF THIS WORKS; haven't used it yet; I was trying it for
## something but it didn't work for what I wanted it for.
# out34 <- get.details.of.a.file.from.id(fileid = "564a31abe4b0ef121817527b", auth.token=auth.token, ttwdir=tempdir)
# taskid = "0bb0a961-41fd-4617-a9d4-f2392445a04e"
# http://docs.cancergenomicscloud.org/docs/get-details-of-a-task
#
get.details.of.a.file.from.id <- function(fileid, auth.token, ttwdir=tempdir){
filename.t <- file.path(ttwdir, "temptaskdetails.json")
system(paste('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-api.sbgenomics.com/v2/files/',fileid, '" > ', filename.t, sep =""))
RJSONIO::fromJSON(filename.t)
}
## assumes homedir and tempdir have been defined before this script is sourced
wdir <- homedir
# make sure there is a directory homedir
if (!dir.exists(wdir)){
  dir.create(wdir)
}
# make sure there is a directory tempdir
if (!dir.exists(tempdir)){
  dir.create(tempdir)
}
setwd(dir=wdir)
mydir <- "/my/dir"
# both branches of the original home.home if/else assigned the same path,
# so a single assignment suffices
auth.token.filename <- file.path(mydir, "api/authtoken.txt")
auth.token <-scan(file=auth.token.filename, what="character")
max.iterations <- 10000
files.per.download <- 100
library(RJSONIO)
# gets all files, NOT just one for each case; you have to do that later
# NEXT ONES HAVE disease variable, but I took that out later; not
# relevant for this function
# out.gbm <- get.files.with.tumor.or.normal(disease="Glioblastoma Multiforme", tempdir, homedir, query.template.file="gbmqueryjul19.json", auth.token, tt.files.per.download= files.per.download, sample.type="Primary Tumor", output.csv = "gbm.july19.query.tumor.csv")
# disease="Glioblastoma Multiforme"; query.template.file="gbmqueryjul19.json"; tt.files.per.download= files.per.download; sample.type="Primary Tumor"; output.csv = "gbm.july19.query.tumor.csv"
# ASSUMES template file is in tempdir
# and that output.csv should be a file in homedir
get.files.with.tumor.or.normal <- function(tempdir, homedir, query.template.file, auth.token, tt.files.per.download, sample.type, output.csv){
currentdir <- getwd()
setwd(tempdir)
# read in template file, will edit its offset value
tt.template <- scan(file= query.template.file, what="character", sep="\n")
#
# which line of template file has offset in it?
#
offset.line.number <- grep(pattern="offset", x=tt.template)
if (length(offset.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the word offset in it.\n"))
}
# which line of template file has hasSampleType in it?
#
sample.type.line.number <- grep(pattern="hasSampleType", x=tt.template)
if (length(sample.type.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the word hasSampleType in it.\n"))
}
# get count of number of files
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query/total" --data @', query.template.file,' > test35.json'))
tt.nfiles <- as.numeric(fromJSON("test35.json"))
tt.files <- vector("list", length=0)
  n.loops <- ceiling(tt.nfiles/tt.files.per.download)
  # seq_len() handles the zero-file case; 1:n.loops would iterate over c(1, 0)
  for (tti in seq_len(n.loops)){
cat("Working on loop ", tti , " of ", n.loops, "\n")
this.offset <- tt.files.per.download*(tti-1)
# make file with offset in it
tt.changed.template <- tt.template
tt.changed.template[offset.line.number] <- paste0(" \"offset\": ", this.offset)
tt.changed.template[sample.type.line.number] <- paste0("\"hasSampleType\": \"", sample.type, "\"")
writeLines(tt.changed.template, con = "test58.json", sep = "\n")
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query" --data @test58.json > test63.json'))
these.files.raw <- fromJSON("test63.json")
tt.files <- append(tt.files, these.files.raw$`_embedded`$files)
}
ids.files <- sapply(tt.files, FUN= function(x){ x$id})
names.files <- sapply(tt.files, FUN= function(x){ x$label})
ids.cases <- vector("character", length=0)
  # Get the cases for these files
  for (ttj in seq_along(ids.files)){
if (ttj %% 10== 0){
cat("Working on loop ", ttj, " of ", length(ids.files),"\n")
}
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/files/', ids.files[ttj], '/cases" > test35.json'))
tt.case.info <- fromJSON("test35.json")
ids.cases <- append(ids.cases, tt.case.info$`_embedded`$cases[[1]]$id)
if (length(tt.case.info$`_embedded`$cases)>1){
stop(paste0("ERROR: length(tt.case.info$`_embedded`$cases)>1 for file id\n", ids.files[ttj]))
}
}
if (length(ids.cases)!= length(ids.files)){
stop(paste0("ERROR: length(ids.cases)!= length(ids.files), i.e.", length(ids.cases), "!=", length(ids.files)))
}
files.df <- data.frame(ids.files, names.files, ids.cases)
write.table(files.df, file = file.path(homedir, output.csv), row.names = FALSE, col.names = TRUE, sep = ",", append=FALSE, quote=TRUE)
setwd(currentdir)
list(tt.files=tt.files,ids.files=ids.files, names.files=names.files, ids.cases=ids.cases)
}
## added dec 19 2016
get.files.with.tumor.or.normal.and.return.NA.if.none <- function(tempdir, homedir, query.template.file, auth.token, tt.files.per.download, sample.type, output.csv){
currentdir <- getwd()
setwd(tempdir)
# read in template file, will edit its offset value
tt.template <- scan(file= query.template.file, what="character", sep="\n")
#
# which line of template file has offset in it?
#
offset.line.number <- grep(pattern="offset", x=tt.template)
if (length(offset.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the word offset in it.\n"))
}
# which line of template file has hasSampleType in it?
#
sample.type.line.number <- grep(pattern="hasSampleType", x=tt.template)
if (length(sample.type.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the word hasSampleType in it.\n"))
}
# get count of number of files
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query/total" --data @', query.template.file,' > test35.json'))
tt.nfiles <- as.numeric(fromJSON("test35.json"))
tt.files <- vector("list", length=0)
  n.loops <- ceiling(tt.nfiles/tt.files.per.download)
  # seq_len() handles the zero-file case (see note in the function above)
  for (tti in seq_len(n.loops)){
cat("Working on loop ", tti , " of ", n.loops, "\n")
this.offset <- tt.files.per.download*(tti-1)
# make file with offset in it
tt.changed.template <- tt.template
tt.changed.template[offset.line.number] <- paste0(" \"offset\": ", this.offset)
tt.changed.template[sample.type.line.number] <- paste0("\"hasSampleType\": \"", sample.type, "\"")
writeLines(tt.changed.template, con = "test58.json", sep = "\n")
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query" --data @test58.json > test63.json'))
these.files.raw <- fromJSON("test63.json")
tt.files <- append(tt.files, these.files.raw$`_embedded`$files)
}
if (length(tt.files)>0){
ids.files <- sapply(tt.files, FUN= function(x){ x$id})
names.files <- sapply(tt.files, FUN= function(x){ x$label})
ids.cases <- vector("character", length=0)
    ## Get the cases for these files
    for (ttj in seq_along(ids.files)){
if (ttj %% 10== 0){
cat("Working on loop ", ttj, " of ", length(ids.files),"\n")
}
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/files/', ids.files[ttj], '/cases" > test35.json'))
tt.case.info <- fromJSON("test35.json")
ids.cases <- append(ids.cases, tt.case.info$`_embedded`$cases[[1]]$id)
if (length(tt.case.info$`_embedded`$cases)>1){
stop(paste0("ERROR: length(tt.case.info$`_embedded`$cases)>1 for file id\n", ids.files[ttj]))
}
}
if (length(ids.cases)!= length(ids.files)){
stop(paste0("ERROR: length(ids.cases)!= length(ids.files), i.e.", length(ids.cases), "!=", length(ids.files)))
}
files.df <- data.frame(ids.files, names.files, ids.cases)
write.table(files.df, file = file.path(homedir, output.csv), row.names = FALSE, col.names = TRUE, sep = ",", append=FALSE, quote=TRUE)
n.files <- length(ids.files)
} else {
n.files <- 0
ids.files <- NULL
names.files <- NULL
ids.cases <- vector("character", length=0)
}
setwd(currentdir)
list(tt.files=tt.files,ids.files=ids.files, names.files=names.files, ids.cases=ids.cases, n.files=n.files)
}
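## Hypothetical usage sketch (not run): same call pattern as
## get.files.with.tumor.or.normal, but safe when a query matches no files.
## The template and output file names below are made up for illustration.
# out.maybe <- get.files.with.tumor.or.normal.and.return.NA.if.none(
#   tempdir, homedir, query.template.file = "kich.tumorquery.json",
#   auth.token, tt.files.per.download = files.per.download,
#   sample.type = "Recurrent Tumor", output.csv = "kich.recurrent.query.csv")
# if (out.maybe$n.files == 0) cat("no files matched this query\n")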
## write a tumor template file for use with get.files.with.tumor.or.normal
## of course, could have done this in that function, but that
## is already written and don't want to change it
## ONLY FOR tumors, NOT FOR normals, DOESN'T work for "primary
## blood ..." or for "recurrent tumor"
## NOT for any of these:
## Primary Blood Derived Cancer - Peripheral Blood
## Additional - New Primary
## Additional Metastatic
## Blood Derived Normal
## Bone Marrow Normal
## Buccal Cell Normal
## Metastatic
## Recurrent Tumor
## Solid Tissue Normal
## Note that there are just 2 samples for Additional Metastatic, and
## it's for skin cancer
write.different.disease.to.tumor.template.file <- function(shortname, tempdir, longname, query.template.file=file.path(tempdir, "tumorquery.json")){
currentdir <- getwd()
setwd(tempdir)
out.template.file <- file.path(tempdir, paste0(shortname,".tumorquery.json"))
## read in template file, will edit its disease value from zzzz
tt.template <- scan(file= query.template.file, what="character", sep="\n")
tt.changed.template <- gsub(pattern="zzzz", replacement = longname, x=tt.template)
writeLines(tt.changed.template, con = out.template.file, sep="\n")
setwd(currentdir)
}
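## Hypothetical usage sketch (not run): writes gbm.tumorquery.json in tempdir
## with "zzzz" replaced by the long disease name; the names are examples only.
# write.different.disease.to.tumor.template.file(shortname = "gbm",
#   tempdir = tempdir, longname = "Glioblastoma Multiforme")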
# df.query = lung.all.tumors.df
# take data frame read in from csv outputted by
# get.files.with.tumor.or.normal
# and pick one file for each case
choose.one.file.for.each.case <- function(df.query){
tt.unique.cases <- unique(df.query$ids.cases)
n.unique.cases <- length(tt.unique.cases)
new.df.query <- data.frame(ids.files= vector("character", length=0), names.files= vector("character", length=0), ids.cases= vector("character", length=0))
for (tti in 1:n.unique.cases){
# get df of files with this case
subdf <- df.query[df.query$ids.cases== tt.unique.cases[tti],]
if (dim(subdf)[1]==0){
stop(paste("ERROR: no rows for case ", tt.unique.cases[tti]))
}
# else if there is exactly one row:
else if (dim(subdf)[1]==1){
new.df.query <- rbind(new.df.query, subdf)
}
else {
new.df.query <- rbind(new.df.query, subdf[1,])
}
}
new.df.query
}
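## Toy illustration of choose.one.file.for.each.case (all values made up):
## case1 has two files, so only its first row is kept.
# toy.df <- data.frame(ids.files = c("f1", "f2", "f3"),
#                      names.files = c("a.tar.gz", "b.tar.gz", "c.tar.gz"),
#                      ids.cases = c("case1", "case1", "case2"),
#                      stringsAsFactors = FALSE)
# choose.one.file.for.each.case(toy.df)  # returns rows for f1 and f3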
# df.query = lung.all.tumors.df
# take data frame read in from csv outputted by
# get.files.with.tumor.or.normal
# and pick one file for each case
## DIFFERS from choose.one.file.for.each.case in that
## if given a choice, it will choose the one
## already made when choosing one sample for each case
## when running machete
## ALSO will check if choices were already made in datasetapi.R
## and a filename.mostrecent is given as an input
choose.one.file.for.each.case.using.the.choice.already.made.when.choosing.for.machete.runs <- function(df.query, allfilenames, filename.mostrecent=NULL){
indices.tumors.downloaded.by.today <- sort(which(df.query$names.files %in% allfilenames))
print(paste0("length(indices.tumors.downloaded.by.today) is ", length(indices.tumors.downloaded.by.today)))
##
## now get the cases for the runs pre today
downloaded.by.today.cases <- df.query$ids.cases[indices.tumors.downloaded.by.today]
tt.unique.cases <- unique(df.query$ids.cases)
n.unique.cases <- length(tt.unique.cases)
new.df.query <- data.frame(ids.files= vector("character", length=0), names.files= vector("character", length=0), ids.cases= vector("character", length=0))
if (!is.null(filename.mostrecent)){
mostrecent.df <- read.table(filename.mostrecent, sep=",", header = FALSE, col.names = c("sb.id", "filename", "case"), stringsAsFactors = FALSE)
}
for (tti in 1:n.unique.cases){
pick.first.one <- TRUE
if (tt.unique.cases[tti] %in% downloaded.by.today.cases){
subdf.first.cut <- df.query[(df.query$ids.cases== tt.unique.cases[tti]),]
subdf <- subdf.first.cut[(subdf.first.cut$names.files %in% allfilenames),]
stopifnot(dim(subdf)[1]==1)
new.df.query <- rbind(new.df.query, subdf)
pick.first.one <- FALSE
}
else if (!is.null(filename.mostrecent)){
if (tt.unique.cases[tti] %in% mostrecent.df$case){
subdf.first.cut <- df.query[(df.query$ids.cases== tt.unique.cases[tti]),]
subdf <- subdf.first.cut[(subdf.first.cut$names.files %in% mostrecent.df$filename),]
stopifnot(dim(subdf)[1]==1)
new.df.query <- rbind(new.df.query, subdf)
pick.first.one <- FALSE
}
}
    ## Run the next test regardless: if filename.mostrecent wasn't given, we
    ## can't test membership in it, so fall back to picking the first file,
    ## but only if a choice hasn't already been made above.
if (pick.first.one) {
## get df of files with this case
subdf <- df.query[df.query$ids.cases== tt.unique.cases[tti],]
if (dim(subdf)[1]==0){
stop(paste("ERROR: no rows for case ", tt.unique.cases[tti]))
}
## else if there is exactly one row:
else if (dim(subdf)[1]==1){
new.df.query <- rbind(new.df.query, subdf)
}
else {
new.df.query <- rbind(new.df.query, subdf[1,])
}
}
}
new.df.query
}
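## Hypothetical usage sketch (not run); the inputs are illustrative, and
## allfilenames would come from list.all.files.project below:
# unique.df <- choose.one.file.for.each.case.using.the.choice.already.made.when.choosing.for.machete.runs(
#   df.query = all.tumors.df, allfilenames = allfilenames,
#   filename.mostrecent = "glioma.files.not.yet.downloaded.most.recent.csv")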
# get.one.filename.from.fileid(fileid="576d6a09e4b01be096f370a6", allnames=alltarnames, allids=alltarids)
get.one.filename.from.fileid <- function(fileid, allnames, allids){
tfvec <- (allids == fileid)
{ # start if/else
if (any(is.na(tfvec))){
stop(paste0("Error: fileid ", fileid, " is giving NAs\n"))
}
else if (sum(tfvec)>=2){
stop(paste0("Error: fileid ", fileid, " is giving more than two matches\n"))
}
else if (sum(tfvec)==0){
stop(paste0("Error: fileid ", fileid, " is giving 0 matches\n"))
}
}
allnames[which(tfvec==1)]
}
# get.filenames.from.fileids(fileids=c("576d6a09e4b01be096f370a6","57748dd8e4b03bb2bc269eb2"), allnames=alltarnames, allids=alltarids)
#
# vectorized version of the above
get.filenames.from.fileids <- function(fileids, allnames, allids){
  sapply(seq_along(fileids), FUN = function(i) get.one.filename.from.fileid(fileids[i], allnames=allnames, allids=allids))
}
# get.one.fileid.from.filename(filename="UNCID_2179117.260fce5f-8aea-4c0b-868a-ca514b130dff.130325_UNC16-SN851_0231_BC20VNACXX_2_ACAGTG.tar.gz", allnames=allfilenames, allids=allfileids)
get.one.fileid.from.filename <- function(filename, allnames, allids){
tfvec <- (allnames == filename)
{ # start if/else
if (any(is.na(tfvec))){
stop(paste0("Error: filename ", filename, " is giving NAs\n"))
}
else if (sum(tfvec)>=2){
stop(paste0("Error: filename ", filename, " is giving more than two matches\n"))
}
else if (sum(tfvec)==0){
stop(paste0("Error: filename ", filename, " is giving 0 matches\n"))
}
}
allids[which(tfvec==1)]
}
# get.fileids.from.filenames(filenames=c("UNCID_2179117.260fce5f-8aea-4c0b-868a-ca514b130dff.130325_UNC16-SN851_0231_BC20VNACXX_2_ACAGTG.tar.gz", "UNCID_2641218.918a606c-3b19-4292-862c-f8437d00ab00.140721_UNC15-SN850_0379_AC4V28ACXX_8_TGACCA.tar.gz"), allnames = allfilenames, allids = allfileids)
# vectorized version of the above
get.fileids.from.filenames <- function(filenames, allnames, allids){
  sapply(seq_along(filenames), FUN = function(i) get.one.fileid.from.filename(filenames[i], allnames=allnames, allids=allids))
}
# projname = "ericfg/mach1"
# get list of all files in a project:
# projname shouldn't have any spaces or unusual characters!!!
list.all.files.project<-function(projname, ttwdir, tempdir, auth.token, max.iterations=max.iterations){
file.increment <- 100
# initialize the outputs:
filehrefs <- vector("character", length=0)
fileids <- vector("character", length=0)
filenames <- vector("character", length=0)
# while loop
# ask for 100 files, then read off last element - link I think
# then change more.files.yn to FALSE
more.files.yn <- TRUE
i.loop <- 1 # i.loop is the number of times through the loop;
# also helps to limit your total number of
# runs through loop
while (more.files.yn) {
    if (i.loop %% 3 == 1){
      cat("Working on loop ", i.loop, "; max.iterations = ", max.iterations, "\n", sep="")
    }
# first time through, use your own link
# later times through, use link provided by API
# Note that this overwrites the file if already there.
filename.t <- file.path(tempdir, "tempfilelist.txt")
{ # start if/else
if (i.loop==1){
system(paste('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-api.sbgenomics.com/v2/files?offset=0&limit=',file.increment,'&project=', projname, '" > ', filename.t, sep =""))
}
else {
system(paste('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "', nextlink, '" > ', filename.t, sep =""))
}
} # end if/else
all.files.raw <- vector("list", length=0)
all.files.raw[[i.loop]] <- RJSONIO::fromJSON(filename.t)
# names(all.files.raw[[i.loop]])
# [1] "href" "items" "links"
#
# names(all.files.raw[[i.loop]]$items[1][[1]])
# [1] "href" "id" "name" "project"
# Add hrefs, file ids and file names from this step of loop
# to big list of file names:
#
filehrefs <- append(filehrefs, sapply(all.files.raw[[i.loop]]$items, FUN= function(x){ x[["href"]]}))
fileids <- append(fileids, sapply(all.files.raw[[i.loop]]$items, FUN= function(x){ x[["id"]]}))
filenames <- append(filenames, sapply(all.files.raw[[i.loop]]$items, FUN= function(x){ x[["name"]]}))
## filehrefs <- append(filehrefs, all.files.raw[[i.loop]]$items$href)
## fileids <- append(fileids, all.files.raw[[i.loop]]$items$id)
## filenames <- append(filenames, all.files.raw[[i.loop]]$items$name)
#
templink <- all.files.raw[[i.loop]]$links
{
# if there is nothing in templink, that means the loop is
# done, apparently- not documented, but that happened once
# when number of files was less than 100
      if (is.list(templink) && length(templink)==0){
more.files.yn <- FALSE
}
else {
nextlink <- all.files.raw[[i.loop]]$links[[1]][1]
names(nextlink)<- NULL
# check if next link has the val "prev" for "rel", which
# means that the current request is the last one
prev.or.next <- all.files.raw[[i.loop]]$links[[1]]["rel"]
names(prev.or.next)<- NULL
i.loop <- i.loop + 1
if (! (prev.or.next %in% c("prev","next"))){
stop(paste("ERROR: prev.or.next is ", prev.or.next, " and it should be one of prev or next"))
}
if (prev.or.next=="prev"){
more.files.yn <- FALSE
}
}
} # end if/else
# if you reach the max loop size, end loop:
if (i.loop>= max.iterations){
more.files.yn <- FALSE
}
#
} # end while loop
n.files <- length(filenames)
list(filehrefs=filehrefs, fileids= fileids, filenames=filenames, n.files=n.files)
}
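## Hypothetical usage sketch (not run); this is how the allfilenames and
## allfileids vectors assumed by other functions here could be built:
# out.proj <- list.all.files.project(projname = "JSALZMAN/machete",
#   ttwdir = tempdir, tempdir = tempdir, auth.token = auth.token,
#   max.iterations = max.iterations)
# allfilenames <- out.proj$filenames
# allfileids <- out.proj$fileids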
# out.copy <- copy.file.with.sb.id.to.project(sbid="564a57f2e4b0298dd2cb0590", proj.name="JSALZMAN/machete", auth.token=auth.token, tempdir=tempdir)
# returns new file id, i.e. id in project
# CHECK FOR SOMETHING WITH THAT NAME FIRST?
# FOR NOW, CHECK MANUALLY
copy.file.with.sb.id.to.project <- function(sbid, proj.name, auth.token, tempdir){
currentdir <- getwd()
setwd(tempdir)
system(paste0("curl --data '{\"project\": \"" , proj.name, "\"}'", ' -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-api.sbgenomics.com/v2/files/',sbid, '/actions/copy" > test93.json'))
out.file.info <- fromJSON("test93.json")
file.name <- out.file.info$name
file.id <- out.file.info$id
# for next thing, use as.list because in one case,
# when age at diagnosis was missing, it converted it to
# a character vector and then it gave an error when I asked
# for out.file.info$metadata$experimental_strategy
# problem with UNCID_2197473
# 564a5a07e4b0298dd2cbb5b0
# test93weird.json
# missing age at diagnosis for this one
# Looking here it's listed with a blank:
# https://cgc.sbgenomics.com/u/JSALZMAN/machete/files/564a5a07e4b0298dd2cbb5b0/
file.metadata <- as.list(out.file.info$metadata)
setwd(currentdir)
# list(file.id=file.id, file.name=file.name, exp.strategy = out.file.info$metadata$experimental_strategy, data.subtype=out.file.info$metadata$data_subtype, disease.type =out.file.info$metadata$disease_type)
list(file.id=file.id, file.name=file.name, exp.strategy = file.metadata$experimental_strategy, data.subtype=file.metadata$data_subtype, disease.type =file.metadata$disease_type)
}
copy.many.files.with.sb.id.to.project <- function(vec.sbids, proj.name, auth.token, tempdir){
files.ids <- vector("character", length = 0)
files.names <- vector("character", length = 0)
exp.strategies <- vector("character", length = 0)
data.subtypes <- vector("character", length = 0)
disease.types <- vector("character", length = 0)
for (tti in 1:length(vec.sbids)){
out.copy <- copy.file.with.sb.id.to.project(sbid=vec.sbids[tti], proj.name=proj.name, auth.token=auth.token, tempdir=tempdir)
files.ids <- append(files.ids, out.copy$file.id)
files.names <- append(files.names, out.copy$file.name)
exp.strategies <- append(exp.strategies, out.copy$exp.strategy)
data.subtypes <- append(data.subtypes, out.copy$data.subtype)
disease.types <- append(disease.types, out.copy$disease.type)
cat("Just copied file", out.copy$file.name, "\n")
}
list(files.ids=files.ids, files.names=files.names, exp.strategies=exp.strategies, data.subtypes= data.subtypes, disease.types=disease.types)
}
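## Hypothetical usage sketch (not run), reusing sb ids that appear in the
## comments above purely as examples:
# out.copies <- copy.many.files.with.sb.id.to.project(
#   vec.sbids = c("564a57f2e4b0298dd2cb0590", "564a5a07e4b0298dd2cbb5b0"),
#   proj.name = "JSALZMAN/machete", auth.token = auth.token, tempdir = tempdir)
# out.copies$files.names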
# for printing out vectors of file ids in form c("","") for easy
# copying and pasting to use them later or in other files, e.g.
# to transfer from datasetapi.R to runapi.R
print.nice.ids.vector <- function(ttvec){
noquote(paste0("c(\"", paste(ttvec, collapse = "\", \""), "\")"))
}
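## Runnable illustration of the output format:
# print.nice.ids.vector(c("aaa", "bbb"))
## [1] c("aaa", "bbb")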
# for printing out vectors of file ids in form c("","") for easy
# copying and pasting to use them later or in other files, e.g.
# to transfer from datasetapi.R to runapi.R
print.nice.ids.vector.within.function <- function(ttvec){
cat(noquote(paste0("c(\"", paste(ttvec, collapse = "\", \""), "\")\n")))
}
# get.file.with.aliquot.id(aliquot.id="TCGA-CH-5739-01A-11R-1580-07", tempdir, homedir, query.template.file="aliquotquery.json", auth.token)
# aliquot.id="TCGA-CH-5739-01A-11R-1580-07"; query.template.file="aliquotquery.json"
# FOR NOW, ONLY DOES TUMOR, NOT NORMAL
# ASSUMES THERE IS AT MOST 1 FILE WITH THIS ALIQUOT ID; FAILS IF NOT
get.file.with.aliquot.id <- function(aliquot.id, tempdir, homedir, query.template.file, auth.token){
currentdir <- getwd()
setwd(tempdir)
# read in template file, will edit its offset value
tt.template <- scan(file= query.template.file, what="character", sep="\n")
#
# which line of template file has zzzz in it?
#
zzzz.line.number <- grep(pattern="zzzz", x=tt.template)
if (length(zzzz.line.number)!=1){
stop(paste0("ERROR in file ", query.template.file, ": there should be exactly one line with the string zzzz in it.\n"))
}
tt.changed.template <- tt.template
tt.changed.template[zzzz.line.number] <- gsub(pattern = "zzzz", replacement = aliquot.id, x = tt.changed.template[zzzz.line.number])
writeLines(tt.changed.template, con = "test58.json", sep = "\n")
#
## get count of number of files
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query/total" --data @test58.json > test35.json'))
tt.nfiles <- as.numeric(fromJSON("test35.json"))
stopifnot(tt.nfiles<=1)
  ## initialize outputs, particularly in case tt.nfiles = 0
  ## (tt.files must exist even when nothing matches, since it is returned below)
  tt.files <- vector("list", length=0)
  ids.files <- vector("character", length=0)
  names.files <- vector("character", length=0)
  ids.cases <- vector("character", length=0)
{ # begin if/else
if (tt.nfiles >0){
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X POST "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/query" --data @test58.json > test63.json'))
tt.files.raw <- fromJSON("test63.json")
tt.files <- tt.files.raw$`_embedded`$files
ids.files <- sapply(tt.files, FUN= function(x){ x$id})
names.files <- sapply(tt.files, FUN= function(x){ x$label})
## Get the cases for this file
system(paste0('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-datasets-api.sbgenomics.com/datasets/tcga/v0/files/', ids.files[1], '/cases" > test35.json'))
tt.case.info <- fromJSON("test35.json")
ids.cases <- append(ids.cases, tt.case.info$`_embedded`$cases[[1]]$id)
if (length(tt.case.info$`_embedded`$cases)>1){
stop(paste0("ERROR: length(tt.case.info$`_embedded`$cases)>1 for file id\n", ids.files[1]))
}
if (length(ids.cases)!= length(ids.files)){
stop(paste0("ERROR: length(ids.cases)!= length(ids.files), i.e.", length(ids.cases), "!=", length(ids.files)))
}
}
    else {
      ## nothing to do: the empty output vectors were already initialized above
    }
  } # end if/else
files.df <- data.frame(ids.files=ids.files, names.files=names.files, ids.cases=ids.cases)
setwd(currentdir)
list(files.df=files.df, tt.files=tt.files)
}
## todaydate <- "sep9"; shortname <- "glioma"; longname <- "Brain Lower Grade Glioma"; n.use.this.many.files.now <- 22; random.seed=19651
## get list of all files
## put sb ids in random order, and then save to file
## date the file and save copy but also keep a "most recent" version
## don't worry about matched normals at all
##
## use this in datasetapi.R to get files for a particular cancer
## THAT THERE ARE NOT ALREADY FILES FOR IN THE PROJECT
## do other cases manually; it will give error if there
## are already in the project
##
## ASSUMES you've already gotten a vector allfilenames
## of all file names in the project
## Also ASSUMES that the project is machete, although that
## could easily be changed.
##
## get.new.tar.files
##
get.new.tar.files <- function(todaydate, shortname, longname, n.use.this.many.files.now, tempdir, homedir, auth.token, files.per.download, allfilenames, random.seed=19651){
## first one is the one with zzzz where the disease will go
template.for.query.template.file <- file.path(tempdir, "tumorquery.json")
query.template.file.for.particular.disease <- file.path(tempdir, paste0(shortname,".tumorquery.json"))
outcsv <- paste0(shortname, todaydate, "query.tumor.csv")
write.different.disease.to.tumor.template.file(shortname, tempdir, longname, query.template.file=template.for.query.template.file)
out.tumors <- get.files.with.tumor.or.normal(tempdir, homedir, query.template.file=query.template.file.for.particular.disease, auth.token, tt.files.per.download= files.per.download, sample.type="Primary Tumor", output.csv = outcsv)
cat("Number of files for ", shortname, " (", longname, ") is ", length(out.tumors$ids.files), "\nNote that this could include multiple files for one case, so this might be larger than the number of cases.\n", sep="")
## get indices of out.tumors which
## are in allfilenames, i.e. which we already downloaded
## This should have length 0 for this function; if not, give error
  indices.tumors.downloaded.by.today <- sort(which(out.tumors$names.files %in% allfilenames))
  print(length(indices.tumors.downloaded.by.today))
##
  if (length(indices.tumors.downloaded.by.today)>0){
    stop(paste0("ERROR: there are some tumors downloaded before today and there should not be for this function; an example name is \n", out.tumors$names.files[indices.tumors.downloaded.by.today[1]]))
  }
## READ in results, just to get them as a nice data frame
  all.tumors.df <- read.table(file=file.path(homedir, outcsv), header=TRUE, sep =",", stringsAsFactors=FALSE)
  print(dim(all.tumors.df))
##
## Now choose a unique file for each case:
unique.tumors.df <- choose.one.file.for.each.case(all.tumors.df)
n.total.cases <- length(unique.tumors.df$ids.cases)
##
print(paste0("Number of unique files- one for each case- for ", shortname, " (", longname, ") is ", length(out.tumors$ids.files)))
## Now put these in a random order
## Then can go back to get them out of a csv file
set.seed(random.seed)
random.ordering.of.indices <- sample(n.total.cases, size = n.total.cases)
sb.ids.tumors.with.random.ordering <- unique.tumors.df$ids.files[random.ordering.of.indices]
cases.tumors.with.random.ordering <- unique.tumors.df$ids.cases[random.ordering.of.indices]
names.tumors.with.random.ordering <- unique.tumors.df$names.files[random.ordering.of.indices]
df.tumors.with.random.ordering <- data.frame(sb.ids.tumors.with.random.ordering, names.tumors.with.random.ordering, cases.tumors.with.random.ordering)
print(paste0("Randomly ordering files now and then selecting the first ", n.use.this.many.files.now, "."))
use.these.now.ids <- sb.ids.tumors.with.random.ordering[1:n.use.this.many.files.now]
use.these.now.names <- names.tumors.with.random.ordering[1:n.use.this.many.files.now]
## NOT using these right now:
## write to file
## both one with current date, and one that's most recent
filename.pre.downloads.with.date = file.path(homedir,paste0(shortname, ".files.not.downloaded.before.", todaydate, ".csv"))
filename.downloads.with.date = file.path(homedir,paste0(shortname, ".files.not.yet.downloaded.as.of.", todaydate, ".csv"))
filename.mostrecent = file.path(homedir,paste0(shortname, ".files.not.yet.downloaded.most.recent.csv"))
cat("Writing files:\n", filename.pre.downloads.with.date, "\n", filename.downloads.with.date, "\n", filename.mostrecent, "\n")
n.tumors.with.random.ordering <- dim(df.tumors.with.random.ordering)[1]
write.table(df.tumors.with.random.ordering[1:n.tumors.with.random.ordering,], file = filename.pre.downloads.with.date, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
write.table(df.tumors.with.random.ordering[(n.use.this.many.files.now+1):n.tumors.with.random.ordering,], file = filename.downloads.with.date, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
write.table(df.tumors.with.random.ordering[(n.use.this.many.files.now+1):n.tumors.with.random.ordering,], file = filename.mostrecent, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
## Now copy these files to the machete project
print(paste0("About to copy ", n.use.this.many.files.now, " files to the machete project."))
out.tumors.today.first.copying.process <- copy.many.files.with.sb.id.to.project(vec.sbids=use.these.now.ids, proj.name="JSALZMAN/machete", auth.token=auth.token, tempdir=tempdir)
## AFTER COPYING, CHECK MANUALLY THAT THERE ARE NO _1_ PREFIXES
print("AFTER COPYING, CHECK MANUALLY THAT THERE ARE NO _1_ PREFIXES")
## after outputting the next thing, copying and pasting and editing, use it in runapi.R
## these are the ids in the project (so they are NOT the sb ids)
##
print.nice.ids.vector.within.function(out.tumors.today.first.copying.process$files.ids)
print.nice.ids.vector.within.function(names.tumors.with.random.ordering[1:n.use.this.many.files.now])
## also write files in case these are very long
nice.ids.file = file.path(homedir,paste0(shortname, ".nice.ids.", todaydate, ".csv"))
nice.names.file = file.path(homedir,paste0(shortname, ".nice.names.", todaydate, ".csv"))
write.table(out.tumors.today.first.copying.process$files.ids, file = nice.ids.file, row.names = FALSE, col.names = FALSE, sep = "\n", append=FALSE, quote=FALSE)
write.table(names.tumors.with.random.ordering[1:n.use.this.many.files.now], file = nice.names.file, row.names = FALSE, col.names = FALSE, sep = "\n", append=FALSE, quote=FALSE)
cat(paste0("Also writing files\n", nice.ids.file, "\n and \n", nice.names.file),"\n")
cat("Number of characters in ids output is ", nchar(print.nice.ids.vector(out.tumors.today.first.copying.process$files.ids)), " and number of characters in ids output is ", nchar(print.nice.ids.vector(names.tumors.with.random.ordering[1:n.use.this.many.files.now])), "\n", sep="")
}
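## Hypothetical usage sketch (not run), using the example parameter values
## from the comment above the function:
# get.new.tar.files(todaydate = "sep9", shortname = "glioma",
#   longname = "Brain Lower Grade Glioma", n.use.this.many.files.now = 22,
#   tempdir = tempdir, homedir = homedir, auth.token = auth.token,
#   files.per.download = files.per.download, allfilenames = allfilenames,
#   random.seed = 19651)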
## Assuming you already have a list of randomly ordered file names
## and ids in a "most recent" file,
## put n more on machete; also writes two files, the "most recent" file
## and a file with a date on it
## used in datasetapi.R
put.n.more.files.in.machete.project <- function(todaydate, shortname, longname, n.use.this.many.files.now, homedir=homedir, tempdir=tempdir, auth.token=auth.token){
filename.mostrecent = file.path(homedir,paste0(shortname, ".files.not.yet.downloaded.most.recent.csv"))
df.tumors.with.random.ordering <- read.table(filename.mostrecent, sep=",", header = FALSE, col.names = c("sb.id", "filename", "case"), stringsAsFactors = FALSE)
n.tumors.with.random.ordering <- dim(df.tumors.with.random.ordering)[1]
print(paste0("n.tumors.with.random.ordering is: ", n.tumors.with.random.ordering, "\n"))
stopifnot(n.use.this.many.files.now < n.tumors.with.random.ordering)
use.these.now.ids <- df.tumors.with.random.ordering$sb.id[1:n.use.this.many.files.now]
use.these.now.names <- df.tumors.with.random.ordering$filename[1:n.use.this.many.files.now]
## print.nice.ids.vector(use.these.now.ids)
## print.nice.ids.vector(use.these.now.names)
## filename.pre.downloads.with.date = file.path(homedir,paste0(shortname, todaydate, ".files.not.downloaded.before.csv"))
filename.downloads.with.date = file.path(homedir,paste0(shortname, ".", todaydate, ".files.not.yet.downloaded.as.of.csv"))
print(paste0("About to write files\n", filename.downloads.with.date, "\nand\n", filename.mostrecent))
write.table(df.tumors.with.random.ordering[(n.use.this.many.files.now+1):n.tumors.with.random.ordering,], file = filename.downloads.with.date, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
## NOTE: next thing overwrites previous file
write.table(df.tumors.with.random.ordering[(n.use.this.many.files.now+1):n.tumors.with.random.ordering,], file = filename.mostrecent, row.names = FALSE, col.names = FALSE, sep = ",", append=FALSE, quote=FALSE)
out.tumors.today.first.copying.process <- copy.many.files.with.sb.id.to.project(vec.sbids=use.these.now.ids, proj.name="JSALZMAN/machete", auth.token=auth.token, tempdir=tempdir)
## AFTER COPYING, CHECK MANUALLY THAT THERE ARE NO _1_ PREFIXES
## after outputting the next thing, copying and pasting and editing, use it in runapi.R
## these are the ids in the project (so they are NOT the sb ids)
##
print.nice.ids.vector(out.tumors.today.first.copying.process$files.ids)
print.nice.ids.vector(use.these.now.names)
## also write files in case these are very long
nice.ids.file = file.path(homedir,paste0(shortname, ".", todaydate, ".nice.ids.csv"))
nice.names.file = file.path(homedir,paste0(shortname, ".", todaydate, ".nice.names.csv"))
write.table(out.tumors.today.first.copying.process$files.ids, file = nice.ids.file, row.names = FALSE, col.names = FALSE, sep = "\n", append=FALSE, quote=FALSE)
write.table(use.these.now.names, file = nice.names.file, row.names = FALSE, col.names = FALSE, sep = "\n", append=FALSE, quote=FALSE)
print(paste0("Also writing files\n", nice.ids.file, "\n and \n", nice.names.file,"\n"))
}
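## Hypothetical usage sketch (not run); the date and count are illustrative:
# put.n.more.files.in.machete.project(todaydate = "sep16",
#   shortname = "glioma", longname = "Brain Lower Grade Glioma",
#   n.use.this.many.files.now = 10, homedir = homedir, tempdir = tempdir,
#   auth.token = auth.token)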
## NOT SURE IF THIS WORKS; haven't used it yet; I was trying it for
## something but it didn't work for what I wanted it for.
# out34 <- get.details.of.a.file.from.id(fileid = "564a31abe4b0ef121817527b", auth.token=auth.token, ttwdir=tempdir)
# taskid = "0bb0a961-41fd-4617-a9d4-f2392445a04e"
# http://docs.cancergenomicscloud.org/docs/get-details-of-a-task
#
get.details.of.a.file.from.id <- function(fileid, auth.token, ttwdir=tempdir){
filename.t <- file.path(ttwdir, "temptaskdetails.json")
system(paste('curl -s -H "X-SBG-Auth-Token: ', auth.token, ' " -H "content-type: application/json" -X GET "https://cgc-api.sbgenomics.com/v2/files/',fileid, '" > ', filename.t, sep =""))
RJSONIO::fromJSON(filename.t)
}
#=================================================================================*
# ---- setup ----
#=================================================================================*
# Load libraries:
library(RCurl)
library(lubridate)
# Load a source script:
script <-
getURL(
"https://raw.githubusercontent.com/bsevansunc/workshop_languageOfR/master/sourceCode_lesson6.R"
)
# Evaluate then remove the source script:
eval(parse(text = script))
rm(script)
#=================================================================================*
# ---- birdBanding ----
#=================================================================================*
caps <- read.csv('captureTable.csv') %>%
as_tibble
names(caps) <- str_replace_all(names(caps), 'CaptureCapture', '')
bandMeasures <- caps %>%
select(spp, bandNumber, enc, date, mass, wing, tl, age, sex) %>%
filter(mass != '99999', wing != '99999', tl != '99999', age != 'HY') %>%
mutate_at(c('mass', 'wing', 'tl'), as.numeric) %>%
na.omit %>%
group_by(spp) %>%
mutate(n = n()) %>%
ungroup %>%
filter(n > 100) %>%
select(-n) %>%
filter(age != 'U', sex != 'U')
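# Side note (an addition, not in the original lesson): mutate_at() above is
# superseded in dplyr >= 1.0; the equivalent modern spelling uses across():
# mutate(across(c('mass', 'wing', 'tl'), as.numeric))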
outlierBottom <- function(x){
median(x) - 2.5*mad(x)
}
outlierTop <- function(x){
median(x) + 2.5*mad(x)
}
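# Quick illustration of the fences these define (this example is an addition,
# not part of the original lesson): for a vector with one wild value,
# median +/- 2.5 * mad brackets the clustered values and excludes the outlier.
x <- c(9.5, 10, 10.2, 10.4, 11, 25)
outlierBottom(x) # lower fence, ~8.4, below all the clustered values
outlierTop(x) # upper fence, ~12.2, so 25 would be flagged as an outlier
rm(x)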
bandMeasuresClean <- bandMeasures %>%
group_by(spp, sex) %>%
mutate(
massB = outlierBottom(mass),
massT = outlierTop(mass),
wingB = outlierBottom(wing),
wingT = outlierTop(wing),
tlB = outlierBottom(tl),
tlT = outlierTop(tl)
) %>%
ungroup %>%
filter(mass > massB & mass < massT,
wing > wingB & wing < wingT,
tl > tlB & tl < tlT) %>%
select(spp:sex)
birdMeasures <- bandMeasuresClean %>%
filter(age != 'noData', sex != 'noData') %>%
filter(spp %in% c('GRCA', 'CACH', 'AMRO', 'BCCH'))
#=================================================================================*
# ---- plotting distributions ----
#=================================================================================*
#---------------------------------------------------------------------------------*
# ---- histogram ----
#---------------------------------------------------------------------------------*
# Creating a ggplot starts with specifying the data to be plotted and
# the "aesthetics" of the plot:
ggplot(birdMeasures, aes(x = mass))
# Next, we add an argument specifying how to plot the data. Here, we plot
# a histogram:
ggplot(birdMeasures, aes(x = mass)) +
geom_histogram()
# We received a warning message, because there are undoubtedly more informative
# and prettier ways to bin the data than the bins that are chosen automatically.
# We can specify the width or number of bins as an argument of geom_histogram:
ggplot(birdMeasures, aes(x = mass)) +
geom_histogram(binwidth = .5)
ggplot(birdMeasures, aes(x = mass)) +
geom_histogram(bins = 20)
# The distribution of the data is just plain strange. Remember that
# there are 4 species present. Let's add a "fill" argument to the
# aesthetics. We can add the new aesthetic to the plot itself:
ggplot(birdMeasures, aes(x = mass, fill = spp)) +
geom_histogram(bins = 20)
# But, as fill is a quality of the bars rather than the plot window,
# it's best practice to add the new aesthetic to the geometry:
ggplot(birdMeasures, aes(x = mass)) +
geom_histogram(aes(fill = spp), bins = 20)
# Notice that the first argument of ggplot is the data being plotted? That's
# a clue that these data should be moved out front and piped in:
birdMeasures %>%
ggplot(aes(x = mass)) +
geom_histogram(aes(fill = spp), bins = 20)
# This makes the code clearer and frees us up a bit. For example, if we
# wanted to compare just the Black-capped chickadee and Carolina chickadee,
# we could add a filter argument:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_histogram(aes(fill = spp), bins = 20)
#---------------------------------------------------------------------------------*
# ---- violin plots ----
#---------------------------------------------------------------------------------*
# Violin plots can be a very straightforward way to observe differences in
# distributions:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = spp, y = mass)) +
geom_violin(aes(fill = spp))
# The function coord_flip can be used to switch the x and y axis:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = spp, y = mass)) +
geom_violin(aes(fill = spp)) +
coord_flip()
#---------------------------------------------------------------------------------*
# ---- density plots ----
#---------------------------------------------------------------------------------*
# Another option is to plot the density of each species:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = spp))
# To better observe both groups, you can adjust transparency levels using the
# "alpha" parameter:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = spp), alpha = 0.5)
#=================================================================================*
# ---- facets ----
#=================================================================================*
# The overlap between the species mass measurements might be
# clearer if we split the plot. We can do so using facet_wrap:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = spp), alpha = 0.5) +
facet_wrap(~spp)
# The above is clearer, but specifying the number of rows makes size
# comparisons between the species easier:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = spp), alpha = 0.5) +
facet_wrap(~spp, nrow = 2)
# With the extra space we've created, we can easily use the above to display
# size differences based on species and sex:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2)
#=================================================================================*
# ---- colors ----
#=================================================================================*
# The default colors that ggplot uses are just plain ugly. Luckily it's easy
# to modify the colors to those of your choosing. Here we'll use the function
# scale_fill_manual to specify the colors for the fill:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2) +
scale_fill_manual(values = c('blue', 'red'))
# There really wasn't much improvement here. My preference is to hunt around
# for colors I like and grab their hex codes. For example, I used a "color-picker"
# app to extract the colors of Team Zissou's uniforms in the movie The Life Aquatic.
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2) +
scale_fill_manual(values = c('#9EB8C5', '#F32017'))
# It's a good idea to save the colors that you like for future use. For example,
# I could have saved my colors as:
zPalette <- c('#9EB8C5', '#F32017')
# You can then apply your color palette using:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2) +
scale_fill_manual(values = zPalette)
# If we're happy with the results and are ready to start refining the plot,
# we can assign it a name; we'll do exactly that in the labels section below.
#=================================================================================*
# ---- scaling axes ----
#=================================================================================*
# One thing we notice here is that the plot, by default, goes below 0 on the y
# axis and well outside of the bounds of the data on the x axis. We can fix this
# using "scale_y_continuous" and scale_x_continuous.
# Let's explicitly set the y axis:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2) +
scale_fill_manual(values = zPalette) +
scale_y_continuous(expand = c(0,0))
# Hmmmm ... maybe add a little space to the top:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2) +
scale_fill_manual(values = zPalette) +
scale_y_continuous(limits = c(0,.8), expand = c(0,0))
# And explicitly set line breaks:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2) +
scale_fill_manual(values = zPalette) +
scale_y_continuous(
limits = c(0, .8),
breaks = seq(0, .8, by = 0.1),
expand = c(0, 0)
)
# Let's do the same with the x axis:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2) +
scale_fill_manual(values = zPalette) +
scale_y_continuous(
limits = c(0, .8),
breaks = seq(0, .8, by = 0.1),
expand = c(0, 0)
) +
scale_x_continuous(
limits = c(7, 14),
breaks = 7:14,
expand = c(0, 0)
)
#=================================================================================*
# ---- labels ----
#=================================================================================*
# We want our legend and facet labels to be more informative. The "easiest" way to
# do this is by modifying the data frame. One method for doing so is changing
# species to a factor and setting the factor labels. We'll assign a name to this
# plot so that we can make a lot more changes without having to rewrite the whole
# thing.
massPlot <- birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
mutate(
spp = factor(spp, labels = c('Black-capped', 'Carolina')),
sex = factor(sex, labels = c('Female', 'Male'))) %>%
ggplot(aes(x = mass)) +
geom_density(aes(fill = sex), alpha = 0.5) +
facet_wrap(~spp, nrow = 2) +
scale_y_continuous(
limits = c(0, .8),
breaks = seq(0, .8, by = 0.1),
expand = c(0, 0)
) +
scale_x_continuous(
limits = c(7, 14),
breaks = 7:14,
expand = c(0, 0)
)
# The above is a start, but not a very professional looking plot. Let's start
# by capitalizing the x and y labels:
massPlot +
xlab('Mass') +
ylab('Density')
# To add a title to the whole plot, we can use the ggtitle argument:
massPlot +
xlab('Mass') +
ylab('Density') +
ggtitle('Mass of Carolina and Black-capped chickadees')
# You can add/modify labels and the title of your plot in one step:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density')
# "labs" can also be used to change the legend title, by specifying a label
# for the fill aesthetic:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex')
#=================================================================================*
# ---- themes ----
#=================================================================================*
# We're almost there, but it still isn't very handsome. It's time to modify the
# theme, which defines how the plot looks.
# Themes are defined based on theme elements. These include:
# - element_blank
# - element_rect
# - element_text
# - element_line
# Now we'll start changing the way it looks. Let's remove that terrible
# panel background using element_rect:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white')
)
# I liked having the panel lines, so let's bring them in using
# element_line:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
    panel.grid.major = element_line(color = 'gray80', size = .2)
  )
# How about the facet strips?
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
strip.background = element_rect(fill = 'white')
)
# Now I want the y axis lines as well:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white')
)
# Well, that legend title is pretty lame. Let's remove it:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white'),
legend.title = element_blank()
)
# Finally, let's change the size of the text throughout. To make the text
# associated with the tick labels bigger we use axis.text:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white'),
legend.title = element_blank(),
axis.text = element_text(size = 12)
)
# To make the axis titles bigger we use axis.title:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white'),
legend.title = element_blank(),
axis.text = element_text(size = 12),
axis.title = element_text(size = 18)
)
# To make the facet labels bigger we use strip.text:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white'),
legend.title = element_blank(),
axis.text = element_text(size = 12),
axis.title = element_text(size = 18),
strip.text = element_text(size = 18)
)
# Finally, let's make the plot title good and large:
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white'),
legend.title = element_blank(),
axis.text = element_text(size = 12),
axis.title = element_text(size = 18),
strip.text = element_text(size = 18),
plot.title = element_text(size = 22)
)
# That made the title drift off the page. Let's add a line break:
massPlot +
labs(title = 'Mass of Carolina and\nBlack-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white'),
legend.title = element_blank(),
axis.text = element_text(size = 12),
axis.title = element_text(size = 18),
strip.text = element_text(size = 18),
plot.title = element_text(size = 22)
)
# I still don't like how the title looks, it's too close to the plot.
# we can add a margin to fix this
massPlot +
labs(title = 'Mass of Carolina and\nBlack-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white'),
legend.title = element_blank(),
axis.text = element_text(size = 12),
axis.title = element_text(size = 18),
strip.text = element_text(size = 18),
plot.title = element_text(size = 22, margin = margin(b = 20))
)
# Let's space out the axis text as well (see ?margin):
massPlot +
labs(title = 'Mass of Carolina and Black-capped chickadees',
x = 'Mass',
y = 'Density',
fill = 'Sex') +
theme(
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(color = 'gray80', size = .2),
axis.line = element_line(color = 'black', size = .5),
strip.background = element_rect(fill = 'white'),
legend.title = element_blank(),
axis.text = element_text(size = 10),
axis.title = element_text(size = 16),
axis.title.x = element_text(size = 16, margin = margin(t = 10)),
axis.title.y = element_text(size = 16, margin = margin(r = 10)),
strip.text = element_text(size = 16),
panel.spacing = unit(1.5, 'lines'),
legend.text = element_text(size = 14),
plot.title = element_text(size = 18, margin = margin(b = 20))
)
# That may look a little crazy now, but click the zoom button above the
# plot window. As you resize the plot window, the relative size of the
# elements changes.
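# One more trick (an addition to the lesson): since we repeated that long
# theme() block many times above, you can store the theme once and reuse it:
zTheme <- theme(
  panel.background = element_rect(fill = 'white'),
  panel.grid.major = element_line(color = 'gray80', size = .2),
  axis.line = element_line(color = 'black', size = .5),
  strip.background = element_rect(fill = 'white'),
  legend.title = element_blank()
)
massPlot +
  labs(title = 'Mass of Carolina and\nBlack-capped chickadees',
       x = 'Mass',
       y = 'Density') +
  zTheme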
#=================================================================================*
# ---- save plot ----
#=================================================================================*
ggsave('examplePlot.png', width = 7, height = 7)
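# An aside (not in the original lesson): ggsave() writes the most recently
# displayed plot by default, but you can pass a plot object explicitly via
# the plot argument (the file name here is just an example):
ggsave('examplePlot_massPlot.png', plot = massPlot, width = 7, height = 7)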
#=================================================================================*
# ---- junkyard ----
#=================================================================================*
# It is difficult ...
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = mass)) +
geom_histogram(stat = 'density', aes(fill = spp))
# These plots are not very informative still, because the bird
# measurements are on very different scales:
birdMeasures %>%
ggplot(aes(x = mass)) +
geom_histogram(binwidth = 1) +
facet_wrap(~spp, scales = 'free')
# I wonder if males and females have different masses, let's
# color by sex:
birdMeasures %>%
ggplot(aes(x = mass)) +
geom_histogram(aes(fill = sex), binwidth = 1) +
facet_wrap(~spp, scales = 'free')
birdMeasures %>%
  filter(spp %in% c('BCCH', 'CACH')) %>%
  ggplot(aes(x = mass)) +
  geom_density(aes(fill = spp)) +
  facet_wrap(~spp, scales = 'free')
#=================================================================================*
# ---- points ----
#=================================================================================*
# Creating a ggplot starts with specifying the data to be plotted and
# the "aesthetics" of the plot (aes):
ggplot(birdMeasures, aes(x = wing, y = mass))
# Notice that the first argument of the ggplot function is the data frame being
# plotted. As such, we should pipe the data frame into ggplot:
birdMeasures %>%
ggplot(aes(x = wing, y = mass))
# Next, we add an argument specifying how to plot the data (geometry). Here, we
# will compare the length of wings by the mass using point geometry:
birdMeasures %>%
ggplot(aes(x = wing, y = mass)) +
geom_point()
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = wing, y = mass)) +
geom_point(aes(color = spp))
# This distribution is quite strange. Perhaps due to the species being plotted?
# We can break this plot into a sub-plot for each species using the facet_wrap
# function:
birdMeasures %>%
filter(spp %in% c('BCCH', 'CACH')) %>%
ggplot(aes(x = wing, y = mass)) +
geom_point() +
facet_wrap(~spp)
# This plot is still not very informative, because the species are of very
# different sizes. We can set the "scales" argument of facet_wrap such that
# the scale varies by species:
birdMeasures %>%
  ggplot(aes(x = wing, y = mass)) +
  geom_point() +
  facet_wrap(~spp, scales = 'free')
# The results are certainly more informative. I wonder if the size difference
# between males and females may be driving some of the diversity in sizes? To
# explore this, let's color points by sex. We do so by adding an aesthetic
# to the geom_point argument:
birdMeasures %>%
ggplot(aes(x = wing, y = mass)) +
geom_point(aes(color = sex)) +
  facet_wrap(~spp, scales = 'free')
birdMeasures %>%
ggplot(aes(x = wing, y = mass)) +
# geom_point(aes(color = sex)) +
  facet_wrap(~spp, scales = 'free') +
# geom_density2d(aes(color = sex)) +
stat_density_2d(aes(fill = ..density..), contour = FALSE, geom = 'raster')
|
/old_education_content/smbc_r_workshop/lesson7_code_junkyard.R
|
no_license
|
SMBC-NZP/smbc-nzp.github.io
|
R
| false | false | 22,523 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pbpkUtils.R
\name{getVariabilitySetChoices}
\alias{getVariabilitySetChoices}
\title{Get all variability sets}
\usage{
getVariabilitySetChoices(var_type = "physio")
}
\arguments{
\item{var_type}{type of variability set to return; one of "physio", "chem", "expo", or "conc"}
}
\value{
named list of all sets of the var_type
}
\description{
Get all the variability datasets in a given project. Variability sets need to
be handled differently from the other set types since they can themselves be of multiple types
}
|
/man/getVariabilitySetChoices.Rd
|
no_license
|
cran/plethem
|
R
| false | true | 607 |
rd
|
#' Data preparator for LightGBM datasets with rules (numeric)
#'
#' Attempts to prepare a clean dataset ready to put in a \code{lgb.Dataset}.
#' Factors and characters are converted to numeric. In addition, it keeps the rules created
#' so you can convert other datasets using this converter.
#'
#' @param data A data.frame or data.table to prepare.
#' @param rules A set of rules from the data preparator, if already used.
#'
#' @return A list with the cleaned dataset (\code{data}) and the rules (\code{rules}).
#' The data must be converted to a matrix format (\code{as.matrix}) for input
#' in \code{lgb.Dataset}.
#'
#' @examples
#' library(lightgbm)
#' data(iris)
#'
#' str(iris)
#'
#' new_iris <- lgb.prepare_rules(data = iris) # Autoconverter
#' str(new_iris$data)
#'
#' data(iris) # Erase iris dataset
#' iris$Species[1L] <- "NEW FACTOR" # Introduce junk factor (NA)
#'
#' # Use conversion using known rules
#' # Unknown factors become 0, excellent for sparse datasets
#' newer_iris <- lgb.prepare_rules(data = iris, rules = new_iris$rules)
#'
#' # Unknown factor is now zero, perfect for sparse datasets
#' newer_iris$data[1L, ] # Species became 0 as it is an unknown factor
#'
#' newer_iris$data[1L, 5L] <- 1.0 # Put back real initial value
#'
#' # Is the newly created dataset equal? YES!
#' all.equal(new_iris$data, newer_iris$data)
#'
#' # Can we test our own rules?
#' data(iris) # Erase iris dataset
#'
#' # We remapped values differently
#' personal_rules <- list(Species = c("setosa" = 3L,
#' "versicolor" = 2L,
#' "virginica" = 1L))
#' newest_iris <- lgb.prepare_rules(data = iris, rules = personal_rules)
#' str(newest_iris$data) # SUCCESS!
#'
#' @importFrom data.table set
#' @export
lgb.prepare_rules <- function(data, rules = NULL) {
# data.table not behaving like data.frame
if (inherits(data, "data.table")) {
# Must use existing rules
if (!is.null(rules)) {
# Loop through rules
for (i in names(rules)) {
data.table::set(data, j = i, value = unname(rules[[i]][data[[i]]]))
data[[i]][is.na(data[[i]])] <- 0L # Overwrite NAs by 0s
}
} else {
# Get data classes
list_classes <- vapply(data, class, character(1L))
# Map characters/factors
is_fix <- which(list_classes %in% c("character", "factor"))
rules <- list()
# Need to create rules?
if (length(is_fix) > 0L) {
# Go through all characters/factors
for (i in is_fix) {
# Store column elsewhere
mini_data <- data[[i]]
# Get unique values
if (is.factor(mini_data)) {
mini_unique <- levels(mini_data) # Factor
mini_numeric <- numeric(length(mini_unique))
mini_numeric[seq_along(mini_unique)] <- seq_along(mini_unique) # Respect ordinal if needed
} else {
mini_unique <- as.factor(unique(mini_data)) # Character
mini_numeric <- as.numeric(mini_unique) # No respect of ordinality
}
# Create rules
indexed <- colnames(data)[i] # Index value
rules[[indexed]] <- mini_numeric # Numeric content
names(rules[[indexed]]) <- mini_unique # Character equivalent
# Apply to real data column
data.table::set(data, j = i, value = unname(rules[[indexed]][mini_data]))
}
}
}
} else {
# Must use existing rules
if (!is.null(rules)) {
# Loop through rules
for (i in names(rules)) {
data[[i]] <- unname(rules[[i]][data[[i]]])
data[[i]][is.na(data[[i]])] <- 0L # Overwrite NAs by 0s
}
} else {
# Default routine (data.frame)
if (inherits(data, "data.frame")) {
# Get data classes
list_classes <- vapply(data, class, character(1L))
# Map characters/factors
is_fix <- which(list_classes %in% c("character", "factor"))
rules <- list()
# Need to create rules?
if (length(is_fix) > 0L) {
# Go through all characters/factors
for (i in is_fix) {
# Store column elsewhere
mini_data <- data[[i]]
# Get unique values
if (is.factor(mini_data)) {
mini_unique <- levels(mini_data) # Factor
mini_numeric <- numeric(length(mini_unique))
mini_numeric[seq_along(mini_unique)] <- seq_along(mini_unique) # Respect ordinal if needed
} else {
mini_unique <- as.factor(unique(mini_data)) # Character
mini_numeric <- as.numeric(mini_unique) # No respect of ordinality
}
# Create rules
indexed <- colnames(data)[i] # Index value
rules[[indexed]] <- mini_numeric # Numeric content
names(rules[[indexed]]) <- mini_unique # Character equivalent
# Apply to real data column
data[[i]] <- unname(rules[[indexed]][mini_data])
}
}
} else {
stop(
"lgb.prepare_rules: you provided "
, paste(class(data), collapse = " & ")
, " but data should have class data.frame"
)
}
}
}
return(list(data = data, rules = rules))
}
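# A short end-to-end sketch of the intended workflow (an addition mirroring
# the roxygen examples above; kept commented out since this is package code):
# prep <- lgb.prepare_rules(data = iris) # learn rules on a "training" set
# dtrain <- lightgbm::lgb.Dataset(
#   data = as.matrix(prep$data[, 1:4]) # features must be a matrix
#   , label = prep$data$Species # Species is now numeric via the rules
# )
# new_prep <- lgb.prepare_rules(data = iris, rules = prep$rules) # same mapping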
|
/R-package/R/lgb.prepare_rules.R
|
permissive
|
skyjiao/LightGBM
|
R
| false | false | 5,298 |
r
|
#' Create qtleffects object
#'
#' Creates an object needed to plot effect sizes of QTL from r/qtl.
#'
#' @param chr a numeric vector listing which chromosomes are to be plotted.
#' @param marker_locations A data.frame listing marker positions on each chromosome.
#' Should include a column labelled 'Position' and another labelled 'Chromosome'.
#' @return A qtleffects object listing chromosomes to be plotted, x-values for marker positions,
#' and x-values for the start of each chromosome.
#'
#' @export
qtleffects <-
function(chr, marker_locations){
# subset marker locations to pull out data only for chromosomes in chr
marker_locations <- marker_locations[marker_locations$Chromosome %in% chr,]
plot_markers <- vector('list', length(chr)) # empty list
for(i in 1:length(chr)){
this_chr <- marker_locations[marker_locations$Chromosome == chr[i],] # subset data for this chromosome
this_chr$chr_id <- i # assign an index
midpoint <- max(this_chr$Position) /2
this_chr$plot_position <- this_chr$Position - midpoint + i*100 # offset marker positions
plot_markers[[i]] <- this_chr # send to plot_markers
}
plot_markers <- do.call('rbind', plot_markers) # convert list to data.frame
chr_start <- plot_markers[match(1:length(chr), plot_markers$chr_id),] # summary of the starting position for each chromosome
return(list(chr=chr, marker_positions = plot_markers, chr_start = chr_start))
}
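# A small usage sketch (an addition; the marker values are made up, but the
# column names follow the roxygen documentation above):
# markers <- data.frame(
#   Chromosome = rep(1:2, each = 3),
#   Position = c(10, 55, 90, 5, 40, 80)
# )
# qe <- qtleffects(chr = 1:2, marker_locations = markers)
# qe$chr_start # x-values at which each plotted chromosome starts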
|
/R/qtleffects.R
|
permissive
|
ellisztamas/qtltools
|
R
| false | false | 1,648 |
r
|
library(shiny)
ui <- fluidPage(
pageWithSidebar(
# Application title
headerPanel("Shiny App Project - Target Heart Rate Calculator"),
sidebarPanel(
numericInput('age', 'Enter your age in years', 25) ,
submitButton('Calculate Target Heart Rate')
),
mainPanel(
      p('Your heart rate, or pulse, is the number of times your heart beats per minute. Normal heart rate varies from person to person. Knowing yours can be an important heart-health gauge.'),
      p('When you work out, there’s a simple way to know if you are over- or under-exercising: your Target Heart Rate, which is 70-80% of your Maximum Heart Rate.'),
      p('To find your Target Heart Rate, enter your age.'),
h4('Calculating Target Heart Rate based on values entered by you:'),
p('Age:'), verbatimTextOutput("inputagevalue"),
h4('Your Target Heart Rate is:'),
p('70%:'), verbatimTextOutput("THR1"),
p('80%:'), verbatimTextOutput("THR2")
)
)
)
server <- function(input, output) {
output$inputagevalue <- renderPrint({input$age})
output$THR1 <- renderText({(220 - input$age)*.7})
output$THR2 <- renderText({(220 - input$age)*.8})
}
shinyApp(ui = ui, server = server)
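# A possible hardening of the server above (a sketch, not part of the
# original app): shiny::validate()/need() can guard against missing or
# non-positive ages before the outputs render, e.g.
# output$THR1 <- renderText({
#   validate(need(is.numeric(input$age) && input$age > 0,
#                 "Please enter a positive age"))
#   (220 - input$age) * 0.7
# })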
|
/app.R
|
no_license
|
AmandaAffolter/Data-Products---Application
|
R
| false | false | 1,354 |
r
|
library(shiny)
ui <- fluidPage(
pageWithSidebar(
# Application title
headerPanel("Shiny App Project - Target Heart Rate Calculator"),
sidebarPanel(
numericInput('age', 'Enter your age in years', 25) ,
submitButton('Calculate Target Heart Rate')
),
mainPanel(
p('Your heart rate, or pulse, is the number of times your heart beats per minute. Normal heart rate varies from person to person. Knowing yours can be an important heart-health gauge.'),
p('When you work out, there’s a simple way to know if you are over- or under-exercising: your Target Heart Rate, which is 70-80% of your Maximum Heart Rate.'),
p('To find your Target Heart Rate, enter your Age'),
h4('Calculating Target Heart Rate based on values entered by you:'),
p('Age:'), verbatimTextOutput("inputagevalue"),
h4('Your Target Heart Rate is:'),
p('70%:'), verbatimTextOutput("THR1"),
p('80%:'), verbatimTextOutput("THR2")
)
)
)
server <- function(input, output) {
output$inputagevalue <- renderPrint({input$age})
output$THR1 <- renderText({(220 - input$age)*.7})
output$THR2 <- renderText({(220 - input$age)*.8})
}
shinyApp(ui = ui, server = server)
|
#' Plot SpatialRD output
#'
#' Produces a plot of the GRDD series and, optionally, a map that visualises every point estimate in space.
#'
#' @param SpatialRDoutput spatial object produced by an estimation with \code{\link{spatialrd}}
#' @param map T/F depending on whether the map plot is desired (make sure to set \code{spatial.objcet = T} in the \code{\link{spatialrd}} function)
#'
#' @return plots produced with ggplot2
#' @export
#'
#' @examples \dontrun{plotspatialrd(results.spatialrd)}
plotspatialrd <- function(SpatialRDoutput, map = FALSE) {
# replaced Courier New with Courier for now
# TODO
# - make pvalue an ggplot2::aes() with more intensity of colour depending on p-value
# - numerate borderpoints in mapplot
# - is GRDDseries the right title?
# - bring y name in the plot. instead of "Point-Estimate" on y axis?
GRDD <- ggplot2::ggplot(data = SpatialRDoutput,
mapping = ggplot2::aes(x = .data$Point, y = .data$Estimate, ymin = .data$CI_Conv_l, ymax = .data$CI_Conv_u)) +
ggplot2::geom_errorbar(color = "grey") +
ggplot2::geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ggplot2::geom_point(ggplot2::aes(colour = cut(.data$p_Conv, c(-Inf, .11, Inf))), size = 1, shape = 19) +
ggplot2::scale_color_manual(values = c("palegreen2", "lightcoral")) +
# Here comes the styling
ggplot2::theme_bw() + # needs to go before any other individual styling, otherwise it overwrites it
ggplot2::theme(text = ggplot2::element_text(family = "Courier"), plot.title = ggplot2::element_text(hjust = 0.5), legend.position = "none") + # center title, omit legend
ggplot2::ggtitle(paste("GRDDseries (conventional)")) +
ggplot2::labs(y = "Point-Estimate", x = "#Boundarypoint [conv. confidence intervals]")
GRDDrob <- ggplot2::ggplot(data = SpatialRDoutput,
mapping = ggplot2::aes(x = .data$Point, y = .data$Estimate, ymin = .data$CI_Rob_l, ymax = .data$CI_Rob_u)) +
ggplot2::geom_errorbar(color = "grey") +
ggplot2::geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ggplot2::geom_point(ggplot2::aes(colour = cut(.data$p_Rob, c(-Inf, .11, Inf))), size = 1, shape = 19) +
ggplot2::scale_color_manual(values = c("palegreen2", "lightcoral")) +
# Here comes the styling
ggplot2::theme_bw() + # needs to go before any other individual styling, otherwise it overwrites it
ggplot2::theme(text = ggplot2::element_text(family = "Courier"), plot.title = ggplot2::element_text(hjust = 0.5), legend.position = "none") + # center title, omit legend
ggplot2::ggtitle(paste("GRDDseries (robust)")) +
ggplot2::labs(y = "Point-Estimate", x = "#Boundarypoint [rob. confidence intervals]")
# MAPPLOT OF BORDERPOINTS
mapplot <- ggplot2::ggplot() +
#geom_sf(data = polygon_full.sf, alpha = 0.5) + # u need the data = !
ggplot2::geom_sf(data = SpatialRDoutput, ggplot2::aes(colour = cut(.data$p_Conv, c(-Inf, .11, Inf))), size = 1, shape = 19) + #coord_equal() +
ggplot2::scale_color_manual(values = c("palegreen2", "lightcoral")) +
#geom_point(data = data, ggplot2::aes(longitude, latitude), size = 0.5) +
# Here comes the styling
ggplot2::theme_bw() + # needs to go before any other individual styling, otherwise it overwrites it
ggplot2::theme(text = ggplot2::element_text(family = "Courier"), plot.title = ggplot2::element_text(hjust = 0.5), legend.position = "none", axis.title.y = ggplot2::element_blank()) +
ggplot2::ggtitle("conv. inference")
#coord_map(xlim = c(73.7, 74.2), ylim = c(15, 15.8))
if (isTRUE(map)) {
cowplot::plot_grid(cowplot::plot_grid(GRDD, GRDDrob, align = "v", nrow = 2), mapplot, rel_widths = c(1.8, 1))
} else {
cowplot::plot_grid(GRDD, GRDDrob, align = "v", nrow = 2)
}
}
|
/R/plotspatialrd.R
|
no_license
|
axlehner/SpatialRDD
|
R
| false | false | 3,789 |
r
|
#' Plot SpatialRD output
#'
#' Produces a plot of the GRDD series and, optionally, a map that visualises every point estimate in space.
#'
#' @param SpatialRDoutput spatial object produced by an estimation with \code{\link{spatialrd}}
#' @param map T/F depending on whether the map plot is desired (make sure to set \code{spatial.objcet = T} in the \code{\link{spatialrd}} function)
#'
#' @return plots produced with ggplot2
#' @export
#'
#' @examples \dontrun{plotspatialrd(results.spatialrd)}
plotspatialrd <- function(SpatialRDoutput, map = FALSE) {
# replaced Courier New with Courier for now
# TODO
# - make pvalue an ggplot2::aes() with more intensity of colour depending on p-value
# - numerate borderpoints in mapplot
# - is GRDDseries the right title?
# - bring y name in the plot. instead of "Point-Estimate" on y axis?
GRDD <- ggplot2::ggplot(data = SpatialRDoutput,
mapping = ggplot2::aes(x = .data$Point, y = .data$Estimate, ymin = .data$CI_Conv_l, ymax = .data$CI_Conv_u)) +
ggplot2::geom_errorbar(color = "grey") +
ggplot2::geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ggplot2::geom_point(ggplot2::aes(colour = cut(.data$p_Conv, c(-Inf, .11, Inf))), size = 1, shape = 19) +
ggplot2::scale_color_manual(values = c("palegreen2", "lightcoral")) +
# Here comes the styling
ggplot2::theme_bw() + # needs to go before any other individual styling, otherwise it overwrites it
ggplot2::theme(text = ggplot2::element_text(family = "Courier"), plot.title = ggplot2::element_text(hjust = 0.5), legend.position = "none") + # center title, omit legend
ggplot2::ggtitle(paste("GRDDseries (conventional)")) +
ggplot2::labs(y = "Point-Estimate", x = "#Boundarypoint [conv. confidence intervals]")
GRDDrob <- ggplot2::ggplot(data = SpatialRDoutput,
mapping = ggplot2::aes(x = .data$Point, y = .data$Estimate, ymin = .data$CI_Rob_l, ymax = .data$CI_Rob_u)) +
ggplot2::geom_errorbar(color = "grey") +
ggplot2::geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ggplot2::geom_point(ggplot2::aes(colour = cut(.data$p_Rob, c(-Inf, .11, Inf))), size = 1, shape = 19) +
ggplot2::scale_color_manual(values = c("palegreen2", "lightcoral")) +
# Here comes the styling
ggplot2::theme_bw() + # needs to go before any other individual styling, otherwise it overwrites it
ggplot2::theme(text = ggplot2::element_text(family = "Courier"), plot.title = ggplot2::element_text(hjust = 0.5), legend.position = "none") + # center title, omit legend
ggplot2::ggtitle(paste("GRDDseries (robust)")) +
ggplot2::labs(y = "Point-Estimate", x = "#Boundarypoint [rob. confidence intervals]")
# MAPPLOT OF BORDERPOINTS
mapplot <- ggplot2::ggplot() +
#geom_sf(data = polygon_full.sf, alpha = 0.5) + # u need the data = !
ggplot2::geom_sf(data = SpatialRDoutput, ggplot2::aes(colour = cut(.data$p_Conv, c(-Inf, .11, Inf))), size = 1, shape = 19) + #coord_equal() +
ggplot2::scale_color_manual(values = c("palegreen2", "lightcoral")) +
#geom_point(data = data, ggplot2::aes(longitude, latitude), size = 0.5) +
# Here comes the styling
ggplot2::theme_bw() + # needs to go before any other individual styling, otherwise it overwrites it
ggplot2::theme(text = ggplot2::element_text(family = "Courier"), plot.title = ggplot2::element_text(hjust = 0.5), legend.position = "none", axis.title.y = ggplot2::element_blank()) +
ggplot2::ggtitle("conv. inference")
#coord_map(xlim = c(73.7, 74.2), ylim = c(15, 15.8))
if (isTRUE(map)) {
cowplot::plot_grid(cowplot::plot_grid(GRDD, GRDDrob, align = "v", nrow = 2), mapplot, rel_widths = c(1.8, 1))
} else {
cowplot::plot_grid(GRDD, GRDDrob, align = "v", nrow = 2)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gg-plots.R
\name{ggally_ratio}
\alias{ggally_ratio}
\title{Plots a mosaic plot}
\usage{
ggally_ratio(data, mapping = do.call(ggplot2::aes_string,
as.list(colnames(data)[1:2])), ..., floor = 0, ceiling = NULL)
}
\arguments{
\item{data}{data set being used}
\item{mapping}{aesthetics being used. Only x and y will be used, and both are required}
\item{...}{passed to \code{\link[ggplot2]{geom_tile}(...)}}
\item{floor}{don't display cells smaller than this value}
\item{ceiling}{max value to scale frequencies. If any frequency is larger than the ceiling, the fill color is displayed darker than other rectangles}
}
\description{
Plots a mosaic plot using fluctuation.
}
\examples{
data(tips, package = "reshape")
ggally_ratio(tips, ggplot2::aes(sex, day))
ggally_ratio(tips, ggplot2::aes(sex, day)) + ggplot2::coord_equal()
# only plot tiles greater or equal to 20 and scale to a max of 50
ggally_ratio(
tips, ggplot2::aes(sex, day),
floor = 20, ceiling = 50
) + ggplot2::theme(aspect.ratio = 4/2)
}
\author{
Barret Schloerke \email{schloerke@gmail.com}
}
\keyword{hplot}
|
/man/ggally_ratio.Rd
|
no_license
|
cpsievert/ggally
|
R
| false | true | 1,157 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gg-plots.R
\name{ggally_ratio}
\alias{ggally_ratio}
\title{Plots a mosaic plot}
\usage{
ggally_ratio(data, mapping = do.call(ggplot2::aes_string,
as.list(colnames(data)[1:2])), ..., floor = 0, ceiling = NULL)
}
\arguments{
\item{data}{data set being used}
\item{mapping}{aesthetics being used. Only x and y will be used, and both are required}
\item{...}{passed to \code{\link[ggplot2]{geom_tile}(...)}}
\item{floor}{don't display cells smaller than this value}
\item{ceiling}{max value to scale frequencies. If any frequency is larger than the ceiling, the fill color is displayed darker than other rectangles}
}
\description{
Plots a mosaic plot using fluctuation.
}
\examples{
data(tips, package = "reshape")
ggally_ratio(tips, ggplot2::aes(sex, day))
ggally_ratio(tips, ggplot2::aes(sex, day)) + ggplot2::coord_equal()
# only plot tiles greater or equal to 20 and scale to a max of 50
ggally_ratio(
tips, ggplot2::aes(sex, day),
floor = 20, ceiling = 50
) + ggplot2::theme(aspect.ratio = 4/2)
}
\author{
Barret Schloerke \email{schloerke@gmail.com}
}
\keyword{hplot}
|
## Copied from RcppR6
read_file <- function(filename, ...) {
assert_file_exists(filename)
paste(readLines(filename), collapse="\n")
}
## https://github.com/viking/r-yaml/issues/5#issuecomment-16464325
yaml_load <- function(string) {
## More restrictive true/false handling. Only accept if it maps to
## full true/false:
handlers <- list("bool#yes" = function(x) {
if (identical(toupper(x), "TRUE")) TRUE else x},
"bool#no" = function(x) {
if (identical(toupper(x), "FALSE")) FALSE else x})
yaml::yaml.load(string, handlers=handlers)
}
yaml_read <- function(filename) {
catch_yaml <- function(e) {
stop(sprintf("while reading '%s'\n%s", filename, e$message),
call.=FALSE)
}
tryCatch(yaml_load(read_file(filename)),
error=catch_yaml)
}
with_default <- function(x, default=NULL) {
if (is.null(x)) default else x
}
## Warn if keys are found in an object that are not in a known set.
stop_unknown <- function(name, defn, known, error=TRUE) {
unknown <- setdiff(names(defn), known)
if (length(unknown) > 0) {
msg <- sprintf("Unknown fields in %s: %s",
name, paste(unknown, collapse=", "))
if (error) {
stop(msg, call.=FALSE)
} else {
warning(msg, immediate.=TRUE, call.=FALSE)
}
}
}
warn_unknown <- function(name, defn, known) {
stop_unknown(name, defn, known, FALSE)
}
## Pattern where we have a named list and we want to call function
## 'FUN' with each element's name as well as its value; rather than
## {FUN(X[[1]], ...), ..., FUN(X[[n]], ...)}
## we call
## {FUN(names(X)[1], X[[1]], ...), ..., FUN(names(X)[n], X[[n]], ...)}
## this can be achieved via mapply, but it's not pleasant.
lnapply <- function(X, FUN, ...) {
nX <- names(X)
res <- lapply(seq_along(X), function(i) FUN(nX[[i]], X[[i]], ...))
names(res) <- nX
res
}
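## A quick sketch of the call shape (hypothetical values, not from this package):
##   lnapply(list(a = 1, b = 2), function(name, value) paste(name, value))
##   #=> list(a = "a 1", b = "b 2")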
is_directory <- function(path) {
file.info(path)$isdir
}
## Like match.arg(), but does not allow for abbreviation.
match_value <- function(arg, choices, name=deparse(substitute(arg))) {
assert_scalar_character(arg)
if (!(arg %in% choices)) {
stop(sprintf("%s must be one of %s",
name, paste(dQuote(choices), collapse=", ")))
}
arg
}
from_yaml_map_list <- function(x) {
if (length(x) == 0L || is.character(x)) {
x <- as.list(x)
} else if (is.list(x)) {
if (!all(viapply(x, length) == 1L)) {
stop("Expected all elements to be scalar")
}
x <- unlist(x, FALSE)
} else {
stop("Unexpected input")
}
x
}
abbreviate <- function(str, width, cutoff="...") {
assert_scalar_character(str)
nc <- nchar(str)
if (nc <= width) {
str
} else if (width < nchar(cutoff)) {
character(0)
} else {
w <- nchar(cutoff)
paste0(substr(str, 1, width - w), cutoff)
}
}
empty_named_list <- function() {
structure(list(), names=character(0))
}
empty_named_character <- function() {
structure(character(0), names=character(0))
}
empty_named_integer <- function() {
structure(integer(), names=character(0))
}
strip_whitespace <- function(str) {
gsub("(^\\s+|\\s+$)", "", str)
}
strrep <- function (str, n) {
paste(rep_len(str, n), collapse = "")
}
last <- function(x) {
x[[length(x)]]
}
`last<-` <- function(x, value) {
x[[length(x)]] <- value
x
}
insert_at <- function(x, value, pos) {
assert_scalar_integer(pos)
len <- length(x)
if (pos > 0 && pos <= len) {
i <- seq_along(x)
x <- c(x[i < pos], value, x[i >= pos])
} else if (pos == len + 1L) {
x[pos] <- value
} else {
stop("Invalid position to insert")
}
x
}
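## e.g. insert_at(c(1, 2, 4), 3, 3L) gives c(1, 2, 3, 4); a sketch,
## assuming assert_scalar_integer() (defined elsewhere) accepts 3L.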
isFALSE <- function(x) {
identical(x, FALSE)
}
## NOTE: Does not handle vector input; `if (exists)` warns (or errors in
## recent R) when `path` has length > 1.
file_remove <- function(path, recursive=FALSE) {
exists <- file.exists(path)
if (exists) {
if (is_directory(path)) {
if (recursive) {
unlink(path, recursive)
} else {
stop("Use 'recursive=TRUE' to delete directories")
}
} else {
file.remove(path)
}
}
invisible(exists)
}
brackets <- function(text, style="square", pad=1) {
styles <- list(square = c("[", "]"),
round = c("(", ")"),
curly = c("{", "}"),
angle = c("<", ">"),
pipe = c("|", "|"),
star = c("*", "*"),
none = c(" ", " "))
style <- styles[[match_value(style, names(styles))]]
pad <- strrep(" ", pad)
paste0(style[[1]], pad, text, pad, style[[2]])
}
path_copy <- function(from, to, ...) {
dest <- file.path(to, dirname(from))
dir.create(dest, FALSE, TRUE)
file_copy(from, dest, ...)
}
## Needs making more robust. Something along the lines of Python's
## os.path would be ideal I think.
path_split <- function(x) {
strsplit(x, "/", fixed=TRUE)
}
file_copy <- function(from, to, ..., warn=TRUE) {
assert_scalar_character(from)
ok <- file.exists(from) && file.copy(from, to, TRUE)
if (warn && any(!ok)) {
warning("Failed to copy file: ", paste(from[!ok], collapse=", "))
}
invisible(ok)
}
require_zip <- function() {
if (!has_zip()) {
stop("This function needs a zip program on the path.", call.=FALSE)
}
}
has_zip <- function() {
zip_default <- eval(formals(zip)$zip)
"" != unname(Sys.which(zip_default))
}
## This zips up the directory at `path` into basename(path).zip.
## Because of the limitations of `zip()`, we do need to change working
## directories temporarily.
## TODO: Is this generally useful?
zip_dir <- function(path, zipfile=NULL, ..., flags="-r9X", quiet=TRUE,
overwrite=TRUE) {
require_zip()
assert_directory(path)
at <- dirname(path)
base <- basename(path)
if (is.null(zipfile)) {
zipfile <- paste0(base, ".zip")
}
if (quiet && !grepl("q", flags)) {
flags <- paste0(flags, "q")
}
cwd <- getwd()
zipfile_full <- file.path(cwd, zipfile)
## Should backup?
if (overwrite && file.exists(zipfile)) {
file_remove(zipfile)
}
if (at != ".") {
owd <- setwd(at)
on.exit(setwd(owd))
}
zip(zipfile_full, base, flags, ...)
invisible(zipfile)
}
## For use with tryCatch and withCallingHandlers
catch_error_prefix <- function(prefix) {
force(prefix)
function(e) {
e$message <- paste0(prefix, e$message)
stop(e)
}
}
catch_warning_prefix <- function(prefix) {
force(prefix)
function(e) {
e$message <- paste0(prefix, e$message)
warning(e)
invokeRestart("muffleWarning")
}
}
rep_along <- function(x, along.with) {
rep_len(x, length(along.with))
}
backup <- function(file) {
if (file.exists(file)) {
path <- file.path(tempfile(), file)
dir.create(dirname(path), showWarnings=FALSE, recursive=TRUE)
file.copy(file, path)
path
} else {
NULL
}
}
restore <- function(file, path) {
if (!is.null(path)) {
message("Restoring previous version of ", file)
file.copy(path, file, overwrite=TRUE)
}
}
file_extension <- function(x) {
pos <- regexpr("\\.([^.]+)$", x, perl=TRUE)
ret <- rep_along("", x)
i <- pos > -1L
ret[i] <- substring(x[i], pos[i] + 1L)
ret
}
vlapply <- function(X, FUN, ...) {
vapply(X, FUN, logical(1), ...)
}
viapply <- function(X, FUN, ...) {
vapply(X, FUN, integer(1), ...)
}
vcapply <- function(X, FUN, ...) {
vapply(X, FUN, character(1), ...)
}
uninvisible <- function(x) {
force(x)
x
}
## Like dQuote but not "smart"
dquote <- function(x) {
sprintf('"%s"', x)
}
squote <- function(x) {
sprintf("'%s'", x)
}
append_lines <- function(text, file) {
assert_character(text)
assert_scalar_character(file)
if (file.exists(file)) {
existing <- readLines(file)
} else {
existing <- character(0)
}
writeLines(c(existing, text), file)
}
## Attempt to work out if git ignores a set of files. Returns a
## logical vector along the set. If git is not installed, if we're
## not in a git repo, or if there is an error running `git
## check-ignore`, then all files are assumed not to be ignored.
git_ignores <- function(files) {
if (length(files) == 0L || !git_exists()) {
rep_along(FALSE, files)
} else {
tmp <- tempfile()
on.exit(file_remove(tmp))
writeLines(files, tmp)
ignored <- suppressWarnings(system2("git", c("check-ignore", "--stdin"),
stdin=tmp, stdout=TRUE, stderr=FALSE))
status <- attr(ignored, "status")
if (!is.null(status) && status > 1) {
warning("git check-ignore exited with error ", status, call. = FALSE)
rep_along(FALSE, files)
} else {
files %in% ignored
}
}
}
## Checks that git exists *and* that we're running in a git repo.
git_exists <- function() {
res <- tryCatch(git_sha(), condition=function(e) e)
!inherits(res, "condition")
}
git_sha <- function() {
system2("git", c("rev-parse", "HEAD"), stdout=TRUE, stderr=FALSE)
}
copy_environment <- function(from, to) {
for (i in ls(from, all.names=TRUE)) {
assign(i, get(i, from), to)
}
}
## Not sure about this one...
browse_environment <- function(e, ...) {
f <- function(.envir) {
for (.obj in ls(envir=.envir, all.names=TRUE)) {
tryCatch(assign(.obj, get(.obj, envir=e)),
error = function(e) {})
}
rm(.obj, .envir)
browser()
}
environment(f) <- parent.env(e)
f(e)
}
paint <- function(str, col) {
if (is.null(col)) {
str
} else {
crayon::make_style(col)(str)
}
}
did_you_mean <- function(name, pos, prefix="did you mean: ") {
close <- vcapply(name, function(x)
paste(agrep(x, pos, ignore.case=TRUE, value=TRUE), collapse=", "))
i <- nzchar(close)
if (!is.null(prefix)) {
close[i] <- paste0(prefix, close[i])
}
unname(close)
}
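## e.g. did_you_mean("lengh", c("length", "width")) might return
## "did you mean: length"; a sketch, since agrep() matching is approximate.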
## Like file.exists but cares about case. May not be bulletproof.
file_exists <- function(...) {
exists <- file.exists(...)
files_check <- c(...)[exists]
path_check <- dirname(normalizePath(files_check, mustWork=TRUE))
files_check <- basename(files_check)
path_uniq <- unique(path_check)
i <- match(path_check, path_uniq)
contents <- lapply(path_uniq, dir, all.files=TRUE)
ok <- vlapply(seq_along(files_check),
function(idx) files_check[[idx]] %in% contents[[i[idx]]])
exists[exists] <- ok
exists
}
## It would be nice to take a file and convert it to its real case.
file_real_case <- function(files) {
ret <- rep_len(NA_character_, length(files))
exists_real <- file_exists(files)
exists_fake <- file.exists(files)
ret[exists_real] <- files[exists_real]
i <- exists_fake & !exists_real
fix <- files[i]
fix_full <- normalizePath(fix, "/", mustWork=TRUE)
## Now, split the strings back to this point:
len <- nchar(fix_full)
ret[i] <- substr(fix_full, len - nchar(fix) + 1L, len)
ret
}
mix_cols <- function(cols, col2, p) {
m <- col2rgb(cols)
m2 <- col2rgb(rep(col2, length.out=length(cols)))
m3 <- (m * p + m2 * (1-p))/255
rgb(m3[1, ], m3[2, ], m3[3, ])
}
stop_if_duplicated <- function(x, message) {
if (anyDuplicated(x)) {
stop(message, ": ", paste(unique(x[duplicated(x)]), collapse = ", "),
call. = FALSE)
}
}
|
/R/utils.R
|
no_license
|
tjmahr/remake
|
R
| false | false | 11,068 |
r
|
## Copied from RcppR6
read_file <- function(filename, ...) {
assert_file_exists(filename)
paste(readLines(filename), collapse="\n")
}
## https://github.com/viking/r-yaml/issues/5#issuecomment-16464325
yaml_load <- function(string) {
## More restrictive true/false handling. Only accept if it maps to
## full true/false:
handlers <- list("bool#yes" = function(x) {
if (identical(toupper(x), "TRUE")) TRUE else x},
"bool#no" = function(x) {
if (identical(toupper(x), "FALSE")) FALSE else x})
yaml::yaml.load(string, handlers=handlers)
}
yaml_read <- function(filename) {
catch_yaml <- function(e) {
stop(sprintf("while reading '%s'\n%s", filename, e$message),
call.=FALSE)
}
tryCatch(yaml_load(read_file(filename)),
error=catch_yaml)
}
with_default <- function(x, default=NULL) {
if (is.null(x)) default else x
}
## Warn if keys are found in an object that are not in a known set.
stop_unknown <- function(name, defn, known, error=TRUE) {
unknown <- setdiff(names(defn), known)
if (length(unknown) > 0) {
msg <- sprintf("Unknown fields in %s: %s",
name, paste(unknown, collapse=", "))
if (error) {
stop(msg, call.=FALSE)
} else {
warning(msg, immediate.=TRUE, call.=FALSE)
}
}
}
warn_unknown <- function(name, defn, known) {
stop_unknown(name, defn, known, FALSE)
}
## Pattern where we have a named list and we want to call function
## 'FUN' with each element's name as well as its value; rather than
## {FUN(X[[1]], ...), ..., FUN(X[[n]], ...)}
## we call
## {FUN(names(X)[1], X[[1]], ...), ..., FUN(names(X)[n], X[[n]], ...)}
## this can be achieved via mapply, but it's not pleasant.
lnapply <- function(X, FUN, ...) {
nX <- names(X)
res <- lapply(seq_along(X), function(i) FUN(nX[[i]], X[[i]], ...))
names(res) <- nX
res
}
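## A quick sketch of the call shape (hypothetical values, not from this package):
##   lnapply(list(a = 1, b = 2), function(name, value) paste(name, value))
##   #=> list(a = "a 1", b = "b 2")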
is_directory <- function(path) {
file.info(path)$isdir
}
## Like match.arg(), but does not allow for abbreviation.
match_value <- function(arg, choices, name=deparse(substitute(arg))) {
assert_scalar_character(arg)
if (!(arg %in% choices)) {
stop(sprintf("%s must be one of %s",
name, paste(dQuote(choices), collapse=", ")))
}
arg
}
from_yaml_map_list <- function(x) {
if (length(x) == 0L || is.character(x)) {
x <- as.list(x)
} else if (is.list(x)) {
if (!all(viapply(x, length) == 1L)) {
stop("Expected all elements to be scalar")
}
x <- unlist(x, FALSE)
} else {
stop("Unexpected input")
}
x
}
abbreviate <- function(str, width, cutoff="...") {
assert_scalar_character(str)
nc <- nchar(str)
if (nc <= width) {
str
} else if (width < nchar(cutoff)) {
character(0)
} else {
w <- nchar(cutoff)
paste0(substr(str, 1, width - w), cutoff)
}
}
empty_named_list <- function() {
structure(list(), names=character(0))
}
empty_named_character <- function() {
structure(character(0), names=character(0))
}
empty_named_integer <- function() {
structure(integer(), names=character(0))
}
strip_whitespace <- function(str) {
gsub("(^\\s+|\\s+$)", "", str)
}
strrep <- function (str, n) {
paste(rep_len(str, n), collapse = "")
}
last <- function(x) {
x[[length(x)]]
}
`last<-` <- function(x, value) {
x[[length(x)]] <- value
x
}
insert_at <- function(x, value, pos) {
assert_scalar_integer(pos)
len <- length(x)
if (pos > 0 && pos <= len) {
i <- seq_along(x)
x <- c(x[i < pos], value, x[i >= pos])
} else if (pos == len + 1L) {
x[pos] <- value
} else {
stop("Invalid position to insert")
}
x
}
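## e.g. insert_at(c(1, 2, 4), 3, 3L) gives c(1, 2, 3, 4); a sketch,
## assuming assert_scalar_integer() (defined elsewhere) accepts 3L.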
isFALSE <- function(x) {
identical(x, FALSE)
}
## NOTE: Does not handle vector input; `if (exists)` warns (or errors in
## recent R) when `path` has length > 1.
file_remove <- function(path, recursive=FALSE) {
exists <- file.exists(path)
if (exists) {
if (is_directory(path)) {
if (recursive) {
unlink(path, recursive)
} else {
stop("Use 'recursive=TRUE' to delete directories")
}
} else {
file.remove(path)
}
}
invisible(exists)
}
brackets <- function(text, style="square", pad=1) {
styles <- list(square = c("[", "]"),
round = c("(", ")"),
curly = c("{", "}"),
angle = c("<", ">"),
pipe = c("|", "|"),
star = c("*", "*"),
none = c(" ", " "))
style <- styles[[match_value(style, names(styles))]]
pad <- strrep(" ", pad)
paste0(style[[1]], pad, text, pad, style[[2]])
}
path_copy <- function(from, to, ...) {
dest <- file.path(to, dirname(from))
dir.create(dest, FALSE, TRUE)
file_copy(from, dest, ...)
}
## Needs making more robust. Something along the lines of Python's
## os.path would be ideal I think.
path_split <- function(x) {
strsplit(x, "/", fixed=TRUE)
}
file_copy <- function(from, to, ..., warn=TRUE) {
assert_scalar_character(from)
ok <- file.exists(from) && file.copy(from, to, TRUE)
if (warn && any(!ok)) {
warning("Failed to copy file: ", paste(from[!ok], collapse=", "))
}
invisible(ok)
}
require_zip <- function() {
if (!has_zip()) {
stop("This function needs a zip program on the path.", call.=FALSE)
}
}
has_zip <- function() {
zip_default <- eval(formals(zip)$zip)
"" != unname(Sys.which(zip_default))
}
## This zips up the directory at `path` into basename(path).zip.
## Because of the limitations of `zip()`, we do need to change working
## directories temporarily.
## TODO: Is this generally useful?
zip_dir <- function(path, zipfile=NULL, ..., flags="-r9X", quiet=TRUE,
overwrite=TRUE) {
require_zip()
assert_directory(path)
at <- dirname(path)
base <- basename(path)
if (is.null(zipfile)) {
zipfile <- paste0(base, ".zip")
}
if (quiet && !grepl("q", flags)) {
flags <- paste0(flags, "q")
}
cwd <- getwd()
zipfile_full <- file.path(cwd, zipfile)
## Should backup?
if (overwrite && file.exists(zipfile)) {
file_remove(zipfile)
}
if (at != ".") {
owd <- setwd(at)
on.exit(setwd(owd))
}
zip(zipfile_full, base, flags, ...)
invisible(zipfile)
}
## For use with tryCatch and withCallingHandlers
catch_error_prefix <- function(prefix) {
force(prefix)
function(e) {
e$message <- paste0(prefix, e$message)
stop(e)
}
}
catch_warning_prefix <- function(prefix) {
force(prefix)
function(e) {
e$message <- paste0(prefix, e$message)
warning(e)
invokeRestart("muffleWarning")
}
}
rep_along <- function(x, along.with) {
rep_len(x, length(along.with))
}
backup <- function(file) {
if (file.exists(file)) {
path <- file.path(tempfile(), file)
dir.create(dirname(path), showWarnings=FALSE, recursive=TRUE)
file.copy(file, path)
path
} else {
NULL
}
}
restore <- function(file, path) {
if (!is.null(path)) {
message("Restoring previous version of ", file)
file.copy(path, file, overwrite=TRUE)
}
}
file_extension <- function(x) {
pos <- regexpr("\\.([^.]+)$", x, perl=TRUE)
ret <- rep_along("", x)
i <- pos > -1L
ret[i] <- substring(x[i], pos[i] + 1L)
ret
}
vlapply <- function(X, FUN, ...) {
vapply(X, FUN, logical(1), ...)
}
viapply <- function(X, FUN, ...) {
vapply(X, FUN, integer(1), ...)
}
vcapply <- function(X, FUN, ...) {
vapply(X, FUN, character(1), ...)
}
uninvisible <- function(x) {
force(x)
x
}
## Like dQuote but not "smart"
dquote <- function(x) {
sprintf('"%s"', x)
}
squote <- function(x) {
sprintf("'%s'", x)
}
append_lines <- function(text, file) {
assert_character(text)
assert_scalar_character(file)
if (file.exists(file)) {
existing <- readLines(file)
} else {
existing <- character(0)
}
writeLines(c(existing, text), file)
}
## Attempt to work out if git ignores a set of files. Returns a
## logical vector along the set. If git is not installed, if we're
## not in a git repo, or if there is an error running `git
## check-ignore`, then all files are assumed not to be ignored.
git_ignores <- function(files) {
if (length(files) == 0L || !git_exists()) {
rep_along(FALSE, files)
} else {
tmp <- tempfile()
on.exit(file_remove(tmp))
writeLines(files, tmp)
ignored <- suppressWarnings(system2("git", c("check-ignore", "--stdin"),
stdin=tmp, stdout=TRUE, stderr=FALSE))
status <- attr(ignored, "status")
if (!is.null(status) && status > 1) {
warning("git check-ignore exited with error ", status, call. = FALSE)
rep_along(FALSE, files)
} else {
files %in% ignored
}
}
}
## Checks that git exists *and* that we're running in a git repo.
git_exists <- function() {
res <- tryCatch(git_sha(), condition=function(e) e)
!inherits(res, "condition")
}
git_sha <- function() {
system2("git", c("rev-parse", "HEAD"), stdout=TRUE, stderr=FALSE)
}
copy_environment <- function(from, to) {
for (i in ls(from, all.names=TRUE)) {
assign(i, get(i, from), to)
}
}
## Not sure about this one...
browse_environment <- function(e, ...) {
f <- function(.envir) {
for (.obj in ls(envir=.envir, all.names=TRUE)) {
tryCatch(assign(.obj, get(.obj, envir=e)),
error = function(e) {})
}
rm(.obj, .envir)
browser()
}
environment(f) <- parent.env(e)
f(e)
}
paint <- function(str, col) {
if (is.null(col)) {
str
} else {
crayon::make_style(col)(str)
}
}
did_you_mean <- function(name, pos, prefix="did you mean: ") {
close <- vcapply(name, function(x)
paste(agrep(x, pos, ignore.case=TRUE, value=TRUE), collapse=", "))
i <- nzchar(close)
if (!is.null(prefix)) {
close[i] <- paste0(prefix, close[i])
}
unname(close)
}
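## e.g. did_you_mean("lengh", c("length", "width")) might return
## "did you mean: length"; a sketch, since agrep() matching is approximate.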
## Like file.exists but cares about case. May not be bulletproof.
file_exists <- function(...) {
exists <- file.exists(...)
files_check <- c(...)[exists]
path_check <- dirname(normalizePath(files_check, mustWork=TRUE))
files_check <- basename(files_check)
path_uniq <- unique(path_check)
i <- match(path_check, path_uniq)
contents <- lapply(path_uniq, dir, all.files=TRUE)
ok <- vlapply(seq_along(files_check),
function(idx) files_check[[idx]] %in% contents[[i[idx]]])
exists[exists] <- ok
exists
}
## It would be nice to take a file and convert it to its real case.
file_real_case <- function(files) {
ret <- rep_len(NA_character_, length(files))
exists_real <- file_exists(files)
exists_fake <- file.exists(files)
ret[exists_real] <- files[exists_real]
i <- exists_fake & !exists_real
fix <- files[i]
fix_full <- normalizePath(fix, "/", mustWork=TRUE)
## Now, split the strings back to this point:
len <- nchar(fix_full)
ret[i] <- substr(fix_full, len - nchar(fix) + 1L, len)
ret
}
mix_cols <- function(cols, col2, p) {
m <- col2rgb(cols)
m2 <- col2rgb(rep(col2, length.out=length(cols)))
m3 <- (m * p + m2 * (1-p))/255
rgb(m3[1, ], m3[2, ], m3[3, ])
}
stop_if_duplicated <- function(x, message) {
if (anyDuplicated(x)) {
stop(message, ": ", paste(unique(x[duplicated(x)]), collapse = ", "),
call. = FALSE)
}
}
|
# Data Type : the nature of the stored data (numeric, character, logical)
# Data Structure : the memory layout of the data stored in a variable.
# Data structures provided by R
# There are 6 to remember!!
# They fall into 2 groups,
# depending on whether the data types are the same or not:
# vector : 1-dimensional, same data type
# matrix : 2-dimensional, same data type
# Array : 3-dimensional, same data type
# List : 1-dimensional, different data types
#        (nested data structure)
# Data Frame : 2-dimensional, different data types
# Factor : categorical data structure
###########################################
# 1. vector
# A vector is an extension of a scalar: a 1-dimensional linear structure.
# A vector consists of elements of the same data type.
# Vector elements can be accessed by index ( [] ).
# Indexing starts at 1.
# Ways to create a vector
# 1. Use the combine function ( c() ).
#    Generally used to build a vector from data
#    with no particular regularity.
#    Other vectors can also be used to build a vector!
var1 = c(1,2,6,9,10)
var1
mode(var1)
var2 = c(TRUE,FALSE,TRUE) #
var2
var3 = c("홍길동","김길동","최길동")
var3
var4 = c(200,TRUE,"아우성!!")
var4
var5 = c(var1,var2) # 1 2 6 9 10 1 0 1
# combining vectors
var5
# 2. A vector can be created with the : operator!
#    Numeric only; used to build a vector from a set of
#    numbers that increase or decrease by 1.
#    Used as start:end, with both ends inclusive.
var1 = 1:5; var1
var2 = 5:1; var2
var3 = 3.4:10; var3
# 3. A vector can be created with seq()!
#    The general form of :, used to generate an
#    arithmetic sequence as a vector.
var1 = seq(from=1,to=10,by=3) # better readability
var1 = seq(1,10,3)
var1
# 4. A vector can be created with rep()!
#    Short for replicate.
#    Creates a vector by repeating values the given number of times.
var1 = rep(1:3, 3) # times can be omitted
var1 # 1 2 3 1 2 3 1 2 3
var2 = rep(1:3, each=3)
var2 # 1 1 1 2 2 2 3 3 3
# Let's check the data type of a vector!
mode(var1) # numeric
# How do we find the number of elements in a vector?
# Use the length() function.
var1 = c(1:10)
var1
length(var1) # 10
# length can also be used in a different sense!
var1 = seq(1,100, length=7); var1
# Extracting data from a vector
# Use [] to extract data from a vector.
var1 = c(67,90,87,50,100)
var1
var1[1] # extract the first element of the vector
var1[length(var1)] # extract the last element of the vector
var1[2:4] # :, c(), seq(), rep(), which are used to create vectors, can also be used to access vector elements!
var1[c(1,5)]
var1[seq(1,4)]
var1[6] # NA
var1[-1] # "-" means exclude
var1[-c(1:3)] # 50 100
# Naming vector elements
var1 = c(67,90,50)
var1
names(var1) # the elements of the vector have no names yet!!
names(var1) = c("국어","영어","수학") # set names
var1
var1[2] # extract a vector element by index
var1["영어"] # extract a vector element by name
# Vector arithmetic
# Numeric vectors support arithmetic with scalars, and vector-to-vector operations are also possible!
var1 <- 1:3 # 1 2 3
var2 <- 4:6 # 4 5 6
var1; var2
var1 * 2 # 2 4 6
var1 + 10 # 11 12 13
var1 + var2 # 5 7 9
var3 = 5:10 # 5 6 7 8 9 10
var1 + var3 # 1 2 3 1 2 3 # recycling rule
# 5 6 7 8 9 10
# 6 8 10 9 11 13
var4 = 5:9 # 5 6 7 8 9
var1 + var4 # 1 2 3 1 2
# 5 6 7 8 9
# 6 8 10 9 11 # works, but with a warning
# Set operations on vectors
# union() : union
# intersect() : intersection
# setdiff() : set difference
var1 = c(1:5)
var2 = c(3:7)
union(var1,var2)
intersect(var1,var2)
setdiff(var1,var2)
# Comparing vectors (checking whether two vectors are the same)
# identical : returns TRUE only when the two vectors match in element count, order, and content
# setequal : compares only the contents, regardless of size and order
var1 = 1:3; var1
var2 = 1:3
var3 = c(1,3,2,1,1,1,3,3,4)
var2 = c(1,2,3); var2
class(var1)
identical(var1, var2) # TRUE because the vectors are the same
identical(var1, var3) # not the same vector!!
setequal(var1,var2) # the contents are the same!!
# A vector with no elements
var1 = vector(mode="character", length=10)
var1
|
/서비스-산업-데이터를-활용한-머신러닝-분석/전반기(문성훈 강사님)/R/06_vector.R
|
no_license
|
FASLADODO/Lecture-Multicampus
|
R
| false | false | 4,764 |
r
|
# Data Type : the nature of the stored data (numeric, character, logical)
# Data Structure : the memory layout of the data stored in a variable.
# Data structures provided by R
# There are 6 to remember!!
# They fall into 2 groups,
# depending on whether the data types are the same or not:
# vector : 1-dimensional, same data type
# matrix : 2-dimensional, same data type
# Array : 3-dimensional, same data type
# List : 1-dimensional, different data types
#        (nested data structure)
# Data Frame : 2-dimensional, different data types
# Factor : categorical data structure
###########################################
# 1. vector
# A vector is an extension of a scalar: a 1-dimensional linear structure.
# A vector consists of elements of the same data type.
# Vector elements can be accessed by index ( [] ).
# Indexing starts at 1.
# Ways to create a vector
# 1. Use the combine function ( c() ).
#    Generally used to build a vector from data
#    with no particular regularity.
#    Other vectors can also be used to build a vector!
var1 = c(1,2,6,9,10)
var1
mode(var1)
var2 = c(TRUE,FALSE,TRUE) #
var2
var3 = c("홍길동","김길동","최길동")
var3
var4 = c(200,TRUE,"아우성!!")
var4
var5 = c(var1,var2) # 1 2 6 9 10 1 0 1
# combining vectors
var5
# 2. A vector can be created with the : operator!
#    Numeric only; used to build a vector from a set of
#    numbers that increase or decrease by 1.
#    Used as start:end, with both ends inclusive.
var1 = 1:5; var1
var2 = 5:1; var2
var3 = 3.4:10; var3
# 3. A vector can be created with seq()!
#    The general form of :, used to generate an
#    arithmetic sequence as a vector.
var1 = seq(from=1,to=10,by=3) # better readability
var1 = seq(1,10,3)
var1
# 4. A vector can be created with rep()!
#    Short for replicate.
#    Creates a vector by repeating values the given number of times.
var1 = rep(1:3, 3) # times can be omitted
var1 # 1 2 3 1 2 3 1 2 3
var2 = rep(1:3, each=3)
var2 # 1 1 1 2 2 2 3 3 3
# Let's check the data type of a vector!
mode(var1) # numeric
# How do we find the number of elements in a vector?
# Use the length() function.
var1 = c(1:10)
var1
length(var1) # 10
# length can also be used in a different sense!
var1 = seq(1,100, length=7); var1
# Extracting data from a vector
# Use [] to extract data from a vector.
var1 = c(67,90,87,50,100)
var1
var1[1] # extract the first element of the vector
var1[length(var1)] # extract the last element of the vector
var1[2:4] # :, c(), seq(), rep(), which are used to create vectors, can also be used to access vector elements!
var1[c(1,5)]
var1[seq(1,4)]
var1[6] # NA
var1[-1] # "-" means exclude
var1[-c(1:3)] # 50 100
# Naming vector elements
var1 = c(67,90,50)
var1
names(var1) # the elements of the vector have no names yet!!
names(var1) = c("국어","영어","수학") # set names
var1
var1[2] # extract a vector element by index
var1["영어"] # extract a vector element by name
# Vector arithmetic
# Numeric vectors support arithmetic with scalars, and vector-to-vector operations are also possible!
var1 <- 1:3 # 1 2 3
var2 <- 4:6 # 4 5 6
var1; var2
var1 * 2 # 2 4 6
var1 + 10 # 11 12 13
var1 + var2 # 5 7 9
var3 = 5:10 # 5 6 7 8 9 10
var1 + var3 # 1 2 3 1 2 3 # recycling rule
# 5 6 7 8 9 10
# 6 8 10 9 11 13
var4 = 5:9 # 5 6 7 8 9
var1 + var4 # 1 2 3 1 2
# 5 6 7 8 9
# 6 8 10 9 11 # works, but with a warning
# Set operations on vectors
# union() : union
# intersect() : intersection
# setdiff() : set difference
var1 = c(1:5)
var2 = c(3:7)
union(var1,var2)
intersect(var1,var2)
setdiff(var1,var2)
# Comparing vectors (checking whether two vectors are the same)
# identical : returns TRUE only when the two vectors match in element count, order, and content
# setequal : compares only the contents, regardless of size and order
var1 = 1:3; var1
var2 = 1:3
var3 = c(1,3,2,1,1,1,3,3,4)
var2 = c(1,2,3); var2
class(var1)
identical(var1, var2) # TRUE because the vectors are the same
identical(var1, var3) # not the same vector!!
setequal(var1,var2) # the contents are the same!!
# A vector with no elements
var1 = vector(mode="character", length=10)
var1
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dplyr_verbs.r
\name{chunk_summarize}
\alias{chunk_summarize}
\alias{chunk_summarise}
\alias{chunk_group_by}
\alias{chunk_ungroup}
\title{Group by within each disk.frame}
\usage{
chunk_summarize(.data, ...)
chunk_summarise(.data, ...)
chunk_group_by(.data, ...)
chunk_ungroup(.data, ...)
}
\arguments{
\item{.data}{a disk.frame}
\item{...}{passed to dplyr::group_by}
}
\description{
The disk.frame group-by operation performs the grouping WITHIN each chunk. This is
often done for performance reasons. If the user wishes to perform a group-by
across chunks, they may choose to use the `hard_group_by` function, which is
expensive as it reorganizes the chunks by the shard key.
}
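\examples{
\dontrun{
# A minimal sketch; assumes `df` is an existing disk.frame and dplyr is attached.
df \%>\% chunk_group_by(grp) \%>\% chunk_summarise(n = n())
}
}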
\seealso{
hard_group_by group_by
}
|
/fuzzedpackages/disk.frame/man/chunk_group_by.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | true | 801 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dplyr_verbs.r
\name{chunk_summarize}
\alias{chunk_summarize}
\alias{chunk_summarise}
\alias{chunk_group_by}
\alias{chunk_ungroup}
\title{Group by within each disk.frame}
\usage{
chunk_summarize(.data, ...)
chunk_summarise(.data, ...)
chunk_group_by(.data, ...)
chunk_ungroup(.data, ...)
}
\arguments{
\item{.data}{a disk.frame}
\item{...}{passed to dplyr::group_by}
}
\description{
The disk.frame group-by operation performs the grouping WITHIN each chunk. This is
often done for performance reasons. If the user wishes to perform a group-by
across chunks, they may choose to use the `hard_group_by` function, which is
expensive as it reorganizes the chunks by the shard key.
}
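\examples{
\dontrun{
# A minimal sketch; assumes `df` is an existing disk.frame and dplyr is attached.
df \%>\% chunk_group_by(grp) \%>\% chunk_summarise(n = n())
}
}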
\seealso{
hard_group_by group_by
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docxtractr-package.r
\docType{package}
\name{docxtractr}
\alias{docxtractr}
\alias{docxtractr-package}
\title{docxtractr is an R package for extracting tables and comments out of Word documents (docx)}
\description{
Microsoft Word docx files provide an XML structure that is fairly
straightforward to navigate, especially when it applies to Word tables. The
docxtractr package provides tools to determine table count + table structure and
extract tables from Microsoft Word docx documents. It also provides tools to determine
comment count and extract comments from Word docx documents.
}
\author{
Bob Rudis (@hrbrmstr)
}
|
/man/docxtractr.Rd
|
no_license
|
bdilday/docxtractr
|
R
| false | true | 700 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docxtractr-package.r
\docType{package}
\name{docxtractr}
\alias{docxtractr}
\alias{docxtractr-package}
\title{docxtractr is an R package for extracting tables and comments out of Word documents (docx)}
\description{
Microsoft Word docx files provide an XML structure that is fairly
straightforward to navigate, especially when it applies to Word tables. The
docxtractr package provides tools to determine table count + table structure and
extract tables from Microsoft Word docx documents. It also provides tools to determine
comment count and extract comments from Word docx documents.
}
\author{
Bob Rudis (@hrbrmstr)
}
|
\name{estimateDensity2D}
\alias{estimateDensity2D}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
estimateDensity2D
}
\description{
Estimates densities for two-dimensional data with the given estimation type
}
\usage{
estimateDensity2D(X, Y, DensityEstimation = "SDH",
SampleSize, na.rm = FALSE, NoBinsOrPareto = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
[1:n] numerical vector of first feature
}
\item{Y}{
[1:n] numerical vector of second feature
}
\item{DensityEstimation}{
Either "PDE","SDH" or "kde2d"
}
\item{SampleSize}{
Sample Size in case of big data
}
\item{na.rm}{
Function may not work with non-finite values. If such cases should be removed automatically, set this parameter to TRUE
}
\item{NoBinsOrPareto}{
Density-specific parameter, for PDEscatter (ParetoRadius), SDH (nbins) or kde2d (bins)
}
}
\details{
Each two-dimensional data point is defined by its corresponding X and Y value.
}
\value{
List V with
\item{X }{[1:m] numerical vector of first feature, m<=n depending on whether all values are finite and on the na.rm parameter}
\item{Y }{[1:m] numerical vector of second feature, m<=n depending on whether all values are finite and on the na.rm parameter}
\item{Densities }{the density of each two-dimensional data point}
}
\references{
[Ultsch, 2005] Ultsch, A.: Pareto density estimation: A density estimation for knowledge discovery, In Baier, D. & Werrnecke, K. D. (Eds.), Innovations in classification, data science, and information systems, (Vol. 27, pp. 91-100), Berlin, Germany, Springer, 2005.
[Eilers/Goeman, 2004] Eilers, P. H., & Goeman, J. J.: Enhancing scatterplots with smoothed densities, Bioinformatics, Vol. 20(5), pp. 623-628. 2004
}
\author{
Luca Brinkman and Michael Thrun
}
\examples{
X=runif(100)
Y=rnorm(100)
#V=estimateDensity2D(X,Y)
}
\keyword{estimateDensity2D}
\concept{estimate densities in 2D}
|
/man/estimateDensity2D.Rd
|
no_license
|
Mthrun/DataVisualizations
|
R
| false | false | 1,898 |
rd
|
\name{estimateDensity2D}
\alias{estimateDensity2D}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
estimateDensity2D
}
\description{
Estimates densities for two-dimensional data with the given estimation type
}
\usage{
estimateDensity2D(X, Y, DensityEstimation = "SDH",
SampleSize, na.rm = FALSE, NoBinsOrPareto = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
[1:n] numerical vector of first feature
}
\item{Y}{
[1:n] numerical vector of second feature
}
\item{DensityEstimation}{
Either "PDE","SDH" or "kde2d"
}
\item{SampleSize}{
Sample Size in case of big data
}
\item{na.rm}{
Function may not work with non-finite values. If such cases should be removed automatically, set this parameter to TRUE
}
\item{NoBinsOrPareto}{
Density-specific parameter, for PDEscatter (ParetoRadius), SDH (nbins) or kde2d (bins)
}
}
\details{
Each two-dimensional data point is defined by its corresponding X and Y value.
}
\value{
List V with
\item{X }{[1:m] numerical vector of first feature, m<=n depending on whether all values are finite and on the na.rm parameter}
\item{Y }{[1:m] numerical vector of second feature, m<=n depending on whether all values are finite and on the na.rm parameter}
\item{Densities }{the density of each two-dimensional data point}
}
\references{
[Ultsch, 2005] Ultsch, A.: Pareto density estimation: A density estimation for knowledge discovery, In Baier, D. & Werrnecke, K. D. (Eds.), Innovations in classification, data science, and information systems, (Vol. 27, pp. 91-100), Berlin, Germany, Springer, 2005.
[Eilers/Goeman, 2004] Eilers, P. H., & Goeman, J. J.: Enhancing scatterplots with smoothed densities, Bioinformatics, Vol. 20(5), pp. 623-628. 2004
}
\author{
Luca Brinkman and Michael Thrun
}
\examples{
X=runif(100)
Y=rnorm(100)
#V=estimateDensity2D(X,Y)
}
\keyword{estimateDensity2D}
\concept{estimate densities in 2D}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Individual.R
\name{individual}
\alias{individual}
\title{Return individual average raw data and plotted waveforms (optional) for all loaded conditions}
\usage{
individual(data, electrodes, plots = "n")
}
\arguments{
\item{data}{A data frame in the format returned from \code{\link{load.data}}}
\item{electrodes}{A single value or concatenation of several values (to be averaged)
indicating which electrodes to include in generating the plot. At this time, if the
raw data files imported using \code{\link{load.data}}) do not have a header, you
must include a capital "V" in front of the number and enclose each electrode in quotes.
(For example, electrodes = "V78", or electrodes = c("V78", "V76").)}
\item{plots}{Creates plots of individual averaged data in separate windows. By default,
plots are suppressed, but can be activated by setting \code{plots} to "y".}
}
\value{
Data frame of individual average data for each subject in each condition.
If \code{plots = "y"}, then multiple plots (1 per subject) will also be generated.
}
\description{
\code{individual} plots individual, averaged waveforms for each condition present in the
data frame you provide. Separate plots for each individual can be generated, if specified.
}
\details{
\code{individual} will generate individual average data and separate plots of
averaged waveforms (optional) for each subject in the data frame you provide. Raw data are
organized in columns by subject and condition. Plots will be generated by setting
\code{plots = "y"}.
Single electrodes can be passed to the package functions,
or several electrodes can be provided (i.e., when using dense arrays) and those electrodes
will be averaged together as a single electrode.
}
\examples{
# Return data frame of individual average data and create average waveform
# plots for each subject
individual(ERPdata, electrodes = "V78", plots="y")
}
\author{
Travis Moore
}
|
/man/individual.Rd
|
no_license
|
mooretm/erp.easy
|
R
| false | true | 1,998 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Individual.R
\name{individual}
\alias{individual}
\title{Return individual average raw data and plotted waveforms (optional) for all loaded conditions}
\usage{
individual(data, electrodes, plots = "n")
}
\arguments{
\item{data}{A data frame in the format returned from \code{\link{load.data}}}
\item{electrodes}{A single value or concatenation of several values (to be averaged)
indicating which electrodes to include in generating the plot. At this time, if the
raw data files imported using \code{\link{load.data}}) do not have a header, you
must include a capital "V" in front of the number and enclose each electrode in quotes.
(For example, electrodes = "V78", or electrodes = c("V78", "V76").)}
\item{plots}{Creates plots of individual averaged data in separate windows. By default,
plots are suppressed, but can be activated by setting \code{plots} to "y".}
}
\value{
Data frame of individual average data for each subject in each condition.
If \code{plots = "y"}, then multiple plots (1 per subject) will also be generated.
}
\description{
\code{individual} plots individual, averaged waveforms for each condition present in the
data frame you provide. Separate plots for each individual can be generated, if specified.
}
\details{
\code{individual} will generate individual average data and separate plots of
averaged waveforms (optional) for each subject in the data frame you provide. Raw data are
organized in columns by subject and condition. Plots will be generated by setting
\code{plots = "y"}.
Single electrodes can be passed to the package functions,
or several electrodes can be provided (i.e., when using dense arrays) and those electrodes
will be averaged together as a single electrode.
}
\examples{
# Return data frame of individual average data and create average waveform
# plots for each subject
individual(ERPdata, electrodes = "V78", plots="y")
}
\author{
Travis Moore
}
|
#' Function to add names to network for the user.
#'@param grn a GRN object from KBoost.
#'@param gen_names a vector with the gene names.
#'@export
#'@return grn a GRN object with elements with user-defined gene names.
#'@examples
#' data(D4_multi_1)
#' Net = kboost(D4_multi_1)
#' g_names = matrix("G",100,1)
#' for (i in seq_along(g_names)){
#' g_names[i] = paste(g_names[i],toString(i), sep = "")
#' }
#' Net = add_names(Net,g_names)
#'
add_names <- function(grn,gen_names){
# Add the gene names to the processed network.
rownames(grn$GRN) <- gen_names
colnames(grn$GRN) <- gen_names[grn$TFs]
# Add the gene names to the un-processed network.
rownames(grn$GRN_UP) <- gen_names
colnames(grn$GRN_UP) <- gen_names[grn$TFs]
# Add the gene names to the Prior.
rownames(grn$prior) <- gen_names
colnames(grn$prior) <- gen_names[grn$TFs]
return(grn)
}
|
/R/add_names.R
|
no_license
|
ghimirey/KBoost
|
R
| false | false | 945 |
r
|
#' Function to add names to network for the user.
#'@param grn a GRN object from KBoost.
#'@param gen_names a vector with the gene names.
#'@export
#'@return grn a GRN object with elements with user-defined gene names.
#'@examples
#' data(D4_multi_1)
#' Net = kboost(D4_multi_1)
#' g_names = matrix("G",100,1)
#' for (i in seq_along(g_names)){
#' g_names[i] = paste(g_names[i],toString(i), sep = "")
#' }
#' Net = add_names(Net,g_names)
#'
add_names <- function(grn,gen_names){
# Add the gene names to the processed network.
rownames(grn$GRN) <- gen_names
colnames(grn$GRN) <- gen_names[grn$TFs]
# Add the gene names to the un-processed network.
rownames(grn$GRN_UP) <- gen_names
colnames(grn$GRN_UP) <- gen_names[grn$TFs]
# Add the gene names to the Prior.
rownames(grn$prior) <- gen_names
colnames(grn$prior) <- gen_names[grn$TFs]
return(grn)
}
|
#helper function for word_list/trans.cloud
freqTab2words <-
function(word.list){
if(is.data.frame(word.list)){
rep(word.list[, 1], word.list[, 2])
} else {
lapply(word.list, function(x) rep(x[, 1], x[, 2]))
}
}
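# e.g. freqTab2words(data.frame(word = c("a", "b"), freq = c(2, 1)))
# gives c("a", "a", "b"); a sketch, and since the function indexes columns
# by position, the column names here are arbitrary.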
|
/R/freqTab2words.R
|
no_license
|
abresler/qdap
|
R
| false | false | 239 |
r
|
#helper function for word_list/trans.cloud
freqTab2words <-
function(word.list){
if(is.data.frame(word.list)){
rep(word.list[, 1], word.list[, 2])
} else {
lapply(word.list, function(x) rep(x[, 1], x[, 2]))
}
}
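# e.g. freqTab2words(data.frame(word = c("a", "b"), freq = c(2, 1)))
# gives c("a", "a", "b"); a sketch, and since the function indexes columns
# by position, the column names here are arbitrary.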
|
performance_rating_UI <- function(id) {
#user interface section
ns <- NS(id)
tagList(
sidebarLayout(
absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
draggable = TRUE, top = 250, left = "auto", right = 17, bottom = "auto",
width = 330, height = "auto",
tags$div(id = 'demo5', class="collapse",
helpText(strong("Application explainer text")),
hr(),
hr()
)
),
mainPanel(
column(width = 12,
column(width = 3,
gradientBox(
title = strong("Individual Information"),status="info",
solidHeader = T,closable = F,width = 12, icon = "fa fa-user",
shinyjs::useShinyjs(),
shinyjs::inlineCSS(appCSS),
textInput(ns("name_u"), "Project Name"),
textInput(ns("company_name"), "Company Name"),
textInput(ns("fund_name"), "Fund Name"),
textInput(ns("subject_name"), labelMandatory("Subject Name")),
selectInput(ns("gender"), labelMandatory("Subject's Gender"),
c("Male","Female")),
# textInput(ns("title"), "Title"),
textInput(ns("name_usr"), labelMandatory("First Name")),
textInput(ns("name_sur"), "Last Name"),
textInput(ns("email"), "Email Address")
)),
column(width = 3,
gradientBox(
title = strong("Archetypes"),status="info",
solidHeader = T,closable = F,width = 12, icon = "fa fa-question-circle",
selectInput(ns("hi_archetypes"), "Hi Archetypes",
c("Sovereign", "Lover", "Magician", "Warrior")),
selectInput(ns("lo_archetypes"), "Lo Archetypes",
c("Sovereign", "Lover", "Magician", "Warrior")),
selectInput(ns("theme"), "Theme/Pattern",
c("Idealizing", "Affecting", "Fostering", "Releasing",
"Yielding", "Relying", "Detaching", "Optionizing",
"Alerting","Establishing","Contesting","Upholding")),
selectInput(ns("opp_type"), "Opposing Type",
c("Fostering over Alerting", "Affecting over Yielding",
"Detaching over Releasing", "Contesting over Optionizing",
"Upholding over Relying", "Idealizing over Establishing",
"Yielding over Affecting", "Alerting over Fostering","Releasing over Detaching",
"Optionizing over Contestin","Relying over Upholding",
"Establishing over Idealizing"))
)),
column(width = 3,
gradientBox(
title = strong("Kolbe A Index"),status="info",
solidHeader = T,closable = F,width = 12, icon = "fa fa-question-circle",
textInput(ns("kolbe_score"), ("Kolbe Score")),
selectInput(ns("fact_finder"), "Fact Finder",
c("Simplify", "Explain", "Specify")),
selectInput(ns("follow_thru"), "Follow Thru",
c("Adapt", "Maintain", "Systematize")),
selectInput(ns("quick_start"), "Quick Start",
c("Stabilize", "Modify", "Innovate")),
selectInput(ns("implementor"), "Implementor",
c("Envision", "Restore", "Demonstrate"))
)),
column(width = 3,
gradientBox(
title = strong("Kantor Baseline Instrument"),status="info",
solidHeader = T,closable = F,width = 12, icon = "fa fa-question-circle",
selectInput(ns("action_mode"), "Action Mode",
c("Move", "Follow", "Bystand", "Oppose")),
selectInput(ns("operating_system"), "Operating System",
c("Open", "Closed", "Random")),
selectInput(ns("com_domain"), "Communication Domain",
c("Power", "Affect", "Meaning")),
#input executioner controls
radioButtons(ns('format'), 'Document format', c('HTML', 'Word','PDF'),
inline = TRUE),
downloadButton(ns("downloadReport"), "Download Report" ,
class = "btn btn-success ")
)))
,width = 12),
position = c("right"))
)
}
performance_rating <- function(input, output, session, pool) {
#server code
#User inputs
name_u = reactive({ input$name_u })
company_name = reactive({ input$company_name })
fund_name = reactive({ input$fund_name })
subject_name = reactive({ input$subject_name })
gender = reactive({ input$gender })
# title = reactive({ input$title })
name_usr = reactive({ input$name_usr })
name_sur = reactive({ input$name_sur })
email = reactive({ input$email })
#reactive input controls [Archetypes]
hi_archetypes = reactive({ input$hi_archetypes })
lo_archetypes = reactive({ input$lo_archetypes })
#theme/pattern duplication
theme = reactive({ input$theme })
#opposing types
opp_type = reactive({ input$opp_type })
#kolbe section
kolbe_score = reactive({ input$kolbe_score })
fact_finder = reactive({ input$fact_finder })
follow_thru = reactive({ input$follow_thru })
quick_start = reactive({ input$quick_start })
implementor = reactive({ input$implementor })
#kanto section
action_mode = reactive({ input$action_mode })
operating_system = reactive({ input$operating_system })
com_domain = reactive({ input$com_domain })
#Archetype Hi-Lo
text_1 = reactive({
#hi_archetype relation check
if(hi_archetypes()=="Sovereign"){
connect_value_hi = "Visionary leadership"
}else if(hi_archetypes()=="Lover"){
connect_value_hi = "Relational leadership"
}else if(hi_archetypes()=="Warrior"){
connect_value_hi = "Perfomance leadership"
}else if(hi_archetypes()=="Magician"){
connect_value_hi = "Perfomance leadership"
}
#lo_archetype relation check
if(lo_archetypes()=="Sovereign"){
connect_value_lo = "Visionary leadership"
}else if(lo_archetypes()=="Lover"){
connect_value_lo = "Relational leadership"
}else if(lo_archetypes()=="Warrior"){
connect_value_lo = "Perfomance leadership"
}else if(lo_archetypes()=="Magician"){
connect_value_lo = "Perfomance leadership"
}
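    # A named lookup vector would avoid duplicating these if/else chains;
    # a sketch mirroring the branches above (not part of the original app):
    # leadership <- c(Sovereign = "Visionary leadership",
    #                 Lover     = "Relational leadership",
    #                 Warrior   = "Performance leadership",
    #                 Magician  = "Performance leadership")
    # connect_value_hi <- leadership[[hi_archetypes()]]
    # connect_value_lo <- leadership[[lo_archetypes()]]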
    print(paste0(name_usr(), "'s dominant archetype results indicate strength in ",
                 connect_value_hi, ", high in '", hi_archetypes(),
                 "' energy and low in ", connect_value_lo, ", or ", lo_archetypes(),
                 " energy, known as ", theme(), "."))
})
text_1_a =reactive({
if(theme()=="Idealizing"){
theme_out = paste("Idealizing leaders are full of confidence that the right answer
always emerges and things will work out for the best. This type sometimes misses
the need to acknowledge the pain or struggle that those around them may be experiencing
")
}else if(theme()=="Affecting"){
theme_out = paste("An Affecting leader manifests a 'fake it till they make it' chutzpah— working
hard to impress others with their competencies, affiliations and possessions
")
}else if(theme()=="Fostering"){
theme_out = paste("Fostering leaders are natural coaches and mentors—taking people under
their wing, helping them to grow and improve. They may have difficulty holding
their mentees accountable or defining appropriate boundaries
")
}else if(theme()=="Realeasing"){
theme_out = paste("Releasing leaders have an intuitive connection to their senses and feelings.
Their body is an instrument of both sensing and expression, giving them a rich sensory
life filled with gusto, intuition, and emotion. Under pressure, their sensitivity can feel overwhelming
")
}else if(theme()=="Relying"){
theme_out = paste("Relying leaders recognize the value of vulnerability in building and
maintaining relationships. This type understands the value of receiving support from others,
and is open and eager to let strong bonds develop between themselves and their friends and
colleagues. Given their natural openness, a lack of attention to boundaries can
foster over-dependency in some relationships
")
}else if(theme()=="Upholding"){
theme_out = paste("Upholding leaders are compelled to correct anything around them that is
bent or broken. They will fight hard in pursuit of excellence and can benefit by
building relationships with other competent souls they can trust- finding a
place to let their guard down and just relax
")
}else if(theme()=="Establishing"){
theme_out = paste("Establishing leaders have their own distinctive brand and style,
                        their modus operandi, and care little for what the rest of the pack might be doing.
They can create friction in their relationships with their willingness to go their own way
")
}else if(theme()=="Contesting"){
theme_out = paste("Contesting leaders can dominate the space with their willingness to take
the leadership role and their ability to drive their point home. This type feels their
own place by pushing against boundaries and resistance. Those with this pattern thrive in
an environment when they can assert themselves cleanly in a way that doesn't cause
collateral damage inside or outside the team
")
}else if(theme()=="Yielding"){
theme_out = paste("Yielding leaders feel a common bond with those who endure the struggle
of their lives. They may be pilgrims on a spiritual path, understanding that there
are important lessons in the trials and tribulations we endure. Yielding helps
them avoid buckling under the extreme pressures they have experienced or witnessed in life
")
}else if(theme()=="Detaching"){
theme_out = paste("Detaching leaders get on the balcony, leave the crowd, and watch from afar.
This tendency to disengage can give them perspective. The challenge for this type
is accepting the invitation to join the fray and engage with their team
")
}else if(theme()=="Alerting"){
theme_out = paste("Alerting leaders are the watchdog that sounds the alarm when things could
go wrong. This type is two steps ahead, vigilantly protecting their own and the
collective well-being. They are willing to be unpopular if it means protecting what
they care about. Their innate risk-management voice should not be confused with negativity
")
}else if(theme()=="Optionizing"){
theme_out = paste( "Optionizing leaders see all the options laid out before them.
This type avoids hurting themselves or others by making decisions too quickly or
leaping to conclusions. Instead, they juggle the possibilities, watching and
waiting for the right one to show itself. They will resist decisions they
                         feel are being made too quickly
")
}
print(paste(theme_out,"."))
})
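  # ---- editor sketch (assumption: this helper is not part of the original) ----
  # The reactives below each rebuild the same gender/pronoun if-else block. A
  # single helper like this could remove that duplication; it is shown only as
  # a sketch and is not called by the original code:
  pronouns <- function(g) {
    if (g == "Male") list(p1 = "he", p2 = "his", p3 = "himself")
    else             list(p1 = "she", p2 = "her", p3 = "herself")
  }
  # e.g. inside a reactive:  p <- pronouns(gender()); p$p1; p$p2; p$p3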
text_1_b =reactive({
#gender pronouns
if(gender()=="Male"){
pronoun_1 = "he"
}else{
pronoun_1 = "she"
}
if(gender()=="Male"){
pronoun_2 = "his"
}else{
pronoun_2 = "her"
}
if(gender()=="Male"){
pronoun_3 = "himself"
}else{
pronoun_3 = "herself"
}
if(theme()=="Idealizing"){
theme_out_1 = paste(pronoun_1,"is motivated by Connection and Theory. This means ",pronoun_1,"connects
the potential in a situation or a period of change with their ability
to hold and cultivate relationships— surrounding themselves with allies and colleagues
to navigate challenges.",pronoun_1,"thrives in learning environments that use
information and knowledge to build a strong theory about why things are happening as they are")
}else if(theme()=="Affecting"){
theme_out_1 = paste(pronoun_1," is motivated by Quest and Theory. This means",pronoun_1," is motivated
to manifest a vision through action in the world.", pronoun_1 ,"can
feel ",pronoun_2," progress is hindered by too much focus on risk, relationships,
or process.",pronoun_1,"thrives in learning environments that use information and
knowledge to build a strong theory about why things are happening as they are. ",pronoun_1,"
would much prefer to research the failures of others than waste time on a trial-and-error approach ")
}else if(theme()=="Fostering"){
theme_out_1 = paste(pronoun_1,"is motivated by Connection and Quest. This means ",pronoun_1,"connects
the potential in a situation or a period of change with their ability
to hold and cultivate relationships— surrounding themselves with allies and colleagues
to navigate challenges.", pronoun_1,"is motivated to manifest a vision through action in the world.
",pronoun_1," can feel ",pronoun_2,"progress is hindered by too much focus on risk, relationships, or process")
}else if(theme()=="Realeasing"){
theme_out_1 = paste(pronoun_1,"is motivated by Experience and Connection.",pronoun_1,"likes to
learn through exploration and active engagement, preferring to pronoun_1 right
into testing and discovery, even if that means risking some mistakes and
failures. ",pronoun_1,"connects the potential in a situation or a period of
change with",pronoun_2,"ability to hold and cultivate relationships— surrounding
",pronoun_3,"with allies and colleagues to navigate challenges")
}else if(theme()=="Relying"){
theme_out_1 = paste(pronoun_1,"is motivated by Security and Connection. This means",pronoun_1," is
aware of", pronoun_2," vulnerabilities and does ",pronoun_2," best work in
environments that feel safe.", pronoun_1,"connects the potential in a situation
or a period of change with ",pronoun_2," ability to hold and cultivate
relationships— surrounding ",pronoun_3,"with allies and colleagues to navigate challenges")
}else if(theme()=="Upholding"){
theme_out_1 = paste(pronoun_1," is motivated by Independence and Quest.",pronoun_1," thrives
when given the space to work in their own way and in their
own space.",pronoun_1," may find group processes, group decisions
and group environments overwhelming when",pronoun_1," is working out
complex problems.",pronoun_1," is motivated to manifest a vision through
action in the world. ",pronoun_1," can feel ",pronoun_2," progress is hindered by
too much focus on risk, relationships, or process")
}else if(theme()=="Establishing"){
theme_out_1 = paste(pronoun_1," is motivated by Experience and Independence. ",pronoun_1," likes
to learn through exploration and active engagement, preferring to dive
right into testing and discovery, even if that means risking some mistakes
and failures. ",pronoun_1,"thrives when given the space to work in ",pronoun_2," own
way and in ",pronoun_2," own space.", pronoun_1," may find group processes,
group decisions and group environments overwhelming when",pronoun1," is
working out complex problems")
}else if(theme()=="Contesting"){
theme_out_1 = paste(pronoun_1,"is motivated by Experience and Quest. ",pronoun_1," likes to learn
through exploration and active engagement, preferring to dive right into testing and
discovery, even if that means risking some mistakes and failures.", pronoun_1,"
is motivated to manifest a vision through action in the world. ", pronoun_1," can feel",pronoun_2,"progress
is hindered by too much focus on risk, relationships, or process
")
}else if(theme()=="Yielding"){
theme_out_1 = paste(pronoun_1," is motivated by Experience and Security. ",pronoun_1," likes to learn
through exploration and active engagement, preferring to dive right into testing and
discovery, even if that means risking some mistakes and failures. ",pronoun_1," is aware
",pronoun_2," vulnerabilities and does", pronoun_2," best work in environments that feel safe")
}else if(theme()=="Detaching"){
theme_out_1 = paste("
",pronoun_1," is motivated by Theory and Independence. ",pronoun_1," thrives
in learning environments that use information and knowledge to build a strong
theory about why things are happening as they are. ",pronoun_1," thrives when
given the space to work in", pronoun_2," own way and in ",pronoun_2," own space.
",pronoun_1,"may find group processes, group decisions and group environments
overwhelming when", pronoun_1," is working out complex problems")
}else if(theme()=="Alerting"){
theme_out_1 = paste(pronoun_1," is motivated by Security and Independence. This means ",pronoun_1," is aware
of ",pronoun_2,"vulnerabilities and does", pronoun_2," best work in environments that
feel safe.", Pronoun1," thrives when given the space to work in ",pronoun_2," own way
and in", pronoun_2," own space.",pronoun_1," may find group processes, group decisions
and group environments overwhelming when ",pronoun_1," is working out complex problems")
}else if(theme()=="Optionizing"){
theme_out_1 = paste(pronoun_1," is motivated by Security and Theory. This means", pronoun_1,"is aware
of", pronoun_2," vulnerabilities and does ",pronoun_2, "best work in environments that feel safe.
",pronoun_1," thrives in learning environments that use information and knowledge to build a
strong theory about why things are happening as they are")
}
print(paste(theme_out_1,"."))
})
#Archetype Theme/Pattern
text_2 = reactive({
if(theme()=="Idealizing"){
theme_out = paste(strong("Idealizing"),": Idealizers are full of confidence that the right
answer always emerges and things will work out for the best.
This type sometimes misses the need to acknowledge the pain or struggle
that those around them may be experiencing.")
}else if(theme()=="Affecting"){
theme_out = "They manifest a 'fake it till theymake it' chutzpah— working hard to
impress others with their competencies, affiliations and possessions."
}else if(theme()=="Fostering"){
theme_out = "They are natural coaches and mentors—taking people under their wing,
helping them to grow and improve. They may have difficulty holding their mentees
accountable or defining appropriate boundaries."
}else if(theme()=="Releasing"){
theme_out = "They have an intuitive connection to their senses and feelings.
Their body is an instrument of both sensing and expression, giving them a
rich sensory life filled with gusto, intuition and emotion. Under pressure,
their sensitivity can feel overwhelming."
}else if(theme()=="Yielding"){
theme_out = "They feel a common bond with those who endure the struggle of their lives.
They may be pilgrims on a spiritual path, understanding that there are important
lessons in the trials and tribulations we endure. Yielding helps them avoid
buckling under the extreme pressures they have experienced or witnessed in life."
}else if(theme()=="Detaching"){
theme_out = "They get on the balcony, leave the crowd and watch from afar.
This tendency to disengage can give them perspective. The challenge for
this type is accepting the invitation to join the fray and engage with their team."
}else if(theme()=="Optionizing"){
theme_out = "They see all the options laid out before them. This type avoids
hurting themselves or others by making decisions too quickly or leaping to
conclusions. Instead, they juggle the possibilities, watching and waiting for
the right one to show itself. They will resist decisions they feel are
being made to quickly."
}else if(theme()=="Alerting"){
theme_out = "TThey are the watchdog that sounds the alarm when things could go wrong.
This type is two steps ahead, vigilantly protecting their own and the collective well-being.
They are willing to be unpopular if it means protecting what they care about.
Their innate risk-management voice should not be confused with negativity."
}else if(theme()=="Establishing"){
theme_out = "This type has their own distinctive brand and style, their
modus operandi, and cares little for what the rest of the pack might be doing.
They can create friction in their relationships with their willingness to go their own way."
}else if(theme()=="Contesting"){
theme_out = "This type can dominate the space with their willingness to take the
leadership role and their ability to drive their point home. This type feels their
own place by pushing against boundaries and resistance. Those with this pattern thrive in
                  an environment where they can assert themselves cleanly in a way that doesn't
cause collateral damage inside or outside the team."
}else if(theme()=="Upholding"){
theme_out = "This type is compelled to correct anything around them that is bent
or broken. They will fight hard in pursuit of excellence and can benefit by
building relationships with other competent souls they can trust-
finding a place to let their guard down and just relax."
}
print(paste(theme_out))
})
#Archetype Pattern [Get clarity]
text_4 = reactive({
#gender pronouns
if(gender()=="Male"){
pronoun_1 = "he"
}else{
pronoun_1 = "she"
}
if(gender()=="Male"){
pronoun_2 = "his"
}else{
pronoun_2 = "her"
}
if(gender()=="Male"){
pronoun_3 = "himself"
}else{
pronoun_3 = "herself"
}
if(opp_type()=="Fostering over Alerting"){
opp_out = paste("Under pressure,",pronoun_1,"will default to ",strong("Fostering over Alerting"),".
This means they find the best in people, and seek to support others,
showing a willingness for self-sacrifice to benefit the whole.
A way of describing this pattern is 'support without caution'.")
}else if(opp_type()=="Affecting over Yielding"){
opp_out = paste("Under pressure", pronoun_1, "will default to",strong("Affecting over Yielding"),". This means they rise
above the perceived bounds defined by their past conditions. Humility is secondary
                      to aspiration for them, and rather than worrying about feeling like an outsider,
                      they have chosen to carve their own path.")
}else if(opp_type()=="Detaching over Releasing"){
opp_out = paste("Under pressure", pronoun_1, "will default to",strong("Detaching over Releasing"),". This means they show
a fierce independence and step back to get away from emotions and drama. This can allow them
to see the system. They are content to hold these observations privately and have no need
to prove themselves to others. ")
}else if(opp_type()=="Contesting over Optionizing"){
opp_out = paste("Under pressure", pronoun_1, "will default to",strong("Contesting over Optionizing"),". This means they
take decisive action, and can feel uncomfortable with those who hesitate in the face of
tough choices or get lost in the weeds of pros and cons. Thinking too much can be harmful, and so
they discern a path forward for themselves and others when leadership is needed.")
}else if(opp_type()=="Upholding over Relying"){
opp_out = paste("Under pressure", pronoun_1,"will default to ",strong("Upholding over Relying"),". This means they
correct the mistakes and hold a level of excellence based on doing what's right, for its
own sake. If they want the job done right, they do it themselves. ")
}else if(opp_type()=="Idealizing over Establishing"){
opp_out = paste("Under pressure", pronoun_1,"will default to",strong("Idealizing over Establishing"),". This means
they manifest a strong sense of inspiration that can be contagious–an ability to hold a
big vision and see the potential. These big ideas and charisma can lead them to pursue
dreams that are not achievable or grounded.")
}else if(opp_type()=="Yielding over Affecting"){
opp_out = paste("Under pressure", pronoun_1, "will default to", strong("Yielding over Affecting"),". This means
they find solidarity with those who keep their heads down and their nose to the grindstone.
They feel it is not up to them to change the system or save the world and can see too much ambition as dangerous.")
}else if(opp_type()=="Alerting over Fostering"){
opp_out = paste("Under pressure", pronoun_1, "will default to", strong("Alerting over Fostering"),". This
means they manifest a watchful independence. They prefer to keep their position secure,
keeping a watchful eye on the darker side of human nature, understanding that even good
people can sometimes take advantage of others and be hypocritical. ")
}else if(opp_type()=="Releasing over Detaching"){
opp_out = paste("Under pressure", pronoun_1, "will default to",strong( "Releasing over Detaching"),". This means
they dive passionately into intuition and emotional intensity, feeling all the highs, the lows,
and the emotional doldrums and deriving vitality and interpersonal connection through it all.")
}else if(opp_type()=="Optionizing over Contesting"){
opp_out = paste("Under pressure", pronoun_1,"will default to",strong("Optionizing over Contesting"),". This
means they weigh all the options and hold off on hasty decisions. They have an ability to
step back and really think through the many costs and benefits of a particular line of
action or menu of choices. ")
}else if(opp_type()=="Relying over Upholding"){
opp_out = paste("Under pressure", pronoun_1,"will default to",strong("Relying over Upholding"),". This means
they build strong connections with people that are important to them. They maintain these
connections with those they depend on, and can create a strong web of support, creating
safety for themselves, and for others by being unafraid to ask for what they need. It can be
difficult for them to navigate when they feel their support network shaken or lose
those they depend on.")
}else if(opp_type()=="Establishing over Idealizing"){
opp_out = paste("Under pressure", pronoun_1,"will default to,",strong("Establishing over Idealizing"),". This means
they distinguish themselves through hard work and a unique path - reinforcing their sense of
inner strength. In the face of adversity or pressure to blend in, they chose the difficult
                      path of breaking out on their own and defining their own way.")
}
print(paste(opp_out))
})
text_5 = reactive({
print(paste(name_usr(),"’s action instincts are ",kolbe_score(),"."))
})
text_6 = reactive({
#gender pronouns
if(gender()=="Male"){
pronoun_1 = "he"
}else{
pronoun_1 = "she"
}
if(gender()=="Male"){
pronoun_2 = "his"
}else{
pronoun_2 = "her"
}
if(gender()=="Male"){
pronoun_3 = "himself"
}else{
pronoun_3 = "herself"
}
if(fact_finder()=="Simplify"){
fact_out = paste("Specifically, this means",pronoun_1 ,"gathers and shares information by Simplifying. This means",pronoun_1, " will summarize and help get to the point, cut through the red tape, and offer bottom-line options.")
}else if(fact_finder()=="Explain"){
fact_out = paste("Specifically, this means",pronoun_1," gathers and shares information by Explaining; ",pronoun_1," works within priorities, tests analogies, and starts with the highest probability. ")
}else if(fact_finder()=="Specify"){
fact_out = paste("Specifically, this means ",pronoun_1, "will gather and share information by Specifying. This includes ranking and quantifying, defining objectives, and developing complex strategies. ")
}
print(paste(fact_out))
})
text_7 = reactive({
#gender pronouns
if(gender()=="Male"){
pronoun_1 = "he"
}else{
pronoun_1 = "she"
}
if(gender()=="Male"){
pronoun_2 = "his"
}else{
pronoun_2 = "her"
}
if(gender()=="Male"){
pronoun_3 = "himself"
}else{
pronoun_3 = "herself"
}
if(follow_thru()=="Adapt"){
follow_out = paste(pronoun_1 ,"organizes by", strong("Adapting;"), pronoun_1," switches task frequently, naturally
multitasks and will thrive on interruptions.")
}else if(follow_thru()=="Maintain"){
follow_out = paste(pronoun_1," organizes through",strong("Maintaining;")," packaging things together that fit,
adjusting procedures, coordinating schedules, and drafting guidelines.")
}else if(follow_thru()=="Systematize"){
follow_out = paste(pronoun_1," will organize by",strong("Systematizing;"), "this means", pronoun_1," will create the plan,
coordinate needs, and graph the logistics.")
}
print(paste(follow_out))
})
text_8 = reactive({
#gender pronouns
if(gender()=="Male"){
pronoun_1 = "he"
}else{
pronoun_1 = "she"
}
if(gender()=="Male"){
pronoun_2 = "his"
}else{
pronoun_2 = "her"
}
if(gender()=="Male"){
pronoun_3 = "himself"
}else{
pronoun_3 = "herself"
}
if(quick_start()=="Stabilize"){
quick_out = paste(pronoun_1,"will deal with risk and uncertainty by ",strong("Stabilizing;")," this means
",pronoun_1," will protect the status quo, clarify deadlines, and minimize risk factors.")
}else if(quick_start()=="Modify"){
quick_out = paste(pronoun_1," handles risks and uncertainty by",strong("Modifying;")," trying new ideas,
sustaining innovations and adjusting deadlines as needed.")
}else if(quick_start()=="Innovate"){
quick_out = paste(pronoun_1," deals with risks and uncertainty by", strong("Innovating;")," creating a sense
of urgency, initiating change, and defying the odds.")
}
print(paste(quick_out))
})
  text_9 = reactive({
    #gender pronouns (previously undefined in this reactive, which broke every branch)
    if(gender()=="Male"){
      pronoun_1 = "he";  pronoun_2 = "his"
    }else{
      pronoun_1 = "she"; pronoun_2 = "her"
    }
    if(implementor()=="Envision"){
      implementor_out = paste("Finally,",pronoun_1," will handle space and tangibles by ",strong("Envisioning;")," this
                              means ",pronoun_1," will create virtual presentations, sketch ideas and capture the essence of things.")
    }else if(implementor()=="Restore"){
      implementor_out = paste(pronoun_2," best method for tackling space and tangibles is
                              ",strong("Restoring;")," testing ingredients, fixing moving parts and removing both real and imagined obstacles.")
}else if(implementor()=="Demonstrate"){
implementor_out = paste("Finally, ",pronoun_1," handles space and tangibles by",strong("Demonstrating;")," building
                              prototypes and scalable solutions, testing functionality and troubleshooting
malfunctions by designing and fabricating new parts.")
}
print(paste(implementor_out))
})
text_10 = reactive({
if(action_mode()=="Move"){
action_out = "A Move initiates. Movers are often the first to suggest a direction
or to introduce a new idea or concept. Moves start the action."
}else if(action_mode()=="Follow"){
action_out = "A Follow supports. Followers get behind others’ ideas and take
the necessary action to carry the idea forward to completion. Follows finish the action."
}else if(action_mode()=="Bystand"){
action_out = "A Bystand bridges. Bystanders observe, add neutral perspective,
and integrate seemingly disparate ideas. Bystands connect the elements of the action."
}else if(action_mode()=="Oppose"){
action_out = "An Oppose challenges. Opposers push back on ideas, providing alternatives,
and helping to shore up weaknesses. Opposes correct the action."
}
print(paste(action_out))
})
text_11 = reactive({
if(operating_system()=="Open"){
operating_out = "Individuals who have the Open propensity emphasize process, participation,
and teamwork. They look for ways to include others and place a high value on consensus.
They believe everyone has a contribution to make."
}else if(operating_system()=="Closed"){
operating_out = "Individuals with the Closed propensity emphasize structure and planning and
are more likely to value both tradition and hierarchy. They provide others with
clarity about individual roles and responsibilities and expect everyone to know and follow the rules."
}else if(operating_system()=="Random"){
operating_out = "Individuals demonstrating the Random propensity operate with no rules and few boundaries.
They tend to emphasize creative expression, autonomy and individuality.
They rarely consider imposing a system on others - they merely
want the freedom to operate in their own unique way."
}
print(paste(operating_out))
})
text_12 = reactive({
if(com_domain()=="Power"){
com_out = "The language of Power is about accountability, competence and completion.
Individuals with a Power Propensity tend to speak about specific goals
and are highly conscious of time. They enjoy crossing items from their
list and moving projects to closure."
}else if(com_domain()=="Affect"){
com_out = "The language of Affect is about connection between people, particularly emotional.
Individuals with an Affect Propensity take special note of others’ well-being and
how they are reacting to what is being said. They emphasize trust and
motivation and try to provide a climate of warmth and caring."
}else if(com_domain()=="Meaning"){
com_out = "The language of Meaning is about thinking, logic and a sense of purpose.
Individuals with a Meaning Propensity are concerned with 'what we stand for'
and with deeply understanding how things work. They love exploring ideas and concentrating
on the theoretical underpinnings of a discussion."
}
print(paste(com_out))
})
  #download handler: renders report/report.Rmd to the format chosen in the UI (HTML, Word or PDF)
output$downloadReport <- downloadHandler(
filename = function() {
paste('my-report', sep = '.', switch(
input$format, PDF = 'pdf', HTML = 'html', Word = 'docx'
))
},
content = function(file) {
src <- normalizePath('report/report.Rmd')
owd <- setwd(tempdir())
on.exit(setwd(owd))
file.copy(src, 'report.Rmd', overwrite = TRUE)
      out <- rmarkdown::render('report.Rmd',
                               # NOTE: only input$text is passed to the report; no "text" input
                               # exists in this module's UI, and the text_* reactives above are
                               # not forwarded -- report.Rmd must declare matching params
                               params = list(text = input$text),
                               switch(input$format,
                                      PDF = rmarkdown::pdf_document(),
                                      HTML = rmarkdown::html_document(),
                                      Word = rmarkdown::word_document()
                               ))
file.rename(out, file)
}
)
}
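# ---- usage sketch (editor addition; the app entry point is not shown here) ---
# A minimal way this module could be wired into an app. The id "perf" and the
# pool object `my_pool` are assumptions, not names taken from the source:
if (FALSE) {
  library(shiny)
  ui <- fluidPage(performance_rating_UI("perf"))
  server <- function(input, output, session) {
    callModule(performance_rating, "perf", pool = my_pool)  # my_pool: assumed
  }
  shinyApp(ui, server)
}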
|
/modules/performance_rating.R
|
no_license
|
Nybre/fiverr_proj_6
|
R
| false | false | 38,017 |
r
|
# relative measure of litters per den
library(dplyr)
library(tidyr)
library(readxl)
library(writexl)
library(lubridate)
kullar <- read_xlsx(path = "Lyor, kullar, gps-punkter, yta och avstånd/ALLA VALPLYOR HELAGS KORREKT 2000-2018.xlsx")
kullar<- as.data.frame(kullar)
head(kullar)
tot_kullar <- kullar %>%
group_by(Namn) %>%
count(År) %>%
summarise(kullar_totalt = sum(n))
View(tot_kullar)
inventeringar2000_2010<- read_xlsx(path = "Våraktivitet fjällräv/BEBODDA_LYOR_HEF 00_10.xlsx")
head(inventeringar2000_2010)
inventeringar2000_2010 <-as.data.frame(inventeringar2000_2010)
lyor_alla <- read_xlsx(path = "Lyor, kullar, gps-punkter, yta och avstånd/Lyor helags alla.xlsx")
lyor_alla<- as.data.frame(lyor_alla)
head(lyor_alla)
lyor_alla<-lyor_alla %>%
select("Namn")
lyor_alla <- cbind(lyor_alla,`antal kullar` = 0 )
#' Counts the number of summers each den was surveyed
#' between 2000 and 2010
sommar_inv<- inventeringar2000_2010 %>%
group_by(DenCode) %>%
filter(`INV. SUM` %in% "Y") %>%
count(`INV. SUM`) %>%
summarise(inv_totalt = sum(n))
View(sommar_inv)
colnames(sommar_inv) <- c("Namn", "inv_totalt")
# read in the 2015-2018 den surveys from Rovbase
inventeringar2015_2018<- read_xlsx(path = "Lyor, kullar, gps-punkter, yta och avstånd/Lyinventeringar 2015_2018.xlsx")
class(inventeringar2015_2018$Kontrolldato) # the dates were read in as dates
# Extract the summer surveys; months 6, 7, 8 are June, July, August
sommar_inv_15_18 <- inventeringar2015_2018[month(inventeringar2015_2018$Kontrolldato) %in% c(6,7,8), ]
head(sommar_inv_15_18)
View(sommar_inv_15_18)
#' Removes columns I don't need, including the date column.
#' That leaves several observations that are duplicates, so I can
#' use "distinct" to drop the duplicates and end up with one
#' survey per summer.
sommar_inv_15_18 <- sommar_inv_15_18 %>%
select(-c(X__1, Kontrolldato)) %>%
distinct()
View(sommar_inv_15_18)
#' Counts the number of summers each den was
#' surveyed between 2015 and 2018
sommar_inv_15_18<- sommar_inv_15_18 %>%
group_by(Namn) %>%
count(År) %>%
summarise(inv_totalt2 = sum(n))
View(sommar_inv_15_18)
inventeringar_alla <- sommar_inv_15_18 %>%
left_join(sommar_inv, by = "Namn")
# change NA to 0 in the shorter column
inventeringar_alla <- inventeringar_alla %>%
mutate_all(funs(replace(., is.na(.),0)))
View(inventeringar_alla)
inventeringar_alla<-inventeringar_alla %>%
mutate(inventeringar = inv_totalt2 + inv_totalt) %>%
select(-c(inv_totalt2, inv_totalt))
lyor_antal_kullar<- lyor_alla %>%
left_join(tot_kullar, by = "Namn") %>%
left_join(inventeringar_alla, by = "Namn")
lyor_antal_kullar
# replace NA with 0
lyor_antal_kullar <- lyor_antal_kullar %>%
mutate_all(funs(replace(., is.na(.),0)))
lyor_antal_kullar$inventeringar[79] <- 1 # this row had a litter recorded but 0 surveys.
lyor_antal_kullar<- lyor_antal_kullar %>%
mutate(relativa_kullar = kullar_totalt/inventeringar)
View(lyor_antal_kullar)
lyor_antal_kullar <- lyor_antal_kullar %>%
mutate_all(funs(replace(., is.na(.),0)))
#' this file is not correct with respect to relative litters.
#' The number of surveys is probably incomplete. zz108 has 1, the best
#' relative-litter value, but it has only 1 survey and 1 litter recorded, i.e. a 100% hit rate.
write_xlsx(lyor_antal_kullar, path = "Den and territory selection/Rawdata/antal kullar per lya 2000_2018.xlsx")
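# Note (sketch, not run): funs() is deprecated in dplyr >= 0.8; with dplyr >= 1.0
# the NA-to-zero steps above can be written with across() instead:
if (FALSE) {
  lyor_antal_kullar <- lyor_antal_kullar %>%
    mutate(across(everything(), ~ replace(.x, is.na(.x), 0)))
}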
|
/Relativt mått på kullar.R
|
no_license
|
torhanssonfrank/Den-selection
|
R
| false | false | 3,478 |
r
|
|
# this script makes the plots showing cumulative curves for pulmonary complications
# and 30-day mortality for several countries. A table is also generated giving cumulative quantiles
# for 1 and 2 year periods.
load("Computations") # loads output generated from "sim_model.R"
library(ggplot2)
library(gridExtra)
qs <- matrix(0.0, ncol = length(dates), nrow = 3)
qsc <- qs
k <- 1
cols <- c(4,2,2,1,3)
df <- data.frame(time=dates[start.date:length(dates)])
dfp <- data.frame(time=dates[start.date:length(dates)])
combined.m <- 0*resCounts[, , 5,1]
combined.p <- combined.m
# table for appendix
qtab <- matrix(0.0,6,12)
for(c in 1:length(countries)){
pop <- (c-1)*3 + 1 # chooses the 15000 level of surgeries
#30 day mortality
tmp <- resCounts[, , 5,pop] # daily counts
tmpCum <- tmp
# make cumulative curves
for (t in (start.date + 1):length(dates)) {
tmpCum[t, ] <- tmpCum[t - 1, ] + tmp[t, ]
}
# quantiles for table
qtab[c,7:9] <- quantile(tmpCum[which(dates=="2021-07-01"),],prob=c(0.05,0.5,0.95))
qtab[c,10:12] <- quantile(tmpCum[which(dates=="2022-07-01"),],prob=c(0.05,0.5,0.95))
# add to combined
combined.m <- combined.m + tmpCum
# daily quantiles
for (t in start.date:length(dates)) {
qs[, t] <- quantile(tmp[t,], probs = c(0.05, 0.5, 0.95))
qsc[, t] <- quantile(tmpCum[t,], probs = c(0.05, 0.5, 0.95))
}
# add quantiles to data frame to prepare for plotting
tmpdf <-
data.frame(t(qsc[, start.date:length(dates)]))
colnames(tmpdf) <- c(paste0(countries[c],"_q005"),
paste0(countries[c],"_median"),
paste0(countries[c],"_q095"))
df <- cbind(df,tmpdf)
# pulmonary complications
tmp <- resCounts[, , 10,pop]
tmpCum <- tmp
for (t in (start.date + 1):length(dates)) {
tmpCum[t, ] <- tmpCum[t - 1, ] + tmp[t, ]
}
qtab[c,1:3] <- quantile(tmpCum[which(dates=="2021-07-01"),],prob=c(0.05,0.5,0.95))
qtab[c,4:6] <- quantile(tmpCum[which(dates=="2022-07-01"),],prob=c(0.05,0.5,0.95))
combined.p <- combined.p + tmpCum
for (t in start.date:length(dates)) {
qs[, t] <- quantile(tmp[t,], probs = c(0.05, 0.5, 0.95))
qsc[, t] <- quantile(tmpCum[t,], probs = c(0.05, 0.5, 0.95))
}
tmpdf <-
data.frame(t(qsc[, start.date:length(dates)]))
colnames(tmpdf) <- c(paste0(countries[c],"_q005"),
paste0(countries[c],"_median"),
paste0(countries[c],"_q095"))
dfp <- cbind(dfp,tmpdf)
}
# pulmonary plot
# map colour inside aes() (as in p2 below) so that scale_color_manual() and the
# legend defined further down actually take effect
p1 <- ggplot(dfp,aes(x=time,y=AUS_median,color="AUS")) +
  geom_line(lwd=1.5) +
  geom_line(aes(x=time,y=CAN_median,color="CAN"),data=dfp,lwd=1.5) +
  geom_line(aes(x=time,y=EU27_median,color="EU27"),data=dfp,lwd=1.5) +
  geom_line(aes(x=time,y=UK_median,color="UK"),data=dfp,lwd=1.5) +
  geom_line(aes(x=time,y=US_median,color="US"),data=dfp,lwd=1.5) +
theme_bw() +
geom_ribbon(aes(ymin=AUS_q005,ymax=AUS_q095),alpha=0.2,fill="black",linetype=0) +
geom_ribbon(aes(ymin=CAN_q005,ymax=CAN_q095),alpha=0.2,fill="blue",linetype=0) +
geom_ribbon(aes(ymin=EU27_q005,ymax=EU27_q095),alpha=0.2,fill="red",linetype=0) +
geom_ribbon(aes(ymin=UK_q005,ymax=UK_q095),alpha=0.2,fill="green",linetype=0) +
geom_ribbon(aes(ymin=US_q005,ymax=US_q095),alpha=0.2,fill="cyan",linetype=0) +
coord_cartesian(ylim=c(0,150000)) +
scale_y_continuous(breaks=seq(0,150000,5000)) +
ggtitle("cumulative pulmonary complications") +
ylab("") +
guides(color=guide_legend(override.aes=list(fill=NA)))+
scale_color_manual(name = "",
breaks = countries,
values=c("AUS"="black",
"CAN"="blue",
"EU27"="red",
"UK"="green",
"US"="cyan"))
# theme(legend.position = "none")
# guides(color=guide_legend(override.aes=list(fill=NA)))+
# mortality plot
p2 <- ggplot(df,aes(x=time,y=AUS_median,color="AUS")) +
geom_line(lwd=1.5) +
geom_line(aes(x=time,y=CAN_median,color="CAN"),data=df,lwd=1.5) +
geom_line(aes(x=time,y=EU27_median,color="EU27"),data=df,lwd=1.5) +
geom_line(aes(x=time,y=UK_median,color="UK"),data=df,lwd=1.5) +
geom_line(aes(x=time,y=US_median,color="US"),data=df,lwd=1.5) +
theme_bw() +
geom_ribbon(aes(ymin=AUS_q005,ymax=AUS_q095),alpha=0.2,fill="black",linetype=0) +
geom_ribbon(aes(ymin=EU27_q005,ymax=EU27_q095),alpha=0.2,fill="red",linetype=0) +
geom_ribbon(aes(ymin=CAN_q005,ymax=CAN_q095),alpha=0.2,fill="blue",linetype=0) +
geom_ribbon(aes(ymin=UK_q005,ymax=UK_q095),alpha=0.2,fill="green",linetype=0) +
geom_ribbon(aes(ymin=US_q005,ymax=US_q095),alpha=0.2,fill="cyan",linetype=0) +
coord_cartesian(ylim=c(0,50000)) +
scale_y_continuous(breaks=seq(0,50000,2500)) +
ggtitle("cumulative 30 day mortality") +
ylab("")+
guides(color=guide_legend(override.aes=list(fill=NA)))+
scale_color_manual(name = "",
breaks = countries,
values=c("AUS"="black",
"CAN"="blue",
"EU27"="red",
"UK"="green",
"US"="cyan"))
ggsave("cumulative_plot.pdf",plot=arrangeGrob(p1,p2,ncol=2),width=14,height = 7)
#legend(dates[start.date],45000,legend = c("AUS","EU27","UK","US"),col=c(4,2,1,3),lty=c(1,1,1,1),lwd=c(2,2,2,2))
# add quantiles for the combination of countries to table
print("mortality")
qtab[6,7:9] <- quantile(combined.m[which(dates=="2021-07-01"),],prob=c(0.05,0.5,0.95))
qtab[6,10:12] <- quantile(combined.m[which(dates=="2022-07-01"),],prob=c(0.05,0.5,0.95))
print(qtab[,7:12])
print("pulmonary")
qtab[6,1:3] <- quantile(combined.p[which(dates=="2021-07-01"),],prob=c(0.05,0.5,0.95))
qtab[6,4:6] <- quantile(combined.p[which(dates=="2022-07-01"),],prob=c(0.05,0.5,0.95))
print(qtab[,1:6])
# re-arrange table
qtab <- rbind(qtab[,1:6],qtab[,7:12])
# dump table to file
writexl::write_xlsx(as.data.frame(qtab),path="cumulative_quantiles.xlsx")
#dev.off()
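# Aside (sketch): when all counts before start.date are zero, each running-total
# loop above is equivalent to a column-wise cumulative sum:
demo_counts <- matrix(c(0, 1, 2, 0, 3, 1), nrow = 3)  # toy daily counts, 2 simulations
apply(demo_counts, 2, cumsum)                          # cumulative curve per column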
|
/combined_cumulative_plots_ggplot.R
|
no_license
|
jtkgithub/COVID19_surgery_risk
|
R
| false | false | 6,113 |
r
|
|
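# Auto-generated fuzz-test input: feeds a 7x10 matrix of extreme/denormal
# doubles to biwavelet:::rcpp_row_quantile with q = 0 (presumably the row minima).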
testlist <- list(data = structure(c(4.73918917419577e-308, 5.88117562384501e-308, 4.58688053922028e-257, 9.53282412436824e-130, 9.53282412436824e-130, 9.53282412436824e-130, 9.53282412436824e-130, 9.53282412435301e-130, 9.53282385942128e-130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 10L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556487-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 529 |
r
|
|
# test the input, output, and errors of my_t.test()
# load the penguins and gapminder data sets
my_data <- stats::na.omit(project3part1package::my_penguins)
my_data2 <- stats::na.omit(project3part1package::my_gapminder)
test_that("Equal outputs, two.sided", {
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$test_stat,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$statistic))
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$p_val,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$p.value))
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$alternative,
t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$alternative)
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$df,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$parameter))
})
test_that("Equal outputs, default alternative", {
expect_equal(my_t.test(x = my_data$bill_depth_mm, mu = 20)$test_stat,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$statistic))
expect_equal(my_t.test(x = my_data$bill_depth_mm, mu = 20)$p_val,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$p.value))
expect_equal(my_t.test(x = my_data$bill_depth_mm, mu = 20)$alternative,
t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$alternative)
expect_equal(my_t.test(x = my_data$bill_depth_mm, mu = 20)$df,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$parameter))
})
test_that("Equal outputs, two.sided, new data", {
expect_equal(my_t.test(x = my_data2$pop, alternative = "two.sided", mu = 30000000)$test_stat,
as.numeric(t.test(x = my_data2$pop, alternative = "two.sided", mu = 30000000)$statistic))
expect_equal(my_t.test(x = my_data2$pop, alternative = "two.sided", mu = 30000000)$p_val,
as.numeric(t.test(x = my_data2$pop, alternative = "two.sided", mu = 30000000)$p.value))
expect_equal(my_t.test(x = my_data2$pop, alternative = "two.sided", mu = 30000000)$alternative,
t.test(x = my_data2$pop, alternative = "two.sided", mu = 30000000)$alternative)
expect_equal(my_t.test(x = my_data2$pop, alternative = "two.sided", mu = 30000000)$df,
as.numeric(t.test(x = my_data2$pop, alternative = "two.sided", mu = 30000000)$parameter))
})
test_that("Equal outputs, greater", {
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "greater", mu = 20)$test_stat,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "greater", mu = 20)$statistic))
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "greater", mu = 20)$p_val,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "greater", mu = 20)$p.value))
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "greater", mu = 20)$alternative,
t.test(x = my_data$bill_depth_mm, alternative = "greater", mu = 20)$alternative)
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "greater", mu = 20)$df,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "greater", mu = 20)$parameter))
})
test_that("Equal outputs, less", {
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)$test_stat,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)$statistic))
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)$p_val,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)$p.value))
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)$alternative,
t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)$alternative)
expect_equal(my_t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)$df,
as.numeric(t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)$parameter))
})
test_that("Function outputs a list with the correct elements", {
expect_type(my_t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20), "list")
expect_equal(length(my_t.test(x = my_data$bill_depth_mm, alternative = "less", mu = 20)), 4)
})
test_that("The values in the returned list are the correct type", {
expect_type(my_t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$test_stat, "double")
expect_type(my_t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$p_val, "double")
expect_type(my_t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$alternative, "character")
expect_type(my_t.test(x = my_data$bill_depth_mm, alternative = "two.sided", mu = 20)$df, "double")
})
test_that("Non-numeric mu input throws an error", {
expect_error(my_t.test(x = my_data$bill_depth_mm, alternative = "less", mu = "test"))
})
test_that("Incorrect alternative type input throws an error", {
expect_error(my_t.test(x = my_data$bill_depth_mm, alternative = 20, mu = 20))
})
test_that("Incorrect alternative string input throws an error", {
expect_error(my_t.test(x = my_data$bill_depth_mm, alternative = "not an alternative", mu = 20))
})
test_that("Incorrect alternative input throws an error", {
expect_error(my_t.test(x = data.frame(c("a", "b", "c")), alternative = 20, mu = 20))
expect_error(my_t.test(x = list(1, 2, "a"), alternative = 20, mu = 20))
})
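# For reference, a minimal one-sample implementation consistent with these
# expectations might look like the sketch below (hypothetical -- the packaged
# my_t.test is the authoritative version):
my_t.test_sketch <- function(x, alternative = "two.sided", mu) {
  if (!is.numeric(x) || !is.numeric(mu)) stop("x and mu must be numeric")
  if (!(is.character(alternative) &&
        alternative %in% c("two.sided", "less", "greater"))) {
    stop("alternative must be 'two.sided', 'less' or 'greater'")
  }
  df <- length(x) - 1
  test_stat <- (mean(x) - mu) / (sd(x) / sqrt(length(x)))
  p_val <- switch(alternative,
    two.sided = 2 * stats::pt(abs(test_stat), df, lower.tail = FALSE),
    less      = stats::pt(test_stat, df),
    greater   = stats::pt(test_stat, df, lower.tail = FALSE))
  list(test_stat = test_stat, p_val = p_val, alternative = alternative, df = df)
}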
|
/tests/testthat/test-my_t.R
|
no_license
|
thomson3uw/project3part1package
|
R
| false | false | 5,740 |
r
|
|
#*** zzz.R ***/
##
## AUTHOR: Arnost Komarek (my name in TeX: Arno\v{s}t Kom\'arek)
## arnost.komarek[AT]mff.cuni.cz
##
#* ********************************************************************************* */
.onAttach <- function(libname, pkgname)
#.First.lib <- function(libname, pkgname)
{
##library.dynam("mixAK", pkgname, libname) ## no more needed, load is provided by useDynLib in the NAMESPACE
packageStartupMessage(paste(
"\n",
"### Mixture of methods including mixtures\n",
"### Arnost Komarek\n\n",
"### See citation(\"mixAK\") or toBibtex(citation(\"mixAK\")) for the best way to cite\n",
"### the package if you find it useful.\n\n", sep=""))
#cat("\n")
#cat("### Mixture of methods including mixtures\n")
#cat("### Arnost Komarek\n\n")
#cat("### See citation(\"mixAK\") or toBibtex(citation(\"mixAK\")) for the best way to cite\n")
#cat("### the package if you find it useful.\n\n")
invisible()
}
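# Usage note (sketch, not run): callers can silence this banner, which is the
# main reason packageStartupMessage() is preferred over cat() above:
if (FALSE) {
  suppressPackageStartupMessages(library(mixAK))
}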
|
/R/zzz.R
|
no_license
|
cran/mixAK
|
R
| false | false | 996 |
r
|
|
library(plotrix)
library(Cairo)
library(RSvgDevice)
# branching.times() comes from ape and phylo4()/descendants() from phylobase;
# attach them explicitly in case the sourced scripts below do not
library(ape)
library(phylobase)
source("extract_polyploid_clades.R")
source("reading_trees_polyploid_nodes.R")
source("analyse_results_tp.R")
return_diversification_function <- function(model, name){
print(name)
model <- model[[name]]
print(model$lamb_par)
print(model$mu_par)
if (name == "constant_constant") { return( function(x) model$lamb_par-model$mu_par+x*0)}
else if (name == "constant_linear") { return( function(x) model$lamb_par-(model$mu_par[1]+x*model$mu_par[2]))}
else if (name == "constant_expo") { return( function(x) model$lamb_par - (model$mu_par[1]*exp(x*model$mu_par[2])))}
else if (name == "linear_constant") { return( function(x) model$lamb_par[1] + x*model$lamb_par[2] - model$mu_par)}
else if (name == "linear_linear") { return( function(x) model$lamb_par[1]+x*model$lamb_par[2]-(model$mu_par[1]+model$mu_par[2]*x))}
else if (name =="linear_expo") { return( function(x) model$lamb_par[1]+x*model$lamb_par[2]-(model$mu_par[1]*exp(x*model$mu_par[2])))}
else if (name =="expo_constant"){ return( function(x) model$lamb_par[1]*exp(x*model$lamb_par[2])-model$mu_par)}
else if (name =="expo_linear"){ return( function(x) model$lamb_par[1]*exp(x*model$lamb_par[2])-(model$mu_par[1]+x*model$mu_par[2]))}
else if (name =="expo_expo"){return( function(x) model$lamb_par[1]*exp(x*model$lamb_par[2])-model$mu_par[1]*exp(x*model$mu_par[2]))}
}
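# Hypothetical usage (not part of the pipeline): a constant-rate fit stored the
# way this script expects it ($lamb_par / $mu_par, keyed by model name) yields a
# flat net diversification of lambda - mu = 0.3:
fit_demo <- list(constant_constant = list(lamb_par = 0.4, mu_par = 0.1))
r_demo <- return_diversification_function(fit_demo, "constant_constant")
r_demo(c(0, 5, 10))  # 0.3 0.3 0.3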
select_best_model <- function(res, nb_param){
  index <- sapply(res, "[[", "aicc")
  # relative likelihood of each model compared with the best (lowest) AICc
  prob <- exp((min(index)-index)/2)
  # among models within the 0.05 relative-likelihood cutoff, look up their
  # parameter counts and prefer the most parsimonious model
  most_probable <- nb_param[names(prob[prob>0.05])]
  if (length(which(most_probable==min(most_probable)))==1) {return(names(which(most_probable==min(most_probable))))}
  else {
    # tie on parameter count: fall back to the best-AICc model itself
    return(names(which(prob==1)))
  }
}
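# Worked example (hypothetical numbers): with AICc values 100, 101 and 110 the
# relative likelihoods are 1, 0.61 and 0.0067; m1 and m2 pass the 0.05 cutoff
# and the more parsimonious m1 is returned.
res_demo <- list(m1 = list(aicc = 100), m2 = list(aicc = 101), m3 = list(aicc = 110))
nb_demo  <- setNames(c(2, 3, 4), c("m1", "m2", "m3"))
select_best_model(res_demo, nb_demo)  # "m1"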
plot_list_diversification_values <- function(res, nb_param, timemax, bound, cons, node, trees, clade, color, mean_time){
par(mfrow = c(2,1), xaxs = "i")
par(mar=c(1,7,0,0))
timemax <- plot_polyploid_diploid_clade(cons, node, strsplit(clade, "_")[[1]][1], mean_time, color)
par(mar=c(5,7,1,4))
revaxis(x=c(0, timemax), y=c(0, 500), yrev=FALSE, xrev=TRUE, col="#ffffff00", xlab="Time before present", ylab = "Diversification rate\n\n\n", yside=2)
ratemax <- c()
ratemin <- c()
func_poly <- list()
func_di <- list()
poly <- names(descendants(phylo4(cons), node, "tips"))
for (i in 1:length(res)){
if (checking_monophyly_polyploids(cons, trees[[i]], node)){
cropped_clades <- crop_clades(trees[[i]], poly)
if (!is.null(res[[i]]$res_poly)){
print("poly")
k <- select_best_model(res[[i]]$res_poly, nb_param)
func_poly[[i]] <- return_diversification_function(res[[i]]$res_poly, k)
max <- branching.times( cropped_clades$poly)[1]
l <- rev(func_poly[[i]](seq(0, max, length=200)))
lines(seq(-max, 0, length=200), l, type="l", col=color)
}
if (!is.null(res[[i]]$res_di)){
print("di")
k2 <- select_best_model(res[[i]]$res_di, nb_param)
func_di[[i]] <- return_diversification_function(res[[i]]$res_di, k2)
if (!is.null( cropped_clades$di)) max <- branching.times( cropped_clades$di)[1]
else max <- timemax
l <- rev(func_di[[i]](seq(0, max, length=200)))
lines(seq(-max, 0, length=200), l, type="l", col="black")
}
}
}
}
write_rpanda_results <- function(file, dest, boundaries, means){
results <- read_results(file)
print(names(results))
num_param_models <- setNames(c(2, 3, 3, 3, 4, 4, 3, 4, 4), c("constant_constant", "constant_linear", "constant_expo", "linear_constant", "linear_linear", "linear_expo", "expo_constant", "expo_linear", "expo_expo"))
for (i in names(results)[7]){
  print(i)
  devSVG(file=paste(dest, i, ".svg", sep=""), width=12, height=8)
  clade_name <- strsplit(i, "_")[[1]][1]
  node_index <- as.integer(strsplit(i, "_")[[1]][2])
  cons_tree  <- get(paste(clade_name, "_cons", sep=""))
  plot_list_diversification_values(
    results[[i]],
    num_param_models,
    branching.times(cons_tree)[1],
    boundaries[[i]],
    cons_tree,
    get(paste("polyploid_node_", clade_name, "_cons", sep=""))[node_index],
    get(clade_name),
    i,
    rgb(95, 128, 77, max=255),
    means[[i]]
  )
  dev.off()
}
}
result_rpanda_folder <- "results_rpanda/"
write_rpanda_results(result_rpanda_folder, plot_folder, boundaries, means)
|
/analyse_results_rpanda.R
|
no_license
|
sachalau/teleost-radiation
|
R
| false | false | 4,325 |
r
|
|
summary.Binary.Logistic.Biplot <- function(object, Normal=TRUE, Latex=FALSE, Kable=FALSE, ...){
print("BINARY LOGISTIC BIPLOT")
print(paste("Type of Biplot : ", object$Type))
print(paste("Initial Configuration : ", object$InitialConfig))
print(paste("Method : ", object$Method))
print(paste("Rotation : ", object$Rotation))
print("-----------")
dims=dim(object$RowCoordinates)[2]
print("COLUMN PARAMETERS")
print(object$ColumnParameters)
print("-----------")
print("COLUMNS FIT")
RR=cbind(object$Deviances, object$Dfs, object$pvalues, object$Nagelkerke, object$CoxSnell, object$MacFaden, object$PercentsCorrec*100, object$Sensitivity*100, object$Specificity*100)
colnames(RR)=c("Deviance", "D.F", "P-val", "Nagelkerke", "Cox-Snell", "MacFaden", "% Correct", "Sensitivity", "Specificity")
rownames(RR)=rownames(object$ColumnParameters)
Total=c(object$DevianceTotal, object$TotalDf, object$p, object$TotNagelkerke, object$TotCoxSnell, object$TotMacFaden, object$TotalPercent*100, object$TotalSensitivity*100, object$TotalSpecificity*100)
RR=rbind(RR,Total)
print(RR)
print("------------------------")
print("Thresholds, Loadings and Communalities")
LO=cbind(object$Tresholds, object$Loadings, object$Communalities)
colnames(LO)=c("Thresholds", paste("Dim",1:dims,sep=""), "Communalities")
rownames(LO)=rownames(object$ColumnParameters)
print(LO)
}
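# Usage note: this is an S3 method, so for a fitted object of class
# "Binary.Logistic.Biplot" a plain summary(fit) call dispatches here.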
|
/R/summary.Binary.Logistic.Biplot.R
|
no_license
|
villardon/MultBiplotR
|
R
| false | false | 1,400 |
r
|
|
# Create a vector m of integers that starts at 32 and ends at 99.
m <- 32:99
# Determine the length of object m.
length(m) # 68, i.e. 99 - 32 + 1
# Create a vector x of integers that starts at 12 and ends at 73.
x <- 12:73
# Determine the length of object x.
length(x) # 62, i.e. 73 - 12 + 1
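# The colon operator builds an integer sequence; equivalently:
identical(m, seq(32, 99))  # TRUE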
|
/Chapter 3/Exercice6.r
|
no_license
|
Mifaou/Datacamp-HarvardX-PH125.1x
|
R
| false | false | 250 |
r
|
|
library(mixtools)
#setup parallel processing
library(doParallel)
cl<-makeCluster(8,outfile="");
registerDoParallel(cl)
features<-read.csv("scriptFFeaturesTrimmed1.csv")
features<-features[features$f0>0,]
features <- features[sample(nrow(features)), ] # shuffle rows; sample() on a data frame would shuffle its columns
n<-nrow(features)
features.train<-features[1:floor(0.6*n),]
features.cv<-features[(floor(0.6*n)+1):floor(0.8*n),]
features.test<-features[(floor(0.8*n)+1):n,] # note the parentheses: 1+floor(0.8*n):n would shift the whole range past n
constructFeatureMatrix<-function(f)
{
v<-f$v
f0<-f$f0
RMSenergy<-f$RMSenergy
return(matrix(c(v,f0,RMSenergy),ncol=3))
}
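# Equivalent one-liner (sketch): cbind() also keeps the column names, which can
# be handy when inspecting the matrix interactively:
constructFeatureMatrixNamed <- function(f) cbind(v = f$v, f0 = f$f0, RMSenergy = f$RMSenergy)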
loglikes.cv <- vector(length=10) # only element 1 (the k = 1 baseline) is used outside the parallel loop
mixture<-list()
# k=1 needs special handling
trainingMatrix<-constructFeatureMatrix(features.train)
cvMatrix<-constructFeatureMatrix(features.cv)
mu<-colMeans(trainingMatrix)
sigma <- cov(trainingMatrix)
print(paste0("learning gmm with 1 mixture"))
loglikes.cv[1] <- sum(log(dmvnorm(cvMatrix,mu,sigma)))
print(paste0("loglike on training ",sum(log(dmvnorm(trainingMatrix,mu,sigma)))))
print(paste0("loglike on cv ", loglikes.cv[1] ))
maxmixtures=15
result<-foreach (k=2:maxmixtures) %dopar%
{
library(mixtools)
source("loglikenormalmix.r")
source("mvdnormalmix.r")
source("writeGMMToXML.r")
ptm<-proc.time()
print(paste0("learning gmm with ",k, " mixtures"))
mixture <- mvnormalmixEM(trainingMatrix,k=k,maxit=1000,epsilon=0.01)
loglikes.cv <- loglike.normalmix(cvMatrix,mixture=mixture)
print(paste0("loglike on training (k=",k,"): ",mixture$loglik))
print(paste0("loglike on cv (k=",k,"): ", loglikes.cv))
ptmdiff<-proc.time()-ptm
df<-data.frame(k, mixture$loglik, loglikes.cv, ptmdiff[1], ptmdiff[2], ptmdiff[3])
names(df)<-c("gaussians","loglik_training","loglik_cv","usertime","systemtime","elapsedtime")
writeGMMToXML(mixture, filename=paste0("gmmVoicedV",k,".xml"))
saveRDS(mixture, file = paste0("gmmVoicedV",k,".rda"))
df
}
df<-do.call("rbind",result)
write.csv(df,file="cvinfov.csv", row.names=FALSE)
stopCluster(cl)
|
/AnalysisAndModel/ranalysis/cvselectGMMdesignv.r
|
no_license
|
herwinvw/VisualProsody
|
R
| false | false | 1,932 |
r
|
|
library("umap")
library("ggpubr")
library("stringr")
library("reshape2")
library("ggplot2")
library("dplyr")
library("grid")
meta_info_maptor = read.table("~/MAPTor_NET/Misc/Meta_information.tsv",sep = "\t",header = T,stringsAsFactors = F)
rownames(meta_info_maptor) = meta_info_maptor$Sample
colnames(meta_info_maptor) = str_replace(colnames(meta_info_maptor),pattern = "\\.","_")
meta_info_maptor$OS_Tissue = as.double(str_replace(meta_info_maptor$OS_Tissue,pattern = ",","."))
meta_info = read.table("~/Deko_Projekt/Misc/Meta_information.tsv",sep = "\t",header = T,stringsAsFactors = F)
rownames(meta_info) = meta_info$Sample
colnames(meta_info) = str_replace(colnames(meta_info),pattern = "\\.","_")
matcher = match(meta_info_maptor$Sample,meta_info$Sample, nomatch = 0)
meta_info[matcher,"OS_Tissue"] = meta_info_maptor[matcher != 0,"OS_Tissue"]
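# match() with nomatch = 0 makes the zero indices drop out on both sides of the
# assignment above, so only samples present in both tables get OS_Tissue updated.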
#expr_raw = read.table("~/Deko_Projekt/Data/Publication_datasets/NEN/Sato.S22.tsv",sep="\t", stringsAsFactors = F, header = T, row.names = 1,as.is = F)
expr_raw = read.table("~/Deko_Projekt/Results/Cell_fraction_predictions/RepSet_Cibersort_Tosti_200_genes_200_samples_endocrine_exocrine_metaplastic_acinar-i_muc5+_only.tsv",sep="\t", stringsAsFactors = F, header = T, as.is = TRUE)
colnames(expr_raw) = str_replace(colnames(expr_raw), pattern = "^X", "")
colnames(expr_raw) = str_replace(colnames(expr_raw), pattern = "\\.", "")
expr_raw[1:5,1:5]
dim(expr_raw)
expr_raw = expr_raw[,!(colnames(expr_raw) %in% c("","X","RMSE","Correlation","P_value","Subtype","Strength_subtype","model","Sig_score"))]
expr_raw = t(expr_raw)
no_match = colnames(expr_raw) %in% meta_info$Sample == FALSE
#colnames(expr_raw)[no_match] = str_replace(colnames(expr_raw)[no_match], pattern = "^X","")
#no_match = colnames(expr_raw) %in% meta_info$Sample == F
#colnames(expr_raw)[no_match] = paste("X",colnames(expr_raw)[no_match],sep ="")
#no_match = colnames(expr_raw) %in% meta_info$Sample == F
#colnames(expr_raw)[which(no_match)]
# note: filter on meta_info here -- meta_data is only created below from the
# samples selected in this step
candidates = meta_info$Sample[
#meta_info$Study %in% c("Master","Charite")
#meta_info$Site_of_primary %in% c("Pancreatic") #& meta_info$Primary_Metastasis %in% c("Primary")
meta_info$Site_of_primary != "Pancreatic" #& meta_info$Primary_Metastasis %in% c("Primary")
#meta_info$NET_NEC_PCA %in% c("NEC","NET")
#!(meta_info$Histology_Metastasis %in% c("Outlier"))
]
length(candidates)
expr_raw = expr_raw[,candidates]
meta_data = meta_info[colnames(expr_raw),]
dim(meta_data)
#write.table(expr_raw,"~/Deko_Projekt/Data/Publication_datasets/NEN/Diedisheim.S4.tsv",sep ="\t",quote =F , row.names = TRUE)
source("~/Deko_Projekt/Scripts/Archive/Visualization_colors.R")
genes_of_interest_hgnc_t = read.table("~/Deko_Projekt/Misc/Stem_signatures.gmt.tsv",sep ="\t", stringsAsFactors = F, header = F)
#liver_genes = genes_of_interest_hgnc_t[70,3:ncol(genes_of_interest_hgnc_t)]
genes_of_interest_hgnc_t$V1
i = 72
genes_of_interest_hgnc_t[i,1]
sad_genes = str_to_upper( as.character( genes_of_interest_hgnc_t[i,3:ncol(genes_of_interest_hgnc_t)]) )
sad_genes = sad_genes[ sad_genes != ""]
length(sad_genes)
meta_data = meta_info[colnames(expr_raw),]
expr = expr_raw[rownames(expr_raw) %in% sad_genes[1:20],]
expr[1:5,1:5]
dim(expr)
row_var = as.double(apply(expr, MARGIN = 1, FUN= var))
summary(row_var)
#expr = expr[row_var > 3,]
dim(expr)
correlation_matrix = cor(t(expr))
pcr = prcomp(correlation_matrix)
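# Optional check (sketch): variance explained by the leading principal components
summary(pcr)$importance[, 1:3]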
#meta_exp = as.double(apply(expr, MARGIN = 1, FUN = mean))
#expr = expr[,order(meta_exp)]
#svg(filename = "~/Downloads/Heatmap.svg", width = 10, height = 10)
p =pheatmap::pheatmap(
#correlation_matrix,
t(expr),
annotation_col = meta_data[,c("NEC_NET","Grading","Primary_Metastasis")],
#annotation_col = meta_data[,c("NEC_NET_Color","Histology")],
annotation_colors = aka3,
show_rownames = TRUE,
cluster_cols = TRUE,
show_colnames = FALSE,
treeheight_row = 0,
legend = T,
fontsize_col = 7,
#cellheight = 15,
clustering_method = "average"
)
p = ggbiplot::ggbiplot(
pcr,
obs.scale =.75,
var.scale = 2,
labels.size = 4,
alpha = 1,
groups = as.character(meta_data$NET_NEC_PCA),
#label = meta_data$Sample,
ellipse = TRUE,
circle = TRUE,
var.axes = F
)
p = p + geom_point( aes( size = 4, color = as.factor(meta_data$NET_NEC_PCA) ))
p
p = p + scale_color_manual( values = c("green","yellow","darkred","blue","red"), name = "Subtype" ) + theme(legend.position="top",axis.text=element_text(size=12),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
#p = p + scale_color_manual( values = c("Red","Blue"), name = "Subtype" ) + theme(legend.position="top",axis.text=element_text(size=12),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
p = p + scale_color_manual( values = c("Purple","Red","Blue") ) + theme(legend.position="top",axis.text=element_text(size=12),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
p = p + theme(legend.position="top",axis.text=element_text(size=12),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
#svg(filename = "~/Deco/Results/Images/SM_Figure_5_NEC_NET_PCA.svg", width = 10, height = 10)
p
#dev.off()
#p + xlim(c(-1.0,2.25)) + ylim(-1.5,1.0)
###
meta_info = read.table("~/Deko_Projekt/Misc/Meta_information.tsv",sep = "\t",header = T,stringsAsFactors = F)
rownames(meta_info) = meta_info$Name
colnames(meta_info) = str_replace(colnames(meta_info),pattern = "\\.","_")
meta_info$NEC_NET = meta_info$NEC_NET_PCA
data_t = read.table("~/Deko_Projekt/Results/Cell_fraction_predictions/RepSet_S57_CIBERSORT_Tosti_400.Absolute.tsv",sep="\t", stringsAsFactors = F, header = T, as.is = T)
#data_t = read.table("~/Deko_Projekt/Results/Bseq_results_fractions_p_values.tsv",sep="\t", stringsAsFactors = F, header = T, as.is = T)
table(data_t$Dataset) / 3
vis_mat = data_t
vis_mat = vis_mat[ vis_mat$Dataset %in% c("Fadista","RepSet") ,]
####
meta_data = meta_info[vis_mat$Sample,]
table(meta_data$Histology)
vis_mat$Histology = meta_data$Histology
vis_mat$Grading[
(vis_mat$Grading == "G3") & (vis_mat$Histology != "Pancreatic")
] = "G3_other"
# p-value
selector = c("Grading","P_value","Model","Dataset")
vis_mat_4 = vis_mat[,selector]
vis_mat_4[is.na(vis_mat_4$Grading),"Grading"] = "G0"
melt_mat_endocrine = vis_mat_4 %>% filter( Model %in% c("Alpha_Beta_Gamma_Delta_Baron")) %>% group_by(Grading)
melt_mat_endocrine_agg = aggregate(melt_mat_endocrine$P_value, by = list(melt_mat_endocrine$Grading), FUN = mean)
melt_mat_exocrine = vis_mat_4 %>% filter( Model %in% c("Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron")) %>% group_by(Grading)
melt_mat_exocrine_agg = aggregate(melt_mat_exocrine$P_value, by = list(melt_mat_exocrine$Grading), FUN = mean)
melt_mat_hisc = vis_mat_4 %>% filter( Model %in% c("Alpha_Beta_Gamma_Delta_Hisc_Baron")) %>% group_by(Grading)
melt_mat_hisc_agg = aggregate(melt_mat_hisc$P_value, by = list(melt_mat_hisc$Grading), FUN = mean)
melt_mat_crine = rbind(
melt_mat_endocrine_agg,
melt_mat_exocrine_agg,
melt_mat_hisc_agg
)
colnames(melt_mat_crine) = c( 'Grading','P_value' )
sd_endocrine = aggregate( melt_mat_endocrine$P_value, by = list(melt_mat_endocrine$Grading), FUN = sd)
sd_exocrine = aggregate( melt_mat_exocrine$P_value, by = list(melt_mat_exocrine$Grading), FUN = sd)
sd_hisc = aggregate( melt_mat_hisc$P_value, by = list(melt_mat_hisc$Grading), FUN = sd)
melt_mat_crine$SD = c(sd_endocrine$x,sd_exocrine$x,sd_hisc$x)
samples = as.character(vis_mat[
(vis_mat$Dataset == "RepSet") &
(vis_mat$Model == "Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron") &
(vis_mat$Grading != "G3_other"),
"Sample"
])
#write.table(meta_info,"~/Deko_Projekt/Misc/Meta_information.tsv",sep ="\t",quote =F , row.names = F)
melt_mat_crine$SD = melt_mat_crine$SD
#melt_mat_crine$model = c("endocrine","endocrine","endocrine","endocrine","exocrine","exocrine","exocrine","exocrine","hisc","hisc","hisc","hisc")
melt_mat_crine$Model = c(rep("Endocrine-only",5),rep("Endocrine & Exocrine",5),rep("Endocrine & HISC",5))
melt_mat_crine$Model = factor(melt_mat_crine$Model, levels = c("Endocrine-only","Endocrine & Exocrine","Endocrine & HISC"))
melt_mat_crine = melt_mat_crine[,]
#melt_mat_crine = melt_mat_crine %>% filter(Grading != "G3_other")
# NOTE: 'props' (per-sample cell-type proportions) is assumed to be present in
# the workspace from an earlier deconvolution step; it is not created in this script.
melt_mat_crine = props[,c("endocrine cell","ductal cell type 1","acinar cell","acinar_edge_cell")]
melt_mat_crine_save = melt_mat_crine = t(apply( melt_mat_crine, MARGIN = 1, FUN = function(vec){return((vec/sum(vec))*100)}))
melt_mat_crine = as.matrix(melt_mat_crine, nrow = nrow(melt_mat_crine_save), ncol = ncol(melt_mat_crine_save))
melt_mat_crine$NEC_NET= meta_data$NET_NEC_PCA
melt_mat_crine$Grading= meta_data$Grading
melt_mat_crine = melt_mat_crine %>% dplyr::filter(Grading != "Unknown")
melt_mat_crine[melt_mat_crine$NEC_NET == "NEC","Grading"] = "G3_NEC"
melt_mat_crine[(melt_mat_crine$NEC_NET == "NET") & melt_mat_crine$Grading == "G3","Grading"] = "G3_NET"
melt_mat_crine = reshape2::melt(melt_mat_crine)
colnames(melt_mat_crine) = c("Sample","Cell_type","Prediction")
#colnames(melt_mat_crine) = c("NEC_NET","Grading","Cell_type","Prediction")
melt_mat_crine$Grading = meta_data[,"Grading"]
melt_mat_crine_vis = melt_mat_crine %>% group_by(Grading,Cell_type) %>% summarise("Average_Absolute_Prediction" = mean(Prediction))
p = ggplot(
data = melt_mat_crine_vis,
aes(
x = Grading,
y = Average_Absolute_Prediction,
fill = Cell_type
)
)
p = p + geom_bar(stat="identity", color = "black",position = "dodge")
p = p + scale_fill_manual(values = c("darkgreen", "darkred","black"))
p = p + ylab(label = "P-value nu-SVR regression models") + xlab(label = "Grading")
p = p + geom_errorbar(aes(ymin = P_value,ymax = P_value+SD*.25), position = "dodge")
p = p + guides(fill=guide_legend(title="Deconvolution Model"))
p = p + geom_hline( yintercept = 0.05, color= "red",size=2, linetype = "dashed")
#p = p + theme(legend.position="top",axis.text=element_text(size=14),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
p = p + theme(legend.position="top",axis.text=element_text(size=10),axis.title=element_text(size=10))+ theme(legend.text=element_text(size=10),legend.title=element_text(size=10))
p = p + annotate("text", label = "P-value < 0.05", x = 2, y = 0.045, size = 6, colour = "black") + annotate("text", label = "P-value > 0.05", x = 2, y = 0.055, size = 6, colour = "black")
#svg(filename = "~/Deko_Projekt/Results/Images/Figure_2_deconvolution_p_values.svg", width = 10, height = 10)
#svg(filename = "~/Downloads/P_value.svg", width = 10, height = 10)
print(p)
dev.off()
# leftover check from another session ('meta_data_sato_gep' is not defined here):
#table(meta_data_sato_gep$Grading,meta_data_sato_gep$NEC_NET)
|
/Scripts/Publication_plots.R
|
no_license
|
RaikOtto/Deko_Projekt
|
R
| false | false | 10,819 |
r
|
library("umap")
library("ggpubr")
library("stringr")
library("reshape2")
library("ggplot2")
library("dplyr")
library("grid")
meta_info_maptor = read.table("~/MAPTor_NET/Misc/Meta_information.tsv",sep = "\t",header = T,stringsAsFactors = F)
rownames(meta_info_maptor) = meta_info_maptor$Sample
colnames(meta_info_maptor) = str_replace(colnames(meta_info_maptor),pattern = "\\.","_")
meta_info_maptor$OS_Tissue = as.double(str_replace(meta_info_maptor$OS_Tissue,pattern = ",","."))
meta_info = read.table("~/Deko_Projekt/Misc/Meta_information.tsv",sep = "\t",header = T,stringsAsFactors = F)
rownames(meta_info) = meta_info$Sample
colnames(meta_info) = str_replace(colnames(meta_info),pattern = "\\.","_")
matcher = match(meta_info_maptor$Sample,meta_info$Sample, nomatch = 0)
meta_info[matcher,"OS_Tissue"] = meta_info_maptor[matcher != 0,"OS_Tissue"]
#expr_raw = read.table("~/Deko_Projekt/Data/Publication_datasets/NEN/Sato.S22.tsv",sep="\t", stringsAsFactors = F, header = T, row.names = 1,as.is = F)
expr_raw = read.table("~/Deko_Projekt/Results/Cell_fraction_predictions/RepSet_Cibersort_Tosti_200_genes_200_samples_endocrine_exocrine_metaplastic_acinar-i_muc5+_only.tsv",sep="\t", stringsAsFactors = F, header = T, as.is = TRUE)
colnames(expr_raw) = str_replace(colnames(expr_raw), pattern = "^X", "")
colnames(expr_raw) = str_replace(colnames(expr_raw), pattern = "\\.", "")
expr_raw[1:5,1:5]
dim(expr_raw)
expr_raw = expr_raw[,!(colnames(expr_raw) %in% c("","X","RMSE","Correlation","P_value","Subtype","Strength_subtype","model","Sig_score"))]
expr_raw = t(expr_raw)
no_match = colnames(expr_raw) %in% meta_info$Sample == FALSE
#colnames(expr_raw)[no_match] = str_replace(colnames(expr_raw)[no_match], pattern = "^X","")
#no_match = colnames(expr_raw) %in% meta_info$Sample == F
#colnames(expr_raw)[no_match] = paste("X",colnames(expr_raw)[no_match],sep ="")
#no_match = colnames(expr_raw) %in% meta_info$Sample == F
#colnames(expr_raw)[which(no_match)]
candidates = meta_data$Sample[
#meta_data$Study %in% c("Master","Charite")
#meta_data$Site_of_primary %in% c("Pancreatic") #& meta_data$Primary_Metastasis %in% c("Primary")
meta_data$Site_of_primary != "Pancreatic" #& meta_data$Primary_Metastasis %in% c("Primary")
#meta_data$NET_NEC_PCA %in% c("NEC","NET")
#!(meta_data$Histology_Metastasis %in% c("Outlier"))
]
length(candidates)
expr_raw = expr_raw[,candidates]
meta_data = meta_info[colnames(expr_raw),]
dim(meta_data)
#write.table(expr_raw,"~/Deko_Projekt/Data/Publication_datasets/NEN/Diedisheim.S4.tsv",sep ="\t",quote =F , row.names = TRUE)
source("~/Deko_Projekt/Scripts/Archive/Visualization_colors.R")
genes_of_interest_hgnc_t = read.table("~/Deko_Projekt/Misc/Stem_signatures.gmt.tsv",sep ="\t", stringsAsFactors = F, header = F)
#liver_genes = genes_of_interest_hgnc_t[70,3:ncol(genes_of_interest_hgnc_t)]
genes_of_interest_hgnc_t$V1
i = 72
genes_of_interest_hgnc_t[i,1]
sad_genes = str_to_upper( as.character( genes_of_interest_hgnc_t[i,3:ncol(genes_of_interest_hgnc_t)]) )
sad_genes = sad_genes[ sad_genes != ""]
length(sad_genes)
meta_data = meta_info[colnames(expr_raw),]
expr = expr_raw[rownames(expr_raw) %in% sad_genes[1:20],]
expr[1:5,1:5]
dim(expr)
row_var = as.double(apply(expr, MARGIN = 1, FUN= var))
summary(row_var)
#expr = expr[row_var > 3,]
dim(expr)
correlation_matrix = cor(t(expr))
pcr = prcomp((correlation_matrix))
#meta_exp = as.double(apply(expr, MARGIN = 1, FUN = mean))
#expr = expr[,order(meta_exp)]
#svg(filename = "~/Downloads/Heatmap.svg", width = 10, height = 10)
p =pheatmap::pheatmap(
#correlation_matrix,
t(expr),
annotation_col = meta_data[,c("NEC_NET","Grading","Primary_Metastasis")],
#annotation_col = meta_data[,c("NEC_NET_Color","Histology")],
annotation_colors = aka3,
show_rownames = TRUE,
cluster_cols = TRUE,
show_colnames = FALSE,
treeheight_row = 0,
legend = T,
fontsize_col = 7,
#cellheight = 15,
clustering_method = "average"
)
p = ggbiplot::ggbiplot(
pcr,
obs.scale =.75,
var.scale = 2,
labels.size = 4,
alpha = 1,
groups = as.character(meta_data$NET_NEC_PCA),
#label = meta_data$Sample,
ellipse = TRUE,
circle = TRUE,
var.axes = F
)
p = p + geom_point( aes( size = 4, color = as.factor(meta_data$NET_NEC_PCA) ))
p
p = p + scale_color_manual( values = c("green","yellow","darkred","blue","red"), name = "Subtype" ) + theme(legend.position="top",axis.text=element_text(size=12),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
#p = p + scale_color_manual( values = c("Red","Blue"), name = "Subtype" ) + theme(legend.position="top",axis.text=element_text(size=12),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
p = p + scale_color_manual( values = c("Purple","Red","Blue") ) + theme(legend.position="top",axis.text=element_text(size=12),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
p = p + theme(legend.position="top",axis.text=element_text(size=12),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
#svg(filename = "~/Deco/Results/Images/SM_Figure_5_NEC_NET_PCA.svg", width = 10, height = 10)
p
#dev.off()
#p + xlim(c(-1.0,2.25)) + ylim(-1.5,1.0)
###
meta_info = read.table("~/Deko_Projekt/Misc/Meta_information.tsv",sep = "\t",header = T,stringsAsFactors = F)
rownames(meta_info) = meta_info$Name
colnames(meta_info) = str_replace(colnames(meta_info),pattern = "\\.","_")
meta_info$NEC_NET = meta_info$NEC_NET_PCA
data_t = read.table("~/Deko_Projekt/Results/Cell_fraction_predictions/RepSet_S57_CIBERSORT_Tosti_400.Absolute.tsv",sep="\t", stringsAsFactors = F, header = T, as.is = T)
#data_t = read.table("~/Deko_Projekt/Results/Bseq_results_fractions_p_values.tsv",sep="\t", stringsAsFactors = F, header = T, as.is = T)
table(data_t$Dataset) / 3
vis_mat = data_t
vis_mat = vis_mat[ vis_mat$Dataset %in% c("Fadista","RepSet") ,]
####
meta_data = meta_info[vis_mat$Sample,]
table(meta_data$Histology)
vis_mat$Histology = meta_data$Histology
vis_mat$Grading[
(vis_mat$Grading == "G3") & (vis_mat$Histology != "Pancreatic")
] = "G3_other"
# p-value
selector = c("Grading","P_value","Model","Dataset")
vis_mat_4 = vis_mat[,selector]
vis_mat_4[is.na(vis_mat_4$Grading),"Grading"] = "G0"
melt_mat_endocrine = vis_mat_4 %>% filter( Model %in% c("Alpha_Beta_Gamma_Delta_Baron")) %>% group_by(Grading)
melt_mat_endocrine_agg = aggregate(melt_mat_endocrine$P_value, by = list(melt_mat_endocrine$Grading), FUN = mean)
melt_mat_exocrine = vis_mat_4 %>% filter( Model %in% c("Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron")) %>% group_by(Grading)
melt_mat_exocrine_agg = aggregate(melt_mat_exocrine$P_value, by = list(melt_mat_exocrine$Grading), FUN = mean)
melt_mat_hisc = vis_mat_4 %>% filter( Model %in% c("Alpha_Beta_Gamma_Delta_Hisc_Baron")) %>% group_by(Grading)
melt_mat_hisc_agg = aggregate(melt_mat_hisc$P_value, by = list(melt_mat_hisc$Grading), FUN = mean)
melt_mat_crine = rbind(
melt_mat_endocrine_agg,
melt_mat_exocrine_agg,
melt_mat_hisc_agg
)
colnames(melt_mat_crine) = c( 'Grading','P_value' )
sd_endocrine = aggregate( melt_mat_endocrine$P_value, by = list(melt_mat_endocrine$Grading), FUN = sd)
sd_exocrine = aggregate( melt_mat_exocrine$P_value, by = list(melt_mat_exocrine$Grading), FUN = sd)
sd_hisc = aggregate( melt_mat_hisc$P_value, by = list(melt_mat_hisc$Grading), FUN = sd)
melt_mat_crine$SD = c(sd_endocrine$x,sd_exocrine$x,sd_hisc$x)
samples = as.character(vis_mat[
(vis_mat$Dataset == "RepSet") &
(vis_mat$Model == "Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron") &
(vis_mat$Grading != "G3_other"),
"Sample"
])
#write.table(meta_info,"~/Deko_Projekt/Misc/Meta_information.tsv",sep ="\t",quote =F , row.names = F)
melt_mat_crine$SD = melt_mat_crine$SD
#melt_mat_crine$model = c("endocrine","endocrine","endocrine","endocrine","exocrine","exocrine","exocrine","exocrine","hisc","hisc","hisc","hisc")
melt_mat_crine$Model = c(rep("Endocrine-only",5),rep("Endocrine & Exocrine",5),rep("Endocrine & HISC",5))
melt_mat_crine$Model = factor(melt_mat_crine$Model, levels = c("Endocrine-only","Endocrine & Exocrine","Endocrine & HISC"))
melt_mat_crine = melt_mat_crine[,]
#melt_mat_crine = melt_mat_crine %>% filter(Grading != "G3_other")
melt_mat_crine = props[,c("endocrine cell","ductal cell type 1","acinar cell","acinar_edge_cell")]
melt_mat_crine_save = melt_mat_crine = t(apply( melt_mat_crine, MARGIN = 1, FUN = function(vec){return((vec/sum(vec))*100)}))
melt_mat_crine = as.matrix(melt_mat_crine, nrow = nrow(melt_mat_crine_save), ncol = ncol(melt_mat_crine_save))
melt_mat_crine$NEC_NET= meta_data$NET_NEC_PCA
melt_mat_crine$Grading= meta_data$Grading
melt_mat_crine = melt_mat_crine %>% dplyr::filter(Grading != "Unknown")
melt_mat_crine[melt_mat_crine$NEC_NET == "NEC","Grading"] = "G3_NEC"
melt_mat_crine[(melt_mat_crine$NEC_NET == "NET") & melt_mat_crine$Grading == "G3","Grading"] = "G3_NET"
melt_mat_crine = reshape2::melt(melt_mat_crine)
colnames(melt_mat_crine) = c("Sample","Cell_type","Prediction")
#colnames(melt_mat_crine) = c("NEC_NET","Grading","Cell_type","Prediction")
melt_mat_crine$Grading = meta_data[,"Grading"]
melt_mat_crine_vis = melt_mat_crine %>% group_by(Grading,Cell_type) %>% summarise("Average_Absolute_Prediction" = mean(Prediction))
p = ggplot(
data = melt_mat_crine_vis,
aes(
x = Grading,
y = Average_Absolute_Prediction,
fill = Cell_type
)
)
p = p + geom_bar(stat="identity", color = "black",position = "dodge")
p = p + scale_fill_manual(values = c("darkgreen", "darkred","black"))
p = p + ylab(label = "P-value nu-SVR regression models") + xlab(label = "Grading")
p = p + geom_errorbar(aes(ymin = P_value,ymax = P_value+SD*.25), position = "dodge")
p = p + guides(fill=guide_legend(title="Deconvolution Model"))
p = p + geom_hline( yintercept = 0.05, color= "red",size=2, linetype = "dashed")
#p = p + theme(legend.position="top",axis.text=element_text(size=14),axis.title=element_text(size=13))+ theme(legend.text=element_text(size=13),legend.title=element_text(size=13))
p = p + theme(legend.position="top",axis.text=element_text(size=10),axis.title=element_text(size=10))+ theme(legend.text=element_text(size=10),legend.title=element_text(size=10))
p = p + annotate("text", label = "P-value < 0.05", x = 2, y = 0.045, size = 6, colour = "black") + annotate("text", label = "P-value > 0.05", x = 2, y = 0.055, size = 6, colour = "black")
#svg(filename = "~/Deko_Projekt/Results/Images/Figure_2_deconvolution_p_values.svg", width = 10, height = 10)
#svg(filename = "~/Downloads/P_value.svg", width = 10, height = 10)
print(p)
dev.off()
table(meta_data_sato_gep$Grading,meta_data_sato_gep$NEC_NET)
|
library(sandwich) # provides lrvar() and vcovHAC() used at the end of this script
x = arima.sim(n = 63, list(ar = c(0.8897, -0.4858), ma = c(-0.2279, 0.2488)),
             sd = sqrt(0.1796))
plot(x)
fit = lm(x ~ 1)
ACF = acf(x)
ACF$acf
VAR = var(x)
T = length(x)
L = round(0.75 * T^(1/3))
var=var(x)
lol <- embed(x,L+1)
cov=numeric(L) # one autocovariance per lag; L happens to be 3 for n = 63
for(h in 1:L){
cov[h]=cov(lol[,4],lol[,4-h])
}
Sigmaxv=var+2*((1-1/L)*cov[1]+(1-2/L)*cov[2]+(1-3/L)*cov[3]) # Bartlett weights (1 - h/L), hard-coded for L = 3; the textbook Newey-West weight is 1 - h/(L+1)
manualHAC=(1/(T))*Sigmaxv
(mine = VAR*(1 + 2*( (2/3)*ACF$acf[2] + (1/3)*ACF$acf[3] ))/T) # ACF$acf holds autocorrelations, so scale by VAR to get autocovariances
(mine2 = VAR*(1 + 2*( 0.5*ACF$acf[2] ))/T)
lrvar(x, type = "Newey-West", prewhite = FALSE)
vcovHAC(fit)
manualHAC
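# Generalized sketch (not in the original scratch work): the same
# Bartlett-weighted long-run variance for an arbitrary lag length L,
# so the three hard-coded weight terms above are not needed.
nw_lrv <- function(x, L) {
  T <- length(x)
  g0 <- var(x) # lag-0 autocovariance (sample variance)
  gh <- sapply(1:L, function(h) cov(x[(h + 1):T], x[1:(T - h)])) # lag-h autocovariances
  (g0 + 2 * sum((1 - (1:L)/L) * gh)) / T # Bartlett weights 1 - h/L, mirroring the code above
}
nw_lrv(x, L) # close to manualHAC; the embed() version above uses a slightly shorter sample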
|
/R/lollololoololl.R
|
no_license
|
andreaslillevangbech/M2-project
|
R
| false | false | 567 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.R
\name{import_module}
\alias{import_module}
\alias{\%digests\%}
\alias{\%imports\%}
\title{Import a Module.}
\usage{
import_module(name, url, ..., digest = NULL, force = FALSE)
}
\arguments{
\item{name}{A string (character vector of length one).
A module name can contain letters, figures and some special characters,
namely \code{_}, \code{-}, and \code{/}. The latter is a namespace
separator.
Names containing \code{/mock/}, \code{/mocks/}, \code{/test/},
\code{/tests/}, \code{/example/}, or \code{/examples/} have a special
meaning related to code testing and examples.
The name "modulr" corresponds to a special module and is therefore
reserved.}
\item{url}{A string (character vector of length one). The URL must use the
HTTP(S) protocol.}
\item{...}{Further arguments passed to \code{httr::\link[httr]{GET}}.}
\item{digest}{A string (character vector of length one). See
\code{\link{get_digest}}.}
\item{force}{A flag. Should an already defined module be re-imported?}
}
\value{
The result of the evaluation of the imported script.
}
\description{
Import or re-import a module which is defined in an R, R Markdown or R Sweave
script at a given URL using the HTTP(S) protocol.
}
\details{
R Markdown and R Sweave files are accordingly tangled into R code, which is
in turn evaluated.
The imported module is rejected if
\itemize{
\item its name differs from the \code{name} argument,
\item its digest differs from the \code{digest} argument.
}
In such a case, the internal state of modulr is rolled back.
}
\section{Syntactic Sugars}{
\preformatted{name \%imports\% url}
\preformatted{name \%digests\% digest \%imports\% url}
}
\section{Warning}{
It is considered a very bad practice to define, touch, undefine, load, make,
reset, or perform any other operation from within a module definition that
may alter the internal state of modulr.
}
\examples{
\dontrun{
reset()
# https://gist.github.com/aclemen1/3fcc508cb40ddac6c1e3
"modulr/vault" \%imports\%
"https://gist.github.com/aclemen1/3fcc508cb40ddac6c1e3"
list_modules()
reset()
# equivalently
"modulr/vault" \%imports\% "3fcc508cb40ddac6c1e3"
make("modulr/vault/example")
make_tests()}
\dontrun{
reset()
# https://gist.github.com/aclemen1/3fcc508cb40ddac6c1e3
"modulr/vault_with_a_typo" \%imports\% "3fcc508cb40ddac6c1e3"
list_modules()}
\dontrun{
reset()
# https://gist.github.com/aclemen1/3fcc508cb40ddac6c1e3
"modulr/vault" \%digests\%
"with a wrong digest" \%imports\%
"3fcc508cb40ddac6c1e3"
list_modules()}
}
\seealso{
\code{httr::\link[httr]{GET}}, \code{\link{get_digest}},
\code{\link{list_modules}}, \code{\link{make}}, and
\code{\link{make_tests}}.
}
|
/man/import_module.Rd
|
permissive
|
aclemen1/modulr
|
R
| false | true | 2,764 |
rd
|
# Course : CS 513
# First Name : Sowmya
# Last Name : Vijayakumar
# CWId: 10421665
#Naive Bayes Classifier for young people survey data
rm(list=ls())
#Read the input data file
response<-read.csv("/Users/sowmyav/Desktop/Fall2017/KDD/YoungPeopleSurveyProject/young-people-survey/responses_remove_categorical.csv"
,na.strings = c(""," ", NA, NaN))
dim(response)#[1] 1010 140
#bc<-read.csv("/Users/sowmyav/Desktop/Fall2017/KDD/breast-cancer-wisconsin_data.csv")
#response<-na.omit(response)
#Replacing the numerical NA values with mean
for(i in 1:139){
response[is.na(response[,i]), i] <- round(mean(response[,i], na.rm = TRUE))
}
#Replacing the categorical NA values with mode #Gender column
Mode <- function(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
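#e.g. Mode(c("f","m","f")) returns "f"; ties resolve to the first value encountered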
response[is.na(response[,140]), 140] <- Mode(response[,140])
#response[c(10:11,15,6)]
music_resp <- response[,1:19]
movie_resp <- response[,20:31]
hobbies_resp <- response[,32:63]
library(corrplot)
mcor1 <- cor(music_resp)
round(mcor1, digits = 2)
mcor2 <- cor(hobbies_resp)
round(mcor2, digits = 2)
mcor3 <- cor(movie_resp) # was missing; corrplot(mcor3) below would otherwise error
round(mcor3, digits = 2)
corrplot(mcor1, type = 'full',title = 'Correlation matrix - Music preferences', outline = T,tl.cex=0.7)
corrplot(mcor2, type = 'full',title = 'Correlation matrix - Hobbies preferences', outline = T,tl.cex=0.7)
corrplot(mcor3, type = 'full',title = 'Correlation matrix - Movie preferences', outline = T,tl.cex=0.7)
#response[c(10:11,15,6,22,27)]
response<-response[-c(10:11,15,6,41,40,22,27)]
#Create test and training set
set.seed(321) #set repeatable random numbers
index = sample(1:nrow(response), size=0.3*nrow(response))
test<-response[index,]
training <- response[-index,]
library(e1071)
model <- naiveBayes(Gender ~ ., data = training)
class(model)
#summary(model)
#print(model)
preds <- predict(model, newdata = test)
table(preds, test$Gender)
results<-cbind(test, as.character(preds))
dim(results)
gender_col<-ncol(results)
#head(results)
?table()
#Measure the performance of the naive Bayes classifier
table(Actual=results[,gender_col-1],Prediction=results[,gender_col])
wrong<-results[,gender_col-1]!=results[,gender_col]
rate<-sum(wrong)/length(wrong)
rate #0.09756098
#Results:
#Without NAs
# # Prediction
# Actual female male
# female 105 11
# male 9 80
#Replacing NAs
# Prediction
# Actual female male
# female 186 6
# male 11 100
#rate 0.05610561
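#Sanity-check sketch (not part of the original analysis): accuracy straight from the confusion matrix
cm <- table(Actual = results[,gender_col-1], Prediction = results[,gender_col])
sum(diag(cm)) / sum(cm) #overall accuracy = 1 - misclassification rate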
|
/naive_bayes_with_correlation_matrix.R
|
no_license
|
dhavalaM/Data-analysis-and-classification-using-R
|
R
| false | false | 2,396 |
r
|
library(tidyverse)
timings <- read_csv("timings.csv",
col_names=c("filtration","n_simplices","creation","reduction"),
skip=1)
timings
p <- ggplot(timings, aes(x=n_simplices, y=reduction)) +
  #geom_line(aes(x=n_simplices, y=creation)) +
  geom_line() + geom_point()
ggsave("timings.png", p, width=15, height=6) # ggsave is a side-effecting call, not a layer, so it cannot be chained with '+'
|
/timings.R
|
no_license
|
dlozeve/topological-persistence
|
R
| false | false | 357 |
r
|
#' Drop unused factor levels
#'
#' Function to drop empty factor levels in test data
#' (\href{https://stackoverflow.com/a/39495480/4185785}{source}).
#'
#' @param fit Model fit of class "glm".
#' @param test_data Data frame containing the test data.
#' @export
rm_lvls <- function(fit, test_data) {
test_data %>%
droplevels() %>%
as.data.frame() -> test_data
# Obtain factor predictors in the model and their levels
factors <- (gsub("[-^0-9]|as.factor|\\(|\\)", "",
names(unlist(fit$xlevels))))
# Do nothing if no factors are present
if (length(factors) == 0) {
return(test_data)
}
factor_levels <- unname(unlist(fit$xlevels))
model_factors <- as.data.frame(cbind(factors, factor_levels))
# Select columns in test data that are factor predictors in trained model
predictors <- names(test_data[names(test_data) %in% factors])
# For each factor predictor in your data, set the unused level to NA
for (i in seq_len(length(predictors))) {
found <- test_data[, predictors[i]] %in% model_factors[
model_factors$factors == predictors[i], ]$factor_levels
if (any(!found)) {
# track which variable
var <- predictors[i]
# set to NA
test_data[!found, predictors[i]] <- NA
# drop empty factor levels in test data
test_data %>%
droplevels() -> test_data
}
}
return(test_data)
}
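# Minimal usage sketch (hypothetical objects, not part of the package):
# fit <- glm(y ~ f, data = train) # 'f' is a factor predictor
# test <- rm_lvls(fit, test) # rows with levels of 'f' unseen in training become NA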
|
/R/utils.R
|
permissive
|
henckr/maidrr
|
R
| false | false | 1,390 |
r
|
#Back to baseball.
#In an earlier example, we estimated regression lines to predict runs from bases on balls within different home-run strata.
#First we build a data frame similar to this one.
library(dslabs)
library(tidyverse)
library(Lahman)
data("Teams")
dat <- Teams %>% filter(yearID %in% 1961:2001) %>%
  mutate(HR = round(HR/G,1),
         BB = BB/G,
         R = R/G) %>%
  select(HR, BB, R) %>%
  filter(HR >= 0.4 & HR <= 1.2)
#Then, to compute the regression line in each stratum, since we did not know the lm function at the time, we used the formula directly, like this.
dat %>%
  group_by(HR) %>%
  summarize(slope = cor(BB, R)*sd(R)/sd(BB))
#We argued that the slopes are similar and that the differences were perhaps due to random variation.
#To provide a more rigorous defense of the slopes, which is what led to our multivariate regression model, we could compute confidence intervals for each slope.
#We have not learned the formula for this, but the lm function provides enough information to construct them.
#First, note that if we try to use the lm function to obtain the estimated slope like this, we do not get what we want.
dat %>%
  group_by(HR) %>%
  lm(R ~ BB, data = .) %>%
  .$coef
#The lm function ignored group_by.
#This is expected, because lm is not part of the tidyverse and does not know how to handle the result of group_by, which is a grouped tibble.
#We will now describe tibbles in some detail.
#When summarize receives the result of group_by, it somehow knows which rows of the table go with which groups.
#But where is this information stored in the data frame?
#Let's write some code to see the result of a group_by call.
dat %>% group_by(HR) %>% head()
#Note that there are no columns with the information needed to define the groups.
#But if you look closely at the output, you notice the line "A tibble: 6 x 3".
#We can learn the class of the returned object using this line of code, and we see that the class is a "tbl".
dat %>% group_by(HR) %>% class()
#This is pronounced "tibble".
#It is also a tbl_df.
#This is equivalent to tibble.
#The tibble is a special kind of data frame.
#We have seen them before, because tidyverse functions like group_by and also summarize always return this type of data frame.
#The group_by function returns a special kind of tibble, the grouped tibble.
#We will say more about grouped tibbles later.
#Note that the manipulation verbs (select, filter, mutate, and arrange) do not necessarily return tibbles.
#They preserve the class of the input.
#If they receive a regular data frame, they return a regular data frame.
#If they receive a tibble, they return a tibble (a short sketch at the end of this script illustrates this).
#But tibbles are the default data frame of the tidyverse.
#Tibbles are very similar to data frames.
#You can think of them as modern versions of data frames.
#Next, we will briefly describe three important differences.
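#A minimal sketch (not from the course material; assumes dplyr >= 0.8) of the two points above:
#verbs preserve the class of their input, and the grouping lives in an attribute.
df <- data.frame(x = 1:3)
class(dplyr::mutate(df, y = x^2)) #"data.frame"
class(dplyr::mutate(dplyr::as_tibble(df), y = x^2)) #"tbl_df" "tbl" "data.frame"
attr(dat %>% group_by(HR), "groups") #the hidden bookkeeping behind group_by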
|
/HC_Advanced_dplyr.R
|
no_license
|
wparedesgt/Regresiones
|
R
| false | false | 3,161 |
r
|
## Set working directory to the folder with the Pyphe .csv files
setwd("C:/Users/Pardis/OneDrive - Danmarks Tekniske Universitet/DTU/Bachelorprojekt/2) AS16 + S26 experiments/Cultures on solid medium/Co-culture picture with bars (day 2)/Final_pyphe_analysis/pyphe_quant")
## Import data from Pyphe .csv files
#library("readxl") # not needed here: read.csv() below is base R
m <- as.data.frame(read.csv("day2-pheno-mono-exp--6.png.csv"))
c <- as.data.frame(read.csv("NewProject-Run-2-Plate-001-Expbe-bar-exp--6.png.csv"))
################################# MONOCULTURES #################################
## Extract data for each strain
## Subset of dataframe is extracted and sorted by "column" numbering.
#AS16
AS16_mono<-subset(m, m$column==1|m$column==7|m$column==13|m$column==19)
AS16_mono<-AS16_mono[order(AS16_mono$column),]
#S26
S26_mono<-subset(m, m$column==3|m$column==9|m$column==15|m$column==21)
S26_mono<-S26_mono[order(S26_mono$column),]
#mut
mut_mono<-subset(m, m$column==5|m$column==11|m$column==17|m$column==23)
mut_mono<-mut_mono[order(mut_mono$column),]
################################# CO-CULTURES #################################
## Using "row" and "column" information from dataframe to extract data
## AS16 with S26
AS16_w_S26<-subset(c,c$row==2 & c$column==3|c$row==2 & c$column==9|c$row==2 & c$column==15|c$row==3 & c$column==14|c$row==3 & c$column==20
|c$row==5 & c$column==3|c$row==5 & c$column==9|c$row==5 & c$column==15|c$row==6 & c$column==8|c$row==6 & c$column==20|c$row==8 & c$column==9|c$row==8 & c$column==3)
## AS16 with mut
AS16_w_mut<-subset(c,c$row==2 & c$column==2|c$row==2 & c$column==8|c$row==2 & c$column==14|c$row==2 & c$column==20|c$row==3 & c$column==5|c$row==3 & c$column==11|c$row==3 & c$column==17|c$row==5 & c$column==2|c$row==5 & c$column==20|c$row==6 & c$column==5|c$row==6 & c$column==11|c$row==6 & c$column==17
|c$row==6 & c$column==23|c$row==8 & c$column==2|c$row==8 & c$column==8|c$row==8 & c$column==14|c$row==8 & c$column==20)
## AS16 with AS16
AS16_w_AS16 <-subset(c,c$row==1 & c$column==7|c$row==1 & c$column==8|c$row==1 & c$column==13|c$row==1 & c$column==14|c$row==1 & c$column==19|c$row==1 & c$column==20
|c$row==4 & c$column==2|c$row==4 & c$column==7|c$row==4 & c$column==8|c$row==4 & c$column==13|c$row==4 & c$column==14
|c$row==4 & c$column==19|c$row==4 & c$column==20|c$row==7 & c$column==2|c$row==7 & c$column==7|c$row==7 & c$column==8|c$row==7 & c$column==13|c$row==7 & c$column==14)
## S26 with AS16
S26_w_AS16 <- subset(c,c$row==2 & c$column==4|c$row==2 & c$column==10|c$row==2 & c$column==16|c$row==2 & c$column==22|c$row==3 & c$column==1|c$row==3 & c$column==7|c$row==3 & c$column==13|c$row==3 & c$column==19|c$row==5 & c$column==4
|c$row==5 & c$column==10|c$row==5 & c$column==16|c$row==6 & c$column==1|c$row==6 & c$column==7|c$row==6 & c$column==19|c$row==8 & c$column==10|c$row==8 & c$column==4)
## S26 with mut
S26_w_mut <- subset(c,c$row==2 & c$column==5|c$row==2 & c$column==11|c$row==2 & c$column==17|c$row==2 & c$column==23|c$row==3 & c$column==4|c$row==3 & c$column==10
|c$row==3 & c$column==16|c$row==3 & c$column==22|c$row==5 & c$column==5|c$row==5 & c$column==11|c$row==5 & c$column==17|c$row==5 & c$column==17|c$row==5 & c$column==23
|c$row==6 & c$column==4|c$row==6 & c$column==10|c$row==6 & c$column==16|c$row==6 & c$column==16|c$row==6 & c$column==22|c$row==8 & c$column==5|c$row==8 & c$column==11|c$row==8 & c$column==17|c$row==8 & c$column==23)
## S26 with S26
S26_w_S26 <- subset(c,c$row==1 & c$column==3|c$row==1 & c$column==4||c$row==1 & c$column==9|c$row==1 & c$column==10|c$row==1 & c$column==15
|c$row==1 & c$column==16|c$row==1 & c$column==21|c$row==1 & c$column==22|c$row==4 & c$column==3|c$row==4 & c$column==4
|c$row==4 & c$column==9|c$row==4 & c$column==10|c$row==4 & c$column==15|c$row==4 & c$column==16|c$row==4 & c$column==21
|c$row==4 & c$column==22|c$row==7 & c$column==3|c$row==7 & c$column==4|c$row==7 & c$column==9|c$row==7 & c$column==10|c$row==7 & c$column==15
|c$row==7 & c$column==16|c$row==7 & c$column==21|c$row==7 & c$column==22)
## Mut with AS16
mut_w_AS16 <- subset(c,c$row==2 & c$column==1|c$row==2 & c$column==7|c$row==2 & c$column==13|c$row==2 & c$column==19|c$row==3 & c$column==6|c$row==3 & c$column==12|c$row==3 & c$column==18|c$row==5 & c$column==1|c$row==5 & c$column==19)
## Mut with S26
mut_w_S26 <- subset(c,c$row==2 & c$column==6|c$row==2 & c$column==12|c$row==2 & c$column==18|c$row==2 & c$column==24||c$row==3 & c$column==3|c$row==3 & c$column==9
|c$row==3 & c$column==15|c$row==3 & c$column==21|c$row==5 & c$column==6|c$row==5 & c$column==12|c$row==5 & c$column==18|c$row==5 & c$column==24|c$row==6 & c$column==3
|c$row==6 & c$column==9|c$row==6 & c$column==15|c$row==6 & c$column==21|c$row==8 & c$column==6|c$row==8 & c$column==12|c$row==8 & c$column==18|c$row==8 & c$column==24)
## Mut with mut
mut_w_mut <- subset(c,c$row==1 & c$column==5|c$row==1 & c$column==6|c$row==1 & c$column==11|c$row==1 & c$column==12|c$row==1 & c$column==17|c$row==1 & c$column==18|c$row==1 & c$column==23|c$row==1 & c$column==24
|c$row==4 & c$column==24|c$row==4 & c$column==24|c$row==7 & c$column==5|c$row==7 & c$column==6|c$row==7 & c$column==17|c$row==7 & c$column==18|c$row==7 & c$column==23|c$row==7 & c$column==27)
############################ Calculations ############################
####### Circularity
#AS16
mean(AS16_mono$circularity)
sd(AS16_mono$circularity)
mean(AS16_w_AS16$circularity)
sd(AS16_w_AS16$circularity)
mean(AS16_w_S26$circularity)
sd(AS16_w_S26$circularity)
mean(AS16_w_mut$circularity)
sd(AS16_w_mut$circularity)
#S26
mean(S26_mono$circularity)
sd(S26_mono$circularity)
mean(S26_w_AS16$circularity)
sd(S26_w_AS16$circularity)
mean(S26_w_S26$circularity)
sd(S26_w_S26$circularity)
mean(S26_w_mut$circularity)
sd(S26_w_mut$circularity)
#Mut
mean(mut_mono$circularity)
sd(mut_mono$circularity)
mean(mut_w_AS16$circularity)
sd(mut_w_AS16$circularity)
mean(mut_w_S26$circularity)
sd(mut_w_S26$circularity)
mean(mut_w_mut$circularity)
sd(mut_w_mut$circularity)
############################### one-way ANOVA ##################################
#### AS16 in different conditions
# Reorganize data in one vector.
AS16_circularity<-c(AS16_mono$circularity,AS16_w_AS16$circularity,AS16_w_S26$circularity,AS16_w_mut$circularity)
# Assign the strain it's co-cultured with to the corresponding element in the data vector.
treatm_AS16 <-factor(rep(c("Monoculture","With AS16","With S26", "With S26 mut"),times=c(nrow(AS16_mono),nrow(AS16_w_AS16),nrow(AS16_w_S26),nrow(AS16_w_mut))))
# Check if the data are normally distributed
shapiro.test(AS16_mono$circularity)
shapiro.test(AS16_w_AS16$circularity)
shapiro.test(AS16_w_S26$circularity)
shapiro.test(AS16_w_mut$circularity)
# Visualize with qq plots:
par(mfrow=c(2,2))
qqnorm(AS16_mono$circularity,main= "AS16 in monoculture")
qqline(AS16_mono$circularity)
qqnorm(AS16_w_AS16$circularity, main= "AS16 with AS16")
qqline(AS16_w_AS16$circularity)
qqnorm(AS16_w_S26$circularity,main="AS16 with S26 WT")
qqline(AS16_w_S26$circularity)
qqnorm(AS16_w_mut$circularity,main="AS16 with S26 mut")
qqline(AS16_w_mut$circularity)
# Use Bartlett's test to check for equal variances
bartlett.test(AS16_circularity,treatm_AS16)
# Put data in data frame
data_as16<-data.frame(name=treatm_AS16,value=AS16_circularity)
library(ggplot2)
library(ggeasy)
my_xlab <- paste(levels(data_as16$name),"\n(N=",table(data_as16$name),")",sep="")
ggplot(data_as16, aes(x=name, y=value)) +
geom_boxplot(color="black",fill=c("aquamarine4","darkseagreen4","darkseagreen3","darkseagreen1"))+
geom_jitter(shape=16, position=position_jitter(0.2))+
ylab("Circularity")+
xlab(" ")+
geom_point() +
labs(title="Circularity of AS16 spots")+
ggeasy::easy_center_title()+
scale_x_discrete(labels=my_xlab)+
theme(plot.title = element_text(size = 20),axis.title = element_text(size = 16))+
theme(axis.text = element_text(size=14))+
stat_summary(fun.y=mean, geom="point", shape=20, size=5, color="red", fill="red")+
ylim(0.1,1)
# one-way anova
anova(lm(AS16_circularity~treatm_AS16))
# Post hoc analysis
# Calculate M
4*(4-1)/2
#corrected alpha
0.05/6
#H0: the means are equal
#df=n-k
n<-length(AS16_circularity)
k<-4
MSE<- 0.019943
tobs<-(mean(AS16_mono$circularity)-mean(AS16_w_AS16$circularity))/(sqrt(MSE*(1/length(AS16_mono$circularity)+1/length(AS16_w_AS16$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_mono$circularity)-mean(AS16_w_S26$circularity))/(sqrt(MSE*(1/length(AS16_mono$circularity)+1/length(AS16_w_S26$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_mono$circularity)-mean(AS16_w_mut$circularity))/(sqrt(MSE*(1/length(AS16_mono$circularity)+1/length(AS16_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_w_AS16$circularity)-mean(AS16_w_S26$circularity))/(sqrt(MSE*(1/length(AS16_w_AS16$circularity)+1/length(AS16_w_S26$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_w_AS16$circularity)-mean(AS16_w_mut$circularity))/(sqrt(MSE*(1/length(AS16_w_AS16$circularity)+1/length(AS16_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_w_S26$circularity)-mean(AS16_w_mut$circularity))/(sqrt(MSE*(1/length(AS16_w_S26$circularity)+1/length(AS16_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
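# Equivalent built-in route (sketch, not part of the original analysis): pooled-SD
# pairwise t-tests with Bonferroni adjustment, matching the manual tobs calculations above.
pairwise.t.test(AS16_circularity, treatm_AS16, p.adjust.method = "bonferroni", pool.sd = TRUE)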
## Fold change
mean(AS16_mono$circularity)/mean(AS16_w_AS16$circularity)
mean(AS16_mono$circularity)/mean(AS16_w_S26$circularity)
mean(AS16_mono$circularity)/mean(AS16_w_mut$circularity)
mean(AS16_w_AS16$circularity)/mean(AS16_w_S26$circularity)
(mean(AS16_w_AS16$circularity)/mean(AS16_w_mut$circularity))^-1
(mean(AS16_w_S26$circularity)/mean(AS16_w_mut$circularity))^-1
############# S26 in different conditions
# Reorganize data in one vector.
S26_circularity<-c(S26_mono$circularity,S26_w_AS16$circularity,S26_w_S26$circularity,S26_w_mut$circularity)
# Assign the strain it's co-cultured with to the corresponding element in the data vector.
treatm_S26 <-factor(rep(c("Monoculture","With AS16","With S26", "With S26 mut"),times=c(nrow(S26_mono),nrow(S26_w_AS16),nrow(S26_w_S26),nrow(S26_w_mut))))
# Check if the data are normally distributed
shapiro.test(S26_mono$circularity)
shapiro.test(S26_w_AS16$circularity)
shapiro.test(S26_w_S26$circularity)
shapiro.test(S26_w_mut$circularity)
# Visualize with qq plots:
par(mfrow=c(2,2))
qqnorm(S26_mono$circularity,main= "S26 in monoculture")
qqline(S26_mono$circularity)
qqnorm(S26_w_AS16$circularity, main= "S26 with AS16")
qqline(S26_w_AS16$circularity)
qqnorm(S26_w_S26$circularity,main="S26 with S26 WT")
qqline(S26_w_S26$circularity)
qqnorm(S26_w_mut$circularity,main="S26 with S26 mut")
qqline(S26_w_mut$circularity)
# Use Bartlett's test to check for equal variances
bartlett.test(S26_circularity,treatm_S26)
# Put data in data frame
data_S26<-data.frame(name=treatm_S26,value=S26_circularity)
library(ggplot2)
library(ggeasy)
my_xlab <- paste(levels(data_S26$name),"\n(N=",table(data_S26$name),")",sep="")
ggplot(data_S26, aes(x=name, y=value)) +
geom_boxplot(color="black",fill=c("darkorange4","darkorange3","darkorange","darkgoldenrod1"))+
geom_jitter(shape=16, position=position_jitter(0.2))+
ylab("Circularity")+
xlab(" ")+
geom_point() +
labs(title="Circularity of S26 WT spots")+
ggeasy::easy_center_title()+
scale_x_discrete(labels=my_xlab)+
theme(plot.title = element_text(size = 20),axis.title = element_text(size = 16))+
theme(axis.text = element_text(size=14))+
stat_summary(fun.y=mean, geom="point", shape=20, size=5, color="red", fill="red")+
ylim(0.1,1)
# one-way anova
anova(lm(S26_circularity~treatm_S26))
## Fold change
(mean(S26_mono$circularity)/mean(S26_w_AS16$circularity))^-1
(mean(S26_mono$circularity)/mean(S26_w_S26$circularity))^-1
mean(S26_mono$circularity)/mean(S26_w_mut$circularity)
mean(S26_w_AS16$circularity)/mean(S26_w_S26$circularity)
mean(S26_w_AS16$circularity)/mean(S26_w_mut$circularity)
mean(S26_w_S26$circularity)/mean(S26_w_mut$circularity)
############# Mutant in different conditions
# Reorganize data in one vector.
mut_circularity<-c(mut_mono$circularity,mut_w_AS16$circularity,mut_w_S26$circularity,mut_w_mut$circularity)
# Assign the strain it's co-cultured with to the corresponding element in the data vector.
treatm_mut <-factor(rep(c("Monoculture","With AS16","With S26", "With S26 mut"),times=c(nrow(mut_mono),nrow(mut_w_AS16),nrow(mut_w_S26),nrow(mut_w_mut))))
# Check if the data are normally distributed
shapiro.test(mut_mono$circularity)
shapiro.test(mut_w_AS16$circularity)
shapiro.test(mut_w_S26$circularity)
shapiro.test(mut_w_mut$circularity)
# Visualize with qq plots:
par(mfrow=c(2,2))
qqnorm(mut_mono$circularity,main= expression(paste("S26",Delta, ~"in monoculture")))
qqline(mut_mono$circularity)
qqnorm(mut_w_AS16$circularity, main= expression(paste("S26",Delta,~"with AS16")))
qqline(mut_w_AS16$circularity)
qqnorm(mut_w_S26$circularity,main=expression(paste("S26",Delta,~"with S26 WT")))
qqline(mut_w_S26$circularity)
qqnorm(mut_w_mut$circularity,main=expression(paste("S26",Delta, ~"with S26",Delta)))
qqline(mut_w_mut$circularity)
# Use Bartlett's test for testing equal variance
bartlett.test(mut_circularity,treatm_mut)
# Put data in data frame
data_mut<-data.frame(name=treatm_mut,value=mut_circularity)
library(ggplot2)
library(ggeasy)
my_xlab <- paste(levels(data_mut$name),"\n(N=",table(data_mut$name),")",sep="")
ggplot(data_mut, aes(x=name, y=value)) +
geom_boxplot(color="black",fill=c("cornsilk4","cornsilk3","cornsilk2","cornsilk"))+
geom_jitter(shape=16, position=position_jitter(0.2))+
ylab("circularity")+
xlab(" ")+
geom_point() +
labs(title=expression(paste("Circularity of S26",Delta,~ "spots")))+
ggeasy::easy_center_title()+
scale_x_discrete(labels=my_xlab)+
theme(plot.title = element_text(size = 20),axis.title = element_text(size = 16))+
theme(axis.text = element_text(size=14))+
stat_summary(fun.y=mean, geom="point", shape=20, size=5, color="red", fill="red")+
ylim(0.1,1)
# one-way anova
anova(lm(mut_circularity~treatm_mut))
# Post hoc analysis
# Calculate M
4*(4-1)/2
#corrected alpha
0.05/6
#H0: the means are equal
#df=n-k
n<-length(mut_circularity)
k<-4
MSE <-0.018193
tobs<-(mean(mut_mono$circularity)-mean(mut_w_AS16$circularity))/(sqrt(MSE*(1/length(mut_mono$circularity)+1/length(mut_w_AS16$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_mono$circularity)-mean(mut_w_S26$circularity))/(sqrt(MSE*(1/length(mut_mono$circularity)+1/length(mut_w_S26$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_mono$circularity)-mean(mut_w_mut$circularity))/(sqrt(MSE*(1/length(mut_mono$circularity)+1/length(mut_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_w_AS16$circularity)-mean(mut_w_S26$circularity))/(sqrt(MSE*(1/length(mut_w_AS16$circularity)+1/length(mut_w_S26$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_w_AS16$circularity)-mean(mut_w_mut$circularity))/(sqrt(MSE*(1/length(mut_w_AS16$circularity)+1/length(mut_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_w_S26$circularity)-mean(mut_w_mut$circularity))/(sqrt(MSE*(1/length(mut_w_S26$circularity)+1/length(mut_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
n<-length(mut_circularity)
k<-4
# Note: t.test() has no 'df' argument, so the earlier 'df=n-k' was silently swallowed by '...';
# these pooled two-sample tests use df = n1 + n2 - 2, not n - k.
t.test(mut_mono$circularity,mut_w_AS16$circularity,var.equal = TRUE)
t.test(mut_mono$circularity,mut_w_S26$circularity,var.equal = TRUE)
t.test(mut_mono$circularity,mut_w_mut$circularity,var.equal = TRUE)
t.test(mut_w_AS16$circularity,mut_w_S26$circularity,var.equal = TRUE)
t.test(mut_w_AS16$circularity,mut_w_mut$circularity,var.equal = TRUE)
t.test(mut_w_S26$circularity,mut_w_mut$circularity,var.equal = TRUE)
(mean(mut_mono$circularity)/mean(mut_w_AS16$circularity))^-1
(mean(mut_mono$circularity)/mean(mut_w_S26$circularity))^-1
(mean(mut_mono$circularity)/mean(mut_w_mut$circularity))^-1
(mean(mut_w_AS16$circularity)/mean(mut_w_S26$circularity))^-1
(mean(mut_w_AS16$circularity)/mean(mut_w_mut$circularity))^-1
mean(mut_w_S26$circularity)/mean(mut_w_mut$circularity)
|
/R_pyphe_CIRCULARITY.R
|
no_license
|
PardisSarafraz/Bachelor-Thesis-2021
|
R
| false | false | 16,686 |
r
|
## Set working directory to folder with the excel sheet
setwd("C:/Users/Pardis/OneDrive - Danmarks Tekniske Universitet/DTU/Bachelorprojekt/2) AS16 + S26 experiments/Cultures on solid medium/Co-culture picture with bars (day 2)/Final_pyphe_analysis/pyphe_quant")
## Import data from Pyphe .csv file
library("readxl")
m <- as.data.frame(read.csv("day2-pheno-mono-exp--6.png.csv"))
c <- as.data.frame(read.csv("NewProject-Run-2-Plate-001-Expbe-bar-exp--6.png.csv"))
################################# MONOCULTURES #################################
## Extract data for each strain
## Subset of dataframe is extracted and sorted by "column" numbering.
#AS16
AS16_mono<-subset(m, m$column==1|m$column==7|m$column==13|m$column==19)
AS16_mono<-AS16_mono[order(AS16_mono$column),]
#S26
S26_mono<-subset(m, m$column==3|m$column==9|m$column==15|m$column==21)
S26_mono<-S26_mono[order(S26_mono$column),]
#mut
mut_mono<-subset(m, m$column==5|m$column==11|m$column==17|m$column==23)
mut_mono<-mut_mono[order(mut_mono$column),]
################################# CO-CULTURES #################################
## Using "row" and "column" information from dataframe to extract data
## AS16 with S26
AS16_w_S26<-subset(c,c$row==2 & c$column==3|c$row==2 & c$column==9|c$row==2 & c$column==15|c$row==3 & c$column==14|c$row==3 & c$column==20
|c$row==5 & c$column==3|c$row==5 & c$column==9|c$row==5 & c$column==15|c$row==6 & c$column==8|c$row==6 & c$column==20|c$row==8 & c$column==9|c$row==8 & c$column==3)
## AS16 with mut
AS16_w_mut<-subset(c,c$row==2 & c$column==2|c$row==2 & c$column==8|c$row==2 & c$column==14|c$row==2 & c$column==20|c$row==3 & c$column==5|c$row==3 & c$column==11|c$row==3 & c$column==17|c$row==5 & c$column==2|c$row==5 & c$column==20|c$row==6 & c$column==5|c$row==6 & c$column==11|c$row==6 & c$column==17
|c$row==6 & c$column==23|c$row==8 & c$column==2|c$row==8 & c$column==8|c$row==8 & c$column==14|c$row==8 & c$column==20)
## AS16 with AS16
AS16_w_AS16 <-subset(c,c$row==1 & c$column==7|c$row==1 & c$column==8|c$row==1 & c$column==13|c$row==1 & c$column==14|c$row==1 & c$column==19|c$row==1 & c$column==20
|c$row==4 & c$column==2|c$row==4 & c$column==7|c$row==4 & c$column==8|c$row==4 & c$column==13|c$row==4 & c$column==14
|c$row==4 & c$column==19|c$row==4 & c$column==20|c$row==7 & c$column==2|c$row==7 & c$column==7|c$row==7 & c$column==8|c$row==7 & c$column==13|c$row==7 & c$column==14)
## S26 with AS16
S26_w_AS16 <- subset(c,c$row==2 & c$column==4|c$row==2 & c$column==10|c$row==2 & c$column==16|c$row==2 & c$column==22|c$row==3 & c$column==1|c$row==3 & c$column==7|c$row==3 & c$column==13|c$row==3 & c$column==19|c$row==5 & c$column==4
|c$row==5 & c$column==10|c$row==5 & c$column==16|c$row==6 & c$column==1|c$row==6 & c$column==7|c$row==6 & c$column==19|c$row==8 & c$column==10|c$row==8 & c$column==4)
## S26 with mut
S26_w_mut <- subset(c,c$row==2 & c$column==5|c$row==2 & c$column==11|c$row==2 & c$column==17|c$row==2 & c$column==23|c$row==3 & c$column==4|c$row==3 & c$column==10
|c$row==3 & c$column==16|c$row==3 & c$column==22|c$row==5 & c$column==5|c$row==5 & c$column==11|c$row==5 & c$column==17|c$row==5 & c$column==17|c$row==5 & c$column==23
|c$row==6 & c$column==4|c$row==6 & c$column==10|c$row==6 & c$column==16|c$row==6 & c$column==16|c$row==6 & c$column==22|c$row==8 & c$column==5|c$row==8 & c$column==11|c$row==8 & c$column==17|c$row==8 & c$column==23)
## S26 with S26
S26_w_S26 <- subset(c,c$row==1 & c$column==3|c$row==1 & c$column==4||c$row==1 & c$column==9|c$row==1 & c$column==10|c$row==1 & c$column==15
|c$row==1 & c$column==16|c$row==1 & c$column==21|c$row==1 & c$column==22|c$row==4 & c$column==3|c$row==4 & c$column==4
|c$row==4 & c$column==9|c$row==4 & c$column==10|c$row==4 & c$column==15|c$row==4 & c$column==16|c$row==4 & c$column==21
|c$row==4 & c$column==22|c$row==7 & c$column==3|c$row==7 & c$column==4|c$row==7 & c$column==9|c$row==7 & c$column==10|c$row==7 & c$column==15
|c$row==7 & c$column==16|c$row==7 & c$column==21|c$row==7 & c$column==22)
## Mut with AS16
mut_w_AS16 <- subset(c,c$row==2 & c$column==1|c$row==2 & c$column==7|c$row==2 & c$column==13|c$row==2 & c$column==19|c$row==3 & c$column==6|c$row==3 & c$column==12|c$row==3 & c$column==18|c$row==5 & c$column==1|c$row==5 & c$column==19)
## Mut with S26
mut_w_S26 <- subset(c,c$row==2 & c$column==6|c$row==2 & c$column==12|c$row==2 & c$column==18|c$row==2 & c$column==24||c$row==3 & c$column==3|c$row==3 & c$column==9
|c$row==3 & c$column==15|c$row==3 & c$column==21|c$row==5 & c$column==6|c$row==5 & c$column==12|c$row==5 & c$column==18|c$row==5 & c$column==24|c$row==6 & c$column==3
|c$row==6 & c$column==9|c$row==6 & c$column==15|c$row==6 & c$column==21|c$row==8 & c$column==6|c$row==8 & c$column==12|c$row==8 & c$column==18|c$row==8 & c$column==24)
## Mut with mut
mut_w_mut <- subset(c,c$row==1 & c$column==5|c$row==1 & c$column==6|c$row==1 & c$column==11|c$row==1 & c$column==12|c$row==1 & c$column==17|c$row==1 & c$column==18|c$row==1 & c$column==23|c$row==1 & c$column==24
|c$row==4 & c$column==24|c$row==4 & c$column==24|c$row==7 & c$column==5|c$row==7 & c$column==6|c$row==7 & c$column==17|c$row==7 & c$column==18|c$row==7 & c$column==23|c$row==7 & c$column==27)
############################ Calculations ############################
####### Circularity
#AS16
mean(AS16_mono$circularity)
sd(AS16_mono$circularity)
mean(AS16_w_AS16$circularity)
sd(AS16_w_AS16$circularity)
mean(AS16_w_S26$circularity)
sd(AS16_w_S26$circularity)
mean(AS16_w_mut$circularity)
sd(AS16_w_mut$circularity)
#S26
mean(S26_mono$circularity)
sd(S26_mono$circularity)
mean(S26_w_AS16$circularity)
sd(S26_w_AS16$circularity)
mean(S26_w_S26$circularity)
sd(S26_w_S26$circularity)
mean(S26_w_mut$circularity)
sd(S26_w_mut$circularity)
#Mut
mean(mut_mono$circularity)
sd(mut_mono$circularity)
mean(mut_w_AS16$circularity)
sd(mut_w_AS16$circularity)
mean(mut_w_S26$circularity)
sd(mut_w_S26$circularity)
mean(mut_w_mut$circularity)
sd(mut_w_mut$circularity)
############################### one-way ANOVA ##################################
#### AS16 in different conditions
# Reorganize data in one vector.
AS16_circularity<-c(AS16_mono$circularity,AS16_w_AS16$circularity,AS16_w_S26$circularity,AS16_w_mut$circularity)
# Assign the strain it's co-cultured with to the corresponding element in data vector.
treatm_AS16 <-factor(rep(c("Monoculture","With AS16","With S26", "With S26 mut"),times=c(nrow(AS16_mono),nrow(AS16_w_AS16),nrow(AS16_w_S26),nrow(AS16_w_mut))))
# Check if data is normal distributed
shapiro.test(AS16_mono$circularity)
shapiro.test(AS16_w_AS16$circularity)
shapiro.test(AS16_w_S26$circularity)
shapiro.test(AS16_w_mut$circularity)
# Visualize with qq plots:
par(mfrow=c(2,2))
qqnorm(AS16_mono$circularity,main= "AS16 in monoculture")
qqline(AS16_mono$circularity)
qqnorm(AS16_w_AS16$circularity, main= "AS16 with AS16")
qqline(AS16_w_AS16$circularity)
qqnorm(AS16_w_S26$circularity,main="AS16 with S26 WT")
qqline(AS16_w_S26$circularity)
qqnorm(AS16_w_mut$circularity,main="AS16 with S26 mut")
qqline(AS16_w_mut$circularity)
# Use Bartlettes test for testing equal variance
bartlett.test(AS16_circularity,treatm_AS16)
# Put data in data frame
data_as16<-data.frame(name=treatm_AS16,value=AS16_circularity)
library(ggplot2)
library(ggeasy)
my_xlab <- paste(levels(data_as16$name),"\n(N=",table(data_as16$name),")",sep="")
ggplot(data_as16, aes(x=name, y=value)) +
geom_boxplot(color="black",fill=c("aquamarine4","darkseagreen4","darkseagreen3","darkseagreen1"))+
geom_jitter(shape=16, position=position_jitter(0.2))+
ylab("Circularity")+
xlab(" ")+
geom_point() +
labs(title="Circularity of AS16 spots")+
ggeasy::easy_center_title()+
scale_x_discrete(labels=my_xlab)+
theme(plot.title = element_text(size = 20),axis.title = element_text(size = 16))+
theme(axis.text = element_text(size=14))+
stat_summary(fun.y=mean, geom="point", shape=20, size=5, color="red", fill="red")+
ylim(0.1,1)
# one-way anova
anova(lm(AS16_circularity~treatm_AS16))
# Post hoc analysis
# Calculate M
4*(4-1)/2
#corrected alpha
0.05/6
#H0: the means are equal
#df=n-k
n<-length(AS16_circularity)
k<-4
MSE<- 0.019943
tobs<-(mean(AS16_mono$circularity)-mean(AS16_w_AS16$circularity))/(sqrt(MSE*(1/length(AS16_mono$circularity)+1/length(AS16_w_AS16$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_mono$circularity)-mean(AS16_w_S26$circularity))/(sqrt(MSE*(1/length(AS16_mono$circularity)+1/length(AS16_w_S26$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_mono$circularity)-mean(AS16_w_mut$circularity))/(sqrt(MSE*(1/length(AS16_mono$circularity)+1/length(AS16_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_w_AS16$circularity)-mean(AS16_w_S26$circularity))/(sqrt(MSE*(1/length(AS16_w_AS16$circularity)+1/length(AS16_w_S26$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_w_AS16$circularity)-mean(AS16_w_mut$circularity))/(sqrt(MSE*(1/length(AS16_w_AS16$circularity)+1/length(AS16_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(AS16_w_S26$circularity)-mean(AS16_w_mut$circularity))/(sqrt(MSE*(1/length(AS16_w_S26$circularity)+1/length(AS16_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
## Fold change
mean(AS16_mono$circularity)/mean(AS16_w_AS16$circularity)
mean(AS16_mono$circularity)/mean(AS16_w_S26$circularity)
mean(AS16_mono$circularity)/mean(AS16_w_mut$circularity)
mean(AS16_w_AS16$circularity)/mean(AS16_w_S26$circularity)
(mean(AS16_w_AS16$circularity)/mean(AS16_w_mut$circularity))^-1
(mean(AS16_w_S26$circularity)/mean(AS16_w_mut$circularity))^-1
############# S26 in different conditions
# Reorganize the data into one vector.
S26_circularity<-c(S26_mono$circularity,S26_w_AS16$circularity,S26_w_S26$circularity,S26_w_mut$circularity)
# Assign the strain it is co-cultured with to the corresponding elements of the data vector.
treatm_S26 <-factor(rep(c("Monoculture","With AS16","With S26", "With S26 mut"),times=c(nrow(S26_mono),nrow(S26_w_AS16),nrow(S26_w_S26),nrow(S26_w_mut))))
# Check whether the data are normally distributed
shapiro.test(S26_mono$circularity)
shapiro.test(S26_w_AS16$circularity)
shapiro.test(S26_w_S26$circularity)
shapiro.test(S26_w_mut$circularity)
# Visualize with qq plots:
par(mfrow=c(2,2))
qqnorm(S26_mono$circularity,main= "S26 in monoculture")
qqline(S26_mono$circularity)
qqnorm(S26_w_AS16$circularity, main= "S26 with AS16")
qqline(S26_w_AS16$circularity)
qqnorm(S26_w_S26$circularity,main="S26 with S26 WT")
qqline(S26_w_S26$circularity)
qqnorm(S26_w_mut$circularity,main="S26 with S26 mut")
qqline(S26_w_mut$circularity)
# Use Bartlett's test to check for equal variances
bartlett.test(S26_circularity,treatm_S26)
# Put data in data frame
data_S26<-data.frame(name=treatm_S26,value=S26_circularity)
library(ggplot2)
library(ggeasy)
my_xlab <- paste(levels(data_S26$name),"\n(N=",table(data_S26$name),")",sep="")
# Boxplot with jittered raw points; the red dot marks each group mean
ggplot(data_S26, aes(x=name, y=value)) +
geom_boxplot(color="black",fill=c("darkorange4","darkorange3","darkorange","darkgoldenrod1"))+
geom_jitter(shape=16, position=position_jitter(0.2))+
ylab("Circularity")+
xlab(" ")+
labs(title="Circularity of S26 WT spots")+
ggeasy::easy_center_title()+
scale_x_discrete(labels=my_xlab)+
theme(plot.title = element_text(size = 20),axis.title = element_text(size = 16))+
theme(axis.text = element_text(size=14))+
stat_summary(fun=mean, geom="point", shape=20, size=5, color="red", fill="red")+
ylim(0.1,1)
# One-way ANOVA
anova(lm(S26_circularity~treatm_S26))
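# No post hoc tests are run for S26; if the ANOVA above is significant, the
# same Bonferroni-corrected approach as for AS16 applies (a sketch):
# pairwise.t.test(S26_circularity, treatm_S26, p.adjust.method = "bonferroni", pool.sd = TRUE)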
## Fold change
(mean(S26_mono$circularity)/mean(S26_w_AS16$circularity))^-1
(mean(S26_mono$circularity)/mean(S26_w_S26$circularity))^-1
mean(S26_mono$circularity)/mean(S26_w_mut$circularity)
mean(S26_w_AS16$circularity)/mean(S26_w_S26$circularity)
mean(S26_w_AS16$circularity)/mean(S26_w_mut$circularity)
mean(S26_w_S26$circularity)/mean(S26_w_mut$circularity)
############# Mutant in different conditions
# Reorganize the data into one vector.
mut_circularity<-c(mut_mono$circularity,mut_w_AS16$circularity,mut_w_S26$circularity,mut_w_mut$circularity)
# Assign the strain it is co-cultured with to the corresponding elements of the data vector.
treatm_mut <-factor(rep(c("Monoculture","With AS16","With S26", "With S26 mut"),times=c(nrow(mut_mono),nrow(mut_w_AS16),nrow(mut_w_S26),nrow(mut_w_mut))))
# Check whether the data are normally distributed
shapiro.test(mut_mono$circularity)
shapiro.test(mut_w_AS16$circularity)
shapiro.test(mut_w_S26$circularity)
shapiro.test(mut_w_mut$circularity)
# Visualize with qq plots:
par(mfrow=c(2,2))
qqnorm(mut_mono$circularity,main= expression(paste("S26",Delta, ~"in monoculture")))
qqline(mut_mono$circularity)
qqnorm(mut_w_AS16$circularity, main= expression(paste("S26",Delta,~"with AS16")))
qqline(mut_w_AS16$circularity)
qqnorm(mut_w_S26$circularity,main=expression(paste("S26",Delta,~"with S26 WT")))
qqline(mut_w_S26$circularity)
qqnorm(mut_w_mut$circularity,main=expression(paste("S26",Delta, ~"with S26",Delta)))
qqline(mut_w_mut$circularity)
# Use Bartlett's test for testing equal variance
bartlett.test(mut_circularity,treatm_mut)
# Put data in data frame
data_mut<-data.frame(name=treatm_mut,value=mut_circularity)
library(ggplot2)
library(ggeasy)
my_xlab <- paste(levels(data_mut$name),"\n(N=",table(data_mut$name),")",sep="")
# Boxplot with jittered raw points; the red dot marks each group mean
ggplot(data_mut, aes(x=name, y=value)) +
geom_boxplot(color="black",fill=c("cornsilk4","cornsilk3","cornsilk2","cornsilk"))+
geom_jitter(shape=16, position=position_jitter(0.2))+
ylab("Circularity")+
xlab(" ")+
labs(title=expression(paste("Circularity of S26",Delta,~ "spots")))+
ggeasy::easy_center_title()+
scale_x_discrete(labels=my_xlab)+
theme(plot.title = element_text(size = 20),axis.title = element_text(size = 16))+
theme(axis.text = element_text(size=14))+
stat_summary(fun=mean, geom="point", shape=20, size=5, color="red", fill="red")+
ylim(0.1,1)
# One-way ANOVA
anova(lm(mut_circularity~treatm_mut))
# Post hoc analysis (pairwise comparisons with Bonferroni correction)
# Number of pairwise comparisons: M = k*(k-1)/2
4*(4-1)/2
# Bonferroni-corrected alpha: 0.05/M
0.05/6
# H0: the means are equal; df = n - k
n<-length(mut_circularity)
k<-4
MSE <-0.018193
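# As for AS16, MSE could be read from the ANOVA table rather than hard-coded
# (a sketch; should match the 0.018193 above):
# aov_tab_mut <- anova(lm(mut_circularity ~ treatm_mut))
# MSE <- aov_tab_mut["Residuals", "Mean Sq"]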
tobs<-(mean(mut_mono$circularity)-mean(mut_w_AS16$circularity))/(sqrt(MSE*(1/length(mut_mono$circularity)+1/length(mut_w_AS16$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_mono$circularity)-mean(mut_w_S26$circularity))/(sqrt(MSE*(1/length(mut_mono$circularity)+1/length(mut_w_S26$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_mono$circularity)-mean(mut_w_mut$circularity))/(sqrt(MSE*(1/length(mut_mono$circularity)+1/length(mut_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_w_AS16$circularity)-mean(mut_w_S26$circularity))/(sqrt(MSE*(1/length(mut_w_AS16$circularity)+1/length(mut_w_S26$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_w_AS16$circularity)-mean(mut_w_mut$circularity))/(sqrt(MSE*(1/length(mut_w_AS16$circularity)+1/length(mut_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
tobs<-(mean(mut_w_S26$circularity)-mean(mut_w_mut$circularity))/(sqrt(MSE*(1/length(mut_w_S26$circularity)+1/length(mut_w_mut$circularity))))
2*(1 - pt(abs(tobs),df=n-k))
n<-length(mut_circularity)
k<-4
# Note: t.test() has no 'df' argument (the df=n-k originally passed here was
# silently ignored via '...'); these tests use per-pair degrees of freedom and
# pooled variance, so they can differ slightly from the MSE-based tests above.
t.test(mut_mono$circularity,mut_w_AS16$circularity,var.equal = TRUE)
t.test(mut_mono$circularity,mut_w_S26$circularity,var.equal = TRUE)
t.test(mut_mono$circularity,mut_w_mut$circularity,var.equal = TRUE)
t.test(mut_w_AS16$circularity,mut_w_S26$circularity,var.equal = TRUE)
t.test(mut_w_AS16$circularity,mut_w_mut$circularity,var.equal = TRUE)
t.test(mut_w_S26$circularity,mut_w_mut$circularity,var.equal = TRUE)
## Fold change
(mean(mut_mono$circularity)/mean(mut_w_AS16$circularity))^-1
(mean(mut_mono$circularity)/mean(mut_w_S26$circularity))^-1
(mean(mut_mono$circularity)/mean(mut_w_mut$circularity))^-1
(mean(mut_w_AS16$circularity)/mean(mut_w_S26$circularity))^-1
(mean(mut_w_AS16$circularity)/mean(mut_w_mut$circularity))^-1
mean(mut_w_S26$circularity)/mean(mut_w_mut$circularity)
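# A compact alternative (a sketch): tabulate the group means once and derive
# any fold change from the resulting matrix instead of typing each ratio.
grp_means <- tapply(mut_circularity, treatm_mut, mean)
outer(grp_means, grp_means, "/")  # matrix of all pairwise mean ratios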
|
## Download and unzip the household power consumption data set
td <- tempdir()
tf <- tempfile(tmpdir=td,fileext=".zip")
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, tf)
dateDownloaded <- date()
powerdata<-unzip(tf)
## Read only the rows of interest; skip/nrows pre-select them, and "?" marks NAs
powerdata2<-read.table(powerdata,skip=66638,nrows=2879,sep=";",na.strings="?",col.names=c("date","time","global_active_power","global_reactive_power","voltage","global_intensity","sub_metering_1","sub_metering_2","sub_metering_3"))
## Combine date and time into a POSIXct timestamp for the x-axis
powerdata2$date<-as.Date(powerdata2$date,format="%d/%m/%Y")
powerdata2$datetime<-as.POSIXct(paste(powerdata2$date,powerdata2$time),format="%Y-%m-%d %H:%M:%S")
## Plot the three sub-metering series and add a legend
plot(powerdata2$datetime,powerdata2$sub_metering_1,type="l",xlab="",ylab="Energy sub-metering",ylim=c(0,30))
lines(powerdata2$datetime,powerdata2$sub_metering_2,type="l",col="red")
lines(powerdata2$datetime,powerdata2$sub_metering_3,type="l",col="blue")
legend("topright",xjust=1,y.intersp=.5,legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"), lty=1,cex=0.5)
## Copy the screen plot to a PNG file
dev.copy(png,file="plot3.png",width = 480, height = 480, units = "px")
dev.off()
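## Alternative (a sketch): render straight to the png device instead of copying
## from the screen, which guarantees the 480x480 output size:
## png("plot3.png", width = 480, height = 480)
## ...plot(), lines(), legend() calls as above...
## dev.off()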
|
/plot3.r
|
no_license
|
bendkasia/ExData_Plotting1
|
R
| false | false | 1,099 |
r
|
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Which is the Good Preictor of Fertility"),
sidebarLayout(
sidebarPanel(
helpText("Have a look at swiss data using '?swiss' on Rstudio Console"),
helpText("Here we are looking for the best predictor for Fertility using the linear regression model"),
helpText("Below you can choose a predictor to see the respective Indicator plot and Root Mean Squeared error"),
radioButtons(inputId="Indi",
label = "Indicators",
choices = c("Agriculture","Catholic","Infant.Mortality"),
inline = FALSE
)
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("plot"),
h3("The RMSE is:"),
textOutput("pred1")
)
)
))
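# Note: this ui.R expects a matching server.R that renders output$plot and
# output$pred1 from input$Indi (not shown in this file).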
|
/ui.R
|
no_license
|
nuttyboy/DevelopingDataProducts
|
R
| false | false | 924 |
r
|
data1 <- data.frame()
data2 <- data.frame()
# data1 = read.csv("CSVdata_journal/example2.csv", sep=",", header=TRUE)
# data2 = read.csv("CSVdata_journal/example1.csv", sep=",", header=TRUE)
data1 = read.csv("CSVdata_journal/Qt2.csv", sep=",", header=TRUE)
data2 = read.csv("CSVdata_journal/expertiseLevel_qt.csv", sep=",", header=TRUE)
# Merge the two data frames on ReviewId and ReviewerId
output <- merge(x = data1, y = data2, by = c("ReviewId", "ReviewerId"), all.x = TRUE)
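# Note: all.x = TRUE makes this a left join, so reviews without an expertise
# record are kept (ExpertiseLevel is NA until converted below); duplicated
# keys in data2 would duplicate matching rows from data1.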
# Convert missing expertise levels (NA) to 0
output$ExpertiseLevel <- ifelse(is.na(output$ExpertiseLevel),0, output$ExpertiseLevel)
write.csv(output, "CSVdata_journal/merged_qt2.csv", row.names=FALSE, quote=FALSE)
|
/R_program/mergeCSV.R
|
no_license
|
Ikuyadeu/comment_research
|
R
| false | false | 629 |
r
|
##
# Infant_Age_Weight_Dist.R
#
# Learning to use R Syntax.
# Examples referenced from R IN ACTION by Robert I. Kabacoff
#
# Distribution of 10 infant weights and their relationship to age.
# Initialize Age and Weight Vectors
age <- c(1, 3, 5, 2, 11, 9, 3, 9, 12, 3)
weight <- c(4.4, 5.3, 7.2, 5.2, 8.5, 7.3, 6.0, 10.4, 10.2, 6.1)
# Calculate Mean, Standard Deviation, Correlation and Plot.
mean(weight)
sd(weight)
cor(age, weight)
plot(age, weight)
# q() exits R, so comment this out when running interactively.
q()
|
/Practice/Infant_Age_Weight_Dist.R
|
no_license
|
Ehizeme/DataAnalysis
|
R
| false | false | 522 |
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/fortify_cluster.R
\name{fortify.kmeans}
\alias{fortify.kmeans}
\title{Convert cluster instances to \code{data.frame}}
\usage{
\method{fortify}{kmeans}(model, data = NULL, ...)
}
\arguments{
\item{model}{Clustered instance}
\item{data}{original dataset, if needed}
\item{...}{other arguments passed to methods}
}
\value{
data.frame
}
\description{
Convert cluster instances to \code{data.frame}
}
\examples{
fortify(stats::kmeans(iris[-5], 3))
fortify(stats::kmeans(iris[-5], 3), data = iris)
fortify(cluster::clara(iris[-5], 3))
fortify(cluster::fanny(iris[-5], 3))
fortify(cluster::pam(iris[-5], 3), data = iris)
}
|
/man/fortify.kmeans.Rd
|
no_license
|
LionelGeo/ggfortify
|
R
| false | false | 705 |
rd
|
corr <- function(directory, threshold = 0) {
  corr_data <- numeric(0)
  for (i in 1:332) {
    # Build the zero-padded file path, e.g. "<directory>/001.csv"
    file_csv <- paste(directory, sprintf('%03d.csv', i), sep = '/')
    pollution_data <- read.csv(file_csv)
    # Only monitors with at least 'threshold' complete cases contribute
    if (sum(complete.cases(pollution_data)) >= threshold) {
      corr_data <- c(corr_data, cor(pollution_data$nitrate, pollution_data$sulfate, use = 'na.or.complete'))
    }
  }
  corr_data
}
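# Example call (hypothetical directory name; assumes 332 monitor CSVs live there):
# cr <- corr('specdata', threshold = 150)
# summary(cr)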
|
/Week_02/corr.R
|
no_license
|
stevanradanovic/coursera__r_programming
|
R
| false | false | 719 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{xlsx_write_sheet}
\alias{xlsx_write_sheet}
\title{Samantha Rhoads's function to write a data frame as an Excel sheet to an existing Excel file (uses the `XLConnect` package without declaring it as a dependency or referencing it directly)}
\usage{
xlsx_write_sheet(
x,
file,
sheetName,
col.names = T,
row.names = F,
append = T,
overwrite = T,
open_file = F
)
}
\description{
Samantha Rhoads's function to write a data frame as an Excel sheet to an existing Excel file (uses the `XLConnect` package without declaring it as a dependency or referencing it directly)
}
\examples{
\dontrun{
xlsx_write_sheet(x, file, sheetName, col.names=T, row.names=F, append=T, overwrite=T, open_file=F)
}
}
|
/man/xlsx_write_sheet.Rd
|
no_license
|
srhoads/srhoads
|
R
| false | true | 782 |
rd
|
## Read data from file
setAs("character","myDate", function(from) as.Date(from, format="%d/%m/%Y"))
powercons <- read.csv("./data/household_power_consumption.txt", sep = ";", stringsAsFactors=F,
na.strings = "?",
colClasses = c("myDate", "character", rep("numeric",7)))
## Select data only for dates 01/02/2007 and 02/02/2007
powercons2 <- subset(powercons, as.character(powercons$Date) %in% c("2007-02-01", "2007-02-02"))
## Add a new column combining Date and Time
powercons2$DateTime <- strptime(paste(as.character(powercons2$Date), powercons2$Time),"%Y-%m-%d %H:%M:%S")
## Create plot
with(powercons2, plot(DateTime, Sub_metering_1, type="l", cex.lab = 0.75, cex.axis = 0.75,
xlab = NA, ylab = "Energy sub metering", col= "black"))
lines(powercons2$DateTime,powercons2$Sub_metering_2,col="red")
lines(powercons2$DateTime,powercons2$Sub_metering_3,col="blue")
legend("topright", cex = 0.75,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1, 1, 1), pch=c(NA, NA, NA), col=c("black", "red", "blue"))
## Copy plot to a PNG file
dev.copy(png, file = "./ExData_Plotting1/plot3.png")
dev.off()
|
/plot3.R
|
no_license
|
mariaruiz124/ExData_Plotting1
|
R
| false | false | 1,203 |
r
|
## These functions work together to handle a special "matrix" ## (similar to a
## new class that has a inverted matrix attribute). makeCacheMatrix creates the
## matrix with its functions (get,set,getinv and setinv) and cacheSolve is used
## for calculate and save in the cache the inverse of the matrix.
makeCacheMatrix <- function(mat = matrix()) {
## This function creates a special "matrix" object that can cache its inverse.
inv <- NULL
set <- function(new_mat) {
mat <<- new_mat
inv <<- NULL
}
get <- function() mat
setinv <- function(new_inv) inv <<- new_inv
getinv <- function() inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
cacheSolve <- function(x, ...) {
## This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix. If the inverse has already been calculated (and the
## matrix has not changed), then the cachesolve should retrieve the inverse
## from the cache.
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setinv(inv)
inv
}
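## Example usage (a small sketch with a hypothetical 2x2 matrix):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)  # computes the inverse and caches it
## cacheSolve(m)  # prints "getting cached data" and returns the cached inverse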
|
/cachematrix.R
|
no_license
|
ferchaure/ProgrammingAssignment2
|
R
| false | false | 1,152 |
r
|
testlist <- list(Beta = 0, CVLinf = -2.43245449513939e-196, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827884-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 487 |
r
|