content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text
---|---|---|---|---|---|---|---|---|---
large_string (lengths 0-6.46M) | large_string (lengths 3-331) | large_string (2 classes) | large_string (lengths 5-125) | large_string (1 class) | bool (2 classes) | bool (2 classes) | int64 (4-6.46M) | large_string (75 classes) | string (lengths 0-6.46M)
corr <- function(directory, threshold=0){
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'threshold' is a numeric vector of length 1 indicating the
  ## number of completely observed observations (on all
  ## variables) required to compute the correlation between
  ## nitrate and sulfate; the default is 0
  ## Return a numeric vector of correlations
  ## Compute the list of complete cases (use the supplied directory
  ## rather than a hard-coded "specdata")
  df <- complete(directory)
  ind <- df$nobs > threshold
  if (sum(ind) == 0){
    return(numeric(0))
  }
  id <- df$id[ind]
  comb_sulfate <- numeric(0)
  comb_nitrate <- numeric(0)
  corl <- numeric(0)
  for (i in seq_along(id)){
    filename <- paste(directory, "/", sprintf("%03d", id[i]), ".csv", sep="")
    data <- read.csv(filename, header=TRUE)
    ## rows where both sulfate (column 2) and nitrate (column 3) are observed
    vals <- !is.na(data[,2]) & !is.na(data[,3])
    comb_sulfate <- c(comb_sulfate, data$sulfate[vals])
    comb_nitrate <- c(comb_nitrate, data$nitrate[vals])
    corl <- c(corl, cor(data$sulfate[vals], data$nitrate[vals]))
  }
  corl
}
|
/R_programming/assignment1/corr.R
|
no_license
|
afteriwoof/Coursera_DataScienceSpecialization
|
R
| false | false | 1,061 |
r
|
|
# This script prepares data for model training and hyperparameter tuning.
# Load libraries ----------------------------------------------------------
library(here)
library(tidyverse)
library(caret)
# Load training data ------------------------------------------------------
spi_names = readRDS(here("output/spi_names.RDS"))
train_data_pp = readRDS(here("output/machine_learning/training/train_data_pp.RDS"))
# Split training data into 3 subsets --------------------------------------
# convert diabetes to a factor
train_data_pp = train_data_pp %>%
mutate(diabetes = as.factor(diabetes))
train_data_split = list(train_spi_5 = train_data_pp %>% select(diabetes, spi_names$spi_5),
train_spi_27 = train_data_pp %>% select(diabetes, spi_names$spi_27),
train_spi_135 = train_data_pp %>% select(diabetes, spi_names$spi_135))
# Specify resampling parameters -------------------------------------------
# 10-fold cross-validation with SMOTE subsampling (the repeats option below is commented out)
train_control = trainControl(method = "cv",
number = 10, # number of folds = 10
# repeats = 10, # cross-validation is repeated 10 times
sampling = "smote", # use for resolving class imbalances
classProbs = TRUE,
savePredictions = TRUE)
# Specify tuning grids ----------------------------------------------------
# Keeping this code for reference, but we are going to use default tuning grids instead.
# multinom_grid = expand.grid(decay = seq(from = 0, to = 0.5, by = .1))
#
# knn_grid = expand.grid(k = seq(from = 1, to = 5, by = 1))
#
# nnet_grid = expand.grid(size = seq(from = 1, to = 10, by = 1),
# decay = seq(from = 0.1, to = 0.5, by = 0.1))
# Specify additional arguments --------------------------------------------
multinom_args = NULL
knn_args = NULL
nnet_args = list(MaxNWts = as.character(10000),
maxit = as.character(10000))
svmRadial_args = NULL
lda_args = NULL
rf_args = NULL
rpart2_args = NULL
rpart_args = NULL
# Create master df --------------------------------------------------------
# list of ML algorithms to run
model_list = list("multinom", # penalized multinomial logistic regression
"knn", # k-nearest neighbors
"nnet", # neural network
"svmRadial", # support vector machine with radial basis function kernel
"lda", # linear discriminant analysis
"rf", # random forest
"rpart2", # decision tree classifier
"rpart" # inlcuding because of errors with rpart2
)
# list of additional arguments (these will be unique to each ML algorithm)
add_args_list = map(model_list, ~get(paste0(.x, "_args")))
# create master df
train_master_df = data.frame(ml_model = I(model_list), # use I() to use lists "as is"
add_args = I(add_args_list))
train_master_df = train_master_df[rep(1:nrow(train_master_df), times = length(train_data_split)),]
spi_dataset_names = c("spi_5", "spi_27", "spi_135")
train_master_df = train_master_df %>%
mutate(spi_scoring = rep(spi_dataset_names, each = nrow(.)/length(spi_dataset_names))) %>%
mutate(train_data = rep(train_data_split, each = nrow(.)/length(spi_dataset_names)))
# Save training info ------------------------------------------------------
saveRDS(train_master_df, file = here("output/machine_learning/training/train_master_df.RDS"))
saveRDS(train_control, file = here("output/machine_learning/training/train_control.RDS"))
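# A hedged sketch (not part of this script; the fitting step lives elsewhere in
# the project): each row of train_master_df could be consumed like this, pairing
# one algorithm, its extra arguments, and one SPI training subset per caret::train call.
if (FALSE) {
  fits <- purrr::pmap(train_master_df,
                      function(ml_model, add_args, spi_scoring, train_data) {
                        do.call(caret::train,
                                c(list(diabetes ~ ., data = train_data,
                                       method = ml_model, trControl = train_control),
                                  add_args))
                      })
}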
|
/scripts/4.1_prep_for_training.R
|
permissive
|
brendanhcullen/personality-diabetes
|
R
| false | false | 3,679 |
r
|
|
library(DMwR)
features<-c("e3.genes","sub.genes","RCT","RCN","RCF","PCT",
"CNR.CXNR","CNR.CXNP","CCR.PPI","CCP.PPI","CNR.PPI",
"WCR","WCP","WCRP","WCPP","WCRS","WCRPS","WCPS","WCPPS")
# 'tumors' is assumed to be defined earlier in the session (a character vector of tumor-type folder names)
for(tumor in tumors){
pairs.r<-read.delim(file=paste0("E:/project/ubq prediction/R space/R space/tumors/",tumor,"/random pairs v2-TP.txt"),sep = '\t')
pairs<-read.delim(file=paste0("E:/project/ubq prediction/R space/R space/tumors/",tumor,"/pairs v2-TP.txt"),sep = '\t')
ppis.other.r<-read.delim(file=paste0("E:/project/ubq prediction/R space/R space/tumors/",tumor,"/random ppis v2-TP.txt"),sep = '\t')
pairs.in<-read.delim(file="E:/project/ubq prediction/R space/R space/tumors/BRCA/random indirect pairs v2-TP.txt",sep='\t')
fbxl.pairs<-read.delim(file=paste0("E:/project/ubq prediction/R space/R space/tumors/",tumor,"/fbxl pairs-v2-TPWithIn.txt"),sep = '\t')
pairs<-pairs[,features]
pairs.r<-pairs.r[,features]
ppis.other.r<-ppis.other.r[,features]
pairs.in<-pairs.in[,features]
fbxl.pairs<-fbxl.pairs[,features]
pairs.full<-pairs[-manyNAs(pairs,0.001),]
pairs.r.full<-pairs.r[-manyNAs(pairs.r,0.001),]
ppis.full<-ppis.other.r[-manyNAs(ppis.other.r,0.001),]
pairs.in.full<-pairs.in[-manyNAs(pairs.in,0.001),]
# split off the NA-heavy rows before filtering, so they can be labelled and re-attached below
fbxl.pairs.na<-fbxl.pairs[manyNAs(fbxl.pairs,0.2),]
fbxl.pairs<-fbxl.pairs[-manyNAs(fbxl.pairs,0.3),]
all.pairs.full<-rbind(pairs.full[,features],pairs.r.full[,features],ppis.full[,features])
save(all.pairs.full,file="D:/all.pairs.full.RData")
#fbxl.pairs.na[,features] <- knnImputation(fbxl.pairs.na[,features], k = 10, distData = all.pairs.full[,features])
fbxl.pairs[,features]<-knnImputation(fbxl.pairs[,features],k = 10,distData = all.pairs.full[,features])
fbxl.pairs$cate<-"Less"
fbxl.pairs.na$cate<-"More"
fbxl.pairs<-rbind(fbxl.pairs,fbxl.pairs.na)
#write.table(fbxl.pairs.na,file=paste0("E:/project/ubq prediction/R space/R space/tumors/",tumor,"/fbxl pairs na-fill.txt"),sep = '\t')
write.table(fbxl.pairs,file=paste0("E:/project/ubq prediction/R space/R space/tumors/",tumor,"/fbxl pairs-fill v1-TPWithIn.txt"),sep = '\t')
}
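# A self-contained toy sketch (not part of the pipeline above) of the
# DMwR::knnImputation() call used in the loop: fill NAs in a small query set
# from its 10 nearest neighbours in a complete reference set.
if (FALSE) {
  set.seed(1)
  ref <- data.frame(a = rnorm(50), b = rnorm(50), c = rnorm(50)) # complete reference rows
  qry <- ref[1:5, ]
  qry$b[c(2, 4)] <- NA                                           # introduce missing values
  filled <- knnImputation(qry, k = 10, distData = ref)
  summary(filled)
}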
|
/missingValuesProcessing.R
|
no_license
|
diChen310/E3-substrate-interaction-prediction
|
R
| false | false | 2,180 |
r
|
|
##takes the output of the createImageList and creates an object with
##the template and the mask(s)
##for 3D data saves a vector and for 4D data saves a matrix
##Brian Caffo July 2012
##note right now it only supports the mask being a 0,1 image or vector of 0,1 images#
##imageList is the output of createImageList
##maskFile character string pointing to the mask file.
##rdaDIR is where the rda is written to
##keepImage creates a copy of the image as an RDA file
readSubjectImagingData <- function(imageList,
rdaDIR,
maskFile = NULL,
keepImage = FALSE,
verbose = TRUE,
overwrite = FALSE){
if (is.null(rdaDIR)) stop("rdaDir must be specified")
if (!file.exists(rdaDIR)) {
if (verbose) print("Making rdaDIR")
dir.create(path = rdaDIR)
}
else if (length(list.files(rdaDIR)) > 0 & verbose) {
print("Existing files in rdaDIR")
}
n <- length(imageList)
##check the imageIDs which must be there
imageIDs <- sapply(imageList, function(x) x$imageID)
##imageIDs have to be unique
if (anyDuplicated(imageIDs)) stop("Duplicate imageIDs")
if (!is.null(maskFile)){
if (!is.character(maskFile)) maskFile <- as.character(maskFile)
if (!file.exists(maskFile)) stop("Mask file doesn't exist")
else {
maskImg <- getImage(maskFile)
if (length(dim(maskImg)) == 4) maskImg <- maskImg[,,,1]
maskImg <- maskImg == 1
maskDim <- dim(maskImg)
if (length(maskDim) != 3) stop("Mask must be a 3D image")
maskVector <- which(maskImg)
}
}
for (i in 1 : n){
if (verbose) print(i)
img <- getImage(imageList[[i]]$fileFullPath)
if (any(dim(img)[1 : 3] != maskDim))
stop("image and mask dimensions don't match")
##now get the masked data
if (length(dim(img)) == 4){
imgMatrix <- t(apply(img, 4, function(x) x[maskVector]))
}
else if (length(dim(img)) == 3){
imgMatrix <- as.vector(img[maskVector])
}
else stop("Masking only implemented for 3D and 4D images")
subjDIR <- paste(rdaDIR, "/", imageList[[i]]$imageID, "/", sep = "")
if (!file.exists(subjDIR)) dir.create(subjDIR)
imageList[[i]]$subjDIR <- getAbsolutePath(subjDIR)
if (keepImage) {
imageLoc <- paste(subjDIR, "image.rda", sep = "")
if (file.exists(imageLoc) & !overwrite)
stop("Image file already exists and overwrite is FALSE")
else save(img, maskVector, file = imageLoc, compress = TRUE)
imageList[[i]]$rdaImageLoc <- imageLoc
}
if (!is.null(maskFile)) {
maskedImageLoc <- paste(subjDIR, "maskImage.rda", sep = "") # subjDIR already ends in "/"
if (file.exists(maskedImageLoc) & !overwrite) {
stop("Masked image file already exists and overwrite is FALSE")
}
else save(imgMatrix, maskVector, file = maskedImageLoc, compress = TRUE)
imageList[[i]]$maskFile <- maskFile
imageList[[i]]$maskLength <- length(maskVector)
imageList[[i]]$rdaMaskedImageLoc <- maskedImageLoc
}
}
imageListFile <- paste(rdaDIR, "/imageList.rda", sep = "")
attributes(imageList)$rdaDIR <- rdaDIR
attributes(imageList)$imageListFile <- imageListFile
save(imageList, file = imageListFile)
return(imageList)
}
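## A small self-contained illustration (not from the package) of the masking
## scheme used above: which() on a logical 3D mask yields linear voxel indices,
## and img[maskVector] pulls those voxels out of a 3D image (or out of each
## volume of a 4D image).
if (FALSE) {
  img3d <- array(rnorm(4 * 4 * 3), dim = c(4, 4, 3))
  mask <- array(FALSE, dim = c(4, 4, 3)); mask[2:3, 2:3, 2] <- TRUE
  maskVector <- which(mask)                           # linear indices of in-mask voxels
  v <- img3d[maskVector]                              # 3D case: a plain vector
  img4d <- array(rnorm(4 * 4 * 3 * 5), dim = c(4, 4, 3, 5))
  m <- t(apply(img4d, 4, function(x) x[maskVector]))  # 4D case: volume-by-voxel matrix
  dim(m)                                              # 5 x 4
}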
|
/B_analysts_sources_github/bcaffo/brisk/readSubjectImagingData.R
|
no_license
|
Irbis3/crantasticScrapper
|
R
| false | false | 3,386 |
r
|
|
testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.81571422914722e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613126280-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 226 |
r
|
|
case <-function(integer,rows,cols){
  i <- integer #linear (column-major) index of the lattice site
  if(i==1){#(1,1) first element first column
    return (1)
  }
  if(i==rows){#(rows,1) last element first column
    return (2)
  }
  if(i==(cols-1)*rows+1){#(1,cols) first element last column
    return (3)
  }
  if(i==(rows*cols)){#(rows,cols) last element last column
    return (4)
  }
  if(i<rows+1){#(:,1) first column
    return (5)
  }
  if(i>(cols-1)*rows){#(:,cols) last column
    return (6)
  }
  if((i-1)%%rows==0){#(1,:) first row
    return (7)
  }
  if(i%%rows==0){#(rows,:) last row
    return (8)
  }
  return (9) #interior node
}
#todo
#1 Test the function case
#keep = matrix(0,m,n)
#for (i in 1:(m*n)){
# keep[i]=case(integer = i, rows=m,cols=n)
#}
#input variables
m=6 #rows
n=5 #columns
runs=100
beta=0.87
#isingModelf <- function(m=10,n=6,runs=500,beta=0.5)
#Declare Markov Chain vector
mc=matrix(NA,nrow=m*n,ncol = runs+1) #each column is an observation
#Make a lattice of initial values
u=matrix(0, nrow = m, ncol = n)
#Insert initial values
mc[,1]=as.vector(u)
#TRY TO SOLVE IN SIMPLEST POSSIBLE WAY
r=0
i=0
#random = runif(runs)
a100=0
a090=0
while(r < runs){
r=r+1
i= ifelse(i==n*m, 1, i+1)
val=u[i]
two=c(1,1)*val
three=c(1,1,1)*val
four=c(1,1,1,1)*val
#compute alpha
mycase=case(integer=i,rows=m,cols=n)
temp=switch(mycase,
-2+2*abs(sum(u[c(i+1,i+m)]-two)), #(1,1)
-2+2*abs(sum(u[c(i-1,i+m)]-two)), #(m,1)
-2+2*abs(sum(u[c(i+1,i-m)]-two)), #(1,n)
-2+2*abs(sum(u[c(i-1,i-m)]-two)), #(m,n)
-3+2*abs(sum(u[c(i-1, i+1, i+m)]-three)), #(:,1)
-3+2*abs(sum(u[c(i-1, i+1, i-m)]-three)), #(:,n)
-3+2*abs(sum(u[c(i+1, i-m, i+m)]-three)), #(1,:)
-3+2*abs(sum(u[c(i-1, i-m, i+m)]-three)), #(m,:)
-4+2*abs(sum(u[c(i-1, i+1, i-m, i+m)]-four))) #interior node: four neighbours
accept = exp(beta*temp)
random = runif(1)
u[i]=ifelse(random<=accept,1-val,val)
mc[,r+1]=as.vector(u)
}
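#A hedged follow-up sketch (not in the original script): reshape the last
#Markov-chain column back into the m-by-n lattice to inspect the sampler output.
if (FALSE) {
  final <- matrix(mc[, runs + 1], nrow = m, ncol = n)
  image(t(final), main = paste("Ising lattice after", runs, "updates, beta =", beta))
  mean(mc) #overall fraction of 1-spins across the stored chain
}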
|
/BKproj1og2/proj2/b1v03.R
|
no_license
|
pra1981/Master-Degree-files
|
R
| false | false | 1,887 |
r
|
|
ppmdata=read.csv('/home/beyhan/Downloads/ppm_hourly_stats.csv',header=T,sep=",")
tempdata=read.csv('/home/beyhan/Downloads/temperature_hourly_stats.csv',header=T,sep=",")
# read the data
colnames(ppmdata)=c("customerid","location","year","sublokasyon","fkdevice","group","created","avg")
colnames(tempdata)=c("customerid","location","year","fkdevice","sublokasyon","group","created","avg")
# trim timestamps to 16 characters (drops the trailing offset/seconds)
ppmdata[,"created"]=as.character(ppmdata[,"created"])
ppmdata$created=strtrim(c(ppmdata$created), c(16))
tempdata[,"created"]=as.character(tempdata[,"created"])
tempdata$created=strtrim(c(tempdata$created), c(16))
ppmdata$created <- as.POSIXct(ppmdata$created, format = "%Y-%m-%d %H:%M")
tempdata$created <- as.POSIXct(tempdata$created, format = "%Y-%m-%d %H:%M")
ppmdata=ppmdata[order(ppmdata$created),]
tempdata=tempdata[order(tempdata$created),]
# drop rows where avg is 0
ppmdata=ppmdata[ ! ppmdata$avg %in% c(0), ]
ppmdata <- subset(ppmdata, ppmdata$fkdevice=='15840379')
tempdata=tempdata[ ! tempdata$avg %in% c(0), ]
tempdata <- subset(tempdata, tempdata$fkdevice=='15840379')
total <- merge(ppmdata,tempdata,by=c("created","fkdevice"))
write.csv(ppmdata,"/home/beyhan/pozi/R/ppm_hourly.csv",row.names=F)
write.csv(tempdata,"/home/beyhan/pozi/R/temp_hourly.csv",row.names=F)
write.csv(total,"/home/beyhan/pozi/R/total.csv",row.names=F)
plot(tempdata$created,tempdata$avg,type = "l",xlab="Günler",ylab="Sıcaklık vs CO2",col="red",lwd=4)
par(new=T)
plot(ppmdata$created,ppmdata$avg,type = "l",col="green",xlab="",ylab="",lwd=4)
axis(4)
legend("topright",
legend=c("Sıcaklık","CO2"), # puts text in the legend
lty=c(1,1), # gives the legend appropriate symbols (lines)
lwd=c(2.5,2.5),
col=c("red","green"))
|
/co2_temperature.R
|
no_license
|
beyhangl/SensorAnalysisWithR
|
R
| false | false | 1,724 |
r
|
|
\name{simRER}
\alias{simRER}
\title{
Simulate Rougheye Rockfish Biological Data
}
\description{
Simulate biological data that best characterizes Rougheye Rockfish
(RER, \emph{Sebastes aleutianus}), using parameter estimates from
Table 2 in Orr and Hawkins (2008).
}
\usage{
simRER(Nfish)
}
\arguments{
\item{Nfish}{Number of RER fish to simulate.}
}
\details{
Exploring 35 morphometric and 9 meristic characters,
Orr and Hawkins (2008) provide a discriminant function
(using only 6 morphometrics \eqn{L} and 2 meristics \eqn{N} )
that claims to correctly classify \emph{Sebastes aleutianus} and
\emph{S. melanostictus} 97.8\% of the time (see \code{\link{predictRER}}).
Table 2 in Orr and Hawkins (2008) provides a range of RER
standard lengths (mm) and distributions for morphometrics
and meristics. This function samples from a random uniform distribution
for \eqn{S} and from random normal distributions for model inputs of
\eqn{\lambda} and \eqn{N} -- table below gives \eqn{(\mu , \sigma)}.
\tabular{rllc}{
\eqn{S} \tab = \tab standard fish length measured from tip of snout \tab 63.4--555.2 \cr
\eqn{\lambda_1}{\lambda[1]} \tab = \tab length of dorsal-fin spine 1 \tab (5.8, 0.6) \cr
\eqn{\lambda_2}{\lambda[2]} \tab = \tab snout length \tab (7.5, 0.7) \cr
\eqn{\lambda_3}{\lambda[3]} \tab = \tab length of gill rakers \tab (4.9, 0.6) \cr
\eqn{\lambda_4}{\lambda[4]} \tab = \tab length of pelvic-fin rays \tab (22.1, 1.1) \cr
\eqn{\lambda_5}{\lambda[5]} \tab = \tab length of soft-dorsal-fin base \tab (22.8, 1.2) \cr
\eqn{\lambda_6}{\lambda[6]} \tab = \tab preanal length \tab (71.8, 2.3) \cr
\eqn{N_1}{N[1]} \tab = \tab number of gill rakers \tab (31.2, 1.0) \cr
\eqn{N_2}{N[2]} \tab = \tab number of dorsal-fin rays \tab (13.5, 0.5)
}
where, \eqn{\lambda_n = 100 L_n/S}{\lambda[n] = 100 L[n]/S} , i.e., percent Standard Length
}
\value{
A numeric matrix with dimensions \code{c(Nfish,9)} where columns are labelled
\code{c('S', 'L1', 'L2', 'L3', 'L4', 'L5', 'L6', 'N1', 'N2')}.
The values are described above in \bold{Details}, but generally,
S = standard length of the fish (mm), L = six diagnostic length measurements (mm),
and N = numbers of gill rakers and dorsal fin rays.
}
\references{
Orr, J.W. and Hawkins, S. (2008) Species of the rougheye rockfish complex:
resurrection of \emph{Sebastes melanostictus} (Matsubara, 1934) and a
redescription of \emph{Sebastes aleutianus} (Jordan and Evermann, 1898)
(Teleostei: Scorpaeniformes). \emph{Fisheries Bulletin} \bold{106}: 111--134.
}
\author{
Rowan Haigh, Pacific Biological Station, Fisheries & Oceans Canada, Nanaimo BC.
}
\seealso{
\code{\link{simBSR}}, \code{\link{predictRER}}
}
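\examples{
# A hedged illustration (not in the original file): simulate a few RER fish and
# check the documented output shape.
x <- simRER(5)
dim(x)        # 5 x 9, as described under Value
colnames(x)   # "S", "L1"-"L6", "N1", "N2"
}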
\keyword{distribution}
|
/PBStools/man/simRER.Rd
|
no_license
|
jfontestad/pbs-tools
|
R
| false | false | 2,941 |
rd
|
|
### The script to generate the tidy data from the given data set
## URL https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip .
## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
require(reshape2) # melt() and dcast() below need reshape2
#temp <-tempfile()
#download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip ", temp, mode = "wb")
#trying URL 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip '
#Content type 'application/zip' length 62556944 bytes (59.7 MB)
#downloaded 59.7 MB
#list.files<-unzip(temp)
#unlink(temp)
# now all files in the dataset downloaded and unzipped.
# Load: activity labels, only vector with values
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
# Load: data column names, only values=names
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
# Extract only the measurements on the mean and standard deviation for each measurement.
# build a logical mask of the desired features (names containing "mean" or "std")
extract_features <- grepl("mean|std", features)
# Load and process X_test & y_test data & subject_test.
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# assign names of X_test as features:
names(X_test) = features
# Extract only the measurements on the mean and standard deviation for each measurement.
X_test = X_test[,extract_features]
# Load activity labels and assign names of y_test and subject_test.
y_test[,2] = activity_labels[y_test[,1]]
names(y_test) = c("Activity_ID", "Activity_Label")
names(subject_test) = "subject"
# Bind test_data (subject_test + y_test + X_test)
test_data <- cbind(as.data.frame(subject_test), y_test, X_test)
# Load and process X_train & y_train data & subject_train.
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# assign names of X_train as features.
names(X_train) = features
# Extract only the measurements on the mean and standard deviation for each measurement.
X_train = X_train[,extract_features]
# Load activity data and assign names of y_train and subject_train.
y_train[,2] = activity_labels[y_train[,1]]
names(y_train) = c("Activity_ID", "Activity_Label")
names(subject_train) = "subject"
# Bind train_data(subject_train + y_train + X_train)
train_data <- cbind(as.data.frame(subject_train), y_train, X_train)
##print(str(train_data))
# Merge test and train data in one dataset data
data = rbind(test_data, train_data)
#print(head(data, n=3))
# assign id_labels and measure variables of the merged data.
id_labels = c("subject", "Activity_ID", "Activity_Label")
data_labels = setdiff(colnames(data), id_labels)
melt_data = melt(data, id = id_labels, measure.vars = data_labels)
# Apply mean function to dataset using dcast function. Casting data frame.
tidy_data = dcast(melt_data, subject + Activity_Label ~ variable, mean)
##print(str(tidy_data))
# write tidy_data into tidy_data.txt file with row.names = FALSE to submit the file on github
write.table(tidy_data, file = "./tidy_data.txt", row.names = FALSE)
#test_check<-read.table("./tidy_data.txt")
#print(str(test_check))
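# A hedged sanity check (not in the original script): read the tidy file back and
# confirm one row per subject/activity pair (30 subjects x 6 activities = 180 rows
# for the UCI HAR data).
if (FALSE) {
  tidy_check <- read.table("./tidy_data.txt", header = TRUE)
  dim(tidy_check)                      # expected: 180 rows
  length(unique(tidy_check$subject))   # expected: 30
}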
|
/run_analysis.R
|
no_license
|
Vitalievna/Getting-and-Cleaning-Data
|
R
| false | false | 3,834 |
r
|
|
rankhospital <- function(state, outcome, num) {
## Read outcome data
data <- read.csv("outcome-of-care-measures.csv")
## Check that state and outcome are valid
nbC <- 0
if (outcome == "heart attack"){nbC <- 11}
if (outcome == "heart failure"){nbC <- 17}
if (outcome == "pneumonia"){nbC <- 23}
if (nbC == 0) {stop("invalid outcome")}
S <- subset(data, data[,7] == state)
if(nrow(S) == 0) {stop("invalid state")}
newS <- data.frame("Hospital.name" = as.character(S[,2]),
"Rate" = as.numeric(as.character(S[,nbC])))
newS <- na.omit(newS)
sorted <- newS[order(newS[,2], newS[,1]), ]
sorted$Rank <- c(1:nrow(sorted))
if (num == "best") {return(sorted[1,1])}
if (num == "worst") {return(sorted[nrow(sorted),1])}
if (num > nrow(sorted)) {return(NA)} # rank beyond the number of hospitals -> NA
if (is.numeric(num)) {return(sorted[num,1])}
}
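## A hedged usage sketch (assumes outcome-of-care-measures.csv from the course
## assignment is present in the working directory):
if (FALSE) {
  rankhospital("TX", "heart failure", 4)
  rankhospital("MD", "heart attack", "worst")
  rankhospital("MN", "heart attack", 5000)   # more than the number of hospitals -> NA
}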
|
/rankhospital.R
|
no_license
|
JulieRojas/Rtasks
|
R
| false | false | 859 |
r
|
|
#' Convert local isolation results to SF
#'
#' @param segregation_results a segreg object containing the results of a
#' call to measure_segregation().
#'
#' @return a spatial sf object with local isolation results
#'
#' @export
#'
#' @examples
#'
#' library("sf")
#' library("ggplot2")
#' library("segregr")
#'
#' # load sample data from package segregr
#' marilia_sf <- st_read(system.file("extdata/marilia_2010.gpkg", package = "segregr"))
#'
#' # calculate segregation metrics
#' segregation <- measure_segregation(marilia_sf)
#'
#' # export local isolation results
#' isolation <- isolation_to_sf(segregation)
#'
#' # plot local isolation
#' ggplot(data = isolation) +
#' geom_sf(aes(fill = isolation)) +
#' scale_fill_distiller(palette = "Spectral") +
#' facet_wrap(~group) +
#' theme_void()
isolation_to_sf <- function(segregation_results) {
return(
segregation_results$areal_units %>%
dplyr::select(id) %>%
dplyr::left_join(segregation_results$q, by = c("id"))
)
}
|
/R/isolation_to_sf.R
|
permissive
|
mvpsaraiva/segregr
|
R
| false | false | 1,005 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gffRead.R
\name{gffRead}
\alias{gffRead}
\title{read in GTF/GFF file as a data frame}
\usage{
gffRead(gffFile, nrows = -1, verbose = FALSE)
}
\arguments{
\item{gffFile}{name of GTF/GFF on disk}
\item{nrows}{number of rows to read in (default -1, which means read all
rows)}
\item{verbose}{if TRUE, print status info at beginning and end of file read.
Default FALSE.}
}
\value{
data frame representing the GTF/GFF file
}
\description{
read in GTF/GFF file as a data frame
}
\examples{
gtfPath = system.file('extdata', 'annot.gtf.gz', package='ballgown')
annot = gffRead(gtfPath)
}
\seealso{
\code{\link{getAttributeField}} to extract data from "attributes"
column; \url{http://useast.ensembl.org/info/website/upload/gff.html} for
more information on the GTF/GFF file format.
}
\author{
Kasper Hansen
}
|
/man/gffRead.Rd
|
no_license
|
alyssafrazee/ballgown
|
R
| false | true | 886 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multiplot.R
\name{multiplot}
\alias{multiplot}
\title{multiplot}
\usage{
multiplot(
...,
plotlist = NULL,
ncol,
widths = rep_len(1, ncol),
labels = NULL,
label_size = 5
)
}
\arguments{
\item{...}{plots}
\item{plotlist}{plot list}
\item{ncol}{set the number of columns used to display the plots}
\item{widths}{the width of each plot}
\item{labels}{set labels for labeling the plots}
\item{label_size}{set font size of the label}
}
\value{
plot
}
\description{
plot multiple ggplot objects in one page
}
\author{
Guangchuang Yu
}
|
/man/multiplot.Rd
|
no_license
|
xiangpin/ggtree
|
R
| false | true | 617 |
rd
|
|
tlm = function(formula,data,Tukey=T,...){
# Check the variable under evaluation
key = gsub(' .+','',as.character(formula)[3])
data[[key]] = factor(data[[key]])
nl = length(levels(data[[key]]))
# fit the model
fnew = update(formula,~.-1)
fit = lm(formula=fnew,data=data)
Y = predict(fit,data,terms=key,type='term')+fit$residuals
data_new = fit$model
data_new[['Y']] = Y
X = data_new[key][,1]
lvl = levels(X)
# Refit and multiple testing
refit = lm(Y~X-1,data_new)
cat('overall p-value',c(anova(lm(Y~X-1,data_new))["Pr(>F)"])[[1]][1],'\n')
tk = TukeyHSD(aov(refit))
# Pool SD if it doesn't work otherwise
tt = try(pairwise.t.test(Y,X,pool.sd=F),silent = T)
if(inherits(tt,"try-error")) tt = pairwise.t.test(Y,X,pool.sd=T)
# mean and sd
mu = tapply(Y,X,mean)
std = tapply(Y,X,sd)
std[is.na(std)] = mean(std,na.rm = T)
lwr = mu-std
upr = mu+std
COL = rainbow(nl)
rnk = names(sort(mu,T))
# t-test output
ttX = matrix(NA,nl,nl,dimnames = list(rnk,rnk))
for(i in lvl){for(j in lvl){
if(i%in%rownames(tt$p.value) & j%in%colnames(tt$p.value)){
ttX[i,j] = ttX[j,i] = tt$p.value[i,j] }}}
diag(ttX) = 1
#
ttrnk = rep(NA,nl); ttrnk[1] = 1
for(i in 2:nl) ttrnk[i] = which(ttX[,i]>0.05)[1]
for(i in 2:nl) if(ttrnk[i]>ttrnk[i-1]) ttrnk[i]=ttrnk[i-1]+1
ttrnk = LETTERS[ttrnk]
names(ttrnk) = rnk;
# Tukey output
tkX = matrix(NA,nl,nl,dimnames = list(rnk,rnk))
for(i in lvl){for(j in lvl){
if(paste(i,j,sep='-')%in%rownames(tk$X)){
tkX[i,j] = tkX[j,i] = tk$X[which(paste(i,j,sep='-')==rownames(tk$X)),4]
}}}
diag(tkX) = 1
#
tkrnk = rep(NA,nl)
tkrnk[1] = 1
for(i in 2:nl) tkrnk[i] = which(tkX[,i]>0.05)[1]
for(i in 2:nl) if(tkrnk[i]>tkrnk[i-1]) tkrnk[i]=tkrnk[i-1]+1
tkrnk = LETTERS[tkrnk]
names(tkrnk) = rnk
# bar plot
if(Tukey){out = tkrnk}else{out = ttrnk}
barplot(mu,..., ylim = c(min(lwr)*0.8,max(upr)*1.2),space=F,col=COL,xpd = F)
for(i in 1:nl) lines(c(i-.5,i-.5),c(lwr[i],upr[i]),lwd=2)
for(i in 1:nl) text(i-.2,(mu[i]*0.3+upr[i]*0.7),out[lvl][i],cex=1.25)
# Return output
return(out)
}
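# A hedged usage sketch (not part of the original file): compare group means on a
# built-in data set; tlm() draws the annotated bar plot and returns the letter groups.
if (FALSE) {
  grp = tlm(Sepal.Length ~ Species, data = iris, Tukey = TRUE,
            main = 'Sepal length by species')
  print(grp)
}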
|
/LM_TukeyTest.R
|
permissive
|
alenxav/miscellaneous
|
R
| false | false | 2,220 |
r
|
|
################################################################################
##### 2017-01-09
#' Join similar lists along array-nodes.
#'
#' This function simplifies the task of joining list whose nodes are
#' arrays. This can occur when some computations must be performed in
#' smaller chunks due to large intermediate objects, whereas the
#' resulting lists of arrays themselves are rather small, e.g. when
#' some statistics are to be computed during a bootstrap procedure.
#'
#' @param ... A collection of similarly shaped lists that all have
#' one or more array-nodes that should be joined together. Any
#' arguments that are \code{NULL} will be ignored. Note that the
#' arrays (at the nodes) must have the same number of dimensions
#' with properly named dimension names. The dimension names of
#' these arrays must pairwise have empty intersections. In the
#' cases where the first list have a node that only contains a
#' single number, it will be assumed that this is a feature that
#' is common for all the other lists, and the number from the
#' first list will be used in the result.
#'
#' @param array_nodes A list containing the vector-bookmarks that
#' specifies the array-nodes to be collected.
#'
#' @param .class A character-string, default \code{"array"} that
#' specifies the class to be used for the merged arrays.
#'
#' @return A single list where the array-nodes from the lists detected
#' in \code{...} have been joined together. The other components
#' of the lists are assumed to be identical, and the values from
#' the first list will be used for these.
#'
#' @export
list_array_join <- function(...,
array_nodes,
.class = "array") {
..arg_list <- list(...)
kill(...)
###-------------------------------------------------------------------
## Ignore any components that are given as 'NULL'
..arg_list <- ..arg_list[! vapply(
X = ..arg_list,
FUN = is.null,
FUN.VALUE = logical(1))]
###-------------------------------------------------------------------
## Sanity-check that arguments are present.
if (length(..arg_list) == 0)
error(.argument = "...",
"No arguments detected.")
###-------------------------------------------------------------------
## Identify any 'NULL'-values.
is_null <- vapply(
X = ..arg_list,
FUN = is.null,
FUN.VALUE = logical(1))
## Return 'NULL' if only 'NULL' values.
if (all(is_null))
return(NULL)
## Remove 'NULL'-values from the argument list.
if (any(is_null))
..arg_list <- ..arg_list[! is_null]
kill(is_null)
###-------------------------------------------------------------------
## Sanity-check that the stuff given to the function have the
## desired properties of being lists.
is_list <- vapply(
X = ..arg_list,
FUN = is.list,
FUN.VALUE = logical(1))
if (! all(is_list))
error(.argument = "...",
"Only lists are accepted as arguments!")
kill(is_list)
###-------------------------------------------------------------------
## Collect the pieces if more than one list is detected.
if (length(..arg_list) > 1) {
###-------------------------------------------------------------------
## Additional sanity checks should be added here. In
## particular, it should be tested that the nodes given as
## array_nodes exists and have the desired properties, and it
## should be verified that the other nodes are identical.
###-------------------------------------------------------------------
## Create quotes with the code that should be evaluated for
## all the nodes given in 'array_nodes'. The idea is to
## update the nodes of the first component, and then simply
## return that to the workflow. Nodes in the remaining lists
## will be 'NULL'ed in order to release memory. In the quote
## below, the '.bm' represents the "bookmarks" from
## 'array_nodes'.
my_abind_quote <- as.call(c(
quote(my_abind),
lapply(
X = seq_along(..arg_list),
FUN = function(x)
bquote(..arg_list[[.(x)]][[.bm]]))
))
NULL_finished_quote <- quote({
for (i in 2:length(..arg_list))
..arg_list[[i]][[.bm]] <- NULL
kill(i)})
## Execute the quotes for the desired nodes.
for (.bm in array_nodes) {
## Do nothing if the stuff in the first list does not contain
## an array or if it is a "degenerate" array (dim = 1L).
if (is.array(..arg_list[[1]][[.bm]])) {
if (! identical(x = dim(..arg_list[[1]][[.bm]]),
y = 1L)) {
..arg_list[[1]][[.bm]] <- eval(my_abind_quote)
class(..arg_list[[1]][[.bm]]) <- .class
}
}
eval(NULL_finished_quote)
}
kill(.bm, array_nodes, .class, my_abind_quote,
NULL_finished_quote) }
###-------------------------------------------------------------------
## Return the result with updated attributes for dimensions and
## dimension-names, those attributes will partially be in the
## same shape as '.list', i.e. only the nodes referring to arrays
## will be included in the result.
list_array_dims(.list = ..arg_list[[1]])
}
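## A hedged usage sketch (assumes the leanRcoding package, which provides
## list_array_join() and its helpers, is loaded; the expected result is an
## assumption based on the documentation above): two chunk-results whose
## array-nodes have non-overlapping dimension names along one dimension are
## merged along those nodes.
if (FALSE) {
  chunk1 <- list(info = "common metadata",
                 stats = array(1:4, dim = c(2, 2),
                               dimnames = list(boot = c("b1", "b2"),
                                               stat = c("mean", "sd"))))
  chunk2 <- list(info = "common metadata",
                 stats = array(5:8, dim = c(2, 2),
                               dimnames = list(boot = c("b3", "b4"),
                                               stat = c("mean", "sd"))))
  joined <- list_array_join(chunk1, chunk2, array_nodes = list("stats"))
  dim(joined$stats)   # expected: 4 x 2 (b1-b4 along the 'boot' dimension)
}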
|
/R/list_array_join.R
|
no_license
|
LAJordanger/leanRcoding
|
R
| false | false | 5,652 |
r
|
library(ziphsmm)
### Name: hmmsim.cont
### Title: Simulate a hidden Markov series and its underlying states with
### zero-inflated emission distributions
### Aliases: hmmsim.cont
### ** Examples
prior_init <- c(0.5,0.2,0.3)
emit_init <- c(10,40,70)
zero_init <- c(0.5,0,0)
omega <- matrix(c(-0.3,0.2,0.1,0.1,-0.2,0.1,0.2,0.2,-0.4),3,3,byrow=TRUE)
timeindex <- rep(1,1000)
for(i in 2:1000) timeindex[i] <- timeindex[i-1] + sample(1:3,1)
result <- hmmsim.cont(n=1000,M=3,prior=prior_init, tpm_parm=omega,
emit_parm=emit_init,zeroprop=zero_init,timeindex=timeindex)
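# Quick inspection of the simulated object (added sketch): per the title above,
# hmmsim.cont returns the simulated series together with its underlying states,
# so a structure dump is a reasonable first check.
str(result)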
|
/data/genthat_extracted_code/ziphsmm/examples/hmmsim.cont.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 582 |
r
|
# Example input file to Longevity R Markdown
replicates <- 8
strains <- c("CB4856", "CX11314", "ED3017", "JT11398", "DL238", "N2", "MY23", "JU258", "EG4725", "LKC34", "JU775", "MY16")
uniqueStrains <- strains
rows <- c("A", "B", "C", "D", "E", "F", "G", "H")
nStrains <- length(strains)
colRemove <- c(5, 6)
wellRemove <- data.frame(col = c(), row = c())
nums <- rep(10, 96)
|
/Scripts/p01_3mgmL.R
|
no_license
|
AndersenLab/Longevity
|
R
| false | false | 377 |
r
|
## Packages these helpers rely on (added so the script can be sourced on its
## own; the original repository may attach them elsewhere):
library(dplyr)      # tibble(), rename(), mutate(), %>%
library(stringr)    # str_replace_all()
library(glue)       # glue()
library(brms)       # brm(), prior(), posterior_samples(), fixef()
library(tidybayes)  # spread_draws(), median_qi(), geom_pointinterval()
library(ggridges)   # geom_density_ridges()
library(ggplot2)    # ggplot(), ggsave()
library(here)       # here()
extract_TE <- function(dataset) {
  tibble(dataset$studlab,
         dataset$TE,
         dataset$seTE
  ) %>%
    rename(Author = 1, TE = 2, seTE = 3)
}
bayes_test <- function(dataset, defined_prior, iterations) {
brm(
TE | se(seTE) ~ 1 + (1 | Author),
data = dataset,
prior = defined_prior,
iter = iterations,
control = list(adapt_delta = 0.99)
)
}
post_samples <- function(dataset) {
p_samples <- posterior_samples(dataset, c("^b", "^sd")) %>%
rename("TE" = 1,
"tau" = 2)
print(ggplot(aes(x = TE), data = p_samples)+
geom_density(fill = "lightblue",
color = "lightblue",
alpha = 0.7) +
geom_point(y = 0, x = mean(p_samples$TE)) +
labs(x = expression(italic(TE)),
y = element_blank()) +
theme_minimal())
return(p_samples)
}
study_draw <- function(dataset) {
spread_draws(dataset, r_Author[Author,], b_Intercept) %>%
mutate(b_Intercept = r_Author + b_Intercept) %>%
left_join(., study_review_version)
}
pooled_effect_draw <- function(dataset) {
spread_draws(dataset, b_Intercept) %>%
mutate(Author = "Pooled Effect",
review_version = factor("combined_pooled"))
}
forest_plot <- function(model, data_study, data_pooled, cut, title, type, filename) {
forest_data <- bind_rows(data_study, data_pooled) %>%
ungroup() %>%
mutate(Author = str_replace_all(Author, "[.]", " ")) %>%
mutate(Author = reorder(Author, b_Intercept),
b_Intercept = exp(b_Intercept))
summary_forest <- group_by(forest_data, Author, review_version) %>%
median_qi(b_Intercept)
graph <- ggplot(aes(b_Intercept, relevel(Author, "Pooled Effect", after = Inf), fill = review_version),
data = forest_data) +
geom_vline(xintercept = exp(fixef(model)[1, 1]),
color = "grey",
size = 1) +
geom_vline(xintercept = exp(fixef(model)[1, 3:4]),
color = "grey",
linetype = 2) +
geom_vline(xintercept = 1,
color = "black",
size = 1) +
geom_density_ridges(
aes(fill = review_version),
rel_min_height = 0.01,
col = NA,
scale = 1,
alpha = 0.8
) +
geom_pointinterval(data = summary_forest, size = 1) +
geom_text(
data = mutate_if(summary_forest, is.numeric, round, 2),
aes(
label = glue("{b_Intercept} [{.lower}, {.upper}]"),
x = cut
),
hjust = "inward",
size = 5
) +
facet_grid(review_version~ ., scales = "free_y", space = "free") +
labs(x = "Relative Risk [95% Credible Interval]",
y = element_blank(),
title = element_blank(),
caption = element_blank()
) +
scale_fill_discrete(name = "Review version", labels = c("Previous version (v5)", "Current version (v6)", "Pooled effect")) +
xlim(0, cut) +
theme_minimal() +
theme(panel.spacing = unit(0.1, "lines"),
strip.text = element_blank(),
axis.text.y = element_text(size = 12))
outputs <- list("plot" = graph, "data" = forest_data, "summary" = summary_forest)
ggsave(filename = filename, plot = graph, device = "png", path = here("reports", "figure"))
return(outputs)
}
save_plots <- function(plot_name) {
png(here::here('reports', 'figure', paste(plot_name, ".png", sep = "")), width=1024, height=546, res=120)
}
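## Illustrative end-to-end sketch (added; the toy data and prior choices are
## assumptions, not taken from this repository). extract_TE() expects an object
## with $studlab, $TE and $seTE fields (e.g. a meta::metagen() result); a plain
## list with those fields works for the tibble construction as well.
toy <- list(studlab = c("Smith 2020", "Lee 2021", "Diaz 2021"),
            TE      = c(-0.20, 0.10, -0.05),
            seTE    = c(0.15, 0.20, 0.12))
dat <- extract_TE(toy)
priors <- c(prior(normal(0, 1), class = Intercept),
            prior(cauchy(0, 0.5), class = sd))
# fit <- bayes_test(dat, defined_prior = priors, iterations = 4000)  # slow: runs MCMC
# samples <- post_samples(fit)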
|
/scripts/bayes_scripts.R
|
no_license
|
DidDrog11/smoking_covid
|
R
| false | false | 3,440 |
r
|
data <- mtcars[, c("mpg", "disp", "hp")]
head(data)
model <- lm(mpg ~ disp + hp, data = data)
summary(model)
predict(model, newdata = data.frame(disp = 140, hp = 80))
predict(model, newdata = data.frame(disp = 160, hp = 70))
plot(model)
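# Optional follow-up (added for illustration): interval estimates from the same
# fitted model, using base-R lm tools only.
predict(model, newdata = data.frame(disp = 140, hp = 80), interval = "confidence")
confint(model)  # 95% confidence intervals for the coefficients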
|
/IoT_Domain_Analyst_ECE_3502/Lab_3/Vehicles_Multiple_Linear_Regression.r
|
permissive
|
eshan5/VIT-Labs
|
R
| false | false | 245 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_data.R
\name{read_fastq}
\alias{read_fastq}
\title{Read a fastq file}
\usage{
read_fastq(datafile = NULL)
}
\arguments{
\item{datafile}{Path to a fastq file.}
}
\value{
Return a list containing the reads, the quality scores and the dimensions of the data.
}
\description{
Read a fastq file and encode Phred quality score from 0 to 93 using ASCII 33 to 126.
}
\examples{
# Read a fastq file
datFile <- system.file("extdata", "sim.fastq", package = "CClust")
dat <- read_fastq(datafile = datFile)
}
|
/man/read_fastq.Rd
|
no_license
|
kdorman/CClust
|
R
| false | true | 559 |
rd
|
fast_kendall <-function (x, y = NULL)
{
cor = FALSE
if (is.null(y)) {
n <- nrow(x)
if (!is.matrix(x) && !is.data.frame(x))
stop("x must be either numeric vector, matrix or data.frame.")
{
p <- ncol(x)
dn <- colnames(x)
ret <- diag(p)
dimnames(ret) <- list(dn, dn)
for (i in 1:p) {
if (i == p)
return(ret)
ord <- order(x[, i])
cur.x <- x[ord, i]
for (j in (i + 1):p) ret[i, j] <- ret[j, i] <- pcaPP:::.cor.fk.2d(cur.x,
x[ord, j], cor)/(n*(n-1))
}
}
}
else {
if (length(x) != length(y))
stop("x and y must have same length.")
n <- length(x)
ord <- order(x)
return(pcaPP:::.cor.fk.2d(x[ord], y[ord], cor)/(n*(n-1)))
}
}
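# Minimal usage sketch (added; assumes the pcaPP package is installed, since the
# function above calls its internal pcaPP:::.cor.fk.2d routine).
set.seed(1)
x <- matrix(rnorm(200), ncol = 4)
fast_kendall(x)               # statistic for every pair of columns
fast_kendall(x[, 1], x[, 2])  # a single pair of vectors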
|
/R/fast_kendall.R
|
no_license
|
fanne-stat/DGCPCA
|
R
| false | false | 825 |
r
|
# Libraries
library(tidyverse)
library(hrbrthemes)
library(viridis)
library(patchwork)
# create 3 data frames:
data1 <- data.frame( name=letters[1:5], value=c(17,18,20,22,24) )
data2 <- data.frame( name=letters[1:5], value=c(20,18,21,20,20) )
data3 <- data.frame( name=letters[1:5], value=c(24,23,21,19,18) )
# Plot
plot_pie <- function(data, vec){
ggplot(data, aes(x="name", y=value, fill=name)) +
geom_bar(width = 1, stat = "identity") +
coord_polar("y", start=0, direction = -1) +
scale_fill_viridis(discrete = TRUE, direction=-1) +
geom_text(aes(y = vec, label = rev(name), size=4, color=c( "white", rep("black", 4)))) +
scale_color_manual(values=c("black", "white")) +
theme_void() +
theme(legend.position = "none") +
xlab("") +
ylab("")
}
plot_pie(data1, c(10,35,55,75,93))
a <- plot_pie(data1, c(10,35,55,75,93))
b <- plot_pie(data2, c(10,35,53,75,93))
c <- plot_pie(data3, c(10,29,50,75,93))
p1 <- a + b + c
plot_bar <- function(data){
ggplot(data, aes(x=name, y=value, fill=name)) +
geom_bar( stat = "identity") +
scale_fill_viridis(discrete = TRUE, direction=-1) +
scale_color_manual(values=c("black", "white")) +
theme_ipsum() +
theme(
legend.position="none",
plot.title = element_text(size=14),
panel.grid = element_blank(),
) +
ylim(0,25) +
xlab("") +
ylab("")
}
# Make 3 barplots
a <- plot_bar(data1)
b <- plot_bar(data2)
c <- plot_bar(data3)
# Put them together with patchwork
p2 <- a + b + c
p1 / p2
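# Optional save step (added; file name and size are illustrative only). A
# patchwork composition can be written out with ggsave() like any ggplot object.
ggsave("pie_vs_bar.png", plot = p1 / p2, width = 10, height = 6)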
|
/docs/slides/exploratory-viz/pie.R
|
no_license
|
walkerke/geog30323
|
R
| false | false | 1,527 |
r
|
#Load SQLDF library
library('sqldf')
#Download and unzip data
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "power_consumption.zip")
unzip("./power_consumption.zip")
#Query data for specific time-frame and do minor data formatting
file_query <- "SELECT Date,
strftime('%H:%M:%S', Time) Time,
Global_active_power,
Global_reactive_power,
Voltage,
Global_intensity,
Sub_metering_1,
Sub_metering_2,
Sub_metering_3
FROM file WHERE Date = '1/2/2007' or Date = '2/2/2007'"
file_name = "household_power_consumption.txt"
power_cons <- read.csv.sql(file_name, file_query, sep=";")
power_cons$Date <- as.Date(power_cons$Date, '%d/%m/%Y')
#Replace ? with NA in the dataset
power_cons$Global_active_power[power_cons$Global_active_power == '?'] <- NA
power_cons$Global_reactive_power[power_cons$Global_reactive_power == '?'] <- NA
power_cons$Voltage[power_cons$Voltage == '?'] <- NA
power_cons$Global_intensity[power_cons$Global_intensity == '?'] <- NA
power_cons$Sub_metering_1[power_cons$Sub_metering_1 == '?'] <- NA
power_cons$Sub_metering_2[power_cons$Sub_metering_2 == '?'] <- NA
power_cons$Sub_metering_3[power_cons$Sub_metering_3 == '?'] <- NA
#Generate datetime column to be used in plots
power_cons$datetime <- as.POSIXlt(strftime(paste(power_cons$Date,power_cons$Time), '%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S', tz=Sys.timezone())
#Open PNG Device 480x480 pixels
png("./figure/plot1.png", width = 480, height = 480, units = "px")
#Plot histogram
hist(power_cons$Global_active_power, freq = TRUE, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
#Close Device
dev.off()
|
/plot1.R
|
no_license
|
Michalsky/ExData_Plotting1
|
R
| false | false | 1,755 |
r
|
#' Build synthetic datasets as negative controls to decide the optimum
#' cluster number
#'
#' Process:
#' 1. Generate 50 synthetic datasets:
#' - Each contains 50 random samples from 44,890 samples (with replacement).
#' - Scramble genes in each dataset by random selection without replacement
#' and add random value between -0.1 and 0.1
#' 2. Row-normalize synthetic datasets
#' 3. PCA on synthetic datasets --> `bootstrap_PCs_rowNorm_Neg.rds`
#' 4. Combine top 20 PCs from training datasets and PC1s from synthetic datasets
#' --> `all_{#neg}.rds`
#' 5. Calculate distance matrix and hcut for each combined dataset
#' --> `res_dist_{#neg}.rds` and `res_hclust_{#neg}.rds`
#' 6. Evaluate how the negative controls were separated
#' --> `evals_{#neg}.rds` and `eval_summary_{#neg}.rds`
#'
#' Outputs:
#' Resulting files from this script are saved under
#' `GenomicSuperSignatureLibrary/refinebioRseq/Neg_Controls` directory. These
#' files will be available upon request.
suppressPackageStartupMessages({
library(dplyr)
})
##### Load 536 refine.bio datasets #############################################
wd <- "~/data2/GenomicSuperSignatureLibrary/refinebioRseq/PCAmodel_536"
allZ <- readRDS(file.path(wd, "allZ.rds")) # genes x 44,890 samples
genes <- readRDS("~/data2/model_building/data/topGenes_13934.rds") # top 13,934 genes
stat <- readRDS(file.path(wd, "refinebioRseq_536study_SdMean.rds"))
s <- stat$sd
m <- stat$mean
numOfDatasets <- c(10, 20, 30, 40, 50)
n <- numOfTopPCs <- 20
##### Synthetic 'experiment' from random sample selecting ######################
fname <- paste0("Neg_", 1:50)
synData <- vector("list", length(fname))
names(synData) <- fname
set.seed(1234)
for (i in seq_along(fname)) {
dataName <- fname[i]
scrambled <- allZ[, sample(ncol(allZ), 50, replace = TRUE)] # randomly select 50 samples
for (j in 1:50) {
# scramble genes for each sample in the synthetic dataset
scrambled[,j] <- scrambled[sample(nrow(allZ), replace = FALSE), j] +
runif(nrow(allZ), min = -0.1, max = 0.1)
rownames(scrambled) <- genes
}
synData[[dataName]] <- scrambled
}
##### PCA ######################################################################
synData_PCA <- vector("list", length(synData))
names(synData_PCA) <- names(synData)
for (study in names(synData)) {
x <- synData[[study]]
# Row normalization
x <- sweep(x, 1, m)
x <- sweep(x, 1, s, "/")
# PCA
pca_res <- prcomp(t(x))
synData_PCA[[study]]$rotation <- pca_res$rotation[,1:n]
colnames(synData_PCA[[study]]$rotation) <- paste0(study, ".PC", c(1:n))
eigs <- pca_res$sdev^2
  pca_summary <- rbind(SD = sqrt(eigs),
                       Variance = eigs/sum(eigs),
                       Cumulative = cumsum(eigs)/sum(eigs))
synData_PCA[[study]]$variance <- pca_summary[,1:n]
colnames(synData_PCA[[study]]$variance) <- paste0(study, ".PC", c(1:n))
rm(x)
}
dat_dir <- "~/data2/GenomicSuperSignaturePaper/inst/extdata/Neg_Controls"
saveRDS(synData_PCA, file.path(dat_dir, "bootstrap_PCs_rowNorm_Neg.rds"))
##### Distance matrix ##########################################################
dat_dir <- "~/data2/GenomicSuperSignaturePaper/inst/extdata/Neg_Controls"
neg <- readRDS(file.path(dat_dir, "bootstrap_PCs_rowNorm_Neg.rds"))
data <- lapply(neg, function(x) x$rotation) %>% Reduce(cbind,.) %>% t
## Select only PC1s from different number of negative controls
for (numOfDataset in numOfDatasets) {
## combine all samples with PC1s from different number of controls
ind <- c()
for (i in 1:numOfDataset) {
new_ind <- c(1) + numOfTopPCs*(i - 1) # select only PC1s
ind <- c(ind, new_ind)
}
neg_dat <- data[ind,]
all <- cbind(allZ, t(neg_dat)) %>% t # combine synData and training data
saveRDS(all, file.path(dat_dir, paste0("all_", numOfDataset, ".rds")))
## calculate distance matrix
all <- readRDS(file.path(dat_dir, paste0("all_", numOfDataset, ".rds")))
res.dist <- factoextra::get_dist(all, method = "spearman")
res.hclust <- stats::hclust(res.dist, method = "ward.D")
saveRDS(res.dist, file.path(dat_dir, paste0("res_dist_",numOfDataset,".rds")))
saveRDS(res.hclust, file.path(dat_dir, paste0("res_hclust_",numOfDataset,".rds")))
}
##### Find the minimum number of clusters ######################################
source("evaluateCluster.R")
dat_dir <- "~/data2/GenomicSuperSignaturePaper/inst/extdata/Neg_Controls"
evals <- vector(mode = "list", length = 9)
for (numOfDataset in numOfDatasets) {
## Load the target synthetic dataset
all <- readRDS(file.path(dat_dir, paste0("all_", numOfDataset,".rds")))
res.dist <- readRDS(file.path(dat_dir, paste0("res_dist_",numOfDataset,".rds")))
k_range <- c(round(nrow(all)/7,0), round(nrow(all)/6,0), round(nrow(all)/5,0),
round(nrow(all)/4,0), round(nrow(all)/3,0), round(nrow(all)/2.75,0),
round(nrow(all)/2.5,0), round(nrow(all)/2.25,0), round(nrow(all)/2,0))
## Evaluate clustering result
## For detail, check the function evaluateCluster
for (i in seq_along(k_range)) {
    res.hcut <- factoextra::hcut(res.dist, k = k_range[i], hc_func = "hclust",
                                 hc_method = "ward.D", hc_metric = "spearman")
eval <- evaluateCluster(res.hcut, controlType = "Neg", hmTable = FALSE)
evals[[i]] <- eval
}
## `eval_summary` is a data frame with three columns:
## numSeparated, sizeOfMaxCluster, and numOfCluster.
eval_summary <- sapply(evals, function(x) {
data.frame(numSeparated = sum(x == 1),
sizeOfMaxCluster = max(x))
}) %>% t %>% as.data.frame
eval_summary$numOfCluster <- k_range
## Save
saveRDS(evals, file.path(dat_dir, paste0("evals_",numOfDataset,".rds")))
saveRDS(eval_summary,
file.path(dat_dir, paste0("eval_summary_",numOfDataset,".rds")))
}
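## Illustrative follow-up (added; not part of the original pipeline): read one of
## the saved summaries back in and inspect how many negative controls were
## separated at each candidate number of clusters before settling on k.
eval_summary_50 <- readRDS(file.path(dat_dir, "eval_summary_50.rds"))
eval_summary_50  # compare numSeparated / sizeOfMaxCluster against numOfCluster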
|
/Methods/select_numOfClusters/neg_controls.R
|
no_license
|
shbrief/GenomicSuperSignaturePaper
|
R
| false | false | 5,983 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 774
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 774
c
c Input Parameter (command line, file):
c input filename QBFLIB/QBF_1.0/NuSMV_diam_qdimacs/Counter4/counter4_13.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 137
c no.of clauses 774
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 774
c
c QBFLIB/QBF_1.0/NuSMV_diam_qdimacs/Counter4/counter4_13.qdimacs 137 774 E1 [] 0 52 85 774 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/QBF_1.0/NuSMV_diam_qdimacs/Counter4/counter4_13/counter4_13.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 645 |
r
|
/二元正态分布密度函数图.R
|
no_license
|
qypx/quick-lookup
|
R
| false | false | 370 |
r
| ||
\name{fec_stan}
\alias{fec_stan}
\title{Modelling of faecal egg count data (one-sample case)}
\usage{
fec_stan(fec, rawCounts = FALSE, CF = 50, zeroInflation = TRUE,
muPrior, kappaPrior, phiPrior, nsamples = 2000, nburnin = 1000,
thinning = 1, nchain = 2, ncore = 1, adaptDelta = 0.95,
saveAll = FALSE, verbose = FALSE)
}
\arguments{
\item{fec}{numeric vector. Faecal egg counts.}
\item{rawCounts}{logical. If TRUE, \code{fec} corresponds to raw counts
(as counted on equipment). Otherwise it corresponds to calculated epgs (raw counts times correction factor).
Defaults to \code{FALSE}.}
\item{CF}{a positive integer or a vector of positive integers. Correction factor(s).}
\item{zeroInflation}{logical. If true, uses the model with zero-inflation. Otherwise uses the model without zero-inflation}
\item{muPrior}{named list. Prior for the group mean epg parameter \eqn{\mu}. The default prior is \code{list(priorDist = "gamma",hyperpars=c(1,0.001))}, i.e. a gamma distribution with shape 1 and rate 0.001, its 90\% probability mass lies between 51 and 2996.}
\item{kappaPrior}{named list. Prior for the group dispersion parameter \eqn{\kappa}. The default prior is \code{list(priorDist = "gamma",hyperpars=c(1,0.7))}, i.e. a gamma distribution with shape 1 and rate 0.7, its 90\% probability mass lies between 0.1 and 4.3 with a median of 1.}
\item{phiPrior}{named list. Prior for the zero-inflation parameter \eqn{\phi}. The default prior is \code{list(priorDist = "beta",hyperpars=c(1,1))}, i.e. a uniform prior between 0 and 1.}
\item{nsamples}{a positive integer. Number of samples for each chain (including burn-in samples).}
\item{nburnin}{a positive integer. Number of burn-in samples.}
\item{thinning}{a positive integer. Thinning parameter, i.e. the period for saving samples.}
\item{nchain}{a positive integer. Number of chains.}
\item{ncore}{a positive integer. Number of cores to use when executing the chains in parallel.}
\item{adaptDelta}{numeric. The target acceptance rate, a numeric value between 0 and 1.}
\item{saveAll}{logical. If TRUE, posterior samples for all parameters are saved in the \code{stanfit} object. If FALSE, only samples for \eqn{\mu}, \eqn{\kappa} and \eqn{\phi} are saved. Default to FALSE.}
\item{verbose}{logical. If true, prints progress and debugging information.}
}
\value{
Prints out a summary of \code{meanEPG} as the posterior mean epg. The posterior summary contains the mean, standard deviation (sd), 2.5\%, 50\% and 97.5\% percentiles, the 95\% highest posterior density interval (HPDLow95 and HPDHigh95) and the posterior mode. NOTE: we recommend using the 95\% HPD interval and the mode for further statistical analysis.
The returned value is a list that consists of:
\item{stan.samples}{an object of S4 class \code{\link[rstan]{stanfit}} representing the fitted results}
\item{posterior.summary}{a data.frame that is the same as the printed posterior summary}
}
\description{
Models the mean of faecal egg counts with Bayesian hierarchical models. See Details for a list of model choices.
}
\details{
\subsection{List of built-in models}{
\itemize{
\item without zero-inflation: set \code{zeroInflation = FALSE}
\item with zero-inflation: set \code{zeroInflation = TRUE}
}
Note that this function only models the mean of egg counts, see \code{\link{fecr_stan}()} for modelling the reduction.
}
\subsection{Other information}{
The first time each model with non-default priors is applied, it can take up to 20 seconds to compile the model. Currently the function only supports prior distributions with two parameters. For a complete list of supported priors and their parameterization, please consult the list of distributions in \href{http://mc-stan.org/documentation/}{Stan}.
The default number of samples per chain is 2000, with 1000 burn-in samples. Normally this is sufficient in Stan. If the chains do not converge, one should tune the MCMC parameters until convergence is reached to ensure reliable results.
}
}
\author{
Craig Wang
}
\seealso{
\code{\link{simData1s}} for simulating faecal egg count data with one sample
}
\examples{
## load the sample data
data(epgs)
## apply zero-infation model
model <- fec_stan(epgs$before, rawCounts = FALSE, CF = 50)
}
\keyword{models}
|
/fuzzedpackages/eggCounts/man/fec_stan.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 4,285 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-cache.R
\name{cache_is_activated}
\alias{cache_is_activated}
\title{Check if a cache is activated}
\usage{
cache_is_activated(cache_name = NULL)
}
\arguments{
\item{cache_name}{The name of the cache to check. If \code{NULL}, we check if
any cache is activated. If not \code{NULL}, we check if a specific cache is
activated.}
}
\description{
Check if a cache is activated
}
\keyword{internal}
|
/man/cache_is_activated.Rd
|
permissive
|
r-lib/styler
|
R
| false | true | 476 |
rd
|
\name{paired.tseries}
\alias{paired.tseries}
\docType{data}
\title{
Pairs of Time Series from Different Domains
}
\description{
Dataset formed by pairs of time series from different domains. Series were selected from the UCR Time Series Archive.
}
\details{
Each pair of series in the dataset (Series 1 and 2, Series 3 and 4, etc.) comes from the same domain, so this pairing could constitute a possible ground truth solution.
}
\note{
\code{abbreviate} can be used on the \code{colnames}.
}
\usage{data(paired.tseries)}
\format{
A \code{mts} object with 36 series of length 1000.
}
\source{http://www.cs.ucr.edu/~eamonn/SIGKDD2004/All_datasets/}
\references{
Keogh, E., Lonardi, S., & Ratanamahatana, C. A. (2004). Towards parameter-free data mining. Proceedings of the tenth ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 206-215).
Montero, P and Vilar, J.A. (2014) \emph{TSclust: An R Package for Time Series Clustering.} Journal of Statistical Software, 62(1), 1-43. \url{http://www.jstatsoft.org/v62/i01/.}
}
\examples{
data(paired.tseries)
#Create the true solution, the pairs
true_cluster <- rep(1:18, each=2)
#test a dissimilarity metric and a cluster algorithm
intperdist <- diss( paired.tseries, "INT.PER") #create the distance matrix
#use hierarchical clustering and divide the tree in 18 clusters
intperclust <- cutree( hclust(intperdist), k=18 )
#use a cluster similarity index to rate the solution
cluster.evaluation( true_cluster, intperclust)
#### another evaluation criterion used for this dataset consists of counting the correct pairs
#### formed during agglomerative hierarchical clustering (see references)
true_pairs = (-matrix(1:36, ncol=2, byrow=TRUE))
hcintper <- hclust(intperdist, "complete")
#count within the hierarchical cluster the pairs
sum( match(data.frame(t(true_pairs)), data.frame(t(hcintper$merge)), nomatch=0) > 0 ) / 18
}
\keyword{datasets}
|
/man/paired.tseries.Rd
|
no_license
|
cran/TSclust
|
R
| false | false | 1,970 |
rd
|
library(caret)
names(getModelInfo())
x <- matrix(rnorm(50*5), ncol=5)
y <- factor(rep(c("A", "B"), 25))
featurePlot(x,y)
install.packages("FSelector")
data("iris")
iris$Petal.Length <- as.factor(iris$Petal.Length)
FSelector::random.forest.importance(Petal.Length~., iris)
library(mlbench); data(Ozone)
library(randomForest)
library(Boruta)   # needed for Boruta(), getSelectedAttributes(), TentativeRoughFix()
na.omit(Ozone)->ozo
Boruta(V4~.,data=ozo,doTrace=2)->Bor.ozo
cat('Random forest run on all attributes:\n')
print(randomForest(V4~.,data=ozo))
cat('Random forest run only on confirmed attributes:\n')
print(randomForest(ozo[,getSelectedAttributes(Bor.ozo)],ozo$V4))
library(reshape2)  # melt()
library(dplyr)     # %>%
ggplot_missing <- function(x){
x %>%
is.na %>%
melt %>%
ggplot(data = .,
aes(x = Var2,
y = Var1)) +
geom_raster(aes(fill = value)) +
scale_fill_grey(name = "",
labels = c("Present","Missing")) +
theme_minimal() +
theme(axis.text.x = element_text(angle=45, vjust=0.5)) +
labs(x = "Variables in Dataset",
y = "Rows / observations")
}
ggplot_missing(Ozone)
boxplot(Ozone)
Bor.bank <- TentativeRoughFix(Bor.ozo)
print(Bor.ozo)
plot(Bor.ozo)
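# Possible follow-up (added sketch): summarise attribute importance and list the
# attributes confirmed once the tentative ones have been resolved.
attStats(Bor.ozo)
getSelectedAttributes(Bor.bank, withTentative = FALSE)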
|
/SECOND/2lab-5.R
|
no_license
|
NadyaLE/BigData
|
R
| false | false | 1,030 |
r
|
library(readr)
Sravani <- read_csv("C:/Users/srava/Desktop/Sravani_1.csv")
View(Sravani)
install.packages("tidyr")
install.packages("dplyr") #Data Manipulation
install.packages("ggplot2")
library(tidyr)
library(dplyr)
library(ggplot2)
colnames(Sravani)
data <- data.frame(Sravani)
colnames(data)
# The x and y mappings go in aes(); geom_point() is for scatterplots.
attach(data)
colnames(data)[colnames(data) == "Age Bins"] <- 'AgeBins'
data$AgeBins_1 <- NA
data$AgeBins_1 <- as.factor(data$Age.Bins)
plot(data$AgeBins_1, legend.text = "AGe Bins",col= 'Blue')
#Plotting Age Bins - Segment wise
ggplot(data, aes(x=AgeBins_1))+
  geom_histogram(stat = "count", na.rm = TRUE, aes(fill = segment)) +
  facet_wrap(~segment,scales = "free")
#Plotting AgeBins Vs Employment Status
agevsemp = ggplot(data, aes(x=Q10))+
  geom_histogram(stat = "count", na.rm = TRUE, aes(fill = segment)) +
  facet_wrap(~segment,scales = "free")
plot(agevsemp)
#Layer with Income level on the plot above
data$IncomeLevel <- NA
data$IncomeLevel <- as.factor(data$Q8)
agevsempvsincome = agevsemp + geom_bar(aes(fill = IncomeLevel), stat = "count")
|
/04_Plots.R
|
no_license
|
sravanidn/LifeIns_Modling_Buy_NoBuy
|
R
| false | false | 1,184 |
r
|
#' Convert WDI
#'
#' Convert WDI converts data from readWDI() to ISO country level. Adds Taiwan
#' as difference from global total.
#'
#'
#' @param x MAgPIE object containing WDI data region resolution
#' @param subtype Name of the worldbank indicator, e.g. "SP.POP.TOTL"
#' @return MAgPIE object of the WDI data disaggregated to country level
#' @author Jan Phillip Dietrich, Benjamin Bodirsky, Xiaoxi Wang, David Chen
#' @examples
#'
#' \dontrun{ a <- convertWDI(x)
#' }
#' @importFrom magclass getCells<-
#' @importFrom countrycode countrycode
convertWDI<-function(x,subtype){
WDI_data <- WDI::WDI_data
# changing scale of indicators
if (subtype %in% c("SP.POP.TOTL","NY.GDP.MKTP.PP.KD", "NV.AGR.TOTL.CD", "NY.GDP.MKTP.PP.CD","NY.GDP.MKTP.CD","NY.GDP.MKTP.KD","NY.GDP.MKTP.KN")) {
x <- x/1000000
#Kosovo added to Serbia
x["RS",,] <- dimSums(x[c("RS","XK"),,],dim=1,na.rm=T)
}else if (subtype %in% WDI_data$series[,"indicator"]){
# urbanisation rate and population density and land surface
# include c("SP.URB.TOTL.IN.ZS", "EN.POP.DNST", "AG.SRF.TOTL.K2", "NE.CON.PRVT.PC.KD", "NE.CON.PRVT.PP.CD","NE.CON.PRVT.PP.KD")
vcat("Warning: Kosovo left out of conversion and has differing population values from FAO", verbosity=2)
x <- x
}else {
stop("subtype does not exist in the dataset!")
}
y <- x
  ## Channel Islands added to JEY
JG <- "JEY"
names(JG) <- "JG"
getCells(y)<-countrycode::countrycode(getCells(y),"iso2c","iso3c", custom_match = JG)
y<-y[!is.na(getCells(y)),,]
y<-clean_magpie(y)
y<-y["ANT",,,invert=TRUE]
y<-toolCountryFill(y,fill = 0)
y[is.na(y)]<-0
y <- y[,sort(getYears(y)),]
#remove years which only contain 0s as entries
y <- y[,!apply(y,2,function(x) return(all(x==0))),]
y<-y[,sort(getYears(y)),]
return(y)
}
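## Typical use through the madrat framework (added note; assumes this package is
## registered with madrat so that readSource() can dispatch to readWDI/convertWDI):
## x <- madrat::readSource("WDI", subtype = "SP.POP.TOTL", convert = TRUE)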
|
/R/convertWDI.R
|
no_license
|
caviddhen/mymadrat
|
R
| false | false | 1,803 |
r
|
\name{na.omit.fdata}
\Rdversion{1.1}
\alias{na.omit.fdata}
\alias{na.fail.fdata}
\title{ A wrapper for the na.omit and na.fail function for fdata object}
\description{
\code{na.fail} returns the object if it does not contain any missing values, and signals an error otherwise. \code{na.omit} returns the object with incomplete cases removed.\cr
If \code{na.omit.fdata} removes cases, the row numbers of the cases form the \code{"na.action"} attribute of the result, of class \code{"omit"}, see generic function \code{\link{na.omit}}.
}
\usage{
\method{na.omit}{fdata}(object,\dots)
\method{na.fail}{fdata}(object,\dots)
}
\arguments{
\item{object}{ an \code{fdata} object.}
\item{\dots}{further potential arguments passed to methods.}
}
\value{
The value returned from \code{omit} is a \code{fdata} object with incomplete cases removed.
}
\author{
Manuel Febrero Bande
}
\examples{
fdataobj<-fdata(MontrealTemp)
fdataobj$data[3,3]<-NA
fdataobj$data[10,]<-NA
fdataobj2<-na.omit.fdata(fdataobj)
}
\keyword{descriptive}
|
/man/na.omit.rd
|
no_license
|
dgorbachev/fda.usc
|
R
| false | false | 1,053 |
rd
|
#!/usr/bin/R
#Input:
#parameter1: RNA editing file; rows: RNA editing sites, columns: samples (no header)
#parameter2: expression file; rows: genes matched with the RNA editing file, columns: samples matched with the RNA editing file (no header)
#parameter3: output file for the analysis results
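#Example invocation (the file names below are hypothetical placeholders):
#  Rscript CompeteComputeTwo.R editing_matrix.txt expression_matrix.txt results.txt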
# libraries (none required; base R only)
args=commandArgs(T)
#input files
RawRNAeditingFile=args[1]
RawExpressionFile=args[2]
OutputFile=args[3]
#read files
RawRNAeditingData=read.table(RawRNAeditingFile,sep="\t",header=FALSE)
RawExpressionData=read.table(RawExpressionFile,sep="\t",header=FALSE)
Outputdata=RawRNAeditingData[,c(1,1:7)]
Outputdata[,2]=RawExpressionData[,1]
colnames(Outputdata)=c("EditingInformation","Gene","TtestP","MeanExpressionEdited","MeanExpressionNonedited","logFC","CorreP","CorreR")
for(i in 1:dim(RawRNAeditingData)[1]){
EachEditing=as.double(as.vector(RawRNAeditingData[i,2:dim(RawRNAeditingData)[2]]))
EachExpression=as.double(as.vector(RawExpressionData[i,2:dim(RawExpressionData)[2]]))
#Ttest
  EditedIndex=intersect(which(!is.na(EachEditing)),which(!is.na(EachExpression)))
  NonEditedIndex=intersect(which(is.na(EachEditing)),which(!is.na(EachExpression)))
if(length(EditedIndex)<=2 || length(NonEditedIndex)<=2 || all(EachExpression[EditedIndex]==EachExpression[EditedIndex][1]) || all(EachExpression[NonEditedIndex]==EachExpression[NonEditedIndex][1])){
Outputdata[i,3]=NaN
}else{
Outputdata[i,3]=t.test(EachExpression[EditedIndex],EachExpression[NonEditedIndex])$p.value
}
Outputdata[i,4]=mean(EachExpression[EditedIndex])
Outputdata[i,5]=mean(EachExpression[NonEditedIndex])
Outputdata[i,6]=log2(Outputdata[i,4]/Outputdata[i,5])
#Correlation
if(length(EditedIndex)<=2 || all(EachExpression[EditedIndex] == EachExpression[EditedIndex[1]])){
Outputdata[i,7:8]=c(NaN,NaN)
}else{
correlation_results=cor.test(EachEditing[EditedIndex],EachExpression[EditedIndex],method="pearson")
Outputdata[i,7]=correlation_results$p.value
Outputdata[i,8]=correlation_results$estimate
}
}
write.table(Outputdata,OutputFile,quote = FALSE,sep="\t",row.names=FALSE,col.names=TRUE)
|
/CompeteComputeTwo.R
|
no_license
|
swu13/CAeditome
|
R
| false | false | 2,052 |
r
|
#' rredlist - IUCN Red List Client
#'
#' @section Taxonomic Names vs. IUCN IDs:
#' From the documentation (quoting): "It is advisable wherever possible to use
#' the taxon name (species name) to make your API calls, rather than using IDs.
#' IDs are not immovable and are expected to be used mainly by organisations
#' that work closely with the IUCN Red List."
#'
#' @section Authentication:
#' IUCN requires you to get your own API key, an alphanumeric string that you
#' need to send in every request. See \code{\link{rl_use_iucn}}
#' for help getting and storing it. Get it at
#' <http://apiv3.iucnredlist.org/api/v3/token>
#' Keep this key private. You can pass the key in to each function via the
#' `key` parameter, but it's better to store the key either as an
#' environment variable (`IUCN_REDLIST_KEY`) or an R option
#' (`iucn_redlist_key`) - we recommend using the former option.
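#'
#' A minimal setup sketch (the token string below is just a placeholder,
#' not a real key):
#' \preformatted{
#' Sys.setenv(IUCN_REDLIST_KEY = "your-token")
#' rl_search("Fratercula arctica")
#' }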
#'
#' @section High vs. Low level package APIs:
#' **High level API**
#' High level functions do the HTTP request and parse data to a data.frame for
#' ease of downstream use. The high level functions have no underscore on
#' the end of the function name, e.g., [rl_search()]
#'
#' **Low level API**
#' The parsing to data.frame in the high level API does take extra time.
#' The low level API only does the HTTP request, and gives back JSON without
#' doing any more parsing. The low level functions DO have an underscore on
#' the end of the function name, e.g., [rl_search_()]
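#' For example, `rl_search("Fratercula arctica")` returns a parsed data.frame,
#' while `rl_search_("Fratercula arctica")` returns the unparsed JSON.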
#'
#' @section No Spatial:
#' This package does not include support for the spatial API, described at
#' <http://apiv3.iucnredlist.org/spatial>
#'
#' @section Citing the Red List API:
#' The citation is
#' `IUCN 2015. IUCN Red List of Threatened Species. Version 2015-4
#' <www.iucnredlist.org>`
#' You can get this programmatically via [rl_citation()]
#'
#' @section Rate limiting:
#' From the IUCN folks: Too many frequent calls, or too many calls per day
#' might get your access blocked temporarily. If you're a heavy API user, the
#' Red List Unit asked that you contact them, as there might be better options.
#' They suggest a 2-second delay between your calls if you plan to make a
#' lot of calls.
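#'
#' A rough sketch of pacing requests (the `species` character vector here is
#' hypothetical):
#' \preformatted{
#' for (sp in species) {
#'   res <- rl_search(sp)
#'   Sys.sleep(2)  # 2-second delay suggested by the Red List Unit
#' }
#' }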
#'
#' @section Citing the IUCN Red List API:
#' See <http://apiv3.iucnredlist.org/about>
#'
#' @importFrom jsonlite fromJSON
#' @name rredlist-package
#' @aliases rredlist
#' @docType package
#' @author Scott Chamberlain \email{myrmecocystus@@gmail.com}
#' @keywords package
NULL
|
/R/rredlist-package.R
|
permissive
|
yangxhcaf/rredlist
|
R
| false | false | 2,480 |
r
|
sharpe_ratio_grid<-seq(from=0,to=1,length.out = 10)
### Trainer
#============#
# Note: Trains Model and Benchmarks then evaluates Quality
#---------------------------------------------------------#
# RESET: rm("NO.CHECK_Q")
#---------------------------------------------------------------------#
# Initializations
#---------------------------------------------------------------------#
if(!exists("NO.CHECK_Q")){
# Only run on local machine
print("Setting: Paths");
#setwd(paste(getwd(),"Polybox/UniBag/NE_BLNs/Numerics/",sep="/"))
print("loading: Utils"); source("utils_NE_BLNS.R")
beep(1)
# Simulated DATA: ONLY FOR TESTING: source("Simul.R")
# Crypto DATA:
print("loading: Crypto_Data"); source("Crypto_Currency_Data_Processor.R")
beep(2)
# Ensure that Initializations are only run once
NO.CHECK_Q<-TRUE
# Re-Anchor Working directory
setwd(anchor_directory)
# Beep to signal completion
beep(3)
}
# BEGIN TIMER A
Time.Start.Cov.Learning = Sys.time()
### Automated (Descriptive) Parameters
#-------------------------------------#
# Data Parameters
#-------------------------------------#
# Dimension Simulated Matrices
d<-(dim(X)[1])
# Number of Data Points
N<-(dim(X)[3])
##### -----------#
# Learning Phase
##### -----------#
# Apply Random Feature Map
#-------------------------#
# Initialize Dimension of Problem
d_intrinsic<-d*(d+1)/2
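# (d*(d+1)/2 = number of free entries of a symmetric d x d matrix,
#  i.e. the upper triangle including the diagonal)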
# Initialize Matrix of Xs
X_vect_test<-X_vect<-matrix(NA,nrow=(dim(X)[3] - 1),ncol=(d_intrinsic+prod(dim(X.ts)[-3]))) # First Dimension for Estimated Cov Matrix and Second For Data On that Time-Window
Y_vect_test<-Y_vect<-matrix(NA,nrow=(dim(X)[3] - 1),ncol=d_intrinsic)
# Initialize Results Matrix
Y_Predicted_with_NE_X_TEST<-Y_Predicted_with_NE_X<-matrix(NA,nrow=(N*d_intrinsic),ncol=1)
Y.pred<-as.numeric(Y_vect)
# Random Reference Matrix
rand.ref.mat<-rSPDd(d)
for(i in 1:(X_vect %>% nrow())){
# Generate Training-Set Features
X.i.loop<-feature.map(X[,,i],rand.ref.mat) # Map SPD to R^*
X.i.loop<-c(X.i.loop,as.numeric(X.ts[,,i])) # Append TS Data from time-window used to generate SPD matrix (Cov)
X_vect[i,]<-X.i.loop # Update Features Matrix
# Generate Test-Set Features
X.i.loop<-feature.map(X_test[,,i],rand.ref.mat) # Map SPD to R^*
X.i.loop<-c(X.i.loop,as.numeric(X.ts_test[,,i])) # Append TS Data from time-window used to generate SPD matrix (Cov)
X_vect_test[i,]<-X.i.loop # Update Features Matrix
# Generates Targets (a.k.a: Responses/Ys)
Y_vect[i,]<-feature.map(Y[,,i],rand.ref.mat)
Y_vect_test[i,]<-feature.map(Y_test[,,i],rand.ref.mat)
# Update User on progress of scheme
print(i/(X_vect %>% nrow()))
}
# Data Segmentation
train_data<-X_vect
trainingtarget<-Y_vect
test_data<-X_vect_test
test_target<-Y_vect_test
#-------------------------------#
# Pre-processing
#-------------------------------#
# Normalize Training Data
m.train<-colMeans(train_data)
s.train<-apply(train_data,2,sd)
train_data <- scale(train_data,center = m.train,scale = s.train)
# Use means and standard deviations from training set to normalize test set
col_means_train <- attr(train_data, "scaled:center")
col_stddevs_train <- attr(train_data, "scaled:scale")
test_data <- scale(test_data, center = m.train, scale = s.train)
#---------------------------------#
# B.1: (Deep) Covariance Learning
#---------------------------------#
model_NE_Deep_Cov<-keras_model_sequential()
# Define bulk of the network
model_NE_Deep_Cov %>% layer_dense(units=Height,activation = "relu",input_shape = (X_vect %>% ncol()))
for(i in 1:Depth){
model_NE_Deep_Cov %>% layer_dense(units=Height,activation = "relu",input_shape = d_intrinsic)%>%
layer_dropout(rate = dropout.rate)
}
# Readout Layer (ffNN)
model_NE_Deep_Cov %>% layer_dense(units=d_intrinsic)
# Compile (ffNN)
model_NE_Deep_Cov %>% keras::compile(loss="mse",
optimizer="adam",
metrics="mse")
## Report Model (Summary)
model_NE_Deep_Cov %>% summary()
# Compute Batch Size
batch.size<-max(1,(round(min(1,abs(Batch.size.percent))*length(train_data),digits = 0)))
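# (note: length() applied to the train_data matrix counts all of its entries, i.e. rows * columns)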
# Fit ffNN
fittedmodel_NE_Deep_Cov<- model_NE_Deep_Cov %>%
keras::fit(train_data,
trainingtarget,
epochs=epochs,
batch_size=batch.size,
callbacks = list(# Overfitting Reduction
callback_reduce_lr_on_plateau(monitor = "loss", factor = 0.1)
))
## Predictions ffNN
y.predict<-model_NE_Deep_Cov %>% predict(train_data)
y.predict_test<-model_NE_Deep_Cov %>% predict(test_data)
# END TIMER A
Time.END.Cov.Learning = Sys.time()
# Error Analysis
#----------------#
# LOG-EUC Errors (Vectorized Errors)
LogEUC_var_err<-c(var(as.numeric(y.predict-trainingtarget)),var(as.numeric(y.predict_test-test_target)))
LogEUC_RMAE<-c(mean(abs(y.predict-trainingtarget))/mean(abs(trainingtarget)),mean(abs(y.predict_test-test_target))/mean(abs(test_target)))
# Frobenius Errors (Matricial Errors)
Frob_MSE<-rep(0,2)
for(i.error in 1:(y.predict %>% nrow())){# BEGIN EVALUATION
# Frobenius Errors (Matrix MSE): BEGIN
#-------------------------------------------------------------------------------#
# Evaluate Frobenius Norm on Training Set
ith.predicted.matrix<-readout.map(y.predict[i.error,],rand.ref.mat)
ith.target.matrix<-readout.map(trainingtarget[i.error,],rand.ref.mat)
Frob_MSE[1]<-Frob_MSE[1]+norm(ith.predicted.matrix-ith.target.matrix,"f")
# Evaluate Frobenius Norm on Test Set
ith.predicted.matrix<-readout.map(y.predict_test[i.error,],rand.ref.mat)
ith.target.matrix<-readout.map(test_target[i.error,],rand.ref.mat)
Frob_MSE[2]<-Frob_MSE[2]+norm(ith.predicted.matrix-ith.target.matrix,"f")
# Update User on Status of Error Evaluation
print(i.error/(y.predict %>% nrow()))
# Frobenius Errors (Matrix MSE): END
#-------------------------------------------------------------------------------#
} #END LOOP
# Average across all observed matrices (MSE)
Frob_MSE<-Frob_MSE/(y.predict %>% nrow())
#---------------------#
# END ERROR EVALUATION
### Report Findings
Reports_Cov_Prediction<-rbind(LogEUC_var_err,LogEUC_RMAE,Frob_MSE); colnames(Reports_Cov_Prediction)<-c("Training","Test")
Reports_Cov_Prediction
#---------------------------------#
# B.2: (Deep) Alpha Learning
#---------------------------------#
Time.Start.Returns.Learning = Sys.time()
# Use data from the same input space, but now the output space is R^d
# Preprocess Training/Testing Targets (1 time-step ahead Movements)
#-#
# Initialize Price Changes (Training Set)
active_index_train<-(1+Window.size):(Window.size+N) # Start one day previous to evaluate first price movement
coin_exchange_movements_rates_train<-diff(prices_TS[active_index_train,])
# Initialize Price Changes (Testing Set)
active_index_test<-(1+N+Window.size):(Window.size+2*N) # Start one day previous to evaluate first price movement
coin_exchange_movements_rates_test<-diff(prices_TS[active_index_test,])
# Data Segmentation
trainingtarget_NE_Movements<-coin_exchange_movements_rates_train
test_target_NE_Movements<-coin_exchange_movements_rates_test
# Initialize Network
#-----------------------#
model_NE_Movements<-keras_model_sequential()
# Define bulk of the network
model_NE_Movements %>% layer_dense(units=Height,activation = "relu",input_shape = (X_vect %>% ncol()))
for(i in 1:Depth){
model_NE_Movements %>% layer_dense(units=Height,activation = "relu",input_shape = d_intrinsic)%>%
layer_dropout(rate = dropout.rate)
}
# Readout Layer (ffNN)
model_NE_Movements %>% layer_dense(units=d)
# Compile (ffNN)
model_NE_Movements %>% keras::compile(loss="mse",
optimizer="adam",
metrics="mse")
## Report Model (Summary)
model_NE_Movements %>% summary()
# Compute Batch Size
batch.size<-max(1,(round(min(1,abs(Batch.size.percent))*length(train_data),digits = 0)))
# Fit ffNN
fittedmodel_NE_Movements<- model_NE_Movements %>%
keras::fit(train_data,
trainingtarget_NE_Movements,
epochs=epochs,
batch_size=batch.size,
callbacks = list(# Overfitting Reduction
callback_reduce_lr_on_plateau(monitor = "loss", factor = 0.1)
))
## Predictions ffNN
y.predict_NE_Movements<-model_NE_Movements %>% predict(train_data)
y.predict_NE_Movements_test<-model_NE_Movements %>% predict(test_data)
# END TIMER A
Time.END.Returns.Learning = Sys.time()
#-------------------------------------------------------------------------------#
# C) Generate Efficient Portfolios
#-------------------------------------------------------------------------------#
# Initialize Reports
Financial_Reports_NE_Efficient<-matrix(NA,nrow=(sharpe_ratio_grid %>% length()),ncol=3)
colnames(Financial_Reports_NE_Efficient)<-c("Trading Gains","Portfolio Variance","Sharpe Ratio"); rownames(Financial_Reports_NE_Efficient)<-round(sharpe_ratio_grid,1)
Financial_Reports_NE_Efficient_test<-Financial_Reports_NE_Efficient
for(j.Sharpe_param in 1:(Financial_Reports_NE_Efficient %>% nrow())){# LOOP OVER ALL SHARPE_RATIO PARAMETERS
  # Initialize Current Sharpe Ratio Meta-Parameter
Sharpe_loop = sharpe_ratio_grid[j.Sharpe_param]
# Initialized MV Portfolios
Efficient_NE_Portfolio_Predictions = matrix(NA, nrow = (y.predict %>% nrow()), ncol=d)
Efficient_NE_Portfolio_Predictions_test = matrix(NA, nrow = (y.predict_test %>% nrow()), ncol=d)
  # Generate Minimum Variance Portfolio with NE-Predicted Cov Matrices (Training)
# (Training)
for(i.error in 1:(y.predict %>% nrow())){# BEGIN EVALUATION
# Generate Predicted Matrix (Training Set)
ith.predicted.matrix<-readout.map(y.predict[i.error,],rand.ref.mat)
# Write Predicted Market Returns (alpha)
alpha.movement.ith<-y.predict_NE_Movements[i.error,]
# Build Minimum-Variance Portfolio
Efficient_NE_Portfolio_Predictions[i.error,]<-Efficient(ith.predicted.matrix,alpha.movement.ith,Sharpe_loop)
#-------------------------------------------------------------------------------#
    # Update User on the Status of Computation
print(i.error/(y.predict %>% nrow()))
} #END LOOP
#(Test)
  # Generate Minimum Variance Portfolio with NE-Predicted Cov Matrices (Test)
for(i.error in 1:(y.predict_test %>% nrow())){# BEGIN EVALUATION
# Generate Predicted Matrix (Training Set)
ith.predicted.matrix<-readout.map(y.predict_test[i.error,],rand.ref.mat)
# Write Predicted Market Returns (alpha)
alpha.movement.ith<-y.predict_NE_Movements_test[i.error,]
# Build Minimum-Variance Portfolio
Efficient_NE_Portfolio_Predictions_test[i.error,]<-Efficient(ith.predicted.matrix,alpha.movement.ith,Sharpe_loop)
#-------------------------------------------------------------------------------#
    # Update User on the Status of Computation
print(1 + (i.error/(y.predict %>% nrow()))) # The +1 is to distinguish between the training and test set computations (from the outside)
#-------------------------------------------------------------------------------#
}
# Evaluate & Record Gains from Trading
Financial_Reports_NE_Efficient[j.Sharpe_param,1]<-sum(colSums(Efficient_NE_Portfolio_Predictions*coin_exchange_movements_rates_train))
Financial_Reports_NE_Efficient_test[j.Sharpe_param,1]<-sum(colSums(Efficient_NE_Portfolio_Predictions_test*coin_exchange_movements_rates_test))
# Evaluate Portfolio Variance
Financial_Reports_NE_Efficient[j.Sharpe_param,2]<-mean(sapply(as.data.frame(Efficient_NE_Portfolio_Predictions*coin_exchange_movements_rates_train),var))
Financial_Reports_NE_Efficient_test[j.Sharpe_param,2]<-mean(sapply(as.data.frame(Efficient_NE_Portfolio_Predictions_test*coin_exchange_movements_rates_test),var))
# Sharpe Ratio
Financial_Reports_NE_Efficient[j.Sharpe_param,3]<-Financial_Reports_NE_Efficient[j.Sharpe_param,2]/Financial_Reports_NE_Efficient[j.Sharpe_param,1]
Financial_Reports_NE_Efficient_test[j.Sharpe_param,3]<-Financial_Reports_NE_Efficient_test[j.Sharpe_param,2]/Financial_Reports_NE_Efficient_test[j.Sharpe_param,1]
}# END:LOOP OVER ALL SHARPE_RATIO PARAMETERS
# Determine Optimal (Largest) Sharpe-Ratio (In Training Set <- Probably can be further improved by CV!): i.e.: Min Variance and Max Returns!
opt.Sharpe<-which.max(Financial_Reports_NE_Efficient[,3])
# Define Optimal NE-Efficient Portfolio
Financial_Reports_NE_Efficient_opt = cbind(Financial_Reports_NE_Efficient[opt.Sharpe,],Financial_Reports_NE_Efficient_test[opt.Sharpe,])
### # Benchmark 1: Naive Markowitz
###---------------------------------#
source("Markowitz.R")
### # Benchmark 2: Deep Hedging Approaches
###----------------------------------------#
source("Deep_Hedging.R")
### AGGREGATED REPORTS
#====================#
Financial_Reports_Train<-rbind(Financial_Reports_NE_Efficient_opt[,1],
Financial_Reports_DH[,1],
Financial_Reports_Mark[,1]); rownames(Financial_Reports_Train)<-c("NE_ffNN","DH","Mark")
Financial_Reports_Test<-rbind(Financial_Reports_NE_Efficient_opt[,2],
Financial_Reports_DH[,2],
Financial_Reports_Mark[,2]); rownames(Financial_Reports_Test)<-c("NE_ffNN","DH","Mark")
colnames(Financial_Reports_Test)<-colnames(Financial_Reports_Train)<-c("Trading Gains","Portfolio Variance","Sharpe Ratio")
# Reports Finding(s)
Financial_Reports_Train
Financial_Reports_Test
Financial_Reports_NE_Efficient
Financial_Reports_NE_Efficient_test
|
/Trainer.R
|
no_license
|
lnsongxf/Deep_Markowitz_Minimum_Variance_Prediction
|
R
| false | false | 13,651 |
r
|
best <- function(state, outcomeName){
# Read the outcome data
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
states <- outcome$State
diseases <- c("heart attack","heart failure","pneumonia")
# Check the validity of state and outcome
if (! state %in% states ){
stop("invalid state")
}
if (! outcomeName %in% diseases){
stop("invalid outcome")
}
# Return the hospital name in one particular state with the lowest 30-day death rate
col_id <- 0
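  # Column indices of the 30-day mortality rates in the outcome file:
  # 11 = heart attack, 17 = heart failure, 23 = pneumonia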
if (outcomeName == diseases[1]){
col_id <- 11
} else if (outcomeName == diseases[2]) {
col_id <- 17
} else if (outcomeName == diseases[3]) {
col_id <- 23
}
outcome.sub <- outcome[outcome[,7] == state, ]
outcome.sub2 <- outcome.sub[,c(2,7,col_id)]
outcome.sub3 <- outcome.sub2[outcome.sub2[,3]!="Not Available",]
outcome.sub3[,3] <- as.numeric(outcome.sub3[,3])
outcome2 <- outcome.sub3
bestrow <- outcome2[outcome2[,3] == min(outcome2[,3]),]
as.character(bestrow[1])
}
|
/best.R
|
no_license
|
marcelo-tibau/datasciencecoursera
|
R
| false | false | 1,036 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/construct_king_kongfig.R
\name{set_king_config}
\alias{set_king_config}
\title{King config assertion}
\usage{
set_king_config(args)
}
\arguments{
\item{args}{character vector to assert.}
}
\description{
King config assertion
}
|
/king/KingTest/man/set_king_config.Rd
|
no_license
|
permarcus/king_test
|
R
| false | true | 305 |
rd
|
#' RM2C2: Scoring, Summarizing
#' @name score_color_shapes
#' @param df a data.frame of trial-level responses containing \code{trial_type}
#'   and \code{button_pressed} columns.
#' @importFrom dplyr mutate %>%
#' @export
score_color_shapes <- function(df) {
PACKAGE.VERSION <- packageVersion("RM2C2")
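  # Signal-detection style coding per trial, treating trial_type == 1 as a
  # "signal" trial: HIT/MISS on signal trials and FA (false alarm)/CR
  # (correct rejection) on non-signal trials, based on button_pressed.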
scored <- df %>%
mutate(HIT = ifelse(trial_type == 1 & button_pressed == 1, 1, 0),
FA = ifelse(trial_type == 0 & button_pressed == 1, 1, 0),
MISS = ifelse(trial_type == 1 & button_pressed == 0, 1, 0),
CR = ifelse(trial_type == 0 & button_pressed == 0, 1, 0)) %>%
mutate(PACKAGE.VERSION = PACKAGE.VERSION)
return(scored)
}
|
/R/score_color_shapes.R
|
permissive
|
nelsonroque/surveydolphinr
|
R
| false | false | 519 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/editTree.R
\name{get_root}
\alias{get_root}
\title{Get root of a tree}
\usage{
get_root(icceTree)
}
\arguments{
\item{icceTree}{icceTree data structure}
}
\value{
the node corresponding to the root of the tree
}
\description{
Returns the root of a given tree structure.
}
\examples{
root <- get_root(icceTree)
}
|
/man/get_root.Rd
|
permissive
|
ethanmoyer/ICCE
|
R
| false | true | 387 |
rd
|
/src/others.R
|
no_license
|
upura/predictor.for.sumo.match
|
R
| false | false | 3,404 |
r
| ||
library(RCurl)
library(readxl)
library(dplyr)
library(stringr)
# file <- getURLContent("http://nlihc.org/sites/default/files/oor/files/reports/state/2016-OOR-CT.xls")
#
# download.file("http://nlihc.org/sites/default/files/oor/files/reports/state/2016-OOR-CT.xls", "data/2016-OOR-CT.xls", mode="wb")
# ct <- read_excel("2016-OOR-CT.xls", sheet=1)
#
# ct <- read_excel(file, sheet=1)
for (i in 1:length(state.abb)) {
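  # For each of the 50 state abbreviations: download that state's OOR workbook,
  # read sheets 1, 5 and 4, and keep only the STATE-level rows from each.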
file_name <- paste0("2016-OOR-", state.abb[i], ".xls")
file_url <- paste0("http://nlihc.org/sites/default/files/oor/files/reports/state/", file_name)
download.file(file_url, paste0("data/", file_name), mode="wb")
state <- read_excel(paste0("data/", file_name), sheet=1)
state2 <- read_excel(paste0("data/", file_name), sheet=5)
state3 <- read_excel(paste0("data/", file_name), sheet=4)
state_only <- subset(state, TYPE=="STATE")
state_only2 <- subset(state2, TYPE=="STATE")
state_only3 <- subset(state3, TYPE=="STATE")
if (i==1) {
states_all <- state_only
states_all2 <- state_only2
states_all3 <- state_only3
} else {
states_all <- rbind(states_all, state_only)
states_all2 <- rbind(states_all2, state_only2)
states_all3 <- rbind(states_all3, state_only3)
}
}
colnames(states_all) <- make.names(colnames(states_all))
states_all <- states_all[c("STNAME", "Housing.Wage.for.2.bdrm.FMR")]
colnames(states_all) <- c("State", "Housing Wage")
write.csv(states_all, "data/housing.csv")
colnames(states_all2) <- make.names(colnames(states_all2))
states_all2 <- states_all2[c("STNAME", "Estimated.mean.renter.wage")]
colnames(states_all2) <- c("State", "Renter Wage")
write.csv(states_all2, "data/wage.csv")
colnames(states_all3) <- make.names(colnames(states_all3))
states_all3 <- states_all3[c("STNAME", "Work.hours.per.week.at.min..wage.needed.to.afford.1.bdrm.FMR")]
colnames(states_all3) <- c("State", "Hours")
write.csv(states_all3, "data/hours.csv")
states_all4 <- left_join(states_all, states_all2)
states_all4$gap <- states_all4$`Housing Wage` - states_all4$`Renter Wage`
states_all4 <- arrange(states_all4, gap)
states_all4$State <- factor(states_all4$State, levels=unique(states_all4$State))
library(ggalt)
library(ggplot2)
gg <- ggplot()
# doing this vs y axis major grid line
gg <- gg + geom_segment(data=states_all4, aes(y=State, yend=State, x=9, xend=35.175), color="#b2b2b2", size=0.15)
# dum…dum…dum!bell
gg <- gg + geom_dumbbell(data=states_all4, aes(y=State, x=`Renter Wage`, xend=`Housing Wage`),
size=1.5, color="#b2b2b2", point.size.l=3, point.size.r=3,
point.colour.l="#476b6b", point.colour.r="#cc0052")
# text below points
gg <- gg + geom_text(data=filter(states_all4, State=="Hawaii"),
aes(x=`Renter Wage`, y=State, label="Renter wage"),
color="#476b6b", size=3, vjust=-2, fontface="bold", family="Calibri")
gg <- gg + geom_text(data=filter(states_all4, State=="Hawaii"),
aes(x=`Housing Wage`-2, y=State, label="Two-Bedroom housing wage"),
color="#cc0052", size=3, vjust=-2, fontface="bold", family="Calibri")
# text above points
gg <- gg + geom_text(data=states_all4, aes(x=`Renter Wage`, y=State, label=paste("$",round(`Renter Wage`,2))),
color="#476b6b", size=2.75, vjust=2.5, family="Calibri")
gg <- gg + geom_text(data=states_all4, color="#cc0052", size=2.75, vjust=2.5, family="Calibri",
aes(x=`Housing Wage`, y=State, label=paste("$",round(`Housing Wage`, 2))))
# difference column
gg <- gg + geom_rect(data=states_all4, aes(xmin=36.05, xmax=38.175, ymin=-Inf, ymax=Inf), fill="#efefe3")
gg <- gg + geom_text(data=states_all4, aes(label=round(gap,2), y=State, x=37.1125), fontface="bold", size=3, family="Calibri")
gg <- gg + geom_text(data=filter(states_all4, State=="Hawaii"), aes(x=41.1125, y=State, label="Gap"),
color="#7a7d7e", size=3.1, vjust=-2, fontface="bold", family="Calibri")
gg <- gg + scale_x_continuous(expand=c(0,0), limits=c(9, 38.175))
gg <- gg + scale_y_discrete(expand=c(0.075,0))
gg <- gg + labs(x=NULL, y=NULL, title="Gaps between hourly wages and housing wages",
caption="Source: National Low Income Housing Coalition \nAndrew Ba Tran/TrendCT.org")
gg <- gg + theme_bw(base_family="Calibri")
gg <- gg + theme(panel.grid.major=element_blank())
gg <- gg + theme(panel.grid.minor=element_blank())
gg <- gg + theme(panel.border=element_blank())
gg <- gg + theme(axis.ticks=element_blank())
gg <- gg + theme(axis.text.x=element_blank())
gg <- gg + theme(plot.title=element_text(face="bold"))
gg <- gg + theme(plot.subtitle=element_text(face="italic", size=9, margin=margin(b=12)))
gg <- gg + theme(plot.caption=element_text(size=7, margin=margin(t=12), color="#7a7d7e"))
gg
library(choroplethr)
colnames(states_all3) <- c("region", "value")
states_all3$region <- str_to_lower(states_all3$region)
state_choropleth(states_all3, title = "Work hours per week at minimum wage needed to afford 1 bedroom")
|
/2016/05/housing-income-oor/analysis.R
|
permissive
|
steve-kasica/data-1
|
R
| false | false | 5,061 |
r
|
library(lubridate)
power_consumption <- read.table('household_power_consumption.txt', sep=';', stringsAsFactors=FALSE, header=TRUE)
date_range <- as.Date(c('2007-02-01', '2007-02-02'))
power_consumption <- power_consumption[as.Date(dmy(power_consumption$Date)) %in% date_range,]
png('plot1.png')
global_active_power = as.numeric(power_consumption$Global_active_power)
hist(global_active_power,
col='red',
xlab='Global Active Power (kilowatts)',
main='Global Active Power')
dev.off()
|
/plot1.R
|
no_license
|
dmaust/ExData_Plotting1
|
R
| false | false | 503 |
r
|
setwd("data-raw")
# a) Compute speed
# b) 10 m buffer around the points
# c) Intersect the buffers with the road lines
# NOT DONE d) Compute LKM on the network
# NOT DONE e) Compute time differences grouped by ID
# f) Aggregate flow by link id
# g) Aggregate mean speed
library(sf)
library(data.table)
net <- st_transform(st_read("shapefiles/roads.gpkg"), crs = 31983) # open street map
# this could be computed here, but it can also be done in the next script
# net <- net[net$highway != "road" & net$highway != "residential", ]
# regions
# regiones <- st_read("/home/sergio/INVENTARIOS/PHD/shapefiles/regiones.shp")
# regiones <- st_transform(regiones, 31983)
# net <- st_intersection(net, regiones)
# net$lengthm <- st_length(
# st_cast(
# net[st_dimension(net) == 1,]
# )
# )
# net$LKM <- set_units(net$lengthm, km)
lista <- as.list(list.files(path = "dados/speed", pattern = ".csv",
full.names = T))
for (i in 1:length(lista) ) {
#for (i in 1 ) {
ve1 <- fread(lista[[i]], h = T)
ve1 <- as.data.frame(ve1[!is.na(ve1$lon) & ve1$delta_time < 60*30, ])
ve1 <- st_as_sf(ve1, coords = c("lon","lat"))
st_crs(ve1) <- 4326
ve1 <- st_transform(ve1, st_crs(31983))
ve1$date_time_utm <- as.POSIXct(as.character(ve1$date_time_utm),
format = "%Y-%m-%d %H:%M:%S",
tz = "America/Sao_Paulo" )
ve1$hora <- strftime(ve1$date_time_utm,
format = "%Y-%m-%d_%H", tz = "UTC")
for (j in 1:length(unique(ve1$hora)) ) {
vej <- ve1[ ve1$hora == unique(ve1$hora)[j], ]
#rm(ve1)
vej_b10m <- st_buffer(x = vej, dist = 10 )
vej <- st_intersection(x = net, y = vej_b10m)
vej <- data.table(vej)
fluxo <- vej[ , .(length(veiculo),
mean(speed, na.rm = T),
median(speed, na.rm = T),
quantile(speed, .75, na.rm = T),
quantile(speed, .85, na.rm = T),
quantile(speed, .95, na.rm = T),
max(speed, na.rm = T)),
by = .(id, tipo, hora)]
names(fluxo) <- c("id", "tipo", "hora", "vei", "VelTipoMean", "VelTipoMedian",
"VTq75", "VTq85", "VTq95", "VMax" )
saveRDS(fluxo, paste0("dados/flow/fluxo_",i,"_",
unique(ve1$hora)[j],".rds"))
    # HERE: speed aggregated over all vehicle types (by link id and hour)
speed <- vej[ , .(mean(speed, na.rm = T),
median(speed, na.rm = T),
quantile(speed, .75, na.rm = T),
quantile(speed, .85, na.rm = T),
quantile(speed, .95, na.rm = T),
max(speed, na.rm = T)),
by = .(id, hora)]
names(speed) <- c("id", "hora","VelAllMean", "VelAllMedian",
"VTAllq75", "VTAllq85", "VTAllq95", "VAllMax" )
saveRDS(speed, paste0("dados/flow/speed_",i,"_",
unique(ve1$hora)[j],".rds"))
}
}
|
/data-raw/2_inter_allv2.R
|
no_license
|
ibarraespinosa/trapos
|
R
| false | false | 2,787 |
r
|
setwd("data-raw")
# a) Calcular Velocidad
# b) buffer 10m en puntos
# c) intersectar buffer lineas
# NO d) Calcular LKM a red
# NO e) Calcular diferencia de tiempo por grupal ID
# f) agregar flujo length id
# g) agregar velocidad mean
library(sf)
library(data.table)
net <- st_transform(st_read("shapefiles/roads.gpkg"), crs = 31983) # open street map
# podria calcular aqui esto pero puede ser tambien en el siguiente script
# net <- net[net$highway != "road" & net$highway != "residential", ]
# regiones
# regiones <- st_read("/home/sergio/INVENTARIOS/PHD/shapefiles/regiones.shp")
# regiones <- st_transform(regiones, 31983)
# net <- st_intersection(net, regiones)
# net$lengthm <- st_length(
# st_cast(
# net[st_dimension(net) == 1,]
# )
# )
# net$LKM <- set_units(net$lengthm, km)
lista <- as.list(list.files(path = "dados/speed", pattern = ".csv",
full.names = T))
for (i in 1:length(lista) ) {
#for (i in 1 ) {
ve1 <- fread(lista[[i]], h = T)
ve1 <- as.data.frame(ve1[!is.na(ve1$lon) & ve1$delta_time < 60*30, ])
ve1 <- st_as_sf(ve1, coords = c("lon","lat"))
st_crs(ve1) <- 4326
ve1 <- st_transform(ve1, st_crs(31983))
ve1$date_time_utm <- as.POSIXct(as.character(ve1$date_time_utm),
format = "%Y-%m-%d %H:%M:%S",
tz = "America/Sao_Paulo" )
ve1$hora <- strftime(ve1$date_time_utm,
format = "%Y-%m-%d_%H", tz = "UTC")
for (j in 1:length(unique(ve1$hora)) ) {
vej <- ve1[ ve1$hora == unique(ve1$hora)[j], ]
#rm(ve1)
vej_b10m <- st_buffer(x = vej, dist = 10 )
vej <- st_intersection(x = net, y = vej_b10m)
vej <- data.table(vej)
fluxo <- vej[ , .(length(veiculo),
mean(speed, na.rm = T),
median(speed, na.rm = T),
quantile(speed, .75, na.rm = T),
quantile(speed, .85, na.rm = T),
quantile(speed, .95, na.rm = T),
max(speed, na.rm = T)),
by = .(id, tipo, hora)]
names(fluxo) <- c("id", "tipo", "hora", "vei", "VelTipoMean", "VelTipoMedian",
"VTq75", "VTq85", "VTq95", "VMax" )
saveRDS(fluxo, paste0("dados/flow/fluxo_",i,"_",
unique(ve1$hora)[j],".rds"))
# AQUI
speed <- vej[ , .(mean(speed, na.rm = T),
median(speed, na.rm = T),
quantile(speed, .75, na.rm = T),
quantile(speed, .85, na.rm = T),
quantile(speed, .95, na.rm = T),
max(speed, na.rm = T)),
by = .(id, hora)]
names(speed) <- c("id", "hora","VelAllMean", "VelAllMedian",
"VTAllq75", "VTAllq85", "VTAllq95", "VAllMax" )
saveRDS(speed, paste0("dados/flow/speed_",i,"_",
unique(ve1$hora)[j],".rds"))
}
}
|
#' Fit multilevel models in a data stream II
#'
#' @description Fit multilevel models in a data stream without requiring the user
#' to manage the units' object lists.
#'
#' @details This function fits the multilevel models in a data stream, similar
#' to \code{\link{sema_fit_one}}. However, while \code{\link{sema_fit_one}}
#' does not manage the storage and retrieval of units' objects, this
#' function does, which makes this function more user-friendly.
#' The function requires an id label, to retrieve the corresponding unit's
#' parameters, a vector with the data of the fixed effects covariates, a
#' vector with the data of the random effects covariates, the response or
#' dependent variable and the current state of the model parameters,
#' including the lists with the units' objects. Currently the algorithm
#' fits models including fixed effects at level 1 and 2 and random intercepts
#' and slopes for continuous outcomes.
#' @seealso \code{\link{sema_fit_one}}, \code{\link{sema_fit_df}},
#' \code{\link{summary_sema}}, \code{\link{ranef}},
#' \code{\link{store_resid_var}}, \code{\link{store_random_var}},
#' \code{\link{store_fixed_coef}}
#'
#' @param data_fixed A vector with the data of the fixed effects covariates.
#' @param data_random A vector with the data of the random effects covariates.
#' @param data_y A scalar with the response of this unit.
#' @param id A scalar which identifies the unit of this data point.
#' @param theta_list A list consisting of 'theta', which is a list with all model
#' parameters and global sufficient statistics; 'id_vector' which is a vector
#' containing all id labels; and 'theta_j' which is a list of lists containing
#' all units parameters and contributions to the sufficient statistics. This
#' list is automatically generated.
#' @param print The default is FALSE, if TRUE the function
#' prints a summary of the model.
#' @param start_resid_var A scalar, optional if the user wants to provide a
#' start value of the residual variance, default start value is 1.
#' @param start_random_var A vector, optional if the user wants to provide a
#' start values of the variance of the random effects covariates, default
#' start value is 1. NOTE, if start values are provided make sure that the
#' length of the vector of start values matches the number of random effects.
#' @param start_fixed_coef A vector, optional if the user wants to provide
#' start values of the fixed effects, default is set to NULL such that
#' \code{sema_fit_one} creates the vector of start values matching the number
#' of fixed effects. NOTE, if start values are provided make sure that the
#' length of the vector of start values matches the number of fixed effects.
#' @param update The default is NULL, when an integer is provided
#' \code{\link{sema_update}} is called to do a full update to recompute all
#' contributions to the complete data sufficient statistics.
#' @param prior_n If starting values are provided, prior_n determines the weight
#' of the starting value of the residual variance, default is 0.
#' @param prior_j If starting values are provided, prior_j determines the weight
#' of the starting value of the variance of the random effects and the fixed
#' effects, default is 0.
#' @keywords online multilevel models method fitting stream
#' @export
#' @examples
#' ## First we create a dataset, consisting of 1500 observations from 200
#' ## units. The fixed effects have the coefficients 1, 2, 3, 4, and 5. The
#' ## variance of the random effects equals 1, 4, and 9. Lastly the
#' ## residual variance equals 4:
#' test_data <- build_dataset(n = 1500,
#' j = 200,
#' fixed_coef = 1:5,
#' random_coef_sd = 1:3,
#' resid_sd = 2)
#' ## to simplify the indexing, we generate 2 vectors, one that indicates which
#' ## columns are fixed effects variables and the other to indicate in which
#' ## columns the random effects variables are
#'
#' data_fixed_var <- c(3:7)
#' data_random_var <- c(3,5,6)
#'
#' ## an object where fit_sema output is stored in, this should be \code{NULL}
#' ## because that tells the fit_sema function to create model statistics lists
#'
#' m1 <- NULL
#'
#' ## looping through the dataset like this:
#' for(i in 1:nrow(test_data)){
#' m1 <- sema_fit_set(data_fixed = test_data[i, data_fixed_var],
#' data_random = test_data[i, data_random_var],
#' data_y = test_data$y[i],
#' id = test_data$id[i],
#' theta_list = m1,
#' print = FALSE,
#' update = NULL)
#' }
#' @return A list with updated global parameters, a vector with id labels
#' and a list with lists of all units parameters and contributions.
sema_fit_set <- function(data_fixed,
data_random,
data_y,
id,
theta_list =
list("theta" =
create_theta_main(n_fixed =
length(data_fixed),
n_random =
length(data_random)),
"id_vector" = c(),
"theta_j" = NULL),
print = FALSE,
update = NULL,
start_resid_var = 1,
start_random_var = 1,
start_fixed_coef = NULL,
prior_n = 0,
prior_j = 0){
if(is.null(theta_list)){
theta_list <- list()
class(theta_list) <- c("list", "sema")
if(is.null(start_fixed_coef)){
start_fixed_coef <- matrix(1, nrow = length(data_fixed))
}
if(is.null(theta_list$id_vector)){
theta_list$id_vector <- c()
theta_list$unit <- list()
}
}
if(is.element(id, theta_list$id_vector)){
temp_id <- which(id == theta_list$id_vector)
temp_theta_j <- theta_list$unit[[temp_id]]
}
if(!is.element(id, theta_list$id_vector)){
theta_list$id_vector <- c(theta_list$id_vector, id)
temp_id <- match(id, theta_list$id_vector)
temp_theta_j <- NULL
}
if(!is.null(update) & !is.null(theta_list$model$n)){
if(update %% theta_list$model$n == 0){
tempres <- sema_update(theta_jList = theta_list$unit,
theta = theta_list$model)
      theta_list$model <- tempres$model
theta_list$unit <- tempres$unit
}
}
res <- sema_fit_one(data_fixed = as.numeric(data_fixed),
data_random = as.numeric(data_random),
data_y = data_y,
id = id,
theta_j = temp_theta_j,
theta = theta_list$model,
print = print,
start_resid_var = start_resid_var,
start_random_var = start_random_var,
start_fixed_coef = start_fixed_coef,
prior_n = prior_n,
prior_j = prior_j)
theta_list$unit[[temp_id]] <- res$unit
theta_list$model <- res$model
return(theta_list)
}
|
/R/semaFitset2.R
|
no_license
|
L-Ippel/SEMA
|
R
| false | false | 7,586 |
r
|
wd <- "D:/Users/julienm/Documents/_WORKS/_WEB/julienmoeys.github.io/docs"
pkg <- "macroutils"
# setwd( wd <- file.path( "D:/Users/julienm/Documents/_WORKS/_PROJECTS/r_packages", pkg, "www", pkg ) )
descNews <- c( "DESCRIPTION", "NEWS" )
index <- c( "00Index.html", "index.html" )
setwd( file.path( wd, pkg ) )
library( "knitr" )
library( pkg, character.only = TRUE )
knit_rd( pkg = pkg, frame = TRUE )
file.copy(
from = system.file( descNews, package = pkg ),
to = descNews,
overwrite = TRUE
)
for( i in index ){
index.html <- readLines( i )
index.html <- gsub( x = index.html, pattern = "../", replacement = "/easylegend/",
fixed = TRUE )
writeLines( text = index.html, con = i )
rm( index.html )
}
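# Added note: with fixed = TRUE the gsub above rewrites relative links literally,
# e.g. "../00Index.html" becomes "/easylegend/00Index.html" in the exported HTML.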
# Remove the user and computer name from macroutilsInfo.html
change <- c( "login", "user", "effective_user", "nodename" )
for( ch in change ){
chOld <- Sys.info()[[ ch ]]
chNew <- paste( rep( "*", nchar(chOld) ), collapse = "" )
macroutilsInfo.html <- readLines( "macroutilsInfo.html" )
macroutilsInfo.html <- gsub( x = macroutilsInfo.html,
pattern = chOld, replacement = chNew,
fixed = TRUE )
writeLines( text = macroutilsInfo.html, con = "macroutilsInfo.html" )
}
rm( macroutilsInfo.html )
|
/docs/knitr_macroutils.R
|
no_license
|
julienmoeys/julienmoeys.github.io
|
R
| false | false | 1,369 |
r
|
wd <- "D:/Users/julienm/Documents/_WORKS/_WEB/julienmoeys.github.io/docs"
pkg <- "macroutils"
# setwd( wd <- file.path( "D:/Users/julienm/Documents/_WORKS/_PROJECTS/r_packages", pkg, "www", pkg ) )
descNews <- c( "DESCRIPTION", "NEWS" )
index <- c( "00Index.html", "index.html" )
setwd( file.path( wd, pkg ) )
library( "knitr" )
library( pkg, character.only = TRUE )
knit_rd( pkg = pkg, frame = TRUE )
file.copy(
from = system.file( descNews, package = pkg ),
to = descNews,
overwrite = TRUE
)
for( i in index ){
index.html <- readLines( i )
index.html <- gsub( x = index.html, pattern = "../", replacement = "/easylegend/",
fixed = TRUE )
writeLines( text = index.html, con = i )
rm( index.html )
}
# Remove the user and computer name from macroutilsInfo.html
change <- c( "login", "user", "effective_user", "nodename" )
for( ch in change ){
chOld <- Sys.info()[[ ch ]]
chNew <- paste( rep( "*", nchar(chOld) ), collapse = "" )
macroutilsInfo.html <- readLines( "macroutilsInfo.html" )
macroutilsInfo.html <- gsub( x = macroutilsInfo.html,
pattern = chOld, replacement = chNew,
fixed = TRUE )
writeLines( text = macroutilsInfo.html, con = "macroutilsInfo.html" )
}
rm( macroutilsInfo.html )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pali-sort.R
\name{pali_sort}
\alias{pali_sort}
\title{Sorting function for vectors of Pali words.}
\usage{
pali_sort(word_list)
}
\arguments{
\item{word_list}{A vector of Pali words}
}
\value{
A new vector of Pali words in Pali alphabetical order
}
\description{
Note that all Pali string comparisons are case-insensitive.
This algorithm is based on Quicksort, but creates lots of
intermediate data structures instead of doing swaps in place.
This has been implemented in C++ as the original R version
was about 500x slower.
}
\examples{
# Every unique word of the Mahāsatipatthāna Sutta in
# Pali alphabetical order:
pali_sort(sati_sutta_long$word)
# A sorted list of 100 random words from the Tipitaka:
library(dplyr)
pali_sort(sample(tipitaka_long$word, 100))
}
|
/man/pali_sort.Rd
|
no_license
|
cran/tipitaka
|
R
| false | true | 848 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fisheries.r
\name{mm}
\alias{mm}
\title{mm calculates the predicted Michaelis-Menten length at age}
\usage{
mm(p, ages)
}
\arguments{
\item{p}{is a vector the first three cells of which are a, b, c
for the mm curve.}
\item{ages}{is a vector of ages; could be a single number}
}
\value{
a vector of predicted lengths for a vector of ages in 'ages'
}
\description{
mm calculates length at age for the generalized
Michaelis-Menten curve.
}
\examples{
\dontrun{
ages <- seq(0,20,1) # sigma is ignored here
pars <- c(a=23.0,b=1.0,c=1.0,sigma=1.0) # a, b, c, sigma
cbind(ages,mm(pars,ages))
}
}
|
/man/mm.Rd
|
no_license
|
billpine/MQMF
|
R
| false | true | 704 |
rd
|
## ## ## ##
## DPLYR ##
## ## ## ##
library(nycflights13)
library(tidyverse)
library(magrittr)
## ## ## ## ## ## ## ##
## SOME USEFUL TRICKS ##
## ## ## ## ## ## ## ##
# If want to move some vars at beginning of df:
# use select + everything()
select(flights, time_hour, air_time, everything())
## #### ##
## EGGS ##
## #### ##
## Change dep_time and sched_dep_time to be more readable
head(flights$dep_time)
head(flights$sched_dep_time)
# Change format for dep time and sched dep time
convertTime_toMinutesSinceMidnight <- function(time) {
  newTime <- time %/% 100 * 60 + time %% 100 # 1st term is hours, 2nd is minutes
return(newTime)
}
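# Added sanity check (illustration): 5:17 AM stored as 517 becomes 5 * 60 + 17 = 317
# minutes, and 23:59 (2359) becomes 1439
convertTime_toMinutesSinceMidnight(c(517, 2359))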
flights %<>%
mutate(new_depTime = convertTime_toMinutesSinceMidnight(dep_time),
new_scheduledDepTiem = convertTime_toMinutesSinceMidnight(sched_dep_time))
not_cancelled <- flights %>%
filter(!is.na(dep_delay), !is.na(arr_delay))
# Find 10 most delayed flights
flights2<- flights %>%
mutate(dep_delay2 = convertTime_toMinutesSinceMidnight(dep_time) -
convertTime_toMinutesSinceMidnight(sched_dep_time)) %>%
mutate(mostDelayedFlight = min_rank(-dep_delay2)) %>% # same as min_rank(desc(...))
arrange(mostDelayedFlight) %>%
filter(mostDelayedFlight < 10)
## ## ## ##
## COUNTS ##
## ## ## ##
print("
When aggregating, always a good idea to include a count n()
or sum(!is.na(x)) to make sure not looking at subset of data
example:
")
delays <- not_cancelled %>%
group_by(tailnum) %>%
summarize(delay = mean(arr_delay))
ggplot(delays, aes(x = delay)) +
geom_freqpoly(binwidth = 10)
## Some airplanes have delays of more than 300 mins???
# Look at count + scatterplot of # of flights vs avg delay
delays <- not_cancelled %>%
group_by(tailnum) %>%
summarise(delay = mean(arr_delay),
count = n())
ggplot(delays) +
geom_point(aes(x = count, y = delay), alpha = 1/10)
# Variation decreases as the sample size increases
# -> much greater variation in the average delay when there are few flights
## Filter out points that do not have many observations to look
# at general pattern
delays %>%
filter(count > 25) %>%
ggplot() +
geom_point(aes(x = count, y = delay), alpha = 1/10)
## ## ## ## ## ## ## ##
## SUMMARY FUNCTIONS ##
## ## ## ## ## ## ## ##
# Measures of location
# Can combine aggregation with logical subsetting:
not_cancelled %>%
group_by(year, month, day) %>%
summarize(avg_delay1 = mean(arr_delay),
avg_delay2 = mean(arr_delay[arr_delay > 0]))
## Measures of variation - e.g. why is distance to some destinations more var than others
# IQR would be more robust if have outliers
not_cancelled %>%
group_by(dest) %>%
summarise(dist = sd(distance)) %>%
arrange(desc(dist))
# Measures of rank - e.g. min, max, quantile, etc.
# Measures of position, first, nth, last
# First and last equivalent to filtering on rank
not_cancelled %>%
group_by(year, month, day) %>%
summarize(r = min_rank(desc(dep_time))) %>%
filter(r %in% range(r))
# Counts - e.g n(), sum(!is.na()), n_distinct()
not_cancelled %>%
group_by(dest) %>%
summarise(carriers = n_distinct(carrier)) %>%
arrange(desc(carriers))
# Can also use weight var
not_cancelled %>%
count(tailnum, wt = distance)
## Counts can be used to calculate # of trues and prop of trues
not_cancelled %>%
group_by(year, month, day) %>%
summarise(n_early = sum(dep_time < 500)) # How many flights left before 5 am
# What prop of flights are delayed by more than an hour
not_cancelled %>%
group_by(year, month, day) %>%
summarise(hour_perc = mean(arr_delay > 60))
## ## ## ## ## # ## ## ## ## ##
## GROUPING W/ MULTIPLE VARS ##
## ## ## ## ## # ## ## ## ## ##
# @Note: Each summary peels off one level of the grouping
# !!! about weighted means and variance
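# Added illustration: each summarise() drops the innermost grouping level, so a
# daily count can be rolled up to monthly and then yearly totals.
daily <- not_cancelled %>% group_by(year, month, day) %>% summarise(flights = n())
monthly <- daily %>% summarise(flights = sum(flights)) # still grouped by year, month
yearly <- monthly %>% summarise(flights = sum(flights)) # grouped by year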
## #### ##
## EGGS ##
## #### ##
## Equivalent
not_cancelled %>%
count(dest)
# <==>
not_cancelled %>%
group_by(dest) %>%
summarise(count = n()) %>%
select(dest,count)
not_cancelled %>%
count(tailnum, wt = distance)
not_cancelled %>%
group_by(tailnum) %>%
summarise(count = sum(distance))
View(flights %>%
filter(is.na(dep_delay)))
#Instead of doing is.na(dep_delay) | is.na(arr_delay)
# Can just check if dep_time is na
# # of cancelled flights per day
flights %>%
mutate(cancelled = (is.na(dep_delay) | is.na(arr_delay))) %>%
group_by(year, month, day) %>%
summarise(count = sum(cancelled, na.rm = T),
average_dep_delay = mean(dep_delay, na.rm = T),
prop_cancelled = mean(cancelled)) %>%
ggplot(aes(x = average_dep_delay, y = prop_cancelled)) +
geom_point() +
geom_smooth()
## Worst carrier
flights %>%
group_by(carrier) %>%
summarise(average_delay = mean(arr_delay, na.rm = T)) %>%
arrange(desc(average_delay))
## Count before first delay greater than an hour
View(not_cancelled %>%
mutate(bad_delay = (arr_delay > 60),
i = 1) %>%
group_by(tailnum) %>%
arrange(tailnum, year, month, day) %>%
mutate(numb_flights = cumsum(i)) %>%
filter(bad_delay == T) %>%
summarise(first_flight = first(numb_flights)))
# Alternatively
View(not_cancelled %>%
mutate(bad_delay = arr_delay > 60) %>%
group_by(tailnum) %>%
arrange(tailnum, year, month, day) %>%
mutate(count = cumsum(bad_delay)) %>%
filter(count < 1) %>%
count(sort = T))
## count(): the sort argument sorts by count instead of doing arrange(count)
###############
## LAST EGGS ##
###############
not_cancelled %>%
group_by(tailnum) %>%
summarise(mean_delay = mean(arr_delay, na.rm = T)) %>%
filter(mean_delay == max(mean_delay))
View(not_cancelled %>%
mutate(dep_timeHour = floor(dep_time / 100)) %>%
group_by(dep_timeHour) %>%
summarise(mean_delay = mean(arr_delay) + mean(dep_delay)) %>%
arrange(mean_delay))
## Verify with plots
# not_cancelled %>%
# mutate(total_delay = arr_delay + dep_delay) %>%
# ggplot() +
# geom_point(aes(x = dep_time, y = total_delay)) +
# geom_smooth(method = lm)
# For each dest -> total minutes of delay
dest_delay <- not_cancelled %>%
group_by(dest) %>%
summarise(total_delay = sum(arr_delay) + sum(dep_delay))
# Proportion of the total delay for its destination
# Do it in one go
not_cancelled %>%
group_by(dest) %>%
filter(arr_delay > 0) %>%
mutate(prop_delay = arr_delay / sum(arr_delay)) %>%
select(year:day, dest, arr_delay, prop_delay)
# How delay of one flight related to delay of the flight directly after it
timeDiffBetweenFlights <- not_cancelled %>%
filter(dep_delay > 0) %>%
arrange(year, month, day, new_depTime) %>%
mutate(diffBetweenTimes = (new_depTime - lag(new_depTime)) %% (24 * 60)) %>%
select(year:day, tailnum, new_depTime, dep_delay, diffBetweenTimes)
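# Added note: the %% (24 * 60) above wraps negative gaps across midnight,
# e.g. (300 - 1410) %% 1440 == 330, i.e. 5.5 hours between a 23:30 and a 05:00 departure.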
# Find cutoff for diff between last flight of day (in AM of next day)
# and first flight of the day by plotting (arbitrary?) #
timeDiffBetweenFlights %>% filter(between(new_depTime, 200, 600)) %>%
filter(diffBetweenTimes > 70) %>%
ggplot() +
geom_point(aes(x = new_depTime, y = diffBetweenTimes))
# Set first flight of the day delay as = 0, assume first flight happens
# between 5 and 6 AM
View(timeDiffBetweenFlights %<>%
mutate(firstFlightOfDay = ifelse((new_depTime >= 300 & new_depTime <= 360)
& diffBetweenTimes > 60, T, F)))
# How many did we get
nrow(timeDiffBetweenFlights %>%
group_by(year, month, day) %>%
summarise(nday = 1)) # 1 year
nrow(timeDiffBetweenFlights %>% filter(firstFlightOfDay == T)) # only 228
# Change way of doing this!
|
/dplyr.R
|
no_license
|
wazabata/R_Repo
|
R
| false | false | 7,616 |
r
|
# *****************************************************************************
# Run GO enrichment analysis on the hubs of an interactome, in chunks. The code
# is invoked with the following command line parameters and is designed for
# cluster array execution (see the master submission script):
# <tcga_acro> <GOontology> <ChunkSize> <Increment>
# where:
# * tcga_acro: tcga acronym (e.g., brca, gbm, ov) indicating which interactome
# to run the analysis on. NOTE: the code expects that a binary R file named
# <tcga_acro>.rda
#   will be available in the directory where the script is invoked from and will
#   contain a variable named <tcga_acro> containing the query interactome.
# * GOontology: either of the strings BP, MF, CC. Indicates which GO ontology to
# use.
# * ChunkSize: An integer used for incremental processing, see next argument.
# * Increment: An integer used to specify the range of hub genes to process.
# E.g., say that tcga_acro = brca. Then for a Increment value of N, the
# code will process the regulons for the hubs brca[[2]][start:end] where:
# start = (N-1)*ChunkSize+1
# end = N*ChunkSize.
#
# The analysis generates a named list with entries named after processed hub genes.
# The entry for hub gene G is the data frame Utils::goEnrichment(regulon(G))[[1]].
# This list is assigned to a variable named:
# <tcga_acro>Res_<start> (e.g., brcaRes_1)
# and is saved for post-processing to a binary R file with the same name.
# *****************************************************************************
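# Added illustration of the chunking arithmetic: with ChunkSize = 50 and
# Increment = 3 the slice processed is hubs 101..150, since
# start = (3 - 1) * 50 + 1 = 101 and end = 3 * 50 = 150 (capped at the number of hubs).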
UTILSLIB = "~/cvs/R/Common/Utils.R"
# *****************************************************************************
# Run the GO enrichment analysis. Keeps track of the execution time, for
# logging purposes
#
# ARGUMENTS:
# * net: the slice of the interactome on which to run the analysis.
# * goOnt: string specifying which GO ontology to use (BP, MF, CC).
# * logFile: file for storing logging info.
# *****************************************************************************
doEnrichment <- function(net, goOnt = "BP", logFile = NULL){
start = Sys.time()
source(UTILSLIB, chdir = TRUE)
res = lapply(net, function(x, ontology){
return(goEnrichment(abs(x[,1]), ont = ontology)[[1]])
}, ontology = goOnt)
end = Sys.time()
names(res) = names(net)
if (!is.null(logFile))
cat(paste("\n\n\tElapsed time -> ", paste(toString(end-start), attr( end-start, "units"))), file = logFile, append=TRUE, sep="\n")
return(res)
}
# Parse command-line arguments
args = commandArgs(TRUE)
if (length(args) != 4)
stop("use:\t<command_name> tcga_acro GOontology ChunkSize Increment")
load(paste(args[1], ".rda", sep=""))
net = get(args[1])
ontology = args[2]
INC = as.integer(args[3])
start = (as.integer(args[4]) - 1)*INC+1
L = length(net[[2]])
# Invoke the analysis on the specified slice of the interactome and store the
# result list in a binary R file.
if (start <= L){
end = min(L, start+INC-1)
res = doEnrichment(net[[2]][start:end], goOnt = ontology, paste(args[1], "Log.txt", sep=""))
resVarName = paste(args[1], "Res_", start, sep="")
fileName = paste(args[1], "Res_", start, ".rda", sep="")
assign(resVarName, res, envir = globalenv())
save(list = c(resVarName), file=fileName)
}
|
/codes/Cluster/Pathway_Analysis/regulonGOenrichment_array.r
|
no_license
|
hjkim88/GTExProj
|
R
| false | false | 3,277 |
r
|
# VERSION 2019-06-29
library(rgdal)
# Note: This R script is optimised for maps from GADM
# CHANGE GEOJSON FILE NAME HERE
file = "japan.json"
# CHANGE REGION NAME PROPERTY HERE
# IF MAP FROM GADM, DON'T CHANGE
nameproperty = "NAME_1"
# CHANGE REGION ABBREVIATION PROPERTY HERE
# IF MAP FROM GADM, DON'T CHANGE
abbrproperty = "HASC_1"
########################################################################################
# Imports json file
cat("Loading geojson file ........ \n")
feature_collection <- readOGR(dsn = file, stringsAsFactors = FALSE)
file_name = gsub("\\.json$", '', file)
file_name = gsub("\\.geojson$", '', file_name)
if((is.null(feature_collection$name[1]) == FALSE || is.null(feature_collection$NAME_1[1]) == FALSE || is.null(eval(parse(text = paste0("feature_collection$", nameproperty, "[1]")))) == FALSE) == FALSE){
cat("\n\nERROR: Unable to find region names. Please check below:\n\n")
cat("[Property]: [Value for first region]\n")
for(i in 1: ncol(feature_collection@data)){
cat(names(feature_collection@data)[i], ": ", feature_collection@data[1,i], "\n", sep = '')
}
rm(i)
cat("\nCheck which property contains the region name and amend the variable \"nameproperty\" accordingly.\n")
}else{
if(is.null(feature_collection$NAME_0[1]) == FALSE){
country = feature_collection$NAME_0[1]
cat("\nImported ", file, ": map data for ", country, "\n", sep = '')
rm(country)
}else{
cat("\nImported map data from ", file,"\n", sep = '')
}
# Finds "NAME_1" property for each region and saves it as region_name
if(is.null(feature_collection$name[1]) == FALSE){
region_name = feature_collection$name
}else if(is.null(feature_collection$NAME_1[1]) == FALSE){
region_name = feature_collection$NAME_1
}else{
eval(parse(text = paste0("region_name = feature_collection$", nameproperty)))
}
# Creates blank vectors for the region order, region id and region data (Region order is a temp vector)
region_order = seq(1, length(feature_collection), 1)
region_id = rep('', length(feature_collection))
region_data = rep('', length(feature_collection))
abbr = TRUE
# Finds "HASC_1" property, extracts the region abbreviation and saves in the data frame
if(is.null(feature_collection$HASC_1[1]) == FALSE){
region_abbreviation = feature_collection$HASC_1
region_abbreviation = gsub("^.*\\.", '', region_abbreviation)
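    # Added note: the greedy pattern "^.*\\." strips everything up to the last dot,
    # e.g. a HASC code like "JP.TK" becomes the abbreviation "TK".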
# If not, find the abbrproperty inputted by the user
}else if(is.null(eval(parse(text = paste0("feature_collection$", abbrproperty, "[1]")))) == FALSE){
eval(parse(text = paste0("region_abbreviation = feature_collection$", abbrproperty)))
# If not, just leave it blank
}else{
region_abbreviation = rep('', length(feature_collection))
abbr = FALSE
}
# Creates data frame
df = data.frame(region_order, region_id, region_data, region_name, region_abbreviation)
colnames(df) = c("Region.Order", "Region.Id", "Region.Data", "Region.Name", "Region.Abbreviation")
# Sorts data frame according to region name and makes sure that region id will also follow that order
df = df[order(region_name),]
df$Region.Id = as.character(seq(1, length(feature_collection), 1))
df = df[order(df$Region.Order),]
region_id = df$Region.Id
df$Region.Order = NULL
df = df[order(df$Region.Name),]
# Rename column names
colnames(df) = c("Region Id", "Region Data", "Region Name", "Region Abbreviation")
# Creates cartogram_id property
feature_collection@data$cartogram_id = region_id
# Finds "GID_0" property for the 1st region (i.e. country acronym) and saves it as country
if(is.null(feature_collection$GID_0[1]) == FALSE){
country_gid = feature_collection$GID_0[1]
country_gid = tolower(country_gid)
# Exports the csv file. Automatically names file as "[country acronym]_data".csv
write.csv(df, file = paste(country_gid, "_data.csv", sep = ''), row.names=FALSE)
cat("Exported", paste(country_gid, "_data.csv", sep = ''), "\n")
jsonfile = paste(country_gid, "_processedmap.json", sep = '')
rm(country_gid)
}else{
write.csv(df, file = paste(file_name, "_data.csv", sep = ''), row.names=FALSE)
cat("Exported", paste(file_name, "_data.csv", sep = ''), "\n")
jsonfile = paste(file_name, "_processedmap.json", sep = '')
}
if (file.exists(jsonfile)) {
file.remove(jsonfile)
}
cat("Exporting geojson file ........ \n")
writeOGR(feature_collection, dsn = jsonfile, layer="", driver="GeoJSON")
cat("Exported", jsonfile, "\n")
if (is.null(feature_collection@bbox) == FALSE){
bbox = paste(feature_collection@bbox[1],feature_collection@bbox[2],feature_collection@bbox[3],feature_collection@bbox[4],sep=", ")
jsontxt = readLines(jsonfile)
jsontxt[3] = paste("\"bbox\": [", bbox , "],", sep = ' ')
file.remove(jsonfile)
writeLines(jsontxt, jsonfile, sep = "\n")
cat("Added in bbox information.\n")
rm(bbox)
rm(jsontxt)
cat("\nAll done.\n")
}else{
cat("Error: geojson file does not contain bbox information. Cartogram generator requires bbox information.\n")
}
if(abbr == FALSE){
cat("\nWarning: csv file does not contain region abbreviations. If the json file does contain the abbreviation info, please amend the \"abbrproperty\" variable and re-run this program. You can check for the property below:")
}
# Removes variables
rm(df, region_data, region_id, region_name, jsonfile, region_order, region_abbreviation, abbrproperty, abbr)
}
rm(feature_collection, file_name, file, nameproperty)
|
/geojson2csv_cartogram-web.R
|
permissive
|
bernardboey/geojson-to-csv-cartogram-web
|
R
| false | false | 5,550 |
r
|
# Declare a vector
x <- c(2.1, 4.2, 3.3, 5.4)
# Indexing with positive integers
x[c(3,1)]
x[order(x)]
x[c(1,1)]
# Indexing with negative integers
x[-c(3,1)]
# Positive and negative indices cannot be mixed (this errors)
x[c(-1,2)]
# Indexing with a logical vector
x[c(TRUE, TRUE, FALSE, FALSE)]
x[x>3]
x[c(TRUE, TRUE, NA, FALSE)]
# An empty index returns the original vector
x[]
|
/BA/Homework/hwlearning/getsubset.R
|
permissive
|
jeklen/notes
|
R
| false | false | 303 |
r
|
# Analysis of CW data, Mixed-design ANOVA
# GA, 16.9.2021
install.packages("afex")
install.packages("emmeans")
install.packages("ggplot2")
library(afex)
library(emmeans)
library(ggplot2)
# change path accordingly
#setwd("~/tubCloud/1.2021.Projects/Christian_W/Auswertungsinfrastruktur/Auswertungsinfrastruktur/Auswerter")
setwd("./")
setwd("D:/Documents/Uni Kurse/Bachelorarbeit/Auswerter")
# reading data
df <- read.delim('dataframe_hyp4.csv', header=TRUE, sep=",")
# and converting variable columns to factors
df$Person <- as.factor(df$Person)
df$Umgebung <- as.factor(df$Umgebung)
df$Aufwachsort <- as.factor(df$Aufwachsort)
# plotting
bargraph<-ggplot(df, aes(Umgebung, MOS, fill=Umgebung)) +
geom_bar(stat="summary",position = "dodge") +
facet_grid(.~Aufwachsort) +
scale_fill_brewer(palette="Dark2")
bargraph
###### Mixed Design ANOVA with package afex
# Mixed-effect ANOVA
# dependent variable: MOS (on the left side of the tilde)
# within variable(s): the variable(s) measured using same participants ("within" them)
# between variable(s): the variable(s) measured using different participants ("between" them)
# participant: participant ID. It's given inside Error(), together with the within-variable
# there are many functions that can do the same ANOVA analysis in R.
# aov_ez(), aov_car(), aov_4(). They differ on how the arguments are passed but results are the same.
a1 <- aov_ez("Person", "MOS", df, between="Aufwachsort", within='Umgebung')
print(a1)
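# Added note: the same model could be fit with aov_4() using an lme4-style
# formula (sketch only, assuming the same column names):
# a1_alt <- aov_4(MOS ~ Aufwachsort + (Umgebung | Person), data = df)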
# the ANOVA table shows that the two main effects are significant
# (for Aufwachsort and Umgebung), and also that the interaction effect is significant
## post-hoc contrasts
# As the two main effects have only two levels each, we know which ones
# are significant (no need for pairwise comparisons)
# just for completeness we can show the main effects separately, get their confidence intervals etc.
# main effect for Umgebung (within variable)
m1 <-emmeans(a1, ~Umgebung)
m1
pairs(m1) # significant
# main effect for Aufwachsort (between variable)
m2 <-emmeans(a1, ~Aufwachsort)
m2
pairs(m2) # slight significance
# now as the interaction term is significant, we also can check
# which pair is sig. different with which other pair.
## Interaction term
inter <- emmeans(a1, ~Aufwachsort*Umgebung)
print(inter)
print(pairs(inter, reverse=TRUE))
# significant differences are for
# de/beach vs. de/landscape
# gr/beach vs. de/landscape
# gr/beach vs. gr/landscape
# de/landscape vs. gr/landscape
# as the table shows, p-values are already adjusted by Tukey method.
#ADDED BY ME:
#now get the confidence intervals of the comparisons
summary(pairs(inter, reverse=TRUE), infer=TRUE)
summary(pairs(inter, reverse=FALSE), infer=TRUE)
# Sources:
# https://cran.r-project.org/web/packages/afex/vignettes/afex_anova_example.html
# https://ademos.people.uic.edu/Chapter21.html#2_mixed_design_anovas_(afex)
|
/Auswerter/cw_anova.R
|
no_license
|
Chrismw98/Bachelorarbeit
|
R
| false | false | 2,939 |
r
|
#!/usr/bin/env Rscript
# Copyright (c) 2018 Talkowski Laboratory
# Contact: Ryan Collins <rlcollins@g.harvard.edu>
# Distributed under terms of the MIT license.
# Helper script to perform family-based VCF QC & plot results
###Set master parameters
options(stringsAsFactors=F,scipen=1000)
rare.max.freq <- 0.01
uncommon.max.freq <- 0.1
common.max.freq <- 0.5
major.max.freq <- 1
tiny.max.size <- 100
small.max.size <- 500
medium.max.size <- 2500
medlarge.max.size <- 10000
large.max.size <- 50000
huge.max.size <- 300000000
nocall.placeholder <- 9999
###################
###HELPER FUNCTIONS
###################
#Read & clean list of variant IDs & genotypes per sample
readDatPerSample <- function(ID,nocall.placeholder=9999){
#Set path
path <- paste(perSampDir,"/",ID,".VIDs_genotypes.txt.gz",sep="")
#Read & process data if file exists
if(file.exists(path)){
#Read data
x <- read.table(path,header=F,check.names=F)
#Convert genotypes to number of alleles
x[,2] <- sapply(x[,2],function(gt){
#Return nocall.placeholder for no-calls
if(gt=="./."){
return(nocall.placeholder)
}else{
sum(as.numeric(gsub(".","",unlist(strsplit(as.character(gt),split="/")),fixed=T)))
}
})
#Format output data
x[,2:3] <- apply(x[,2:3],2,as.numeric)
colnames(x) <- c("VID","alleles","GQ")
#Return data
return(x)
}else{
warning(paste("VID file not found for sample ",ID,sep=""))
return(NULL)
}
}
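#Added illustration: the genotype conversion above maps "0/0" -> 0, "0/1" -> 1,
#"1/1" -> 2, and the no-call "./." -> the placeholder value (9999 by default)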
#Subset SV stats data
subsetDat <- function(dat,vlist,biallelic=T,
min.GQ.pro=0,max.GQ.pro=999,
min.GQ.par=0,max.GQ.par=999){
#Check input variant list
#Subset dat to variants found in sample & append number of alleles in sample
x <- merge(dat,vlist,by="VID",sort=F)
#Reorder columns
x <- x[,c(2:4,1,5:ncol(x))]
#Exclude non-biallelic sites, if optioned
if(biallelic==T){
x <- x[which(x$carriers>0 & x$other_gts==0),]
}
#Filter sites to specified GQ ranges
x <- x[which(is.na(x$pro.GQ) | (x$pro.GQ>=min.GQ.pro & x$pro.GQ<=max.GQ.pro)),]
# parent.GQ.range <- as.data.frame(t(apply(x[,which(colnames(x) %in% c("fa.GQ","mo.GQ")),],1,function(vals){
# if(any(!is.na(vals))){
# return(range(vals,na.rm=T))
# }else{
# return(c(NA,NA))
# }
# })))
# colnames(parent.GQ.range) <- c("min","max")
# x <- x[which((is.na(parent.GQ.range$min) | parent.GQ.range$min>=min.GQ.par) &
# (is.na(parent.GQ.range$max) | parent.GQ.range$max<=max.GQ.par)),]
#Return result
return(x)
}
#Gather matrix of SVs in any member of a family with information on allele counts in child & parent(s)
getFamDat <- function(dat,proband,father=NA,mother=NA,biallelic=T,nocall.placeholder=9999){
#Clean parent IDs
if(is.na(father)){
father <- NULL
}
if(is.na(mother)){
mother <- NULL
}
#Read VID lists for family members
VID.lists <- lapply(c(proband,father,mother),readDatPerSample)
names(VID.lists) <- c(proband,father,mother)
#Get master VID list of all family members
if(!is.null(father)){
vlist <- merge(VID.lists[[which(names(VID.lists)==proband)]],
VID.lists[[which(names(VID.lists)==father)]],
sort=F,by="VID",all=T,suffixes=c(".pro",".fa"))
if(!is.null(mother)){
vlist <- merge(vlist,
VID.lists[[which(names(VID.lists)==mother)]],
sort=F,by="VID",all=T)
colnames(vlist)[6:7] <- c("alleles.mo","GQ.mo")
}else{
vlist$alleles.mo <- NA
vlist$GQ.mo <- NA
}
}else{
vlist <- VID.lists[[which(names(VID.lists)==proband)]]
    colnames(vlist)[which(colnames(vlist)=="alleles")] <- "alleles.pro"
    colnames(vlist)[which(colnames(vlist)=="GQ")] <- "GQ.pro"
vlist$alleles.fa <- 0
vlist$GQ.fa <- NA
vlist <- merge(vlist,
VID.lists[[which(names(VID.lists)==mother)]],
sort=F,by="VID",all=T,suffixes=c(".pro",".mo"))
}
  #Only retain sites where no family member has a null genotype (no-call, ./., nocall.placeholder)
  exclude <- which(apply(vlist[,c(2,4,6)],1,
                         function(vals){
                           any(as.numeric(vals)==nocall.placeholder)
                         }))
if(length(exclude) > 0){
vlist <- vlist[-exclude,]
}
#Convert remaining NA allele counts to 0s
vlist[,c(2,4,6)] <- apply(vlist[,c(2,4,6)],2,function(vals){
vals[which(is.na(vals))] <- 0
return(vals)
})
#Add transmission information to vlist
trans <- t(apply(vlist[,c(2,4,6)],1,function(alleles){
#Convert allele counts to numeric
sapply(alleles,function(vals){
vals[which(is.na(vals))] <- 0
})
#Get allele counts
pro <- as.numeric(alleles[1])
fa <- as.numeric(alleles[2])
mo <- as.numeric(alleles[3])
#Infer child inheritance status
pro.denovo <- max(c(pro-(fa+mo),0))
pro.inherited <- pro-pro.denovo
#Divide credit for inherited variants between parents based on ratio of parent allele counts
parental.alleles <- sum(c(fa,mo),na.rm=T)
if(fa>0){
p.fa <- fa/parental.alleles
}else{
p.fa <- 0
}
if(mo>0){
p.mo <- mo/parental.alleles
}else{
p.mo <- 0
}
fa.transmitted <- pro.inherited*p.fa
fa.untransmitted <- max(c(0,fa-fa.transmitted))
mo.transmitted <- pro.inherited*p.mo
mo.untransmitted <- max(c(0,mo-mo.transmitted))
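    # Worked example of the split above (hypothetical allele counts):
    #   pro=1, fa=0, mo=2 -> pro.denovo=0, pro.inherited=1, fa.transmitted=0,
    #   mo.transmitted=1, mo.untransmitted=1 (credit follows the parental allele ratio)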
#Return vector of relevant transmission allele counts
return(c(pro,pro.inherited,pro.denovo,
fa,fa.transmitted,fa.untransmitted,
mo,mo.transmitted,mo.untransmitted))
}))
trans <- as.data.frame(cbind(vlist[,1],trans,vlist[,c(3,5,7)]))
colnames(trans) <- c("VID","pro.all","pro.inherited","pro.denovo",
"fa.all","fa.trans","fa.untrans",
"mo.all","mo.trans","mo.untrans",
"pro.GQ","fa.GQ","mo.GQ")
#Subset data & add transmission data
dat.fam <- subsetDat(dat=dat,vlist=trans,biallelic=biallelic)
dat.fam[,(ncol(dat.fam)-9):ncol(dat.fam)] <- apply(dat.fam[,(ncol(dat.fam)-9):ncol(dat.fam)],2,as.numeric)
return(dat.fam)
}
#Compute inheritance stats from a dat.fam dataframe
computeInheritance <- function(dat.fam,VIDs=NULL){
#Clean dat.fam
dat.fam <- as.data.frame(dat.fam)
#Subset data frame based on list of VIDs
if(!is.null(VIDs)){
dat.fam <- dat.fam[which(as.character(dat.fam$VID) %in% as.character(VIDs)),]
}
#Compute allele-based inheritance rates
pro.a.all <- sum(dat.fam$pro.all)
pro.a.denovo <- sum(dat.fam$pro.denovo)
pro.a.denovorate <- pro.a.denovo/pro.a.all
pro.a.inh <- pro.a.all-pro.a.denovo
pro.a.inhrate <- pro.a.inh/pro.a.all
fa.a.all <- sum(dat.fam$fa.all)
fa.a.trans <- sum(dat.fam$fa.trans)
fa.a.transrate <- fa.a.trans/fa.a.all
fa.a.untrans <- fa.a.all-fa.a.trans
fa.a.untransrate <- fa.a.untrans/fa.a.all
pro.a.patfrac <- fa.a.trans/pro.a.inh
mo.a.all <- sum(dat.fam$mo.all)
mo.a.trans <- sum(dat.fam$mo.trans)
mo.a.transrate <- mo.a.trans/mo.a.all
mo.a.untrans <- mo.a.all-mo.a.trans
mo.a.untransrate <- mo.a.untrans/mo.a.all
pro.a.matfrac <- mo.a.trans/pro.a.inh
pro.a.patmatratio <- pro.a.patfrac/(pro.a.patfrac+pro.a.matfrac)
#Compute variant-based inheritance rates
pro.v.all <- length(which(dat.fam$pro.all>0))
pro.v.denovo <- length(which(dat.fam$pro.all>0 & dat.fam$pro.inherited==0))
pro.v.denovorate <- pro.v.denovo/pro.v.all
pro.v.inh <- pro.v.all-pro.v.denovo
pro.v.inhrate <- pro.v.inh/pro.v.all
fa.v.all <- length(which(dat.fam$fa.all>0))
fa.v.trans <- length(which(dat.fam$pro.all>0 & dat.fam$fa.all>0))
fa.v.transrate <- fa.v.trans/fa.v.all
fa.v.untrans <- fa.v.all-fa.v.trans
fa.v.untransrate <- fa.v.untrans/fa.v.all
pro.v.patfrac <- fa.v.trans/pro.v.inh
mo.v.all <- length(which(dat.fam$mo.all>0))
mo.v.trans <- length(which(dat.fam$pro.all>0 & dat.fam$mo.all>0))
mo.v.transrate <- mo.v.trans/mo.v.all
mo.v.untrans <- mo.v.all-mo.v.trans
mo.v.untransrate <- mo.v.untrans/mo.v.all
pro.v.matfrac <- mo.v.trans/pro.v.inh
  pro.v.patmatratio <- pro.v.patfrac/(pro.v.patfrac+pro.v.matfrac)
#Format & return vector of rates
return(c("pro.allele.all"=pro.a.all,
"pro.allele.inh"=pro.a.inh,
"pro.allele.inhrate"=pro.a.inhrate,
"pro.allele.patfrac"=pro.a.patfrac,
"pro.allele.matfrac"=pro.a.matfrac,
"pro.allele.patmatratio"=pro.a.patmatratio,
"pro.allele.denovo"=pro.a.denovo,
"pro.allele.denovorate"=pro.a.denovorate,
"fa.allele.all"=fa.a.all,
"fa.allele.trans"=fa.a.trans,
"fa.allele.transrate"=fa.a.transrate,
"fa.allele.untrans"=fa.a.untrans,
"fa.allele.untransrate"=fa.a.untransrate,
"mo.allele.all"=mo.a.all,
"mo.allele.trans"=mo.a.trans,
"mo.allele.transrate"=mo.a.transrate,
"mo.allele.untrans"=mo.a.untrans,
"mo.allele.untransrate"=mo.a.untransrate,
"pro.site.all"=pro.v.all,
"pro.site.inh"=pro.v.inh,
"pro.site.inhrate"=pro.v.inhrate,
"pro.site.patfrac"=pro.v.patfrac,
"pro.site.matfrac"=pro.v.matfrac,
"pro.site.patmatratio"=pro.v.patmatratio,
"pro.site.denovo"=pro.v.denovo,
"pro.site.denovorate"=pro.v.denovorate,
"fa.site.all"=fa.v.all,
"fa.site.trans"=fa.v.trans,
"fa.site.transrate"=fa.v.transrate,
"fa.site.untrans"=fa.v.untrans,
"fa.site.untransrate"=fa.v.untransrate,
"mo.site.all"=mo.v.all,
"mo.site.trans"=mo.v.trans,
"mo.site.transrate"=mo.v.transrate,
"mo.site.untrans"=mo.v.untrans,
"mo.site.untransrate"=mo.v.untransrate))
}
#Compute inheritance for a list of trios and return as a data frame
computeInheritanceMulti <- function(trio.dat.list,VIDs=NULL){
#Iterate over trios and compute inheritance
res <- as.data.frame(t(sapply(trio.dat.list,computeInheritance,VIDs=VIDs)))
return(res)
}
#Collect de novo rate per SV class
deNovoRateByClass <- function(trio.dat.list,VIDs=NULL){
#Collect median DNR across all classes
all.dat <- computeInheritanceMulti(trio.dat.list=trio.dat.list,VIDs=VIDs)
all.dnrs <- c(median(all.dat$pro.site.denovorate,na.rm=T),
median(all.dat$pro.allele.denovorate,na.rm=T))
#Iterate over classes and return DNRs
res <- sapply(svtypes$svtype,function(svtype){
if(is.null(VIDs)){
VIDs <- dat$VID
}
sub.dat <- computeInheritanceMulti(trio.dat.list=trio.dat.list,
VIDs=dat$VID[which(dat$VID %in% VIDs & dat$svtype==svtype)])
sub.dnrs <- c(median(sub.dat$pro.site.denovorate,na.rm=T),
median(sub.dat$pro.allele.denovorate,na.rm=T))
})
#Format & return results
res <- as.data.frame(cbind(all.dnrs,res))
colnames(res) <- c("ALL",svtypes$svtype)
rownames(res) <- c("variants","alleles")
return(res)
}
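# Illustrative sketch of the return value (given the inputs above):
#   deNovoRateByClass(trio.dat.list) -> 2 x (1+nrow(svtypes)) data frame with
#   rows "variants" & "alleles" and columns "ALL" plus one per SV class,
#   each cell holding the median de novo rate across trios.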
#Collect matrix of de novo rates by class by freq
deNovoRateByFreq <- function(trio.dat.list,freq.bins=40,count="variants"){
#Get frequency index
if(count=="variants"){
freq.idx <- which(colnames(dat)=="carrierFreq")
}else{
freq.idx <- which(colnames(dat)=="AF")
}
#Create evenly spaced freq bins on log10-scale
logfreq.min <- log10(min(dat[,freq.idx]))
logfreq.max <- log10(1)
logfreq.steps <- seq(logfreq.min,logfreq.max,by=(logfreq.max-logfreq.min)/(freq.bins-1))
freq.df <- data.frame("min.freq"=c(0,10^logfreq.steps[-length(logfreq.steps)]),
"max.freq"=10^logfreq.steps)
rownames(freq.df) <- paste(round(100*freq.df[,1],4),"-",
round(100*freq.df[,2],4),"%",sep="")
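  # e.g. (illustrative) with freq.bins=40 and a minimum observed frequency of 1e-4,
  # this yields 40 log10-spaced bins labeled from "0-0.01%" up to roughly "79-100%"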
#Iterate over frequency bins and gather de novo rates
DNRs <- apply(freq.df,1,function(bounds){
dnrs <- deNovoRateByClass(trio.dat.list=trio.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]>bounds[1] & dat[,freq.idx]<=bounds[2])])
return(as.numeric(dnrs[which(rownames(dnrs)==count),]))
})
#Format & return DNRs & freq.df
DNRs <- as.data.frame(DNRs)
DNRs <- apply(DNRs,2,as.numeric)
rownames(DNRs) <- c("ALL",svtypes$svtype)
return(list("DNRs"=DNRs,"bins"=freq.df))
}
#Collect matrix of de novo rates by class by size
deNovoRateBySize <- function(trio.dat.list,size.bins=40,count="variants"){
#Create evenly spaced size bins on log10-scale
logsize.min <- log10(50)
logsize.max <- log10(1000000)
logsize.steps <- seq(logsize.min,logsize.max,by=(logsize.max-logsize.min)/(size.bins-2))
size.df <- data.frame("min.size"=c(0,10^logsize.steps),
"max.size"=c(10^logsize.steps,300000000))
rownames(size.df) <- paste("10^",round(log10(size.df[,1]),1),
"-",round(log10(size.df[,2]),1),sep="")
  #Iterate over size bins and gather de novo rates
DNRs <- apply(size.df,1,function(bounds){
dnrs <- deNovoRateByClass(trio.dat.list=trio.dat.list,
VIDs=dat$VID[which(dat$length>bounds[1] & dat$length<=bounds[2])])
return(as.numeric(dnrs[which(rownames(dnrs)==count),]))
})
#Format & return DNRs & size.df
DNRs <- as.data.frame(DNRs)
DNRs <- apply(DNRs,2,as.numeric)
rownames(DNRs) <- c("ALL",svtypes$svtype)
return(list("DNRs"=DNRs,"bins"=size.df))
}
#Collect matrix of de novo rates by size & freq combination
deNovoRateBySizeFreq <- function(trio.dat.list,VIDs=NULL,count="variants",
max.sizes,size.labs,max.freqs,freq.labs){
#Get frequency index
if(count=="variants"){
freq.idx <- which(colnames(dat)=="carrierFreq")
}else{
freq.idx <- which(colnames(dat)=="AF")
}
#Create size & freq bins
size.df <- data.frame("min.size"=c(0,0,max.sizes),
"max.size"=c(300000000,max.sizes,300000000))
rownames(size.df) <- c("ALL",size.labs)
freq.df <- data.frame("min.freq"=c(0,0,max.freqs),
"max.freq"=c(1,max.freqs,1))
rownames(freq.df) <- c("ALL",freq.labs)
#Instantiate VIDs if necessary
if(is.null(VIDs)){
VIDs <- dat$VID
}
#Iterate over size bins & create DNR df for all SV
DNRs <- as.data.frame(t(sapply(1:nrow(size.df),function(s){
#Iterate over frequency bins
sapply(1:nrow(freq.df),function(f){
#Get de novo rate
DNR <- deNovoRateByClass(trio.dat.list,
VIDs=dat$VID[which(dat$VID %in% VIDs &
dat$length>size.df[s,1] & dat$length<=size.df[s,2] &
dat[,freq.idx]>freq.df[f,1] & dat[,freq.idx]<=freq.df[f,2])])
return(DNR$ALL[which(rownames(DNR)==count)])
})
})))
colnames(DNRs) <- rownames(freq.df)
rownames(DNRs) <- rownames(size.df)
#Iterate over SV classes and create DNR df for each class
DNRs.byClass <- lapply(svtypes$svtype,function(svtype){
DNRs <- as.data.frame(t(sapply(1:nrow(size.df),function(s){
#Iterate over frequency bins
sapply(1:nrow(freq.df),function(f){
#Get de novo rate
DNR <- deNovoRateByClass(trio.dat.list,
VIDs=dat$VID[which(dat$VID %in% VIDs & dat$svtype==svtype &
dat$length>size.df[s,1] & dat$length<=size.df[s,2] &
dat[,freq.idx]>freq.df[f,1] & dat[,freq.idx]<=freq.df[f,2])])
return(DNR$ALL[which(rownames(DNR)==count)])
})
})))
colnames(DNRs) <- rownames(freq.df)
rownames(DNRs) <- rownames(size.df)
return(DNRs)
})
names(DNRs.byClass) <- svtypes$svtype
#Combine all DNR dfs & return
DNRs.all <- c(list(DNRs),DNRs.byClass)
names(DNRs.all)[1] <- "ALL"
return(DNRs.all)
}
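# Illustrative sketch of the return value: a named list of data frames ("ALL",
# then one per SV class), each with rows c("ALL",size.labs) and columns
# c("ALL",freq.labs), holding the de novo rate for each size x frequency stratum.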
#Collect matrix of de novo rates by class by minimum proband GQ
deNovoRateByProGQ <- function(trio.dat.list,GQ.bins=40,count="variants"){
#Create evenly spaced GQ bins
GQ.steps <- seq(0,1000,by=1000/GQ.bins)
#Iterate over min GQs and gather de novo rates
DNRs <- sapply(GQ.steps,function(min.GQ){
tdl.tmp <- lapply(trio.dat.list,function(df){
return(df[which(df$pro.GQ>=min.GQ),])
})
dnrs <- deNovoRateByClass(trio.dat.list=tdl.tmp)
return(as.numeric(dnrs[which(rownames(dnrs)==count),]))
})
#Format & return DNRs & GQ.df
DNRs <- as.data.frame(DNRs)
DNRs <- apply(DNRs,2,as.numeric)
rownames(DNRs) <- c("ALL",svtypes$svtype)
colnames(DNRs) <- paste("gt",GQ.steps,sep="")
return(list("DNRs"=DNRs,"bins"=GQ.steps))
}
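# Illustrative sketch: with the default GQ.bins=40 the returned DNR matrix has
# columns "gt0","gt25",...,"gt1000", i.e. de novo rates recomputed after restricting
# to sites with proband GQ >= each cutoff.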
############################
###PLOTTING HELPER FUNCTIONS
############################
#Generate main inheritance plot
plotInhStats <- function(inh.stats,count="variants",title=NULL,cex.lab=1){
#Subset inh.stats to relevant columns
if(count=="variants"){
plot.df <- data.frame(inh.stats$pro.site.inhrate,
inh.stats$pro.site.patfrac,
inh.stats$pro.site.matfrac,
inh.stats$pro.site.patmatratio,
inh.stats$pro.site.denovorate,
"total.site.transrate"=(inh.stats$fa.site.trans+inh.stats$mo.site.trans)/(inh.stats$fa.site.all+inh.stats$mo.site.all),
inh.stats$fa.site.transrate,
inh.stats$mo.site.transrate)
}else{
plot.df <- data.frame(inh.stats$pro.allele.inhrate,
inh.stats$pro.allele.patfrac,
inh.stats$pro.allele.matfrac,
inh.stats$pro.allele.patmatratio,
inh.stats$pro.allele.denovorate,
"total.allele.transrate"=(inh.stats$fa.allele.trans+inh.stats$mo.allele.trans)/(inh.stats$fa.allele.all+inh.stats$mo.allele.all),
inh.stats$fa.allele.transrate,
inh.stats$mo.allele.transrate)
}
#Create vector of median fractions as representative for each category
if(count=="variants"){
median.counts <- c(paste(prettyNum(round(median(inh.stats$pro.site.inh),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.site.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.site.inh),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$mo.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.site.inh),0),big.mark=","),sep=""),
paste(round(100*median(inh.stats$pro.site.patfrac),0),"% : ",
round(100*median(inh.stats$pro.site.matfrac),0),"%",sep=""),
paste(prettyNum(round(median(inh.stats$pro.site.denovo),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.site.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.site.trans+inh.stats$mo.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$fa.site.all+inh.stats$mo.site.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$fa.site.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$mo.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$mo.site.all),0),big.mark=","),sep=""))
}else{
median.counts <- c(paste(prettyNum(round(median(inh.stats$pro.allele.inh),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.allele.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.allele.inh),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$mo.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.allele.inh),0),big.mark=","),sep=""),
paste(round(100*median(inh.stats$pro.allele.patfrac),0),"% : ",
round(100*median(inh.stats$pro.allele.matfrac),0),"%",sep=""),
paste(prettyNum(round(median(inh.stats$pro.allele.denovo),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.allele.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.allele.trans+inh.stats$mo.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$fa.allele.all+inh.stats$mo.allele.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$fa.allele.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$mo.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$mo.allele.all),0),big.mark=","),sep=""))
}
#Set plot colors
col.pat <- "#00B0CF"
col.mat <- "#F064A5"
col.dn <- "#F13D15"
col.other <- "gray35"
plot.cols <- c(col.other,col.pat,col.mat,col.other,
col.dn,col.other,col.pat,col.mat)
lab.cols <- plot.cols
lab.cols[which(lab.cols==col.other)] <- "black"
lab.fonts <- c(2,3,3,3,2,2,3,3)
#Prep plot area
par(mar=c(1,6.5,3,4.5))
plot(x=c(-0.05,1.05),y=c(0,-8.25),type="n",
xaxt="n",yaxt="n",xlab="",ylab="",yaxs="i")
#Dress up plot
abline(v=seq(0,1,0.2),lty=2,col="gray85")
# abline(h=c(0,-9),lwd=2,col="gray80")
abline(v=c(0,1),col="gray75")
# text(x=0.5,y=-0.2,pos=3,labels="SV Sites",font=4,cex=0.9)
# text(x=0.5,y=-9.2,pos=3,labels="SV Alleles",font=4,cex=0.9)
#Add category axes
cat.labels <- c("Proband\nInheritance Rate","Inh. Rate \n(Paternal) ","Inh. Rate \n(Maternal) ",
"Pat:Mat Ratio ","Proband\nDe Novo Rate","Parental\nTransmission Rate",
"Trans. Rate \n(Paternal) ","Trans. Rate \n(Maternal) ")
sapply(1:8,function(i){
axis(2,at=-i+0.3,line=-0.8,tick=F,las=2,cex.axis=0.7,font=lab.fonts[i],
labels=cat.labels[i],col.axis=lab.cols[i])
})
# sapply(1:8,function(i){
# axis(2,at=-i-8.7,line=-0.8,tick=F,las=2,cex.axis=0.7,font=lab.fonts[i],
# labels=cat.labels[i],col.axis=lab.cols[i])
# })
axis(2,at=c(-1,-5,-6,-9,-10,-14,-15,-18)+0.75,labels=NA,tck=-0.1,col="gray80")
#Add other axes & title
# axis(2,at=0.3,tick=F,las=2,line=-0.5,cex.axis=0.8,font=2,
# label=paste("n=",prettyNum(nrow(plot.df),big.mark=","),
# " ",fam.type,"s",sep=""))
axis(3,at=seq(0,1,0.2),labels=NA)
axis(3,at=seq(0,1,0.2),tick=F,line=-0.4,cex.axis=0.7,
labels=paste(seq(0,100,20),"%",sep=""))
mtext(3,line=1.5,text=title,font=2,cex=cex.lab)
axis(4,at=-0.1,tick=F,las=2,line=-0.5,cex.axis=0.8,font=2,
label="Median")
#Plot points & category info
sapply(1:ncol(plot.df),function(i){
if(any(!is.na(plot.df[,i]))){
#Add shading rect
rect(xleft=par("usr")[1],xright=par("usr")[2],
ybottom=-i+0.05,ytop=-i+0.45,bty="n",border=NA,
col=adjustcolor(plot.cols[i],alpha=0.15))
#Add points
beeswarm(plot.df[,i],add=T,horizontal=T,at=-i+0.25,
pch=21,cex=0.5,bg=plot.cols[i],pt.lwd=0.1,
corral="wrap",corralWidth=0.2)
#Add thick line & label for mean
cat.mean <- mean(plot.df[,i],na.rm=T)
segments(x0=cat.mean,x1=cat.mean,y0=-i,y1=-i+0.5,
lend="round",lwd=4,col=plot.cols[i])
text(x=cat.mean,y=-i+0.35,pos=3,cex=0.6,
labels=paste(round(100*cat.mean,1),"%",sep=""))
#Add median to right margin
axis(4,at=-i+0.3,line=-0.8,las=2,tick=F,cex.axis=0.6,
col.axis=lab.cols[i],labels=median.counts[i])
}
})
#Add clean-up box
box()
}
#Generate size distribution frame with log10 scaling
prepSizePlot <- function(xlims=c(50,1000000),cex.lab=1){
#Prep plot area
plot(x=log10(xlims),y=c(0,1),type="n",
xaxt="n",yaxt="n",xlab="",ylab="",yaxs="i")
#Add vertical gridlines
logscale.all <- log10(as.numeric(sapply(0:8,function(i){(1:9)*10^i})))
logscale.minor <- log10(as.numeric(sapply(0:8,function(i){c(5,10)*10^i})))
logscale.minor.labs <- as.character(sapply(c("bp","kb","Mb"),function(suf){paste(c(1,5,10,50,100,500),suf,sep="")}))
logscale.minor.labs <- c(logscale.minor.labs[-1],"1Gb")
logscale.major <- log10(as.numeric(10^(0:8)))
abline(v=logscale.all,col="gray97")
abline(v=logscale.minor,col="gray92")
abline(v=logscale.major,col="gray85")
#Add axes, title, and Alu/SVA/L1 ticks
axis(1,at=logscale.all,tck=-0.015,col="gray50",labels=NA)
axis(1,at=logscale.minor,tck=-0.0225,col="gray20",labels=NA)
axis(1,at=logscale.major,tck=-0.03,labels=NA)
axis(1,at=logscale.minor,tick=F,cex.axis=0.8,line=-0.3,las=2,
labels=logscale.minor.labs)
mtext(1,text="Size",line=2.25,cex=cex.lab)
axis(2,at=seq(0,1,0.2),tck=-0.025,labels=NA)
axis(2,at=seq(0,1,0.2),tick=F,line=-0.4,cex.axis=0.8,las=2,
labels=paste(seq(0,100,20),"%",sep=""))
mtext(2,text="De Novo Rate",line=2.2,cex=cex.lab)
#Add cleanup box
box()
}
#Plot DNRs vs size for all classes
plotDNRvsSize <- function(DNRs,bins,k=4,title=NULL,legend=T,fam.type="families",nfams,cex.lab=1){
#Prep plot area
par(mar=c(3.5,3.5,2.5,1))
prepSizePlot(cex.lab=cex.lab)
#Get midpoints for lines
mids <- log10(c(bins[1,2],
(bins[-c(1,nrow(bins)),1]+bins[-c(1,nrow(bins)),2])/2,
bins[nrow(bins),1]))
#Set type colors
colors <- c("gray15",svtypes$color)
lwds <- c(3,rep(2,times=nrow(svtypes)))
#Iterate over DNRs and plot per class
sapply(nrow(DNRs):1,function(i){
#Get values
vals <- as.numeric(DNRs[i,])
#Plot line & points
points(x=mids,y=vals,pch=19,cex=0.4,col=colors[i])
points(x=mids,
y=rollapply(vals,k,mean,partial=T,na.rm=T),
type="l",lwd=lwds[i],col=colors[i])
})
#Add legend
if(legend==T){
idx.for.legend <- which(apply(DNRs,1,function(vals){any(!is.na(vals))}))
legend("topright",bg="white",pch=19,cex=0.8*cex.lab,lwd=2,
legend=rownames(DNRs)[idx.for.legend],
col=colors[idx.for.legend])
}
#Add title & number of families
mtext(3,line=0.2,cex=0.8*cex.lab,text=paste("n=",prettyNum(nfams,big.mark=","),
" ",fam.type,"s",sep=""))
mtext(3,line=1,text=title,font=2,cex=cex.lab)
#Add cleanup box
box()
}
#Generate freq distribution frame with log10 scaling
prepFreqPlot <- function(xlims=c(1/10000,1),xlabel="Frequency",cex.lab=1){
#Prep plot area
plot(x=floor(log10(xlims)),y=c(0,1),type="n",
xaxt="n",yaxt="n",xlab="",ylab="",yaxs="i")
#Add vertical gridlines
logscale.all <- log10(as.numeric(sapply(0:8,function(i){(1:9)/10^i})))
logscale.minor <- log10(as.numeric(sapply(0:8,function(i){c(5,10)/10^i})))
logscale.major <- log10(as.numeric(1/10^(0:8)))
abline(v=logscale.all,col="gray97")
abline(v=logscale.minor,col="gray92")
abline(v=logscale.major,col="gray85")
#Add axes, title, and Alu/SVA/L1 ticks
axis(1,at=logscale.all,tck=-0.015,col="gray50",labels=NA)
axis(1,at=logscale.minor,tck=-0.0225,col="gray20",labels=NA)
axis(1,at=logscale.major,tck=-0.03,labels=NA)
for(i in -8:0){
axis(1,at=i,tick=F,cex.axis=0.8,line=-0.2,
labels=bquote(10^{.(i)}))
}
mtext(1,text=xlabel,line=2.25,cex=cex.lab)
axis(2,at=seq(0,1,0.2),tck=-0.025,labels=NA)
axis(2,at=seq(0,1,0.2),tick=F,line=-0.4,cex.axis=0.8,las=2,
labels=paste(seq(0,100,20),"%",sep=""))
mtext(2,text="De Novo Rate",line=2.2,cex=cex.lab)
#Add cleanup box
box()
}
#Plot DNRs vs freq for all classes
plotDNRvsFreq <- function(DNRs,bins,k=4,title=NULL,legend=T,fam.type="familie",nfams,count="variants",cex.lab=1){
#Get frequency index & x axis title
if(count=="variants"){
freq.idx <- which(colnames(dat)=="carrierFreq")
x.label <- "Carrier Frequency"
}else{
freq.idx <- which(colnames(dat)=="AF")
x.label <- "Allele Frequency"
}
#Prep plot area
par(mar=c(3.5,3.5,2.5,1))
prepFreqPlot(xlims=c(min(dat[,freq.idx],na.rm=T),1),
xlabel=x.label,cex.lab=cex.lab)
#Get midpoints for lines
mids <- log10(c(bins[1,2],(bins[-nrow(bins),1]+bins[-1,2])/2))
#Set type colors
colors <- c("gray15",svtypes$color)
lwds <- c(3,rep(2,times=nrow(svtypes)))
#Iterate over DNRs and plot per class
sapply(nrow(DNRs):1,function(i){
#Get values
vals <- as.numeric(DNRs[i,])
#Plot line & points
points(x=mids,y=vals,pch=19,cex=0.4,col=colors[i])
points(x=mids,
y=rollapply(vals,k,mean,partial=T,na.rm=T),
type="l",lwd=lwds[i],col=colors[i])
})
#Add legend
if(legend==T){
idx.for.legend <- which(apply(DNRs,1,function(vals){any(!is.na(vals))}))
legend("topright",bg="white",pch=19,cex=0.7,lwd=3,
legend=rownames(DNRs)[idx.for.legend],
col=colors[idx.for.legend])
}
#Add title & number of families
mtext(3,line=0.2,cex=0.8*cex.lab,text=paste("n=",prettyNum(nfams,big.mark=","),
" ",fam.type,"s",sep=""))
mtext(3,line=1,text=title,font=2,cex=cex.lab)
#Add cleanup box
box()
}
#Plot DNRs vs GQ for all classes
plotDNRvsGQ <- function(DNRs,bins,k=4,title=NULL,xlabel="Minimum GQ",
legend=T,fam.type="familie",nfams,count="variants",cex.lab=1){
#Prep plot area
par(mar=c(3.5,3.5,2.5,1))
plot(x=range(bins),y=c(0,1),type="n",
xaxt="n",yaxt="n",xlab="",ylab="",yaxs="i")
#Add vertical gridlines
abline(v=seq(0,1000,50),col="gray92")
abline(v=seq(0,1000,100),col="gray85")
#Add axes & title
axis(1,at=seq(0,1000,100),tck=-0.03,labels=NA)
axis(1,at=seq(0,1000,100),tick=F,cex.axis=0.7*cex.lab,line=-0.4,
las=2,labels=paste(">",seq(0,1000,100),sep=""))
mtext(1,text=xlabel,line=2.25,cex=cex.lab)
axis(2,at=seq(0,1,0.2),tck=-0.025,labels=NA)
axis(2,at=seq(0,1,0.2),tick=F,line=-0.4,cex.axis=0.8,las=2,
labels=paste(seq(0,100,20),"%",sep=""))
mtext(2,text="De Novo Rate",line=2.2,cex=cex.lab)
#Set type colors
colors <- c("gray15",svtypes$color)
lwds <- c(3,rep(2,times=nrow(svtypes)))
#Iterate over DNRs and plot per class
sapply(nrow(DNRs):1,function(i){
#Get values
vals <- as.numeric(DNRs[i,])
#Plot line & points
points(x=bins,y=vals,pch=19,cex=0.4,col=colors[i])
points(x=bins,
y=rollapply(vals,k,mean,partial=T,na.rm=T),
type="l",lwd=lwds[i],col=colors[i])
})
#Add legend
if(legend==T){
idx.for.legend <- which(apply(DNRs,1,function(vals){any(!is.na(vals))}))
legend("topright",bg="white",pch=19,cex=0.7,lwd=3,
legend=rownames(DNRs)[idx.for.legend],
col=colors[idx.for.legend])
}
#Add title & number of families
mtext(3,line=0.2,cex=0.8*cex.lab,text=paste("n=",prettyNum(nfams,big.mark=","),
" ",fam.type,"s",sep=""))
mtext(3,line=1,text=title,font=2,cex=cex.lab)
#Add cleanup box
box()
}
#Generic heatmap function
plotHeatmap <- function(mat,nfams,fam.type,
x.labels=NULL,x.title=NULL,
y.labels=NULL,y.title=NULL,
title=NULL,cex.lab=1){
#Set values if NULL
if(is.null(x.labels)){
x.labels <- colnames(mat)
}
if(is.null(y.labels)){
y.labels <- rownames(mat)
}
#Prep plotting area
par(mar=c(2,4,4,2))
plot(x=c(0,ncol(mat)),y=c(0,-nrow(mat)),type="n",
xaxt="n",xaxs="i",xlab="",yaxt="n",yaxs="i",ylab="")
#Add axes
sapply(1:ncol(mat),function(i){
axis(3,at=i-0.5,tick=F,line=-0.8,las=2,labels=x.labels[i],cex.axis=0.7)
})
# mtext(1,line=2.75,text=x.title,cex=cex.lab)
axis(2,at=-(1:nrow(mat))+0.5,tick=F,line=-0.8,las=2,labels=y.labels,cex.axis=0.7)
# mtext(2,line=2.75,text=y.title,cex=cex.lab)
mtext(1,line=0,cex=0.7*cex.lab,
text=paste("Median of N=",prettyNum(nfams,big.mark=",")," ",fam.type,"s",sep=""))
mtext(3,line=2.25,text=title,font=2,cex=cex.lab)
#Plot all cells
sapply(1:nrow(mat),function(r){
sapply(1:ncol(mat),function(c){
#Get color range
col.range <- colorRampPalette(c("#FFFFFF","#FBDB69","#EF9C4B",
"#E45F30","#8B412B","#000000"))(101)
#Get & scale value
val <- mat[r,c]
pct <- round(100*val,0)
#Get color for shading
if(is.na(val)){
color <- "gray80"
label <- "N/A"
dens <- 12
}else{
color <- col.range[pct+1]
label <- paste(pct,"%",sep="")
dens <- NA
}
#Get text color
if(is.na(val)){
text.col <- "gray60"
}else{
if(val>0.5){
text.col <- "white"
}else{
text.col <- "black"
}
}
#Plot rectangle
rect(xleft=c-1,xright=c,ybottom=-r,ytop=-(r-1),
lwd=0.5,border="gray95",col=color,density=dens)
#Format cell annotation
text(x=c-0.5,y=-(r-0.5),labels=label,
cex=0.8,col=text.col)
})
})
#Clean up box
box()
}
############################
###INHERITANCE PLOT WRAPPERS
############################
#Wrapper for all standard inheritance plots
wrapperInheritancePlots <- function(fam.dat.list,fam.type,count="variants"){
#Set title prefix & suffix and freq filter index
if(count=="variants"){
title.prefix <- "SV Site "
freq.idx <- which(colnames(dat)=="carrierFreq")
freq.lab <- "CF"
}else{
title.prefix <- "SV Allele "
freq.idx <- which(colnames(dat)=="AF")
freq.lab <- "AF"
}
if(fam.type=="trio"){
title.suffix <- paste("(Trios; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}else{
if(fam.type=="duo"){
title.suffix <- paste("(Duos; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}else{
title.suffix <- paste("(Families; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}
}
#All variants
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".all_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=NULL),
title=paste(title.prefix,"Inheritance [All SV] ",title.suffix,sep=""),
count=count)
dev.off()
#Variants by class
sapply(svtypes$svtype,function(svtype){
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".",svtype,".pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$svtype==svtype)]),
title=paste(title.prefix,"Inheritance [",svtype,"] ",title.suffix,sep=""),
count=count)
dev.off()
})
#Tiny
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".tiny_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length<=tiny.max.size)]),
title=paste(title.prefix,"Inheritance [<100bp] ",title.suffix,sep=""),
count=count)
dev.off()
#Small
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".small_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>tiny.max.size & dat$length<=small.max.size)]),
title=paste(title.prefix,"Inheritance [100-500bp] ",title.suffix,sep=""),
count=count)
dev.off()
#Medium
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".medium_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>small.max.size & dat$length<=medium.max.size)]),
title=paste(title.prefix,"Inheritance [500bp-2.5kb] ",title.suffix,sep=""),
count=count)
dev.off()
#Med-large
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".medlarge_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>medium.max.size & dat$length<=medlarge.max.size)]),
title=paste(title.prefix,"Inheritance [2.5-10kb] ",title.suffix,sep=""),
count=count)
dev.off()
#Large
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".large_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>medlarge.max.size & dat$length<=large.max.size)]),
title=paste(title.prefix,"Inheritance [10-50kb] ",title.suffix,sep=""),
count=count)
dev.off()
#Huge
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".huge_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>large.max.size)]),
title=paste(title.prefix,"Inheritance [>50kb] ",title.suffix,sep=""),
count=count)
dev.off()
#Rare
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".rare_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]<=rare.max.freq)]),
title=paste(title.prefix,"Inheritance [",freq.lab,"<1%] ",title.suffix,sep=""),
count=count)
dev.off()
#Uncommon
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".uncommon_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]>rare.max.freq & dat[,freq.idx]<=uncommon.max.freq)]),
title=paste(title.prefix,"Inheritance [",freq.lab," 1-10%] ",title.suffix,sep=""),
count=count)
dev.off()
#Common
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".common_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]>uncommon.max.freq & dat[,freq.idx]<=common.max.freq)]),
title=paste(title.prefix,"Inheritance [",freq.lab," 10-50%] ",title.suffix,sep=""),
count=count)
dev.off()
#Major
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".major_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]>common.max.freq)]),
title=paste(title.prefix,"Inheritance [",freq.lab,">50%] ",title.suffix,sep=""),
count=count)
dev.off()
}
#Wrapper for de novo rate lineplots
wrapperDeNovoRateLines <- function(fam.dat.list,fam.type,count="variants",gq=F){
#Set title prefix
if(count=="variants"){
title.prefix <- "SV Site "
}else{
title.prefix <- "SV Allele "
}
#DNR by Size
size.dat <- deNovoRateBySize(trio.dat.list=fam.dat.list,size.bins=40,count=count)
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".by_size.pdf",sep=""),
height=4,width=5)
plotDNRvsSize(DNRs=size.dat$DNRs,bins=size.dat$bins,k=4,nfams=length(fam.dat.list),
title=paste(title.prefix,"De Novo Rate by Size",sep=""),
fam.type=fam.type,legend=T)
dev.off()
#DNR by Freq
freq.dat <- deNovoRateByFreq(trio.dat.list=fam.dat.list,freq.bins=40,count=count)
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".by_frequency.pdf",sep=""),
height=4,width=5)
plotDNRvsFreq(DNRs=freq.dat$DNRs,bins=freq.dat$bins,k=4,nfams=length(fam.dat.list),
title=paste(title.prefix,"De Novo Rate by Freq.",sep=""),
count=count,fam.type=fam.type,legend=T)
dev.off()
#DNR by Proband GQ
if(gq) {
GQ.dat <- deNovoRateByProGQ(trio.dat.list=fam.dat.list,GQ.bins=40,count=count)
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".by_proband_GQ.pdf",sep=""),
height=4,width=5)
plotDNRvsGQ(DNRs=GQ.dat$DNRs,bins=GQ.dat$bins,k=4,nfams=length(fam.dat.list),
title=paste(title.prefix,"De Novo Rate by Min. Proband GQ",sep=""),
                count=count,fam.type=fam.type,legend=T,xlabel="Min. Proband GQ")
dev.off()
}
}
#Wrapper for de novo rate heatmaps
wrapperDeNovoRateHeats <- function(fam.dat.list,fam.type,count="variants"){
#Set title prefix
if(count=="variants"){
title.prefix <- "SV Site "
}else{
title.prefix <- "SV Allele "
}
#Gather DNR data
DNR.dat <- deNovoRateBySizeFreq(trio.dat.list=fam.dat.list,count=count,
max.sizes=c(tiny.max.size,small.max.size,medium.max.size,
medlarge.max.size,large.max.size),
size.labs=c("<100bp","100-\n500bp","500bp-\n2.5kb",
"2.5-10kb","10kb-50kb",">50kb"),
max.freqs=c(0.01,0.05,0.10,0.50),
freq.labs=c("<1%","1-5%","5-10%","10-50%",">50%"))
#Plot one heatmap for all variants
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".size_vs_freq.all_sv.pdf",sep=""),
height=5,width=5)
plotHeatmap(mat=DNR.dat$ALL,nfams=length(fam.dat.list),fam.type=fam.type,
title=paste(title.prefix,"De Novo Rate, Size vs. Freq. [All SV]",sep=""))
dev.off()
#Plot one heatmap per variant class
sapply(svtypes$svtype,function(svtype){
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".size_vs_freq.",svtype,".pdf",sep=""),
height=5,width=5)
plotHeatmap(mat=DNR.dat[[which(names(DNR.dat)==svtype)]],nfams=length(fam.dat.list),fam.type=fam.type,
title=paste(title.prefix,"De Novo Rate, Size vs. Freq. [",svtype,"]",sep=""))
dev.off()
})
}
#Wrapper for master summary panel
masterInhWrapper <- function(fam.dat.list,fam.type, gq=T){
#Set title suffix
if(fam.type=="trio"){
title.suffix <- paste("(Trios; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}else{
if(fam.type=="duo"){
title.suffix <- paste("(Duos; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}else{
title.suffix <- paste("(Families; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}
}
#Prepare plot area
width <- ifelse(gq, 12, 10)
pdf(paste(OUTDIR,"/main_plots/VCF_QC.SV_",fam.type,"_inheritance.pdf",sep=""),
height=5,width=width)
if(gq) {
layout(matrix(c(1,2,3,4,5,
6,7,8,9,10),
byrow=T,nrow=2))
} else {
layout(matrix(c(1,2,3,4,
5,6,7,8),
byrow=T,nrow=2))
}
#Set global cex.lab
cex.lab <- 0.75
###Top row: SV sites
#Master inheritance plot
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list),
title=paste("SV Site Inheritance (n=",
prettyNum(length(fam.dat.list),big.mark=","),
" ",fam.type,"s)",sep=""),
count="variants",cex.lab=cex.lab)
#DNR vs size
size.dat.v <- deNovoRateBySize(trio.dat.list=fam.dat.list,size.bins=40,count="variants")
plotDNRvsSize(DNRs=size.dat.v$DNRs,bins=size.dat.v$bins,k=4,nfams=length(fam.dat.list),
title=paste("Site De Novo Rate by Size",sep=""),
fam.type=fam.type,legend=T,cex.lab=cex.lab)
#DNR vs frequency
freq.dat.v <- deNovoRateByFreq(trio.dat.list=fam.dat.list,freq.bins=40,count="variants")
plotDNRvsFreq(DNRs=freq.dat.v$DNRs,bins=freq.dat.v$bins,k=4,nfams=length(fam.dat.list),
title=paste("Site De Novo Rate by Freq.",sep=""),
count="variants",fam.type=fam.type,legend=F,cex.lab=cex.lab)
#DNR vs min proband GQ
if(gq) {
GQ.dat.v <- deNovoRateByProGQ(trio.dat.list=fam.dat.list,GQ.bins=40,count="variants")
plotDNRvsGQ(DNRs=GQ.dat.v$DNRs,bins=GQ.dat.v$bins,k=4,nfams=length(fam.dat.list),
title=paste("Site De Novo Rate by GQ",sep=""),
count="variants",fam.type=fam.type,legend=F,cex.lab=cex.lab,
                xlabel="Min. Proband GQ")
}
#DNR heatmap (size vs freq.)
DNR.dat.v <- deNovoRateBySizeFreq(trio.dat.list=fam.dat.list,count="variants",
max.sizes=c(tiny.max.size,small.max.size,medium.max.size,
medlarge.max.size,large.max.size),
size.labs=c("<100bp","100-\n500bp","500bp-\n2.5kb",
"2.5-10kb","10kb-50kb",">50kb"),
max.freqs=c(0.01,0.05,0.10,0.50),
freq.labs=c("<1%","1-5%","5-\n10%","10-\n50%",">50%"))
plotHeatmap(mat=DNR.dat.v$ALL,nfams=length(fam.dat.list),fam.type=fam.type,
title=paste("Site De Novo Rate, Size vs. Freq.",sep=""),cex.lab=cex.lab)
###Bottom row: SV alleles
#Master inheritance plot
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list),
title=paste("SV Allele Inheritance (n=",
prettyNum(length(fam.dat.list),big.mark=","),
" ",fam.type,"s)",sep=""),
count="alleles",cex.lab=cex.lab)
#DNR vs size
size.dat.a <- deNovoRateBySize(trio.dat.list=fam.dat.list,size.bins=40,count="alleles")
plotDNRvsSize(DNRs=size.dat.a$DNRs,bins=size.dat.a$bins,k=4,nfams=length(fam.dat.list),
title=paste("Allele De Novo Rate by Size",sep=""),
fam.type=fam.type,legend=F,cex.lab=cex.lab)
#DNR vs frequency
freq.dat.a <- deNovoRateByFreq(trio.dat.list=fam.dat.list,freq.bins=40,count="alleles")
plotDNRvsFreq(DNRs=freq.dat.a$DNRs,bins=freq.dat.a$bins,k=4,nfams=length(fam.dat.list),
title=paste("Allele De Novo Rate by Freq.",sep=""),
count="alleles",fam.type=fam.type,legend=F,cex.lab=cex.lab)
#DNR vs min proband GQ
if(gq){
GQ.dat.a <- deNovoRateByProGQ(trio.dat.list=fam.dat.list,GQ.bins=40,count="alleles")
plotDNRvsGQ(DNRs=GQ.dat.a$DNRs,bins=GQ.dat.a$bins,k=4,nfams=length(fam.dat.list),
title=paste("Allele De Novo Rate by GQ",sep=""),
count="alleles",fam.type=fam.type,legend=F,cex.lab=cex.lab,
xlabel="Min. Proband GQ")
}
#DNR heatmap (size vs freq.)
DNR.dat.a <- deNovoRateBySizeFreq(trio.dat.list=fam.dat.list,count="alleles",
max.sizes=c(tiny.max.size,small.max.size,medium.max.size,
medlarge.max.size,large.max.size),
size.labs=c("<100bp","100-\n500bp","500bp-\n2.5kb",
"2.5-10kb","10kb-50kb",">50kb"),
max.freqs=c(0.01,0.05,0.10,0.50),
freq.labs=c("<1%","1-5%","5-\n10%","10-\n50%",">50%"))
plotHeatmap(mat=DNR.dat.a$ALL,nfams=length(fam.dat.list),fam.type=fam.type,
title=paste("Allele De Novo Rate, Size vs. Freq.",sep=""),cex.lab=cex.lab)
#Close device
dev.off()
}
########################
###RSCRIPT FUNCTIONALITY
########################
###Load libraries as needed
require(optparse)
require(beeswarm)
require(vioplot)
require(zoo)
###List of command-line options
option_list <- list(
make_option(c("-S", "--svtypes"), type="character", default=NULL,
help="tab-delimited file specifying SV types and HEX colors [default %default]",
metavar="character"),
make_option(c("-M", "--multiallelics"), type="logical", default=FALSE,
help="include multiallelic sites in inheritance calculations [default %default]",
metavar="logical")
)
###Get command-line arguments & options
args <- parse_args(OptionParser(usage="%prog svstats.bed famfile perSampleDir OUTDIR",
option_list=option_list),
positional_arguments=TRUE)
opts <- args$options
###Checks for appropriate positional arguments
if(length(args$args) != 4){
stop("Incorrect number of required positional arguments\n")
}
###Writes args & opts to vars
dat.in <- args$args[1]
famfile.in <- args$args[2]
perSampDir <- args$args[3]
OUTDIR <- args$args[4]
svtypes.file <- opts$svtypes
multiallelics <- opts$multiallelics
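# Example invocation (illustrative paths; positional args per the usage string above):
#   Rscript analyze_fams.R -S SV_colors.txt \
#     svstats.bed cleaned.fam perSample_VIDs_dir/ output_plots_dir/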
# #Dev parameters
# dat.in <- "~/scratch/xfer/gnomAD_v2_SV_MASTER_resolved_VCF.VCF_sites.stats.bed.gz"
# famfile.in <- "~/scratch/xfer/cleaned.fam"
# perSampDir <- "~/scratch/xfer/gnomAD_v2_SV_MASTER_resolved_VCF_perSample_VIDs_merged/"
# OUTDIR <- "~/scratch/famQC_plots_test/"
# # OUTDIR <- "~/scratch/VCF_plots_test/"
# svtypes.file <- "~/Desktop/Collins/Talkowski/code/sv-pipeline/ref/vcf_qc_refs/SV_colors.txt"
# multiallelics <- F
###Prepares I/O files
#Read & clean SV stats data
dat <- read.table(dat.in,comment.char="",sep="\t",header=T,check.names=F)
colnames(dat)[1] <- "chr"
#Restrict data to autosomes only, and exclude multiallelics (if optioned)
allosome.exclude.idx <- which(!(dat$chr %in% c(1:22,paste("chr",1:22,sep=""))))
multi.exclude.idx <- which(dat$other_gts>0)
cat(paste("NOTE: only autosomes considered during transmission analyses. Excluded ",
prettyNum(length(allosome.exclude.idx),big.mark=","),"/",
prettyNum(nrow(dat),big.mark=",")," (",
round(100*length(allosome.exclude.idx)/nrow(dat),1),
"%) of all variants as non-autosomal.\n",sep=""))
if(multiallelics==F){
cat(paste("NOTE: only biallelic variants considered during transmission analyses. Excluded ",
prettyNum(length(multi.exclude.idx),big.mark=","),"/",
prettyNum(nrow(dat),big.mark=",")," (",
round(100*length(multi.exclude.idx)/nrow(dat),1),
"%) of all variants as multiallelic.\n",sep=""))
all.exclude.idx <- unique(c(allosome.exclude.idx,multi.exclude.idx))
cat(paste("NOTE: excluded a nonredundant total of ",
prettyNum(length(all.exclude.idx),big.mark=","),"/",
prettyNum(nrow(dat),big.mark=",")," (",
round(100*length(all.exclude.idx)/nrow(dat),1),
"%) of all variants due to autosomal and/or multiallelic filters.\n",sep=""))
  #Guard against empty exclusion indexes (dat[-integer(0),] would drop every row)
  if(length(all.exclude.idx) > 0){
    dat <- dat[-all.exclude.idx,]
  }
}else{
  if(length(allosome.exclude.idx) > 0){
    dat <- dat[-allosome.exclude.idx,]
  }
}
cat(paste("NOTE: retained ",
prettyNum(nrow(dat),big.mark=","),
" variants for transmission analyses.\n",sep=""))
#Read fam file and splits into duos and trios
fams <- read.table(famfile.in,comment.char="",header=T,check.names=F)
colnames(fams)[1] <- "FAM_ID"
# duos <- fams[grep("DUO_",fams$FAM_ID,fixed=T),]
# duos$FATHER[which(duos$FATHER==".")] <- NA
# duos$MOTHER[which(duos$MOTHER==".")] <- NA
trios <- fams[grep("TRIO_",fams$FAM_ID,fixed=T),]
#Sets sv types & colors
if(!is.null(svtypes.file)){
svtypes <- read.table(svtypes.file,sep="\t",header=F,comment.char="",check.names=F)
svtypes <- as.data.frame(apply(svtypes,2,as.character))
colnames(svtypes) <- c("svtype","color")
}else{
svtypes.v <- unique(dat$svtype)
svtypes.c <- brewer.pal(length(svtypes.v),"Dark2")
svtypes <- data.frame("svtype"=svtypes.v,
"color"=svtypes.c)
}
#Create output directory structure, if necessary
if(!dir.exists(OUTDIR)){
dir.create(OUTDIR)
}
if(!dir.exists(paste(OUTDIR,"/main_plots/",sep=""))){
dir.create(paste(OUTDIR,"/main_plots/",sep=""))
}
if(!dir.exists(paste(OUTDIR,"/supporting_plots/",sep=""))){
dir.create(paste(OUTDIR,"/supporting_plots/",sep=""))
}
if(!dir.exists(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/",sep=""))){
dir.create(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/",sep=""))
}
###Performs trio analyses, if any trios exist
if(nrow(trios)>0){
#Downsample to 100 trios if necessary
if(nrow(trios)>100){
trios <- trios[sample(1:nrow(trios),100,replace=F),]
}
#Read data
trio.dat <- apply(trios[,2:4],1,function(IDs){
IDs <- as.character(IDs)
return(getFamDat(dat=dat,proband=IDs[1],father=IDs[2],
mother=IDs[3],biallelic=!multiallelics))
})
names(trio.dat) <- trios[,1]
# if there are no GQ values in any of the trios, do not make GQ plots
gq <- any(unlist(lapply(trio.dat, function(trio){ sum(!is.na(trio$pro.GQ)) > 0 })))
#Master wrapper
masterInhWrapper(fam.dat.list=trio.dat,fam.type="trio", gq=gq)
#Standard inheritance panels
sapply(c("variants","alleles"),function(count){
wrapperInheritancePlots(fam.dat.list=trio.dat,
fam.type="trio",
count=count)
})
#De novo rate panels
sapply(c("variants","alleles"),function(count){
wrapperDeNovoRateLines(fam.dat.list=trio.dat,
fam.type="trio",
count=count,
gq=gq)
})
#De novo rate heatmaps
sapply(c("variants","alleles"),function(count){
wrapperDeNovoRateHeats(fam.dat.list=trio.dat,
fam.type="trio",
count=count)
})
}
# ###Performs duo analyses, if any duos exist
# if(nrow(duos)>0){
# #Read data
# duo.dat <- apply(duos[,2:4],1,function(IDs){
# IDs <- as.character(IDs)
# return(getFamDat(dat=dat,proband=IDs[1],father=IDs[2],
# mother=IDs[3],biallelic=!multiallelics))
# })
# names(duo.dat) <- duos[,1]
#
# #Master wrapper
# masterInhWrapper(fam.dat.list=duo.dat,fam.type="duo")
# #Standard inheritance panels
# sapply(c("variants","alleles"),function(count){
# wrapperInheritancePlots(fam.dat.list=duo.dat,
# fam.type="duo",
# count=count)
# })
# #De novo rate panels
# sapply(c("variants","alleles"),function(count){
# wrapperDeNovoRateLines(fam.dat.list=duo.dat,
# fam.type="duo",
# count=count)
# })
# #De novo rate heatmaps
# sapply(c("variants","alleles"),function(count){
# wrapperDeNovoRateHeats(fam.dat.list=duo.dat,
# fam.type="duo",
# count=count)
# })
# }
|
/src/sv-pipeline/scripts/vcf_qc/analyze_fams.R
|
no_license
|
AlesMaver/gatk-sv
|
R
| false | false | 57,869 |
r
|
#!/usr/bin/env Rscript
# Copyright (c) 2018 Talkowski Laboratory
# Contact: Ryan Collins <rlcollins@g.harvard.edu>
# Distributed under terms of the MIT license.
# Helper script to perform family-based VCF QC & plot results
###Set master parameters
options(stringsAsFactors=F,scipen=1000)
rare.max.freq <- 0.01
uncommon.max.freq <- 0.1
common.max.freq <- 0.5
major.max.freq <- 1
tiny.max.size <- 100
small.max.size <- 500
medium.max.size <- 2500
medlarge.max.size <- 10000
large.max.size <- 50000
huge.max.size <- 300000000
nocall.placeholder <- 9999
###################
###HELPER FUNCTIONS
###################
#Read & clean list of variant IDs & genotypes per sample
readDatPerSample <- function(ID,nocall.placeholder=9999){
#Set path
path <- paste(perSampDir,"/",ID,".VIDs_genotypes.txt.gz",sep="")
#Read & process data if file exists
if(file.exists(path)){
#Read data
x <- read.table(path,header=F,check.names=F)
#Convert genotypes to number of alleles
x[,2] <- sapply(x[,2],function(gt){
#Return nocall.placeholder for no-calls
if(gt=="./."){
return(nocall.placeholder)
}else{
sum(as.numeric(gsub(".","",unlist(strsplit(as.character(gt),split="/")),fixed=T)))
}
})
#Format output data
x[,2:3] <- apply(x[,2:3],2,as.numeric)
colnames(x) <- c("VID","alleles","GQ")
#Return data
return(x)
}else{
warning(paste("VID file not found for sample ",ID,sep=""))
return(NULL)
}
}
#Subset SV stats data
subsetDat <- function(dat,vlist,biallelic=T,
min.GQ.pro=0,max.GQ.pro=999,
min.GQ.par=0,max.GQ.par=999){
#Check input variant list
#Subset dat to variants found in sample & append number of alleles in sample
x <- merge(dat,vlist,by="VID",sort=F)
#Reorder columns
x <- x[,c(2:4,1,5:ncol(x))]
#Exclude non-biallelic sites, if optioned
if(biallelic==T){
x <- x[which(x$carriers>0 & x$other_gts==0),]
}
#Filter sites to specified GQ ranges
x <- x[which(is.na(x$pro.GQ) | (x$pro.GQ>=min.GQ.pro & x$pro.GQ<=max.GQ.pro)),]
# parent.GQ.range <- as.data.frame(t(apply(x[,which(colnames(x) %in% c("fa.GQ","mo.GQ")),],1,function(vals){
# if(any(!is.na(vals))){
# return(range(vals,na.rm=T))
# }else{
# return(c(NA,NA))
# }
# })))
# colnames(parent.GQ.range) <- c("min","max")
# x <- x[which((is.na(parent.GQ.range$min) | parent.GQ.range$min>=min.GQ.par) &
# (is.na(parent.GQ.range$max) | parent.GQ.range$max<=max.GQ.par)),]
#Return result
return(x)
}
#Gather matrix of SVs in any member of a family with information on allele counts in child & parent(s)
getFamDat <- function(dat,proband,father=NA,mother=NA,biallelic=T,nocall.placeholder=9999){
#Clean parent IDs
if(is.na(father)){
father <- NULL
}
if(is.na(mother)){
mother <- NULL
}
#Read VID lists for family members
VID.lists <- lapply(c(proband,father,mother),readDatPerSample)
names(VID.lists) <- c(proband,father,mother)
#Get master VID list of all family members
if(!is.null(father)){
vlist <- merge(VID.lists[[which(names(VID.lists)==proband)]],
VID.lists[[which(names(VID.lists)==father)]],
sort=F,by="VID",all=T,suffixes=c(".pro",".fa"))
if(!is.null(mother)){
vlist <- merge(vlist,
VID.lists[[which(names(VID.lists)==mother)]],
sort=F,by="VID",all=T)
colnames(vlist)[6:7] <- c("alleles.mo","GQ.mo")
}else{
vlist$alleles.mo <- NA
vlist$GQ.mo <- NA
}
}else{
vlist <- VID.lists[[which(names(VID.lists)==proband)]]
colnames(vlist[which(colnames(vlist)=="alleles")]) <- "alleles.pro"
colnames(vlist[which(colnames(vlist)=="GQ")]) <- "GQ.pro"
vlist$alleles.fa <- 0
vlist$GQ.fa <- NA
vlist <- merge(vlist,
VID.lists[[which(names(VID.lists)==mother)]],
sort=F,by="VID",all=T,suffixes=c(".pro",".mo"))
}
#Only retain sites where all three samples are not null genotype (no-call, ./., nocall.placeholder)
exclude <- which(sapply(vlist[,c(2,4,6)],
function(vals){
any(as.numeric(vals)==nocall.placeholder)
}))
if(length(exclude) > 0){
vlist <- vlist[-exclude,]
}
#Convert remaining NA allele counts to 0s
vlist[,c(2,4,6)] <- apply(vlist[,c(2,4,6)],2,function(vals){
vals[which(is.na(vals))] <- 0
return(vals)
})
#Add transmission information to vlist
trans <- t(apply(vlist[,c(2,4,6)],1,function(alleles){
#Convert allele counts to numeric
sapply(alleles,function(vals){
vals[which(is.na(vals))] <- 0
})
#Get allele counts
pro <- as.numeric(alleles[1])
fa <- as.numeric(alleles[2])
mo <- as.numeric(alleles[3])
#Infer child inheritance status
pro.denovo <- max(c(pro-(fa+mo),0))
pro.inherited <- pro-pro.denovo
#Divide credit for inherited variants between parents based on ratio of parent allele counts
parental.alleles <- sum(c(fa,mo),na.rm=T)
if(fa>0){
p.fa <- fa/parental.alleles
}else{
p.fa <- 0
}
if(mo>0){
p.mo <- mo/parental.alleles
}else{
p.mo <- 0
}
fa.transmitted <- pro.inherited*p.fa
fa.untransmitted <- max(c(0,fa-fa.transmitted))
mo.transmitted <- pro.inherited*p.mo
mo.untransmitted <- max(c(0,mo-mo.transmitted))
#Return vector of relevant transmission allele counts
return(c(pro,pro.inherited,pro.denovo,
fa,fa.transmitted,fa.untransmitted,
mo,mo.transmitted,mo.untransmitted))
}))
trans <- as.data.frame(cbind(vlist[,1],trans,vlist[,c(3,5,7)]))
colnames(trans) <- c("VID","pro.all","pro.inherited","pro.denovo",
"fa.all","fa.trans","fa.untrans",
"mo.all","mo.trans","mo.untrans",
"pro.GQ","fa.GQ","mo.GQ")
#Subset data & add transmission data
dat.fam <- subsetDat(dat=dat,vlist=trans,biallelic=biallelic)
dat.fam[,(ncol(dat.fam)-9):ncol(dat.fam)] <- apply(dat.fam[,(ncol(dat.fam)-9):ncol(dat.fam)],2,as.numeric)
return(dat.fam)
}
#Compute inheritance stats from a dat.fam dataframe
computeInheritance <- function(dat.fam,VIDs=NULL){
#Clean dat.fam
dat.fam <- as.data.frame(dat.fam)
#Subset data frame based on list of VIDs
if(!is.null(VIDs)){
dat.fam <- dat.fam[which(as.character(dat.fam$VID) %in% as.character(VIDs)),]
}
#Compute allele-based inheritance rates
pro.a.all <- sum(dat.fam$pro.all)
pro.a.denovo <- sum(dat.fam$pro.denovo)
pro.a.denovorate <- pro.a.denovo/pro.a.all
pro.a.inh <- pro.a.all-pro.a.denovo
pro.a.inhrate <- pro.a.inh/pro.a.all
fa.a.all <- sum(dat.fam$fa.all)
fa.a.trans <- sum(dat.fam$fa.trans)
fa.a.transrate <- fa.a.trans/fa.a.all
fa.a.untrans <- fa.a.all-fa.a.trans
fa.a.untransrate <- fa.a.untrans/fa.a.all
pro.a.patfrac <- fa.a.trans/pro.a.inh
mo.a.all <- sum(dat.fam$mo.all)
mo.a.trans <- sum(dat.fam$mo.trans)
mo.a.transrate <- mo.a.trans/mo.a.all
mo.a.untrans <- mo.a.all-mo.a.trans
mo.a.untransrate <- mo.a.untrans/mo.a.all
pro.a.matfrac <- mo.a.trans/pro.a.inh
pro.a.patmatratio <- pro.a.patfrac/(pro.a.patfrac+pro.a.matfrac)
#Compute variant-based inheritance rates
pro.v.all <- length(which(dat.fam$pro.all>0))
pro.v.denovo <- length(which(dat.fam$pro.all>0 & dat.fam$pro.inherited==0))
pro.v.denovorate <- pro.v.denovo/pro.v.all
pro.v.inh <- pro.v.all-pro.v.denovo
pro.v.inhrate <- pro.v.inh/pro.v.all
fa.v.all <- length(which(dat.fam$fa.all>0))
fa.v.trans <- length(which(dat.fam$pro.all>0 & dat.fam$fa.all>0))
fa.v.transrate <- fa.v.trans/fa.v.all
fa.v.untrans <- fa.v.all-fa.v.trans
fa.v.untransrate <- fa.v.untrans/fa.v.all
pro.v.patfrac <- fa.v.trans/pro.v.inh
mo.v.all <- length(which(dat.fam$mo.all>0))
mo.v.trans <- length(which(dat.fam$pro.all>0 & dat.fam$mo.all>0))
mo.v.transrate <- mo.v.trans/mo.v.all
mo.v.untrans <- mo.v.all-mo.v.trans
mo.v.untransrate <- mo.v.untrans/mo.v.all
pro.v.matfrac <- mo.v.trans/pro.v.inh
pro.v.patmatratio <- pro.a.patfrac/(pro.a.patfrac+pro.a.matfrac)
#Format & return vector of rates
return(c("pro.allele.all"=pro.a.all,
"pro.allele.inh"=pro.a.inh,
"pro.allele.inhrate"=pro.a.inhrate,
"pro.allele.patfrac"=pro.a.patfrac,
"pro.allele.matfrac"=pro.a.matfrac,
"pro.allele.patmatratio"=pro.a.patmatratio,
"pro.allele.denovo"=pro.a.denovo,
"pro.allele.denovorate"=pro.a.denovorate,
"fa.allele.all"=fa.a.all,
"fa.allele.trans"=fa.a.trans,
"fa.allele.transrate"=fa.a.transrate,
"fa.allele.untrans"=fa.a.untrans,
"fa.allele.untransrate"=fa.a.untransrate,
"mo.allele.all"=mo.a.all,
"mo.allele.trans"=mo.a.trans,
"mo.allele.transrate"=mo.a.transrate,
"mo.allele.untrans"=mo.a.untrans,
"mo.allele.untransrate"=mo.a.untransrate,
"pro.site.all"=pro.v.all,
"pro.site.inh"=pro.v.inh,
"pro.site.inhrate"=pro.v.inhrate,
"pro.site.patfrac"=pro.v.patfrac,
"pro.site.matfrac"=pro.v.matfrac,
"pro.site.patmatratio"=pro.v.patmatratio,
"pro.site.denovo"=pro.v.denovo,
"pro.site.denovorate"=pro.v.denovorate,
"fa.site.all"=fa.v.all,
"fa.site.trans"=fa.v.trans,
"fa.site.transrate"=fa.v.transrate,
"fa.site.untrans"=fa.v.untrans,
"fa.site.untransrate"=fa.v.untransrate,
"mo.site.all"=mo.v.all,
"mo.site.trans"=mo.v.trans,
"mo.site.transrate"=mo.v.transrate,
"mo.site.untrans"=mo.v.untrans,
"mo.site.untransrate"=mo.v.untransrate))
}
#Compute inheritance for a list of trios and return as a data frame
computeInheritanceMulti <- function(trio.dat.list,VIDs=NULL){
#Iterate over trios and compute inheritance
res <- as.data.frame(t(sapply(trio.dat.list,computeInheritance,VIDs=VIDs)))
return(res)
}
#Collect de novo rate per SV class
deNovoRateByClass <- function(trio.dat.list,VIDs=NULL){
#Collect median DNR across all classes
all.dat <- computeInheritanceMulti(trio.dat.list=trio.dat.list,VIDs=VIDs)
all.dnrs <- c(median(all.dat$pro.site.denovorate,na.rm=T),
median(all.dat$pro.allele.denovorate,na.rm=T))
#Iterate over classes and return DNRs
res <- sapply(svtypes$svtype,function(svtype){
if(is.null(VIDs)){
VIDs <- dat$VID
}
sub.dat <- computeInheritanceMulti(trio.dat.list=trio.dat.list,
VIDs=dat$VID[which(dat$VID %in% VIDs & dat$svtype==svtype)])
sub.dnrs <- c(median(sub.dat$pro.site.denovorate,na.rm=T),
median(sub.dat$pro.allele.denovorate,na.rm=T))
})
#Format & return results
res <- as.data.frame(cbind(all.dnrs,res))
colnames(res) <- c("ALL",svtypes$svtype)
rownames(res) <- c("variants","alleles")
return(res)
}
#Collect matrix of de novo rates by class by freq
deNovoRateByFreq <- function(trio.dat.list,freq.bins=40,count="variants"){
#Get frequency index
if(count=="variants"){
freq.idx <- which(colnames(dat)=="carrierFreq")
}else{
freq.idx <- which(colnames(dat)=="AF")
}
#Create evenly spaced freq bins on log10-scale
logfreq.min <- log10(min(dat[,freq.idx]))
logfreq.max <- log10(1)
logfreq.steps <- seq(logfreq.min,logfreq.max,by=(logfreq.max-logfreq.min)/(freq.bins-1))
freq.df <- data.frame("min.freq"=c(0,10^logfreq.steps[-length(logfreq.steps)]),
"max.freq"=10^logfreq.steps)
rownames(freq.df) <- paste(round(100*freq.df[,1],4),"-",
round(100*freq.df[,2],4),"%",sep="")
#Iterate over frequency bins and gather de novo rates
DNRs <- apply(freq.df,1,function(bounds){
dnrs <- deNovoRateByClass(trio.dat.list=trio.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]>bounds[1] & dat[,freq.idx]<=bounds[2])])
return(as.numeric(dnrs[which(rownames(dnrs)==count),]))
})
#Format & return DNRs & freq.df
DNRs <- as.data.frame(DNRs)
DNRs <- apply(DNRs,2,as.numeric)
rownames(DNRs) <- c("ALL",svtypes$svtype)
return(list("DNRs"=DNRs,"bins"=freq.df))
}
#Collect matrix of de novo rates by class by size
deNovoRateBySize <- function(trio.dat.list,size.bins=40,count="variants"){
#Create evenly spaced size bins on log10-scale
logsize.min <- log10(50)
logsize.max <- log10(1000000)
logsize.steps <- seq(logsize.min,logsize.max,by=(logsize.max-logsize.min)/(size.bins-2))
size.df <- data.frame("min.size"=c(0,10^logsize.steps),
"max.size"=c(10^logsize.steps,300000000))
rownames(size.df) <- paste("10^",round(log10(size.df[,1]),1),
"-",round(log10(size.df[,2]),1),sep="")
  #Iterate over size bins and gather de novo rates
DNRs <- apply(size.df,1,function(bounds){
dnrs <- deNovoRateByClass(trio.dat.list=trio.dat.list,
VIDs=dat$VID[which(dat$length>bounds[1] & dat$length<=bounds[2])])
return(as.numeric(dnrs[which(rownames(dnrs)==count),]))
})
#Format & return DNRs & size.df
DNRs <- as.data.frame(DNRs)
DNRs <- apply(DNRs,2,as.numeric)
rownames(DNRs) <- c("ALL",svtypes$svtype)
return(list("DNRs"=DNRs,"bins"=size.df))
}
#Collect matrix of de novo rates by size & freq combination
deNovoRateBySizeFreq <- function(trio.dat.list,VIDs=NULL,count="variants",
max.sizes,size.labs,max.freqs,freq.labs){
#Get frequency index
if(count=="variants"){
freq.idx <- which(colnames(dat)=="carrierFreq")
}else{
freq.idx <- which(colnames(dat)=="AF")
}
#Create size & freq bins
size.df <- data.frame("min.size"=c(0,0,max.sizes),
"max.size"=c(300000000,max.sizes,300000000))
rownames(size.df) <- c("ALL",size.labs)
freq.df <- data.frame("min.freq"=c(0,0,max.freqs),
"max.freq"=c(1,max.freqs,1))
rownames(freq.df) <- c("ALL",freq.labs)
#Instantiate VIDs if necessary
if(is.null(VIDs)){
VIDs <- dat$VID
}
#Iterate over size bins & create DNR df for all SV
DNRs <- as.data.frame(t(sapply(1:nrow(size.df),function(s){
#Iterate over frequency bins
sapply(1:nrow(freq.df),function(f){
#Get de novo rate
DNR <- deNovoRateByClass(trio.dat.list,
VIDs=dat$VID[which(dat$VID %in% VIDs &
dat$length>size.df[s,1] & dat$length<=size.df[s,2] &
dat[,freq.idx]>freq.df[f,1] & dat[,freq.idx]<=freq.df[f,2])])
return(DNR$ALL[which(rownames(DNR)==count)])
})
})))
colnames(DNRs) <- rownames(freq.df)
rownames(DNRs) <- rownames(size.df)
#Iterate over SV classes and create DNR df for each class
DNRs.byClass <- lapply(svtypes$svtype,function(svtype){
DNRs <- as.data.frame(t(sapply(1:nrow(size.df),function(s){
#Iterate over frequency bins
sapply(1:nrow(freq.df),function(f){
#Get de novo rate
DNR <- deNovoRateByClass(trio.dat.list,
VIDs=dat$VID[which(dat$VID %in% VIDs & dat$svtype==svtype &
dat$length>size.df[s,1] & dat$length<=size.df[s,2] &
dat[,freq.idx]>freq.df[f,1] & dat[,freq.idx]<=freq.df[f,2])])
return(DNR$ALL[which(rownames(DNR)==count)])
})
})))
colnames(DNRs) <- rownames(freq.df)
rownames(DNRs) <- rownames(size.df)
return(DNRs)
})
names(DNRs.byClass) <- svtypes$svtype
#Combine all DNR dfs & return
DNRs.all <- c(list(DNRs),DNRs.byClass)
names(DNRs.all)[1] <- "ALL"
return(DNRs.all)
}
#Collect matrix of de novo rates by class by minimum proband GQ
deNovoRateByProGQ <- function(trio.dat.list,GQ.bins=40,count="variants"){
#Create evenly spaced GQ bins
GQ.steps <- seq(0,1000,by=1000/GQ.bins)
#Iterate over min GQs and gather de novo rates
DNRs <- sapply(GQ.steps,function(min.GQ){
tdl.tmp <- lapply(trio.dat.list,function(df){
return(df[which(df$pro.GQ>=min.GQ),])
})
dnrs <- deNovoRateByClass(trio.dat.list=tdl.tmp)
return(as.numeric(dnrs[which(rownames(dnrs)==count),]))
})
#Format & return DNRs & GQ.df
DNRs <- as.data.frame(DNRs)
DNRs <- apply(DNRs,2,as.numeric)
rownames(DNRs) <- c("ALL",svtypes$svtype)
colnames(DNRs) <- paste("gt",GQ.steps,sep="")
return(list("DNRs"=DNRs,"bins"=GQ.steps))
}
############################
###PLOTTING HELPER FUNCTIONS
############################
#Generate main inheritance plot
plotInhStats <- function(inh.stats,count="variants",title=NULL,cex.lab=1){
#Subset inh.stats to relevant columns
if(count=="variants"){
plot.df <- data.frame(inh.stats$pro.site.inhrate,
inh.stats$pro.site.patfrac,
inh.stats$pro.site.matfrac,
inh.stats$pro.site.patmatratio,
inh.stats$pro.site.denovorate,
"total.site.transrate"=(inh.stats$fa.site.trans+inh.stats$mo.site.trans)/(inh.stats$fa.site.all+inh.stats$mo.site.all),
inh.stats$fa.site.transrate,
inh.stats$mo.site.transrate)
}else{
plot.df <- data.frame(inh.stats$pro.allele.inhrate,
inh.stats$pro.allele.patfrac,
inh.stats$pro.allele.matfrac,
inh.stats$pro.allele.patmatratio,
inh.stats$pro.allele.denovorate,
"total.allele.transrate"=(inh.stats$fa.allele.trans+inh.stats$mo.allele.trans)/(inh.stats$fa.allele.all+inh.stats$mo.allele.all),
inh.stats$fa.allele.transrate,
inh.stats$mo.allele.transrate)
}
#Create vector of median fractions as representative for each category
if(count=="variants"){
median.counts <- c(paste(prettyNum(round(median(inh.stats$pro.site.inh),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.site.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.site.inh),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$mo.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.site.inh),0),big.mark=","),sep=""),
paste(round(100*median(inh.stats$pro.site.patfrac),0),"% : ",
round(100*median(inh.stats$pro.site.matfrac),0),"%",sep=""),
paste(prettyNum(round(median(inh.stats$pro.site.denovo),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.site.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.site.trans+inh.stats$mo.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$fa.site.all+inh.stats$mo.site.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$fa.site.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$mo.site.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$mo.site.all),0),big.mark=","),sep=""))
}else{
median.counts <- c(paste(prettyNum(round(median(inh.stats$pro.allele.inh),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.allele.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.allele.inh),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$mo.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.allele.inh),0),big.mark=","),sep=""),
paste(round(100*median(inh.stats$pro.allele.patfrac),0),"% : ",
round(100*median(inh.stats$pro.allele.matfrac),0),"%",sep=""),
paste(prettyNum(round(median(inh.stats$pro.allele.denovo),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$pro.allele.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.allele.trans+inh.stats$mo.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$fa.allele.all+inh.stats$mo.allele.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$fa.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$fa.allele.all),0),big.mark=","),sep=""),
paste(prettyNum(round(median(inh.stats$mo.allele.trans),0),big.mark=",")," / ",
prettyNum(round(median(inh.stats$mo.allele.all),0),big.mark=","),sep=""))
}
#Set plot colors
col.pat <- "#00B0CF"
col.mat <- "#F064A5"
col.dn <- "#F13D15"
col.other <- "gray35"
plot.cols <- c(col.other,col.pat,col.mat,col.other,
col.dn,col.other,col.pat,col.mat)
lab.cols <- plot.cols
lab.cols[which(lab.cols==col.other)] <- "black"
lab.fonts <- c(2,3,3,3,2,2,3,3)
#Prep plot area
par(mar=c(1,6.5,3,4.5))
plot(x=c(-0.05,1.05),y=c(0,-8.25),type="n",
xaxt="n",yaxt="n",xlab="",ylab="",yaxs="i")
#Dress up plot
abline(v=seq(0,1,0.2),lty=2,col="gray85")
# abline(h=c(0,-9),lwd=2,col="gray80")
abline(v=c(0,1),col="gray75")
# text(x=0.5,y=-0.2,pos=3,labels="SV Sites",font=4,cex=0.9)
# text(x=0.5,y=-9.2,pos=3,labels="SV Alleles",font=4,cex=0.9)
#Add category axes
cat.labels <- c("Proband\nInheritance Rate","Inh. Rate \n(Paternal) ","Inh. Rate \n(Maternal) ",
"Pat:Mat Ratio ","Proband\nDe Novo Rate","Parental\nTransmission Rate",
"Trans. Rate \n(Paternal) ","Trans. Rate \n(Maternal) ")
sapply(1:8,function(i){
axis(2,at=-i+0.3,line=-0.8,tick=F,las=2,cex.axis=0.7,font=lab.fonts[i],
labels=cat.labels[i],col.axis=lab.cols[i])
})
# sapply(1:8,function(i){
# axis(2,at=-i-8.7,line=-0.8,tick=F,las=2,cex.axis=0.7,font=lab.fonts[i],
# labels=cat.labels[i],col.axis=lab.cols[i])
# })
axis(2,at=c(-1,-5,-6,-9,-10,-14,-15,-18)+0.75,labels=NA,tck=-0.1,col="gray80")
#Add other axes & title
# axis(2,at=0.3,tick=F,las=2,line=-0.5,cex.axis=0.8,font=2,
# label=paste("n=",prettyNum(nrow(plot.df),big.mark=","),
# " ",fam.type,"s",sep=""))
axis(3,at=seq(0,1,0.2),labels=NA)
axis(3,at=seq(0,1,0.2),tick=F,line=-0.4,cex.axis=0.7,
labels=paste(seq(0,100,20),"%",sep=""))
mtext(3,line=1.5,text=title,font=2,cex=cex.lab)
axis(4,at=-0.1,tick=F,las=2,line=-0.5,cex.axis=0.8,font=2,
label="Median")
#Plot points & category info
sapply(1:ncol(plot.df),function(i){
if(any(!is.na(plot.df[,i]))){
#Add shading rect
rect(xleft=par("usr")[1],xright=par("usr")[2],
ybottom=-i+0.05,ytop=-i+0.45,bty="n",border=NA,
col=adjustcolor(plot.cols[i],alpha=0.15))
#Add points
beeswarm(plot.df[,i],add=T,horizontal=T,at=-i+0.25,
pch=21,cex=0.5,bg=plot.cols[i],pt.lwd=0.1,
corral="wrap",corralWidth=0.2)
#Add thick line & label for mean
cat.mean <- mean(plot.df[,i],na.rm=T)
segments(x0=cat.mean,x1=cat.mean,y0=-i,y1=-i+0.5,
lend="round",lwd=4,col=plot.cols[i])
text(x=cat.mean,y=-i+0.35,pos=3,cex=0.6,
labels=paste(round(100*cat.mean,1),"%",sep=""))
#Add median to right margin
axis(4,at=-i+0.3,line=-0.8,las=2,tick=F,cex.axis=0.6,
col.axis=lab.cols[i],labels=median.counts[i])
}
})
#Add clean-up box
box()
}
#Generate size distribution frame with log10 scaling
prepSizePlot <- function(xlims=c(50,1000000),cex.lab=1){
#Prep plot area
plot(x=log10(xlims),y=c(0,1),type="n",
xaxt="n",yaxt="n",xlab="",ylab="",yaxs="i")
#Add vertical gridlines
logscale.all <- log10(as.numeric(sapply(0:8,function(i){(1:9)*10^i})))
logscale.minor <- log10(as.numeric(sapply(0:8,function(i){c(5,10)*10^i})))
logscale.minor.labs <- as.character(sapply(c("bp","kb","Mb"),function(suf){paste(c(1,5,10,50,100,500),suf,sep="")}))
logscale.minor.labs <- c(logscale.minor.labs[-1],"1Gb")
logscale.major <- log10(as.numeric(10^(0:8)))
abline(v=logscale.all,col="gray97")
abline(v=logscale.minor,col="gray92")
abline(v=logscale.major,col="gray85")
#Add axes, title, and Alu/SVA/L1 ticks
axis(1,at=logscale.all,tck=-0.015,col="gray50",labels=NA)
axis(1,at=logscale.minor,tck=-0.0225,col="gray20",labels=NA)
axis(1,at=logscale.major,tck=-0.03,labels=NA)
axis(1,at=logscale.minor,tick=F,cex.axis=0.8,line=-0.3,las=2,
labels=logscale.minor.labs)
mtext(1,text="Size",line=2.25,cex=cex.lab)
axis(2,at=seq(0,1,0.2),tck=-0.025,labels=NA)
axis(2,at=seq(0,1,0.2),tick=F,line=-0.4,cex.axis=0.8,las=2,
labels=paste(seq(0,100,20),"%",sep=""))
mtext(2,text="De Novo Rate",line=2.2,cex=cex.lab)
#Add cleanup box
box()
}
#Plot DNRs vs size for all classes
plotDNRvsSize <- function(DNRs,bins,k=4,title=NULL,legend=T,fam.type="families",nfams,cex.lab=1){
#Prep plot area
par(mar=c(3.5,3.5,2.5,1))
prepSizePlot(cex.lab=cex.lab)
#Get midpoints for lines
mids <- log10(c(bins[1,2],
(bins[-c(1,nrow(bins)),1]+bins[-c(1,nrow(bins)),2])/2,
bins[nrow(bins),1]))
#Set type colors
colors <- c("gray15",svtypes$color)
lwds <- c(3,rep(2,times=nrow(svtypes)))
#Iterate over DNRs and plot per class
sapply(nrow(DNRs):1,function(i){
#Get values
vals <- as.numeric(DNRs[i,])
#Plot line & points
points(x=mids,y=vals,pch=19,cex=0.4,col=colors[i])
points(x=mids,
y=rollapply(vals,k,mean,partial=T,na.rm=T),
type="l",lwd=lwds[i],col=colors[i])
})
#Add legend
if(legend==T){
idx.for.legend <- which(apply(DNRs,1,function(vals){any(!is.na(vals))}))
legend("topright",bg="white",pch=19,cex=0.8*cex.lab,lwd=2,
legend=rownames(DNRs)[idx.for.legend],
col=colors[idx.for.legend])
}
#Add title & number of families
mtext(3,line=0.2,cex=0.8*cex.lab,text=paste("n=",prettyNum(nfams,big.mark=","),
" ",fam.type,"s",sep=""))
mtext(3,line=1,text=title,font=2,cex=cex.lab)
#Add cleanup box
box()
}
#Generate freq distribution frame with log10 scaling
prepFreqPlot <- function(xlims=c(1/10000,1),xlabel="Frequency",cex.lab=1){
#Prep plot area
plot(x=floor(log10(xlims)),y=c(0,1),type="n",
xaxt="n",yaxt="n",xlab="",ylab="",yaxs="i")
#Add vertical gridlines
logscale.all <- log10(as.numeric(sapply(0:8,function(i){(1:9)/10^i})))
logscale.minor <- log10(as.numeric(sapply(0:8,function(i){c(5,10)/10^i})))
logscale.major <- log10(as.numeric(1/10^(0:8)))
abline(v=logscale.all,col="gray97")
abline(v=logscale.minor,col="gray92")
abline(v=logscale.major,col="gray85")
#Add axes, title, and Alu/SVA/L1 ticks
axis(1,at=logscale.all,tck=-0.015,col="gray50",labels=NA)
axis(1,at=logscale.minor,tck=-0.0225,col="gray20",labels=NA)
axis(1,at=logscale.major,tck=-0.03,labels=NA)
for(i in -8:0){
axis(1,at=i,tick=F,cex.axis=0.8,line=-0.2,
labels=bquote(10^{.(i)}))
}
mtext(1,text=xlabel,line=2.25,cex=cex.lab)
axis(2,at=seq(0,1,0.2),tck=-0.025,labels=NA)
axis(2,at=seq(0,1,0.2),tick=F,line=-0.4,cex.axis=0.8,las=2,
labels=paste(seq(0,100,20),"%",sep=""))
mtext(2,text="De Novo Rate",line=2.2,cex=cex.lab)
#Add cleanup box
box()
}
#Plot DNRs vs freq for all classes
plotDNRvsFreq <- function(DNRs,bins,k=4,title=NULL,legend=T,fam.type="familie",nfams,count="variants",cex.lab=1){
#Get frequency index & x axis title
if(count=="variants"){
freq.idx <- which(colnames(dat)=="carrierFreq")
x.label <- "Carrier Frequency"
}else{
freq.idx <- which(colnames(dat)=="AF")
x.label <- "Allele Frequency"
}
#Prep plot area
par(mar=c(3.5,3.5,2.5,1))
prepFreqPlot(xlims=c(min(dat[,freq.idx],na.rm=T),1),
xlabel=x.label,cex.lab=cex.lab)
#Get midpoints for lines
mids <- log10(c(bins[1,2],(bins[-nrow(bins),1]+bins[-1,2])/2))
#Set type colors
colors <- c("gray15",svtypes$color)
lwds <- c(3,rep(2,times=nrow(svtypes)))
#Iterate over DNRs and plot per class
sapply(nrow(DNRs):1,function(i){
#Get values
vals <- as.numeric(DNRs[i,])
#Plot line & points
points(x=mids,y=vals,pch=19,cex=0.4,col=colors[i])
points(x=mids,
y=rollapply(vals,k,mean,partial=T,na.rm=T),
type="l",lwd=lwds[i],col=colors[i])
})
#Add legend
if(legend==T){
idx.for.legend <- which(apply(DNRs,1,function(vals){any(!is.na(vals))}))
legend("topright",bg="white",pch=19,cex=0.7,lwd=3,
legend=rownames(DNRs)[idx.for.legend],
col=colors[idx.for.legend])
}
#Add title & number of families
mtext(3,line=0.2,cex=0.8*cex.lab,text=paste("n=",prettyNum(nfams,big.mark=","),
" ",fam.type,"s",sep=""))
mtext(3,line=1,text=title,font=2,cex=cex.lab)
#Add cleanup box
box()
}
#Plot DNRs vs GQ for all classes
plotDNRvsGQ <- function(DNRs,bins,k=4,title=NULL,xlabel="Minimum GQ",
legend=T,fam.type="familie",nfams,count="variants",cex.lab=1){
#Get x axis title
if(count=="variants"){
x.label <- "Carrier Frequency"
}else{
x.label <- "Allele Frequency"
}
#Prep plot area
par(mar=c(3.5,3.5,2.5,1))
plot(x=range(bins),y=c(0,1),type="n",
xaxt="n",yaxt="n",xlab="",ylab="",yaxs="i")
#Add vertical gridlines
abline(v=seq(0,1000,50),col="gray92")
abline(v=seq(0,1000,100),col="gray85")
#Add axes & title
axis(1,at=seq(0,1000,100),tck=-0.03,labels=NA)
axis(1,at=seq(0,1000,100),tick=F,cex.axis=0.7*cex.lab,line=-0.4,
las=2,labels=paste(">",seq(0,1000,100),sep=""))
mtext(1,text=xlabel,line=2.25,cex=cex.lab)
axis(2,at=seq(0,1,0.2),tck=-0.025,labels=NA)
axis(2,at=seq(0,1,0.2),tick=F,line=-0.4,cex.axis=0.8,las=2,
labels=paste(seq(0,100,20),"%",sep=""))
mtext(2,text="De Novo Rate",line=2.2,cex=cex.lab)
#Set type colors
colors <- c("gray15",svtypes$color)
lwds <- c(3,rep(2,times=nrow(svtypes)))
#Iterate over DNRs and plot per class
sapply(nrow(DNRs):1,function(i){
#Get values
vals <- as.numeric(DNRs[i,])
#Plot line & points
points(x=bins,y=vals,pch=19,cex=0.4,col=colors[i])
points(x=bins,
y=rollapply(vals,k,mean,partial=T,na.rm=T),
type="l",lwd=lwds[i],col=colors[i])
})
#Add legend
if(legend==T){
idx.for.legend <- which(apply(DNRs,1,function(vals){any(!is.na(vals))}))
legend("topright",bg="white",pch=19,cex=0.7,lwd=3,
legend=rownames(DNRs)[idx.for.legend],
col=colors[idx.for.legend])
}
#Add title & number of families
mtext(3,line=0.2,cex=0.8*cex.lab,text=paste("n=",prettyNum(nfams,big.mark=","),
" ",fam.type,"s",sep=""))
mtext(3,line=1,text=title,font=2,cex=cex.lab)
#Add cleanup box
box()
}
#Generic heatmap function
plotHeatmap <- function(mat,nfams,fam.type,
x.labels=NULL,x.title=NULL,
y.labels=NULL,y.title=NULL,
title=NULL,cex.lab=1){
#Set values if NULL
if(is.null(x.labels)){
x.labels <- colnames(mat)
}
if(is.null(y.labels)){
y.labels <- rownames(mat)
}
#Prep plotting area
par(mar=c(2,4,4,2))
plot(x=c(0,ncol(mat)),y=c(0,-nrow(mat)),type="n",
xaxt="n",xaxs="i",xlab="",yaxt="n",yaxs="i",ylab="")
#Add axes
sapply(1:ncol(mat),function(i){
axis(3,at=i-0.5,tick=F,line=-0.8,las=2,labels=x.labels[i],cex.axis=0.7)
})
# mtext(1,line=2.75,text=x.title,cex=cex.lab)
axis(2,at=-(1:nrow(mat))+0.5,tick=F,line=-0.8,las=2,labels=y.labels,cex.axis=0.7)
# mtext(2,line=2.75,text=y.title,cex=cex.lab)
mtext(1,line=0,cex=0.7*cex.lab,
text=paste("Median of N=",prettyNum(nfams,big.mark=",")," ",fam.type,"s",sep=""))
mtext(3,line=2.25,text=title,font=2,cex=cex.lab)
#Plot all cells
sapply(1:nrow(mat),function(r){
sapply(1:ncol(mat),function(c){
#Get color range
col.range <- colorRampPalette(c("#FFFFFF","#FBDB69","#EF9C4B",
"#E45F30","#8B412B","#000000"))(101)
#Get & scale value
val <- mat[r,c]
pct <- round(100*val,0)
#Get color for shading
if(is.na(val)){
color <- "gray80"
label <- "N/A"
dens <- 12
}else{
color <- col.range[pct+1]
label <- paste(pct,"%",sep="")
dens <- NA
}
#Get text color
if(is.na(val)){
text.col <- "gray60"
}else{
if(val>0.5){
text.col <- "white"
}else{
text.col <- "black"
}
}
#Plot rectangle
rect(xleft=c-1,xright=c,ybottom=-r,ytop=-(r-1),
lwd=0.5,border="gray95",col=color,density=dens)
#Format cell annotation
text(x=c-0.5,y=-(r-0.5),labels=label,
cex=0.8,col=text.col)
})
})
#Clean up box
box()
}
############################
###INHERITANCE PLOT WRAPPERS
############################
#Wrapper for all standard inheritance plots
wrapperInheritancePlots <- function(fam.dat.list,fam.type,count="variants"){
#Set title prefix & suffix and freq filter index
if(count=="variants"){
title.prefix <- "SV Site "
freq.idx <- which(colnames(dat)=="carrierFreq")
freq.lab <- "CF"
}else{
title.prefix <- "SV Allele "
freq.idx <- which(colnames(dat)=="AF")
freq.lab <- "AF"
}
if(fam.type=="trio"){
title.suffix <- paste("(Trios; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}else{
if(fam.type=="duo"){
title.suffix <- paste("(Duos; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}else{
title.suffix <- paste("(Families; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}
}
#All variants
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".all_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=NULL),
title=paste(title.prefix,"Inheritance [All SV] ",title.suffix,sep=""),
count=count)
dev.off()
#Variants by class
sapply(svtypes$svtype,function(svtype){
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".",svtype,".pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$svtype==svtype)]),
title=paste(title.prefix,"Inheritance [",svtype,"] ",title.suffix,sep=""),
count=count)
dev.off()
})
#Tiny
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".tiny_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length<=tiny.max.size)]),
title=paste(title.prefix,"Inheritance [<100bp] ",title.suffix,sep=""),
count=count)
dev.off()
#Small
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".small_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>tiny.max.size & dat$length<=small.max.size)]),
title=paste(title.prefix,"Inheritance [100-500bp] ",title.suffix,sep=""),
count=count)
dev.off()
#Medium
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".medium_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>small.max.size & dat$length<=medium.max.size)]),
title=paste(title.prefix,"Inheritance [500bp-2.5kb] ",title.suffix,sep=""),
count=count)
dev.off()
#Med-large
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".medlarge_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>medium.max.size & dat$length<=medlarge.max.size)]),
title=paste(title.prefix,"Inheritance [2.5-10kb] ",title.suffix,sep=""),
count=count)
dev.off()
#Large
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".large_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>medlarge.max.size & dat$length<=large.max.size)]),
title=paste(title.prefix,"Inheritance [10-50kb] ",title.suffix,sep=""),
count=count)
dev.off()
#Huge
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".huge_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat$length>large.max.size)]),
title=paste(title.prefix,"Inheritance [>50kb] ",title.suffix,sep=""),
count=count)
dev.off()
#Rare
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".rare_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]<=rare.max.freq)]),
title=paste(title.prefix,"Inheritance [",freq.lab,"<1%] ",title.suffix,sep=""),
count=count)
dev.off()
#Uncommon
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".uncommon_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]>rare.max.freq & dat[,freq.idx]<=uncommon.max.freq)]),
title=paste(title.prefix,"Inheritance [",freq.lab," 1-10%] ",title.suffix,sep=""),
count=count)
dev.off()
#Common
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".common_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]>uncommon.max.freq & dat[,freq.idx]<=common.max.freq)]),
title=paste(title.prefix,"Inheritance [",freq.lab," 10-50%] ",title.suffix,sep=""),
count=count)
dev.off()
#Major
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_inheritance.",fam.type,"s.",count,".major_sv.pdf",sep=""),
height=3.75,width=4.5)
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list,
VIDs=dat$VID[which(dat[,freq.idx]>common.max.freq)]),
title=paste(title.prefix,"Inheritance [",freq.lab,">50%] ",title.suffix,sep=""),
count=count)
dev.off()
}
#Wrapper for de novo rate lineplots
wrapperDeNovoRateLines <- function(fam.dat.list,fam.type,count="variants",gq=F){
#Set title prefix
if(count=="variants"){
title.prefix <- "SV Site "
}else{
title.prefix <- "SV Allele "
}
#DNR by Size
size.dat <- deNovoRateBySize(trio.dat.list=fam.dat.list,size.bins=40,count=count)
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".by_size.pdf",sep=""),
height=4,width=5)
plotDNRvsSize(DNRs=size.dat$DNRs,bins=size.dat$bins,k=4,nfams=length(fam.dat.list),
title=paste(title.prefix,"De Novo Rate by Size",sep=""),
fam.type=fam.type,legend=T)
dev.off()
#DNR by Freq
freq.dat <- deNovoRateByFreq(trio.dat.list=fam.dat.list,freq.bins=40,count=count)
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".by_frequency.pdf",sep=""),
height=4,width=5)
plotDNRvsFreq(DNRs=freq.dat$DNRs,bins=freq.dat$bins,k=4,nfams=length(fam.dat.list),
title=paste(title.prefix,"De Novo Rate by Freq.",sep=""),
count=count,fam.type=fam.type,legend=T)
dev.off()
#DNR by Proband GQ
if(gq) {
GQ.dat <- deNovoRateByProGQ(trio.dat.list=fam.dat.list,GQ.bins=40,count=count)
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".by_proband_GQ.pdf",sep=""),
height=4,width=5)
plotDNRvsGQ(DNRs=GQ.dat$DNRs,bins=GQ.dat$bins,k=4,nfams=length(fam.dat.list),
title=paste(title.prefix,"De Novo Rate by Min. Proband GQ",sep=""),
                count=count,fam.type=fam.type,legend=T,xlabel="Min. Proband GQ")
dev.off()
}
}
#Wrapper for de novo rate heatmaps
wrapperDeNovoRateHeats <- function(fam.dat.list,fam.type,count="variants"){
#Set title prefix
if(count=="variants"){
title.prefix <- "SV Site "
}else{
title.prefix <- "SV Allele "
}
#Gather DNR data
DNR.dat <- deNovoRateBySizeFreq(trio.dat.list=fam.dat.list,count=count,
max.sizes=c(tiny.max.size,small.max.size,medium.max.size,
medlarge.max.size,large.max.size),
size.labs=c("<100bp","100-\n500bp","500bp-\n2.5kb",
"2.5-10kb","10kb-50kb",">50kb"),
max.freqs=c(0.01,0.05,0.10,0.50),
freq.labs=c("<1%","1-5%","5-10%","10-50%",">50%"))
#Plot one heatmap for all variants
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".size_vs_freq.all_sv.pdf",sep=""),
height=5,width=5)
plotHeatmap(mat=DNR.dat$ALL,nfams=length(fam.dat.list),fam.type=fam.type,
title=paste(title.prefix,"De Novo Rate, Size vs. Freq. [All SV]",sep=""))
dev.off()
#Plot one heatmap per variant class
sapply(svtypes$svtype,function(svtype){
pdf(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/sv_de_novo_rate.",fam.type,"s.",count,".size_vs_freq.",svtype,".pdf",sep=""),
height=5,width=5)
plotHeatmap(mat=DNR.dat[[which(names(DNR.dat)==svtype)]],nfams=length(fam.dat.list),fam.type=fam.type,
title=paste(title.prefix,"De Novo Rate, Size vs. Freq. [",svtype,"]",sep=""))
dev.off()
})
}
#Wrapper for master summary panel
masterInhWrapper <- function(fam.dat.list,fam.type, gq=T){
#Set title suffix
if(fam.type=="trio"){
title.suffix <- paste("(Trios; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}else{
if(fam.type=="duo"){
title.suffix <- paste("(Duos; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}else{
title.suffix <- paste("(Families; n=",
prettyNum(length(fam.dat.list),big.mark=","),
")",sep="")
}
}
#Prepare plot area
width <- ifelse(gq, 12, 10)
pdf(paste(OUTDIR,"/main_plots/VCF_QC.SV_",fam.type,"_inheritance.pdf",sep=""),
height=5,width=width)
if(gq) {
layout(matrix(c(1,2,3,4,5,
6,7,8,9,10),
byrow=T,nrow=2))
} else {
layout(matrix(c(1,2,3,4,
5,6,7,8),
byrow=T,nrow=2))
}
#Set global cex.lab
cex.lab <- 0.75
###Top row: SV sites
#Master inheritance plot
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list),
title=paste("SV Site Inheritance (n=",
prettyNum(length(fam.dat.list),big.mark=","),
" ",fam.type,"s)",sep=""),
count="variants",cex.lab=cex.lab)
#DNR vs size
size.dat.v <- deNovoRateBySize(trio.dat.list=fam.dat.list,size.bins=40,count="variants")
plotDNRvsSize(DNRs=size.dat.v$DNRs,bins=size.dat.v$bins,k=4,nfams=length(fam.dat.list),
title=paste("Site De Novo Rate by Size",sep=""),
fam.type=fam.type,legend=T,cex.lab=cex.lab)
#DNR vs frequency
freq.dat.v <- deNovoRateByFreq(trio.dat.list=fam.dat.list,freq.bins=40,count="variants")
plotDNRvsFreq(DNRs=freq.dat.v$DNRs,bins=freq.dat.v$bins,k=4,nfams=length(fam.dat.list),
title=paste("Site De Novo Rate by Freq.",sep=""),
count="variants",fam.type=fam.type,legend=F,cex.lab=cex.lab)
#DNR vs min proband GQ
if(gq) {
GQ.dat.v <- deNovoRateByProGQ(trio.dat.list=fam.dat.list,GQ.bins=40,count="variants")
plotDNRvsGQ(DNRs=GQ.dat.v$DNRs,bins=GQ.dat.v$bins,k=4,nfams=length(fam.dat.list),
title=paste("Site De Novo Rate by GQ",sep=""),
count="variants",fam.type=fam.type,legend=F,cex.lab=cex.lab,
xlab="Min. Proband GQ")
}
#DNR heatmap (size vs freq.)
DNR.dat.v <- deNovoRateBySizeFreq(trio.dat.list=fam.dat.list,count="variants",
max.sizes=c(tiny.max.size,small.max.size,medium.max.size,
medlarge.max.size,large.max.size),
size.labs=c("<100bp","100-\n500bp","500bp-\n2.5kb",
"2.5-10kb","10kb-50kb",">50kb"),
max.freqs=c(0.01,0.05,0.10,0.50),
freq.labs=c("<1%","1-5%","5-\n10%","10-\n50%",">50%"))
plotHeatmap(mat=DNR.dat.v$ALL,nfams=length(fam.dat.list),fam.type=fam.type,
title=paste("Site De Novo Rate, Size vs. Freq.",sep=""),cex.lab=cex.lab)
###Bottom row: SV alleles
#Master inheritance plot
plotInhStats(inh.stats=computeInheritanceMulti(trio.dat.list=fam.dat.list),
title=paste("SV Allele Inheritance (n=",
prettyNum(length(fam.dat.list),big.mark=","),
" ",fam.type,"s)",sep=""),
count="alleles",cex.lab=cex.lab)
#DNR vs size
size.dat.a <- deNovoRateBySize(trio.dat.list=fam.dat.list,size.bins=40,count="alleles")
plotDNRvsSize(DNRs=size.dat.a$DNRs,bins=size.dat.a$bins,k=4,nfams=length(fam.dat.list),
title=paste("Allele De Novo Rate by Size",sep=""),
fam.type=fam.type,legend=F,cex.lab=cex.lab)
#DNR vs frequency
freq.dat.a <- deNovoRateByFreq(trio.dat.list=fam.dat.list,freq.bins=40,count="alleles")
plotDNRvsFreq(DNRs=freq.dat.a$DNRs,bins=freq.dat.a$bins,k=4,nfams=length(fam.dat.list),
title=paste("Allele De Novo Rate by Freq.",sep=""),
count="alleles",fam.type=fam.type,legend=F,cex.lab=cex.lab)
#DNR vs min proband GQ
if(gq){
GQ.dat.a <- deNovoRateByProGQ(trio.dat.list=fam.dat.list,GQ.bins=40,count="alleles")
plotDNRvsGQ(DNRs=GQ.dat.a$DNRs,bins=GQ.dat.a$bins,k=4,nfams=length(fam.dat.list),
title=paste("Allele De Novo Rate by GQ",sep=""),
count="alleles",fam.type=fam.type,legend=F,cex.lab=cex.lab,
xlabel="Min. Proband GQ")
}
#DNR heatmap (size vs freq.)
DNR.dat.a <- deNovoRateBySizeFreq(trio.dat.list=fam.dat.list,count="alleles",
max.sizes=c(tiny.max.size,small.max.size,medium.max.size,
medlarge.max.size,large.max.size),
size.labs=c("<100bp","100-\n500bp","500bp-\n2.5kb",
"2.5-10kb","10kb-50kb",">50kb"),
max.freqs=c(0.01,0.05,0.10,0.50),
freq.labs=c("<1%","1-5%","5-\n10%","10-\n50%",">50%"))
plotHeatmap(mat=DNR.dat.a$ALL,nfams=length(fam.dat.list),fam.type=fam.type,
title=paste("Allele De Novo Rate, Size vs. Freq.",sep=""),cex.lab=cex.lab)
#Close device
dev.off()
}
########################
###RSCRIPT FUNCTIONALITY
########################
###Load libraries as needed
require(optparse)
require(beeswarm)
require(vioplot)
require(zoo)
require(RColorBrewer) #needed for brewer.pal() when no SV color file is supplied
###List of command-line options
option_list <- list(
make_option(c("-S", "--svtypes"), type="character", default=NULL,
help="tab-delimited file specifying SV types and HEX colors [default %default]",
metavar="character"),
make_option(c("-M", "--multiallelics"), type="logical", default=FALSE,
help="include multiallelic sites in inheritance calculations [default %default]",
metavar="logical")
)
###Get command-line arguments & options
args <- parse_args(OptionParser(usage="%prog svstats.bed famfile perSampleDir OUTDIR",
option_list=option_list),
positional_arguments=TRUE)
opts <- args$options
###Checks for appropriate positional arguments
if(length(args$args) != 4){
stop("Incorrect number of required positional arguments\n")
}
###Writes args & opts to vars
dat.in <- args$args[1]
famfile.in <- args$args[2]
perSampDir <- args$args[3]
OUTDIR <- args$args[4]
svtypes.file <- opts$svtypes
multiallelics <- opts$multiallelics
# #Dev parameters
# dat.in <- "~/scratch/xfer/gnomAD_v2_SV_MASTER_resolved_VCF.VCF_sites.stats.bed.gz"
# famfile.in <- "~/scratch/xfer/cleaned.fam"
# perSampDir <- "~/scratch/xfer/gnomAD_v2_SV_MASTER_resolved_VCF_perSample_VIDs_merged/"
# OUTDIR <- "~/scratch/famQC_plots_test/"
# # OUTDIR <- "~/scratch/VCF_plots_test/"
# svtypes.file <- "~/Desktop/Collins/Talkowski/code/sv-pipeline/ref/vcf_qc_refs/SV_colors.txt"
# multiallelics <- F
###Prepares I/O files
#Read & clean SV stats data
dat <- read.table(dat.in,comment.char="",sep="\t",header=T,check.names=F)
colnames(dat)[1] <- "chr"
#Restrict data to autosomes only, and exclude multiallelics (if optioned)
allosome.exclude.idx <- which(!(dat$chr %in% c(1:22,paste("chr",1:22,sep=""))))
multi.exclude.idx <- which(dat$other_gts>0)
cat(paste("NOTE: only autosomes considered during transmission analyses. Excluded ",
prettyNum(length(allosome.exclude.idx),big.mark=","),"/",
prettyNum(nrow(dat),big.mark=",")," (",
round(100*length(allosome.exclude.idx)/nrow(dat),1),
"%) of all variants as non-autosomal.\n",sep=""))
if(multiallelics==F){
cat(paste("NOTE: only biallelic variants considered during transmission analyses. Excluded ",
prettyNum(length(multi.exclude.idx),big.mark=","),"/",
prettyNum(nrow(dat),big.mark=",")," (",
round(100*length(multi.exclude.idx)/nrow(dat),1),
"%) of all variants as multiallelic.\n",sep=""))
all.exclude.idx <- unique(c(allosome.exclude.idx,multi.exclude.idx))
cat(paste("NOTE: excluded a nonredundant total of ",
prettyNum(length(all.exclude.idx),big.mark=","),"/",
prettyNum(nrow(dat),big.mark=",")," (",
round(100*length(all.exclude.idx)/nrow(dat),1),
"%) of all variants due to autosomal and/or multiallelic filters.\n",sep=""))
dat <- dat[-all.exclude.idx,]
}else{
dat <- dat[-allosome.exclude.idx,]
}
cat(paste("NOTE: retained ",
prettyNum(nrow(dat),big.mark=","),
" variants for transmission analyses.\n",sep=""))
#Read fam file and splits into duos and trios
fams <- read.table(famfile.in,comment.char="",header=T,check.names=F)
colnames(fams)[1] <- "FAM_ID"
# duos <- fams[grep("DUO_",fams$FAM_ID,fixed=T),]
# duos$FATHER[which(duos$FATHER==".")] <- NA
# duos$MOTHER[which(duos$MOTHER==".")] <- NA
trios <- fams[grep("TRIO_",fams$FAM_ID,fixed=T),]
#Sets sv types & colors
if(!is.null(svtypes.file)){
svtypes <- read.table(svtypes.file,sep="\t",header=F,comment.char="",check.names=F)
svtypes <- as.data.frame(apply(svtypes,2,as.character))
colnames(svtypes) <- c("svtype","color")
}else{
svtypes.v <- unique(dat$svtype)
svtypes.c <- brewer.pal(length(svtypes.v),"Dark2")
svtypes <- data.frame("svtype"=svtypes.v,
"color"=svtypes.c)
}
#Create output directory structure, if necessary
if(!dir.exists(OUTDIR)){
dir.create(OUTDIR)
}
if(!dir.exists(paste(OUTDIR,"/main_plots/",sep=""))){
dir.create(paste(OUTDIR,"/main_plots/",sep=""))
}
if(!dir.exists(paste(OUTDIR,"/supporting_plots/",sep=""))){
dir.create(paste(OUTDIR,"/supporting_plots/",sep=""))
}
if(!dir.exists(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/",sep=""))){
dir.create(paste(OUTDIR,"/supporting_plots/sv_inheritance_plots/",sep=""))
}
###Performs trio analyses, if any trios exist
if(nrow(trios)>0){
#Downsample to 100 trios if necessary
if(nrow(trios)>100){
trios <- trios[sample(1:nrow(trios),100,replace=F),]
}
#Read data
trio.dat <- apply(trios[,2:4],1,function(IDs){
IDs <- as.character(IDs)
return(getFamDat(dat=dat,proband=IDs[1],father=IDs[2],
mother=IDs[3],biallelic=!multiallelics))
})
names(trio.dat) <- trios[,1]
# if there are no GQ values in any of the trios, do not make GQ plots
gq <- any(unlist(lapply(trio.dat, function(trio){ sum(!is.na(trio$pro.GQ)) > 0 })))
#Master wrapper
masterInhWrapper(fam.dat.list=trio.dat,fam.type="trio", gq=gq)
#Standard inheritance panels
sapply(c("variants","alleles"),function(count){
wrapperInheritancePlots(fam.dat.list=trio.dat,
fam.type="trio",
count=count)
})
#De novo rate panels
sapply(c("variants","alleles"),function(count){
wrapperDeNovoRateLines(fam.dat.list=trio.dat,
fam.type="trio",
count=count,
gq=gq)
})
#De novo rate heatmaps
sapply(c("variants","alleles"),function(count){
wrapperDeNovoRateHeats(fam.dat.list=trio.dat,
fam.type="trio",
count=count)
})
}
# ###Performs duo analyses, if any duos exist
# if(nrow(duos)>0){
# #Read data
# duo.dat <- apply(duos[,2:4],1,function(IDs){
# IDs <- as.character(IDs)
# return(getFamDat(dat=dat,proband=IDs[1],father=IDs[2],
# mother=IDs[3],biallelic=!multiallelics))
# })
# names(duo.dat) <- duos[,1]
#
# #Master wrapper
# masterInhWrapper(fam.dat.list=duo.dat,fam.type="duo")
# #Standard inheritance panels
# sapply(c("variants","alleles"),function(count){
# wrapperInheritancePlots(fam.dat.list=duo.dat,
# fam.type="duo",
# count=count)
# })
# #De novo rate panels
# sapply(c("variants","alleles"),function(count){
# wrapperDeNovoRateLines(fam.dat.list=duo.dat,
# fam.type="duo",
# count=count)
# })
# #De novo rate heatmaps
# sapply(c("variants","alleles"),function(count){
# wrapperDeNovoRateHeats(fam.dat.list=duo.dat,
# fam.type="duo",
# count=count)
# })
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/est_mean_ipwe.R
\name{est_mean_ipwe}
\alias{est_mean_ipwe}
\title{Estimate the marginal mean response of a linear static treatment regime}
\usage{
est_mean_ipwe(beta, x, censor_y, delta, ph, a, ghat,
check_complete = TRUE)
}
\arguments{
\item{beta}{Numeric vector. A set of parameters that indexes the regime.}
\item{x}{Numeric Matrix. The baseline covariates from all observed data.}
\item{censor_y}{Numeric vector. The censored survival times from all observed data, i.e. \code{censor_y = min(Y, C)}}
\item{delta}{Numeric vector. The censoring indicators from all observed data. We use 1 for uncensored, 0 for censored.}
\item{ph}{Numeric vector. The estimated propensity score of being assigned treatment \code{A=1}
by the original data generation mechanism for all observed data.}
\item{a}{Numeric vector. The vector of observed treatment level for all observed data. Treatment levels
should be coded as 0/1.}
\item{ghat}{Numeric vector. The conditional/unconditional probabilities of the
event that the censoring variable is larger than the observed survival time, given
covariates, for each observation,
i.e. \eqn{F(T > y_0 \mid x_0)}.
This can be calculated by the function \code{\link{LocalKM}}.
Estimation of the conditional cumulative distribution function value at \eqn{y_0} is
implemented in \code{\link{tauhat_func}}.}
\item{check_complete}{logical. Since this value estimation method is purely
nonparametric, we need at least one unit in the collected data whose observed
treatment assignment is the same as what the regime parameter suggests. If \code{check_complete}
is \code{TRUE}, it will check whether any observation satisfies this criterion.
When no observation satisfies it, a message is printed to the console to raise users'
awareness that the input regime parameter \code{beta} does not agree with any observed treatment level assignment.
Then a sufficiently small number is returned from this function, to keep
the genetic algorithm running smoothly.}
}
\description{
Assume we have binary treatment options for each subject in the target population.
This function evaluates a given treatment regime by the estimated
marginal mean response.
We assume the space of treatment regimes is the class of linear
decision functions indexed by parametric coefficients.
This R function is an empirical \emph{value function} in the
literature of optimal treatment regime estimation. Since the goal here
is to maximize population's \strong{marginal mean} response, this function, which estimates
the performance of a set of parameters in terms of the \strong{marginal mean},
is the objective function in a nonparametric policy-search method.
The user-facing application that utilizes this function is \code{\link{IPWE_mean_IndCen}}.
}
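\details{
A rough sketch of the estimator this function targets (the notation here is
illustrative and may not match the internal implementation exactly): under the
linear regime \eqn{d_\beta(x) = I(x^\top \beta > 0)}, the marginal mean is estimated by
an inverse-probability-weighted average of the uncensored outcomes,
\deqn{\hat{E}\{Y(d_\beta)\} = \frac{\sum_i W_i Y_i}{\sum_i W_i}, \qquad
W_i = \frac{\delta_i \, I\{A_i = d_\beta(X_i)\}}{\hat{G}(Y_i \mid X_i)\,\{A_i \hat{\pi}(X_i) + (1 - A_i)(1 - \hat{\pi}(X_i))\}},}
where \eqn{\hat{\pi}} corresponds to the argument \code{ph} and \eqn{\hat{G}} to \code{ghat}.
}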
\examples{
GenerateData <- function(n)
{
x1 <- runif(n, min=-0.5,max=0.5)
x2 <- runif(n, min=-0.5,max=0.5)
error <- rnorm(n, sd= 1)
ph <- rep(0.5,n)
a <- rbinom(n = n, size = 1, prob=ph)
  c <- 1.5 + runif(n = n, min=0, max=2)
cmplt_y <- pmin(2+x1+x2 + a*(1 - x1 - x2) + (0.2 + a*(1+x1+x2)) * error, 4.4)
censor_y <- pmin(cmplt_y, c)
delta <- as.numeric(c > cmplt_y)
return(data.frame(x1=x1,x2=x2,a=a, censor_y = censor_y, delta=delta))
}
n <- 100
data <- GenerateData(n)
# here the value for argument ghat uses 0.5 vector for brevity.
mean_hat <- est_mean_ipwe(c(-1,0,2), x=cbind(1, data$x1, data$x2),
censor_y = data$censor_y, delta = data$delta, ph = rep(0.5,n),
a = data$a, ghat = rep(0.5,n))
}
|
/man/est_mean_ipwe.Rd
|
no_license
|
cran/QTOCen
|
R
| false | true | 3,582 |
rd
|
## Set standard values
defineVariables <-
function(){
DALYassign("ageGroups", c("0-4", "5-14", "15-44", "45-59", "60+"))
DALYassign("fixed", c("Age Group", "Male", "Female"))
DALYassign("txtLBL", c("INCIDENCE", "TREATMENT", "ONSET",
"DURATION", "DWtreated", "DWuntreated",
"MORTALITY", "AvgAgeDeath"))
DALYassign("txtLbl", c("Inc", "Trt", "Ons", "Dur",
"DWt", "DWn", "Mrt", "Lxp"))
DALYassign("txtlbl", c("inc", "trt", "ons", "dur",
"DWt", "DWn", "mrt", "lxp"))
DALYassign("distributions", c("Beta-Pert", "Beta", "Gamma", "Normal",
"LogNormal.geom", "LogNormal.arithm",
"Uniform", "Fixed"))
DALYassign("stratifications", c("Age and Sex", "Age", "Sex", "None"))
DALYassign("ages", c(0, 1, 5, 10, 15, 20, 25,
30, 35, 40, 45, 50, 55, 60,
65, 70, 75, 80, 85, 90, 95))
## standard life expectancy tables
DALYassign("stdLEtab", tclVar("GBD2010"))
## GBD1990 (Coale-Demeny model life table, west)
DALYassign("stdM", c(80.00, 79.36, 75.38, 70.40, 65.41, 60.44, 55.47,
50.51, 45.57, 40.64, 35.77, 30.99, 26.32, 21.81,
17.50, 13.58, 10.17, 7.45, 5.24, 3.54, 2.31))
DALYassign("stdF", c(82.50, 81.84, 77.95, 72.99, 68.02, 63.08, 58.17,
53.27, 48.38, 43.53, 38.72, 33.99, 29.37, 24.83,
20.44, 16.20, 12.28, 8.90, 6.22, 4.25, 2.89))
## GBD 2010 (synthetic life table)
DALYassign("stdGBD", c(86.02, 85.21, 81.25, 76.27, 71.29, 66.35, 61.40,
56.46, 51.53, 46.64, 38.00, 33.32, 28.73, 24.20,
19.80, 15.62, 11.75, 8.31, 4.00, 2.66, 1.87))
## WHO/GHE (projected frontier life table, 2050)
DALYassign("stdWHO", c(91.94, 91.00, 87.02, 82.03, 77.04, 72.06, 67.08,
62.11, 57.15, 52.20, 47.27, 42.36, 37.49, 32.65,
27.86, 23.15, 18.62, 14.41, 10.70, 7.60, 5.13))
## 'pop' = Population matrix
.pop <- tclArray()
for(x in seq(0, 2))
.pop[[0, x]] <- DALYget("fixed")[x+1]
for(y in seq(5))
.pop[[y, 0]] <- DALYget("ageGroups")[y]
DALYassign(".pop", .pop)
DALYassign("pop", matrix(nrow = 5, ncol = 2))
## 'LE' = Life Expectancy table
.LE <- tclArray()
.LE[[0, 0]] <- "Age"
for(x in seq(2)) .LE[[0, x]] <- DALYget("fixed")[x+1]
for(y in seq(21)) .LE[[y, 0]] <- DALYget("ages")[y]
DALYassign(".LE", .LE)
DALYassign("LE", matrix(nrow = 21, ncol = 2))
setStdLE()
## Set 'data', 'dist' & 'strat'
distList <- c(3, 2, 8, 8, 2, 2, 3, 8)
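  ## distList maps each parameter in 'txtlbl' to its default entry in 'distributions'
  ## (by index): inc -> Gamma, trt -> Beta, ons -> Fixed, dur -> Fixed,
  ## DWt -> Beta, DWn -> Beta, mrt -> Gamma, lxp -> Fixed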
for (i in seq(8)){
for (j in seq(8)){
## 'txtlbl' + 'i' = parameters per outcome
DALYassign(paste(".", DALYget("txtlbl")[j], i, sep = ""),
tclArray())
DALYassign(paste(DALYget("txtlbl")[j], i, sep = ""),
matrix(nrow = 6, ncol = 5))
## 'strat' + 'txtLbl' + 'i' = stratification per parameter per outcome
DALYassign(paste(".strat", DALYget("txtLbl")[j], i, sep = ""),
tclVar(DALYget("stratifications")[1]))
DALYassign(paste("strat", DALYget("txtLbl")[j], i, sep = ""),
DALYget("stratifications")[1])
## 'distributions' + 'txtLbl' + 'i' = dist per parameter per outcome
d <- distList[j]
DALYassign(paste(".dist", DALYget("txtLbl")[j], i, sep = ""),
tclVar(DALYget("distributions")[d]))
DALYassign(paste("dist", DALYget("txtLbl")[j], i, sep = ""),
DALYget("distributions")[d])
}
## assign 'ageGroups' labels to '.txtlbl' + 'i'
for (j in seq(5)){
for(k in seq(8)){
DALYeval(parse(text = paste(".", DALYget("txtlbl")[k], i,
"[[", j, ",0]] <- '",
DALYget("ageGroups")[j], "'", sep = "")))
}
}
}
## assign 'disease' and 'outcome' names
DALYassign("diseaseName", tclVar())
for (i in seq(8))
DALYassign(paste("outcome", i, "Name", sep = ""), tclVar())
## assign 'aw' and 'dr' variables
DALYassign(".aw", tclVar("No"))
DALYassign(".dr", tclVar("0"))
## assign 'option' variables
DALYassign(".it", tclVar("20000"))
DALYassign(".optOP", tclVar("Summed over age/sex classes"))
DALYassign(".optOC", tclVar("Summed over outcomes"))
DALYassign(".optRA", tclVar("Absolute"))
DALYassign(".optHist", tclVar("1"))
DALYassign("it", 20000)
DALYassign("optOP", "Summed over age/sex classes")
DALYassign("optOC", "Summed over outcomes")
DALYassign("optRA", "Absolute")
DALYassign("optHist", 1)
}
|
/R/defineVariables.R
|
no_license
|
cran/DALY
|
R
| false | false | 4,807 |
r
|
# K-means cluster analysis
# Final model for use in analysis
# Plots to determine number of clusters and variables to use
# Caitlin O'Brien-Carelli
# 10/18/2019
# ----------------------
# Set up R
rm(list=ls())
library(ggplot2)
library(rgdal)
library(dplyr)
library(RColorBrewer)
library(plyr)
library(data.table)
library(dendextend)
library(purrr)
library(cluster)
library(gridExtra)
library(plotly)
# turn off scientific notation
options(scipen=999)
# ----------------------
# home drive
j = ifelse(Sys.info()[1]=='Windows', 'J:', '/home/j')
# data directory
dir = paste0(j, '/Project/Evaluation/GF/outcome_measurement/uga/arv_stockouts/')
# import the data
dt = readRDS(paste0(dir, 'prepped_data/arv_stockouts_2013_2019.rds'))
# set the working directory to the code repo to source functions
setwd('C:/Users/ccarelli/local/gf/outcome_measurement/hiv/uga/arvs/cluster_analysis/')
# drop 2013 - reporting is very low and short time series
dt = dt[year!=2013]
# ----------------------
# source the functions for elbow plots and silhouette widths
source('cluster_functions.R')
#----------------------------
# create a data frame on which to run linear regressions
# create a new data table
df = copy(dt)
# calculate if the facility reported on test kit or arv stock outs
df[!is.na(test_kits), reported_tests:=TRUE]
df[is.na(reported_tests), reported_tests:=FALSE]
df[!is.na(arvs), reported_arvs:=TRUE]
df[is.na(reported_arvs), reported_arvs:=FALSE]
#-----------
# sum to the annual level - weeks out and stock outs
df = df[ ,.(test_kits=sum(test_kits, na.rm=T),
arvs=sum(arvs, na.rm=T), reported_tests=sum(reported_tests),
reported_arvs=sum(reported_arvs)),
by=.(facility, level,region, year)]
# calculate percent of time out of both commodities
df[ , percent_tests:=round(100*(test_kits/reported_tests), 1)]
df[ , percent_arvs:=round(100*(arvs/reported_arvs), 1)]
# if a facility never reported (0/0 gives NaN), set percent of time out to 0
df[is.na(percent_tests), percent_tests:=0]
df[is.na(percent_arvs), percent_arvs:=0]
#------------------------
# sum the main data table to a single value
# calculate if the facility reported
dt[!is.na(test_kits), reported_tests:=TRUE]
dt[is.na(reported_tests), reported_tests:=FALSE]
dt[!is.na(arvs), reported_arvs:=TRUE]
dt[is.na(reported_arvs), reported_arvs:=FALSE]
# total test kits and total arvs
dt = dt[ ,.(test_kits=sum(test_kits, na.rm=T),
arvs=sum(arvs, na.rm=T), reported_tests=sum(reported_tests),
reported_arvs=sum(reported_arvs)),
by=.(facility, level,region)] # do not include year
# calculate percent of time out of both commodities
# no NANs in data set - otherwise replace with 0s
dt[ , percent_tests:=round(100*(test_kits/reported_tests), 1)]
dt[ , percent_arvs:=round(100*(arvs/reported_arvs), 1)]
#------------------------
# calculate the slopes per facility
# calculate using the annual data, but append to the full time series data
# slope of change in percent tests
for (f in unique(df$facility)) {
model = lm(percent_tests~year, data=df[facility==f])
dt[facility==f, test_slope:=coef(model)[[2]]]
}
# slope of change in percent arvs
for (f in unique(df$facility)) {
model = lm(percent_arvs~year, data=df[facility==f])
dt[facility==f, arv_slope:=coef(model)[[2]]]
}
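# cross-check (a sketch only, not used downstream): the same per-facility slopes can be
# computed in a single grouped data.table call instead of the loops above; the columns
# below should match the test_slope/arv_slope values already attached to dt
slope_check = df[ , .(test_slope_check = coef(lm(percent_tests~year))[2],
                      arv_slope_check = coef(lm(percent_arvs~year))[2]),
                 by=facility]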
#------------------------
# scale both
dt[ , test_slope_scale:=((test_slope - mean(test_slope))/sd(test_slope))]
dt[ , arv_slope_scale:=((arv_slope - mean(arv_slope))/sd(arv_slope))]
dt[ , tests_scale:=((percent_tests - mean(percent_tests))/sd(percent_tests))]
dt[ , arvs_scale:=((percent_arvs - mean(percent_arvs))/sd(percent_arvs))]
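# note: the manual z-scoring above is equivalent to base R's scale(), e.g.
# all.equal(dt$tests_scale, as.numeric(scale(dt$percent_tests))) should return TRUE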
#----------------------------------------
# create a matrix for cluster analysis
dt_k = dt[ ,.(test_slope_scale, arv_slope_scale, tests_scale, arvs_scale)]
#----------------------------------------
# calculate elbow plots and silhouette widths and plot
# calculate using sourced functions
elbow = elbow_fun(dt_k, 2, 10)
sil = sil_fun(dt_k, 2, 10)
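# cluster_functions.R (sourced above) is not shown here; below is a minimal sketch of
# what the two helpers presumably compute, based on how their output is used in the
# plots that follow (this is an assumption, not the sourced code): total within-cluster
# sum of squares per k for the elbow plot, and average silhouette width per k via
# cluster::pam() for the silhouette plot
elbow_fun_sketch = function(mat, k_min, k_max) {
  data.table(k = k_min:k_max,
             tot_withinss = sapply(k_min:k_max, function(k) kmeans(mat, centers=k)$tot.withinss))
}
sil_fun_sketch = function(mat, k_min, k_max) {
  data.table(k = k_min:k_max,
             sil_width = sapply(k_min:k_max, function(k) pam(mat, k=k)$silinfo$avg.width))
}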
# ----------------------
# plot the elbow plot
elbow_df = ggplot(elbow, aes(x=k, y=tot_withinss))+
geom_point()+
geom_line()+
theme_bw()+
labs(y = "Total within-cluster sum of squares", x = "K Clusters",
title='Elbow plot to empirically determine k clusters',
subtitle='Variables: % of reporting weeks out of ARVs,
% of reporting weeks out of tests, slopes* (2014 - 2019)',
caption = '*Slope of the annual change in time out of stock')+
theme(text=element_text(size=18))
# ----------------------
# plot the silhouette plot
sil_df = ggplot(sil, aes(x=k, y=sil_width))+
geom_point()+
geom_line()+
theme_bw() +
labs(x='K Clusters', y='Average silhouette width',
title='Silhouette Width to determine k clusters')+
theme(text=element_text(size=18))
# ----------------------
#----------------------------------------
# plot the clusters
list_of_plots = NULL
list_of_plots_slope = NULL
i = 1
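# Editorial note: kmeans() starts from random centers, so cluster numbering (and
# occasionally membership) can change between runs; calling set.seed() before the
# loop below, e.g. set.seed(42), would make the results reproducible (the seed
# value is an illustration, not from the original analysis)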
# function to run the calculations for every cluster
for (x in c(2:10)) {
# run a test cluster
k_clust = kmeans(dt_k, centers = x)
dt[ , kcluster:=k_clust$cluster]
# mark the slope centroids for labeling
dt[ ,centroid_x_slope:=mean(test_slope_scale, na.rm=T), by=kcluster]
dt[ ,centroid_y_slope:=mean(arv_slope_scale, na.rm=T), by=kcluster]
dt[ ,slope_label:=paste0(round(centroid_x_slope, 1), ", ", round(centroid_y_slope, 1)), by=kcluster]
# mark the centroids for labeling
dt[ ,centroid_x:=mean(tests_scale, na.rm=T), by=kcluster]
dt[ ,centroid_y:=mean(arvs_scale, na.rm=T), by=kcluster]
dt[ ,label:=paste0(round(centroid_x), ", ", round(centroid_y)), by=kcluster]
# rbind the data
interim_data = copy(dt)
interim_data[ , total_clusters:=x]
if (i ==1) full_data = interim_data
if (1 < i) full_data = rbind(full_data, interim_data)
# create the plots of the percent of time out
list_of_plots[[i]] = ggplot(full_data[total_clusters==x],
aes(x=tests_scale, y=arvs_scale, color=factor(kcluster)))+
geom_jitter(alpha=0.6)+
theme_bw()+
annotate("text", x=full_data[total_clusters==x]$centroid_x,
y=full_data[total_clusters==x]$centroid_y,
label=full_data[total_clusters==x]$label)+
labs(x = "Percent of weeks out of test kits, scaled",
y = "Percent of weeks out of ARVs, scaled", color='Clusters',
title="Percent of reporting weeks out of test kits and ARVs, 2014 - 2019",
caption = "Percentage is equal to total weeks out/total weeks reported per facility",
subtitle=paste0('Number of clusters = ', x))+
theme(text=element_text(size=18))
# create plots of the slope
list_of_plots_slope[[i]] = ggplot(full_data[total_clusters==x],
aes(x=test_slope, y=arv_slope, color=factor(kcluster)))+
geom_jitter(alpha=0.6)+
theme_bw()+
annotate("text", x=full_data[total_clusters==x]$centroid_x_slope,
y=full_data[total_clusters==x]$centroid_y_slope,
label=full_data[total_clusters==x]$slope_label)+
labs(x = "Slope of change in test kit stock outs",
y = "Slope of change in ARV stock outs", color='Clusters',
title="Change in stock out percent of time stocked out, 2014 - 2019",
subtitle=paste0('Number of clusters = ', x))+
theme(text=element_text(size=18))
i = i+1 }
# ad hoc check: compare clusters 2 and 5 from the k=5 solution
ah = full_data[total_clusters==5]
ah = ah[kcluster==2 | kcluster==5]
ggplot(ah,
aes(x=test_slope_scale, y=arv_slope_scale, color=factor(kcluster)))+
geom_jitter(alpha=0.2)+
theme_bw()+
labs(x = "Slope of change in test kit stock outs",
y = "Slope of change in ARV stock outs", color='Clusters',
title="Change in stock out percent of time stocked out, 2014 - 2019",
subtitle=paste0('Number of clusters = ', 5))+
theme(text=element_text(size=18))
ah_k = ah[ ,.(test_slope_scale, arv_slope_scale, tests_scale, arvs_scale)]
pamah = pam(ah_k, k=5)
pamah$silinfo
sil = sil_fun(ah_k, 5, 5)
# silhouette() needs a clustering, not just the data matrix; use the pam fit above
silhouette(pamah)
#----------------------------
# print a pdt of plots
pdf(paste0(dir, 'k_means_outputs/all_scaled_2014_2019.pdf'),height=9, width=18)
grid.arrange(elbow_df, sil_df, nrow=1)
for(i in seq(length(list_of_plots_slope))) {
p = list_of_plots[[i]]
p_slope = list_of_plots_slope[[i]]
grid.arrange(p, p_slope, sil_df, nrow=1)
}
dev.off()
#----------------------------
# create a 3d graph for visualization
plot_ly(full_data[total_clusters==4],
x = ~percent_tests, y = ~percent_arvs, z = ~test_slope, color = ~factor(kcluster),
colors = brewer.pal(9, 'RdYlBu')) %>%
add_markers() %>%
layout(scene = list(xaxis = list(title = '% Test kits'),
yaxis = list(title = '% ARVs'),
zaxis = list(title = 'Slope of tests')))
#----------------------------
# export a data set for analysis, including cluster assignments
dt_export = full_data[total_clusters==4, .(facility, kcluster, percent_tests,
                                            percent_arvs, test_slope, arv_slope)]
saveRDS(dt_export, paste0(dir, 'prepped_data/cluster_assignments.RDS'))
|
/outcome_measurement/hiv/uga/arvs/cluster_analysis/arvs_tests_both_scaled.R
|
no_license
|
ihmeuw/gf
|
R
| false | false | 9,176 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/holdings.R
\name{fmpc_rss_sec}
\alias{fmpc_rss_sec}
\title{RSS feed of latest submissions to SEC}
\usage{
fmpc_rss_sec(limit = 100)
}
\arguments{
\item{limit}{limit output to a specific number of results}
}
\value{
a data frame of title, date, link to submission, cik, and submission
type
}
\description{
RSS feed of latest submissions to SEC that includes 6-k, 10-Q, 13F
}
\examples{
\dontrun{
# Demo offers AAON as an example
fmpc_set_token()
fmpc_rss_sec()
}
}
|
/man/fmpc_rss_sec.Rd
|
no_license
|
cran/fmpcloudr
|
R
| false | true | 544 |
rd
|
|
#devtools::use_package('ggplot2')
#library('ggplot2')
#' Individual factor map for \code{\link[rsvd]{rpca}} using \code{\link[ggplot2]{ggplot}}.
#'
#' @description Creates a pretty plot showing the individual factor map, i.e.,
#' plotting the principal component scores.
#'
#' @param rpcaObj Object returned by the \code{\link[rsvd]{rpca}} function.
#'
#' @param pcs Array_like. \cr
#' An array with two values indicating the two PCs which should be used for plotting.
#' By default the first two PCs are used, e.g., \eqn{c(1,2)}.
#'
#' @param groups Factor, optional. \cr
#' Factor indicating groups.
#'
#' @param alpha Scalar, optional. \cr
#' Alpha transparency for scatter plot.
#'
#' @param ellipse Bool (\eqn{TRUE}, \eqn{FALSE}), optional. \cr
#' Draw a 1sd data ellipse for each group, if \eqn{TRUE}.
#'
#' @param alpha.ellipse Scalar, optional. \cr
#' Alpha transparency for ellipse.
#'
#' @param ind_labels Bool (\eqn{TRUE}, \eqn{FALSE}), optional. \cr
#' Plot names for each individual point, if \eqn{TRUE}.
#'
#' @param ind_labels.names Array_like, optional. \cr
#' User specific labels for the individual points.
#'
#' @seealso \code{\link[rsvd]{rpca}}, \code{\link[ggplot2]{ggplot}}
#'
#' @author N. Benjamin Erichson, \email{erichson@uw.edu}
#'
#' @examples
#' #See ?rpca
#' @export
ggindplot <- function( rpcaObj, pcs = c(1,2), groups = NULL, alpha = 0.6, ellipse = TRUE, alpha.ellipse=0.2,
ind_labels=TRUE, ind_labels.names=NULL)
{
if (!requireNamespace("ggplot2", quietly = TRUE)) {
stop("The package 'ggplot2' is needed for this function to work. Please install it.",
call. = FALSE)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Check if retx is provided
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if(is.null(rpcaObj$x)) stop("ggindplot requires the rotated variables, i.e., set rpca(..., retx = TRUE).")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Check selected pcs
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stopifnot(length(pcs) == 2)
if(max(pcs) > ncol(rpcaObj$rotation)) stop("Selected PC is not valid.")
if(min(pcs) < 1) stop("Selected PC is not valid.")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Dimensions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
n = nrow(rpcaObj$x)
p = nrow(rpcaObj$rotation)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Label PCs
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
variance = rpcaObj$sdev**2
explained_variance_ratio = round(variance / rpcaObj$var,3) * 100
PC1 = paste("PC", pcs[1], "(", explained_variance_ratio[pcs[1]] , "% explained var.)", sep="")
PC2 = paste("PC", pcs[2], "(", explained_variance_ratio[pcs[2]] , "% explained var.)", sep="")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Scale principal component scores
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Z1 = rpcaObj$x[, pcs[1]] #* (rpcaObj$eigvals[pcs[1]]**0.5)
Z2 = rpcaObj$x[, pcs[2]] #* (rpcaObj$eigvals[pcs[2]]**0.5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create data frame
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
df <- data.frame(scores=cbind(Z1,Z2), row.names = 1:n)
colnames(df) <- c( 'a', 'b')
if(is.null(rownames(rpcaObj$x))) {
df$"indName" <- as.character(1:n)
} else {
df$"indName" <- rownames(rpcaObj$x)
}
if(!is.null(ind_labels.names)) df$"indName" <- ind_labels.names
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Workaround for CRAN: Nulling
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
x <- NULL # Setting the variables to NULL first
y <- NULL # Setting the variables to NULL first
a <- NULL # Setting the variables to NULL first
b <- NULL # Setting the variables to NULL first
class <- NULL # Setting the variables to NULL first
indName <- NULL # Setting the variables to NULL first
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Scores
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(is.null(groups)) {
df$"class" <- 'subjects'
} else{
df$"class" <- groups
}
g <- ggplot2::ggplot(data=df, ggplot2::aes(x = a, y = b, colour = class )) +
ggplot2::geom_point(size = 2, alpha = alpha) +
ggplot2::theme(legend.position = "none")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Stat ellipse
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if(!is.null(groups) && ellipse==TRUE){
g <- g + ggplot2::stat_ellipse( geom = "polygon", alpha = alpha.ellipse,
ggplot2::aes(fill = class))
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Label data points
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(ind_labels==TRUE) {
g <- g + ggplot2::geom_text(data = df,
ggplot2::aes(label = indName, x = a, y = b,
angle = 0, hjust = 0.1, vjust = 0.1),
color = 'black', size = 4)
}
g <- g + ggplot2::ggtitle('Individuals factor map (PCA)')
g <- g + ggplot2::xlab(PC1) + ggplot2::ylab(PC2)
g <- g + ggplot2::guides(colour=FALSE)
g <- g + ggplot2::geom_vline(xintercept=0, linetype="dashed", color = "black")
g <- g + ggplot2::geom_hline(yintercept=0, linetype="dashed", color = "black")
g <- g + ggplot2::theme_bw()
g <- g + ggplot2::theme(panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank())
return(g)
}
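# Illustrative usage (editorial sketch, not from the package source; the iris data
# and k = 2 below are assumptions chosen only for demonstration):
# library(rsvd)
# fit <- rpca(as.matrix(iris[, 1:4]), k = 2, retx = TRUE)
# ggindplot(fit, pcs = c(1, 2), groups = iris$Species)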
|
/R/ggindplot.R
|
no_license
|
LTLA/rSVD
|
R
| false | false | 6,478 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_replace.R
\name{list_replace}
\alias{list_replace}
\title{Helper function to replace values in a list}
\usage{
list_replace(x, list, values)
}
\arguments{
\item{x}{a list}
\item{list}{indices of list}
\item{values}{what values to replace with}
}
\value{
x with list values replaced
}
\description{
Helper function to replace values in a list
}
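\examples{
\dontrun{
# Editorial illustration only (not generated from the package source; the exact
# argument types are assumptions): replace the second element of a list
list_replace(x = list(1, 2, 3), list = 2, values = list(10))
}
}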
|
/man/list_replace.Rd
|
no_license
|
guhjy/drinf
|
R
| false | true | 429 |
rd
|
|
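# Editorial summary comment (added): sankey.R builds a self-contained HTML page
# with an animated D3 sankey diagram of transactions between entities. The helper
# functions below generate the main d3.sankey JavaScript (.generate_main_js), the
# per-date animation frames and playback controls (.generate_gif_js), the
# post-render node/link colouring (.generate_after_js) and the event sidebar
# (.generate_events_array); generate_html() stitches them into one document.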
.generate_main_js <- function(data, chart_id = "sankey", node_width = 15, node_padding = 10, layout = 32,
units = "", node_tooltip = NULL, link_tooltip_fw = NULL, link_tooltip_bw = NULL) {
if (is.null(node_tooltip)) node_tooltip <- 'd.name + "\\nTotal Out: " + out_n + " transactions for $" + format(out_total) + "\\nTotal In: " + in_n + " transactions for $" + format(in_total);'
if (is.null(link_tooltip_fw)) {
link_tooltip_fw <- 'd.source.name + " sent $" + d.value + " to " + d.target.name + " on " + d.date'
if ("reverse" %in% tolower(names(data))) {
link_tooltip_bw <- 'd.target.name + " sent $" + d.value + " to " + d.source.name + " on " + d.date'
} else {
link_tooltip_bw <- link_tooltip_fw
}
}
src <- sprintf('"%s"', data$source) %>% paste(collapse = ", ")
target <- sprintf('"%s"', data$target) %>% paste(collapse = ", ")
value <- sprintf('"%s"', data$value) %>% paste(collapse = ", ")
date <- sprintf('"%s"', data$date) %>% paste(collapse = ", ")
reverse <- sprintf('"%s"', data$reverse) %>% paste(collapse = ", ")
data_json <- paste('{"source": [', src, '], "target": [', target, '], "value": [', value, '], "date": [', date, '], "reverse": [', reverse, ']}', sep = "")
js <- paste('\n\t\t\t(function(){',
'\n\t\t\t\tvar width = $(window).width() * .78;',
'\n\t\t\t\tvar height = $(window).height() * .75;',
'\n\t\t\t\tvar params = {',
'\n\t\t\t\t\t"dom": "sankey",',
'\n\t\t\t\t\t"width": width,',
'\n\t\t\t\t\t"height": height,',
'\n\t\t\t\t\t"data": ', data_json, ',',
'\n\t\t\t\t\t"nodeWidth": ', node_width, ',',
'\n\t\t\t\t\t"nodePadding": ', node_padding, ',',
'\n\t\t\t\t\t"layout": ', layout, ',',
'\n\t\t\t\t\t"units": "', units, '",',
'\n\t\t\t\t\t"id": "sankey"',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tparams.units ? units = " " + params.units : units = "";',
'\n',
'\n\t\t\t\t//hard code these now but eventually make available',
'\n\t\t\t\tvar formatNumber = d3.format("0,.0f"), // zero decimal places',
'\n\t\t\t\tformat = function(d) { return formatNumber(d) + units; },',
'\n\t\t\t\tcolor = d3.scale.category20();',
'\n',
'\n\t\t\t\tif(params.labelFormat){',
'\n\t\t\t\t\tformatNumber = d3.format(".2%");',
'\n\t\t\t\t}',
'\n',
'\n\t\t\t\tvar svg = d3.select("#" + params.id).append("svg")',
'\n\t\t\t\t\t.attr("width", params.width)',
'\n\t\t\t\t\t.attr("height", params.height);',
'\n',
'\n\t\t\t\tvar sankey = d3.sankey()',
'\n\t\t\t\t\t.nodeWidth(params.nodeWidth)',
'\n\t\t\t\t\t.nodePadding(params.nodePadding)',
'\n\t\t\t\t\t.layout(params.layout)',
'\n\t\t\t\t\t.size([params.width,params.height]);',
'\n',
'\n\t\t\t\tvar path = sankey.link();',
'\n',
'\n\t\t\t\tvar data = params.data,',
'\n\t\t\t\t\tlinks = [],',
'\n\t\t\t\t\tnodes = [];',
'\n',
'\n\t\t\t\t//get all source and target into nodes',
'\n\t\t\t\t//will reduce to unique in the next step',
'\n\t\t\t\t//also get links in object form',
'\n\t\t\t\tdata.source.forEach(function (d, i) {',
'\n\t\t\t\t\tnodes.push({ "name": data.source[i] });',
'\n\t\t\t\t\tnodes.push({ "name": data.target[i] });',
'\n\t\t\t\t\tlinks.push({ "source": data.source[i], "target": data.target[i], "value": +data.value[i], "date": data.date[i], "reverse": data.reverse[i] });',
'\n\t\t\t\t});',
'\n',
'\n\t\t\t\t//now get nodes based on links data',
'\n\t\t\t\t//thanks Mike Bostock https://groups.google.com/d/msg/d3-js/pl297cFtIQk/Eso4q_eBu1IJ',
'\n\t\t\t\t//this handy little function returns only the distinct / unique nodes',
'\n\t\t\t\tnodes = d3.keys(d3.nest()',
'\n\t\t\t\t\t.key(function (d) { return d.name; })',
'\n\t\t\t\t\t.map(nodes));',
'\n',
'\n\t\t\t\t//it appears d3 with force layout wants a numeric source and target',
'\n\t\t\t\t//so loop through each link replacing the text with its index from node',
'\n\t\t\t\tlinks.forEach(function (d, i) {',
'\n\t\t\t\t\tlinks[i].source = nodes.indexOf(links[i].source);',
'\n\t\t\t\t\tlinks[i].target = nodes.indexOf(links[i].target);',
'\n\t\t\t\t});',
'\n',
'\n\t\t\t\t//now loop through each nodes to make nodes an array of objects rather than an array of strings',
'\n\t\t\t\tnodes.forEach(function (d, i) {',
'\n\t\t\t\t\tnodes[i] = { "name": d };',
'\n\t\t\t\t});',
'\n',
'\n\t\t\t\tsankey',
'\n\t\t\t\t\t.nodes(nodes)',
'\n\t\t\t\t\t.links(links)',
'\n\t\t\t\t\t.layout(params.layout);',
'\n',
'\n\t\t\t\tvar link = svg.append("g").selectAll(".link")',
'\n\t\t\t\t\t.data(links)',
'\n\t\t\t\t\t.enter().append("path")',
'\n\t\t\t\t\t.attr("class", "link")',
'\n\t\t\t\t\t.attr("d", path)',
'\n\t\t\t\t\t.style("stroke-width", function (d) { return Math.max(2, d.dy); })',
'\n\t\t\t\t\t.sort(function (a, b) { return b.dy - a.dy; });',
'\n',
'\n\t\t\t\tlink.append("title")',
'\n\t\t\t\t\t.text(function (d) { return(d.reverse === "1" ? ', link_tooltip_bw, ' : ', link_tooltip_fw, ') });',
'\n',
'\n\t\t\t\tvar node = svg.append("g").selectAll(".node")',
'\n\t\t\t\t\t.data(nodes)',
'\n\t\t\t\t\t.enter().append("g")',
'\n\t\t\t\t\t.attr("class", "node")',
'\n\t\t\t\t\t.attr("transform", function (d) { return "translate(" + d.x + "," + d.y + ")"; })',
'\n\t\t\t\t\t.call(d3.behavior.drag()',
'\n\t\t\t\t\t\t.origin(function (d) { return d; })',
'\n\t\t\t\t\t\t.on("dragstart", function () { this.parentNode.appendChild(this); })',
'\n\t\t\t\t\t\t.on("drag", dragmove));',
'\n',
'\n\t\t\t\tnode.append("rect")',
'\n\t\t\t\t\t.attr("height", function (d) { return d.dy > 1 ? d.dy : 1; })',
'\n\t\t\t\t\t.attr("width", sankey.nodeWidth())',
'\n\t\t\t\t\t.style("fill", function (d) { return d.color = color(d.name.replace(/ .*/, "")); })',
'\n\t\t\t\t\t.style("stroke", function (d) { return d3.rgb(d.color).darker(2); })',
'\n\t\t\t\t\t.append("title")',
'\n\t\t\t\t\t.text(function (d) {',
'\n\t\t\t\t\t\tvar out_total = 0,',
'\n\t\t\t\t\t\t\tin_total = 0,',
'\n\t\t\t\t\t\t\tout_n = d.sourceLinks.filter(function(r) { return r.reverse === "0"; }).length + d.targetLinks.filter(function(r) { return r.reverse === "1"; }).length,',
'\n\t\t\t\t\t\t\tin_n = d.targetLinks.filter(function(r) { return r.reverse === "0"; }).length + d.sourceLinks.filter(function(r) { return r.reverse === "1"; }).length;',
'\n\t\t\t\t\t\td.sourceLinks.forEach(function(s) {',
'\n\t\t\t\t\t\t\tout_total += (s.reverse === "0") ? s.value : 0;',
'\n\t\t\t\t\t\t\tin_total += (s.reverse === "1") ? s.value : 0;',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t\td.targetLinks.forEach(function(t) {',
'\n\t\t\t\t\t\t\tin_total += (t.reverse === "0") ? t.value : 0;',
'\n\t\t\t\t\t\t\tout_total += (t.reverse === "1") ? t.value : 0;',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t\treturn ', node_tooltip, ' });',
'\n',
'\n\t\t\t\tnode.append("text")',
'\n\t\t\t\t.attr("x", -6)',
'\n\t\t\t\t.attr("y", function (d) { return d.dy / 2; })',
'\n\t\t\t\t.attr("dy", ".35em")',
'\n\t\t\t\t.attr("text-anchor", "end")',
'\n\t\t\t\t.attr("transform", null)',
'\n\t\t\t\t.text(function (d) { return d.name; })',
'\n\t\t\t\t.filter(function (d) { return d.x < params.width / 2; })',
'\n\t\t\t\t.attr("x", 6 + sankey.nodeWidth())',
'\n\t\t\t\t.attr("text-anchor", "start");',
'\n',
'\n\t\t\t\t// the function for moving the nodes',
'\n\t\t\t\tfunction dragmove(d) {',
'\n\t\t\t\t\td3.select(this).attr("transform","translate(" + (d.x = Math.max(0, Math.min(params.width - d.dx, d3.event.x))) + "," + (d.y = Math.max(0, Math.min(params.height - d.dy, d3.event.y))) + ")");',
'\n\t\t\t\t\tsankey.relayout();',
'\n\t\t\t\t\tlink.attr("d", path);',
'\n\t\t\t\t}',
'\n\t\t\t})();', sep = "")
return(js)
}
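# Editorial description (added comment): .generate_gif_js precomputes node, text
# and link colours for every transaction date and wires the play/stop/step/speed/
# reset buttons so the flows can be replayed chronologically.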
.generate_gif_js <- function(data, targets, delay = 2) {
data$path <- apply(data, 1, function(row) {
if (row[1] %in% targets) {
return(sprintf("%s %s %s", row[1], row[2], row[4]))
} else {
return(sprintf("%s %s %s", row[2], row[1], row[4]))
}
}) %>% unlist() %>% unname()
dates <- data$date %>% as.Date(format = "%m/%d/%y") %>% unique() %>% sort() %>% as.character()
events_array <- vector("character")
entities <- c(data$source, data$target) %>% unique()
for (i in 1:length(dates)) {
tmp <- data %>% filter(as.Date(date, format = "%m/%d/%y") == dates[i])
highlight <- c(tmp$source, tmp$target) %>% unique()
node_colors <- sapply(entities, function(entity) {
if (entity %in% highlight) {
if (entity %in% targets) {
return(sprintf('"%s": "#FF6A6A"', entity))
} else {
return(sprintf('"%s": "#90EE90"', entity))
}
} else {
return(sprintf('"%s": "#F7F7F7"', entity))
}
}) %>% unlist() %>% unname()
node_colors <- paste(node_colors, collapse = ", ")
text_colors <- sapply(entities, function(entity) {
if (entity %in% highlight) {
return(sprintf('"%s": "#000000"', entity))
} else {
return(sprintf('"%s": "#F7F7F7"', entity))
}
}) %>% unlist() %>% unname()
text_colors <- paste(text_colors, collapse = ", ")
link_colors <- apply(data, 1, function(row) {
if ("reverse" %in% names(data)) {
if (as.Date(row[4], format = "%m/%d/%y") == dates[i]) {
if (row[5] == 1) {
return(sprintf('"%s": "#A020F0"', row[6]))
} else {
return(sprintf('"%s": "#66CCFF"', row[6]))
}
} else {
return(sprintf('"%s": "#F7F7F7"', row[6]))
}
} else {
if (as.Date(row[4], format = "%m/%d/%y") == dates[i]) {
return(sprintf('"%s": "#66CCFF"', row[5]))
} else {
return(sprintf('"%s": "#F7F7F7"', row[5]))
}
}
}) %>% unlist() %>% unname()
link_colors <- paste(link_colors, collapse = ", ")
event <- sprintf('%d: {"node_colors": {%s}, "text_colors": {%s}, "link_colors": {%s}}', (i - 1), node_colors, text_colors, link_colors)
events_array <- c(events_array, event)
}
n <- length(events_array)
events <- paste(events_array, collapse = ", ") %>% {sprintf("{%s}", .)}
targets <- sprintf('"%s"', targets) %>% paste(collapse = ", ")
gif <- paste('\n\t\t\t$(function() {',
'\n\t\t\t\tvar selector = $("#sankey");',
'\n\t\t\t\tvar delay_sec = ', delay, ';',
'\n\t\t\t\tvar num = -1,',
'\n\t\t\t\tlen = ', n, ';',
'\n\t\t\t\tvar step_counter = 0;',
'\n\t\t\t\tvar threshold = 0;',
'\n\t\t\t\tvar height = 0;',
'\n\t\t\t\tvar timer = null;',
'\n\t\t\t\tvar events = ', events, ';',
'\n\t\t\t\tvar targets = [', targets, '];',
'\n',
'\n\t\t\t\tfunction reset() {',
'\n\t\t\t\t\tstop();',
'\n\t\t\t\t\tnum = 0;',
'\n\t\t\t\t\tthreshold = 0;',
'\n\t\t\t\t\theight = 0;',
'\n\t\t\t\t\tdelay_sec = 2;',
'\n\t\t\t\t\td3.selectAll("#sankey svg .node rect")',
'\n\t\t\t\t\t\t.style("fill", function(d) { return (targets.indexOf(d.name) > -1) ? "#FF6A6A" : "#90EE90" })',
'\n\t\t\t\t\t\t.style("stroke", function(d) { return (targets.indexOf(d.name) >= 0) ? "#FF6A6A" : "#90EE90" })',
'\n\t\t\t\t\t$("#sankey svg .node text").css("fill", "#000000");',
'\n\t\t\t\t\td3.selectAll("#sankey svg path.link")',
'\n\t\t\t\t\t\t.style("stroke", function(d) { return(d.reverse === "1" ? "#A020F0" : "#66CCFF") })',
'\n\t\t\t\t\t$("#events").animate({scrollTop: 0}, "fast");',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tfunction blank_graph() {',
'\n\t\t\t\t\t$("#sankey svg .node rect").css("fill", "#F7F7F7").css("stroke", "#F7F7F7");',
'\n\t\t\t\t\t$("#sankey svg .node text").css("fill", "#F7F7F7");',
'\n\t\t\t\t\t$("#sankey svg path.link").css("stroke", "#F7F7F7");',
'\n\t\t\t\t\t$("#events").animate({scrollTop: 0}, "fast");',
'\n\t\t\t\t\t$("#events").children().css("color", "#000000");',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tfunction event(num, scroll = true) {',
'\n\t\t\t\t\tif (num === -1) {',
'\n\t\t\t\t\t\tthreshold = 0;',
'\n\t\t\t\t\t\theight = 0;',
'\n\t\t\t\t\t\tblank_graph();',
'\n\t\t\t\t\t} else {',
'\n\t\t\t\t\t\t$("#events").children().css("color", "#000000");',
'\n\t\t\t\t\t\td3.selectAll("#sankey svg .node rect")',
'\n\t\t\t\t\t\t\t.style("fill", function(d) { return events[num]["node_colors"][d.name] })',
'\n\t\t\t\t\t\t\t.style("stroke", function(d) { return events[num]["node_colors"][d.name] })',
'\n\t\t\t\t\t\td3.selectAll("#sankey svg .node text")',
'\n\t\t\t\t\t\t\t.style("fill", function(d) { return events[num]["text_colors"][d.name] })',
'\n\t\t\t\t\t\td3.selectAll("#sankey svg path.link")',
'\n\t\t\t\t\t\t\t.style("stroke", function (d) {',
'\n\t\t\t\t\t\t\t\tif (targets.indexOf(d.source.name) > -1) {',
'\n\t\t\t\t\t\t\t\t\tvar path = d.source.name + " " + d.target.name + " " + d.date;',
'\n\t\t\t\t\t\t\t\t\treturn events[num]["link_colors"][path];',
'\n\t\t\t\t\t\t\t\t} else {',
'\n\t\t\t\t\t\t\t\t\tvar path = d.target.name + " " + d.source.name + " " + d.date;',
'\n\t\t\t\t\t\t\t\t\treturn events[num]["link_colors"][path];',
'\n\t\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t\t});',
'\n\t\t\t\t\t\t\t$("#events h3:nth-of-type(" + (num + 1) + ")").css("color", "#FF6A6A");',
'\n\t\t\t\t\t\t\t$("#events p:nth-of-type(" + (num + 1) + ")").css("color", "#FF6A6A");',
'\n\t\t\t\t\t\t\tif (scroll) {',
'\n\t\t\t\t\t\t\t\tthreshold += ($("#events p:nth-of-type(" + (num + 1) + ")").outerHeight(true) + $("#events h3:nth-of-type(" + (num + 1) + ")").outerHeight(true));',
'\n\t\t\t\t\t\t\t\t\tif (threshold >= ($("#events").height() - 5)) {',
'\n\t\t\t\t\t\t\t\t\t\theight += $("#events p:nth-of-type(" + (num + 1) + ")").outerHeight(true) + $("#events h3:nth-of-type(" + (num + 1) + ")").outerHeight(true);',
'\n\t\t\t\t\t\t\t\t\t\t$("#events").animate({scrollTop: height}, "slow");',
'\n\t\t\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t\t} else {',
'\n\t\t\t\t\t\t\t\tif (step_counter === 1) {',
'\n\t\t\t\t\t\t\t\t\tthreshold = ($("#events").height() - 5) - ($("#events p:nth-of-type(" + (num + 1) + ")").outerHeight(true) + $("#events h3:nth-of-type(" + (num + 1) + ")").outerHeight(true));',
'\n\t\t\t\t\t\t\t\t} else {',
'\n\t\t\t\t\t\t\t\t\tthreshold -= ($("#events p:nth-of-type(" + (num + 1) + ")").outerHeight(true) + $("#events h3:nth-of-type(" + (num + 1) + ")").outerHeight(true));',
'\n\t\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t}',
'\n\t\t\t\t}',
'\n',
'\n\t\t\t\tfunction loop() {',
'\n\t\t\t\t\tnum = (num === len) ? -1 : num;',
'\n\t\t\t\t\tevent(num);',
'\n\t\t\t\t\tnum++;',
'\n\t\t\t\t\tstart();',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tfunction start() {',
'\n\t\t\t\t\ttimer = setTimeout(loop, (delay_sec * 1000));',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tfunction stop() {',
'\n\t\t\t\t\tclearTimeout(timer);',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tfunction forward() {',
'\n\t\t\t\t\tstop();',
'\n\t\t\t\t\tnum += 1;',
'\n\t\t\t\t\tstep_counter -= 1;',
'\n\t\t\t\t\tstep_counter = (step_counter < 0) ? 0 : step_counter;',
'\n\t\t\t\t\tevent(num);',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tfunction backward() {',
'\n\t\t\t\t\tstop();',
'\n\t\t\t\t\tnum -= 1;',
'\n\t\t\t\t\tstep_counter += 1;',
'\n\t\t\t\t\tevent(num, false);',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tfunction speed_up() {',
'\n\t\t\t\t\tstop();',
'\n\t\t\t\t\tdelay_sec -= .2;',
'\n\t\t\t\t\tdelay_sec = (delay_sec <= 0) ? 1 : delay_sec;',
'\n\t\t\t\t\tstart();',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tfunction speed_down() {',
'\n\t\t\t\t\tstop();',
'\n\t\t\t\t\tdelay_sec += .2;',
'\n\t\t\t\t\tdelay_sec = (delay_sec > 10) ? 10 : delay_sec;',
'\n\t\t\t\t\tstart();',
'\n\t\t\t\t}',
'\n',
'\n\t\t\t\t$("#start").on("click", start);',
'\n\t\t\t\t$("#stop").on("click", stop);',
'\n\t\t\t\t$("#step-forward").on("click", forward);',
'\n\t\t\t\t$("#step-back").on("click", backward);',
'\n\t\t\t\t$("#speed-up").on("click", speed_up);',
'\n\t\t\t\t$("#speed-down").on("click", speed_down);',
'\n\t\t\t\t$("#reset").on("click", reset);',
'\n\t\t\t});', sep = "")
return(gif)
}
.generate_after_js <- function(data, targets, target_color = "#FF6A6A", non_target_color = "#90EE90") {
entities <- unique(c(data$source, data$target))
if ("reverse" %in% names(data)) {
link_colors <- ifelse(data$reverse == 1, "#A020F0", "#66CCFF")
} else {
    link_colors <- rep("#66CCFF", nrow(data)) # one colour per link so link_colors[i] below is always defined
}
link_colors <- sapply(1:nrow(data), function(i) paste(data$source[i], data$target[i], data$date[i]) %>% {sprintf('"%s": "%s"', ., link_colors[i])}) %>% unlist() %>% unname()
link_colors <- paste(link_colors, collapse = ", ")
json = vector()
for (entity in entities) {
if (entity %in% targets) {
json <- c(json, sprintf('"%s": "%s"', entity, target_color))
} else {
json <- c(json, sprintf('"%s": "%s"', entity, non_target_color))
}
}
node_colors <- paste(json, collapse = ", ")
js <- paste('\n\t\t\tvar node_colors = JSON.parse(\'{', node_colors, '}\');',
'\n\t\t\tvar link_colors = JSON.parse(\'{', link_colors, '}\');',
'\n\t\t\td3.selectAll("#sankey svg .node rect")',
'\n\t\t\t\t.style("fill", function(d) { return node_colors[d.name] })',
'\n\t\t\t\t.style("stroke", function(d) { d3.rgb(node_colors[d.name]).darker(2); })',
'\n',
'\n\t\t\td3.selectAll("#sankey svg .node rect title")',
'\n\t\t\t\t.style("color", "#FF6A6A")',
'\n',
'\n\t\t\td3.selectAll("#sankey svg path.link")',
'\n\t\t\t\t.style("stroke", function(d) { return link_colors[d.source.name + " " + d.target.name + " " + d.date] })', sep = "")
return(js)
}
.generate_events_array <- function(data) {
dates <- data$date %>% as.Date(format = "%m/%d/%y") %>% unique() %>% sort() %>% as.character()
events_array <- vector("character")
for (d in dates) {
tmp <- data %>% filter(as.Date(date, format = "%m/%d/%y") == d)
tmp$value <- format(tmp$value, big.mark = ",")
events <- apply(tmp, 1, function(row) {
if ("reverse" %in% names(data)) {
if (row[5] == 1) {
return(sprintf("+ %s sends %s $%s", row[2], row[1], row[3]))
} else {
return(sprintf("+ %s sends %s $%s", row[1], row[2], row[3]))
}
} else {
return(sprintf("+ %s sends %s $%s", row[1], row[2], row[3]))
}
}) %>% unlist()
events <- paste(events, collapse = "<br>")
events_array_elem <- sprintf('"<h3 style = \'padding: 0px; margin: 0px;\'>%s</h3><p style = \'margin: 3px 0px 10px 15px; font-size: small;\'>%s</p>"',
as.character(d),
events)
events_array <- c(events_array, events_array_elem)
}
events_array <- paste(events_array, collapse = ", ") %>% {sprintf("[%s]", .)}
return(events_array)
}
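# Editorial description (added comment): .add_reverse_flags marks a row as
# "reverse" (flag = 1) when an earlier row already flows in the opposite direction
# between the same two entities, so circular A -> B -> A paths can be drawn without
# breaking the acyclic sankey layout; .reverse_paths then flips source/target on
# the flagged rows.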
.add_reverse_flags <- function(data) {
reverse <- sapply(2:nrow(data), function(i) {
j <- 1:(i - 1)
row <- c(data$source[i], data$target[i])
if (data$target[i] %in% data$source[j]) {
k <- which(data$source[j] == data$target[i])
if (any(data$source[i] %in% data$target[k])) {
row_duplicated <- data[j, ] %>% filter(source == row[1] & target == row[2]) %>% nrow()
if (row_duplicated) {
dupe <- which(data$source[j] %in% row[1] & data$target[j] %in% row[2]) %>% min()
rev <- which(data$target[j] %in% row[1] & data$source[j] %in% row[2]) %>% min()
if (dupe < rev) {
reverse <- 0
} else {
reverse <- 1
}
} else {
reverse <- 1
}
} else {
reverse <- 0
}
} else {
reverse <- 0
}
return(reverse)
}) %>% unlist() %>% unname()
data$reverse <- c(0, reverse)
return(data)
}
.reverse_paths <- function(data) {
if (!"reverse" %in% names(data)) data <- .add_reverse_flags(data)
data <- lapply(1:nrow(data), function(i) {
if (data$reverse[i] == 1) {
data <- data[i, c(2, 1, 3:5)]
names(data) <- c("source", "target", "value", "date", "reverse")
return(data)
} else {
return(data[i, ])
}
}) %>% plyr::rbind.fill()
return(data)
}
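# Editorial description (added comment): generate_html() is the entry point. It
# expects a data frame with source, target, value and date columns (dates in
# "%m/%d/%y" format) plus a vector of "target" entity names to highlight, and it
# writes a standalone HTML file (destfile) combining the D3 sankey, the event
# sidebar and the playback controls. Note that the after_script, gif and dir
# arguments are accepted but not currently used by the function body.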
generate_html <- function(data, targets, graph_title, page_title = "Sankey Diagram", after_script = TRUE, gif = TRUE, dir = ".", allow_circular_paths = TRUE, destfile = "index.html") {
if (!all(c("source", "target", "value", "date") %in% names(data))) stop("Your data doesn't look right. You should have a source, target, value, and date column.")
if (!require(dplyr)) stop("I know it's a faux pas, but dplyr is far too amazing to not use. As a result, the package does need to be installed for this code to work.")
if (allow_circular_paths) data <- .reverse_paths(data)
events <- .generate_events_array(data)
main <- .generate_main_js(data)
after <- .generate_after_js(data, targets)
gif <- .generate_gif_js(data, targets)
html <- paste('<!DOCTYPE HTML>',
'\n<html>',
'\n\t<head>',
'\n\t\t<meta charset = "utf-8">',
'\n\t\t<script type = "text/javascript" src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>',
'\n\t\t<script type = "text/javascript" src = "http://code.jquery.com/jquery-latest.min.js"></script>',
'\n\t\t<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/font-awesome.min.css">',
'\n\t\t<style>',
'\n\t\t\t.node rect {',
'\n\t\t\t\tcursor: move;',
'\n\t\t\t\tfill-opacity: .9;',
'\n\t\t\t\tshape-rendering: crispEdges;',
'\n\t\t\t}',
'\n',
'\n\t\t\t.node text {',
'\n\t\t\t\tpointer-events: none;',
'\n\t\t\t\ttext-shadow: 0 1px 0 #fff;',
'\n\t\t\t}',
'\n',
'\n\t\t\t.link {',
'\n\t\t\t\tfill: none;',
'\n\t\t\t\tstroke: #000;',
'\n\t\t\t\tstroke-opacity: .2;',
'\n\t\t\t}',
'\n',
'\n\t\t\t.link:hover {',
'\n\t\t\t\tstroke-opacity: .5;',
'\n\t\t\t}',
'\n',
'\n\t\t\tsvg {',
'\n\t\t\t\tfont: 10px sans-serif;',
'\n\t\t\t}',
'\n\t\t</style>',
'\n\t\t<style>',
'\n\t\t\t.rChart {',
'\n\t\t\t\tdisplay: block;',
'\n\t\t\t\tmargin-left: auto;',
'\n\t\t\t\tmargin-right: auto;',
'\n\t\t\t\twidth: 80%;',
'\n\t\t\t\theight: 100%;',
'\n\t\t\t}',
'\n',
'\n\t\t\t#sankey {',
'\n\t\t\t\twidth: 80%;',
'\n\t\t\t\tfloat: left;',
'\n\t\t\t}',
'\n',
'\n\t\t\t#events {',
'\n\t\t\t\twidth: 19%;',
'\n\t\t\t\tfloat: left;',
'\n\t\t\t\tfont-size: small;',
'\n\t\t\t\toverflow: auto;',
'\n\t\t\t}',
'\n',
'\n\t\t\t#events h3 {',
'\n\t\t\t\tfont-size: 12px;',
'\n\t\t\t\tfont-weight: bold;',
'\n\t\t\t}',
'\n',
'\n\t\t\t#events p {',
'\n\t\t\t\tfont-size: 10px;',
'\n\t\t\t}',
'\n',
'\n\t\t\t#controls {',
'\n\t\t\t\twidth: 80%;',
'\n\t\t\t\theight: auto;',
                '\n\t\t\t\tdisplay: block;',
'\n\t\t\t\tmargin-top: 10px;',
'\n\t\t\t\tmargin-left: auto;',
'\n\t\t\t\tmargin-right: auto;',
'\n\t\t\t\tfloat: left;',
'\n\t\t\t}',
'\n',
'\n\t\t\tsvg {',
'\n\t\t\t\theight: 100%;',
'\n\t\t\t}',
'\n',
'\n\t\t\t.divider {',
'\n\t\t\t\twidth: 5px;',
'\n\t\t\t\theight: auto;',
'\n\t\t\t\tdisplay: inline-block;',
'\n\t\t\t}',
'\n',
'\n\t\t\tfooter {',
'\n\t\t\t\twidth: 100%;',
'\n\t\t\t\tfloat: left;',
'\n\t\t\t\tfont-size: small;',
'\n\t\t\t\ttext-align: left;',
'\n\t\t\t\tmargin-top: 10px;',
'\n\t\t\t}',
'\n',
'\n\t\t</style>',
'\n\t\t<script>',
'\n\t\t\td3.sankey = function() {',
'\n\t\t\t\tvar sankey = {},',
'\n\t\t\t\tnodeWidth = 24,',
'\n\t\t\t\tnodePadding = 8,',
'\n\t\t\t\tsize = [1, 1],',
'\n\t\t\t\tnodes = [],',
'\n\t\t\t\tlinks = [];',
'\n',
'\n\t\t\t\tsankey.nodeWidth = function(_) {',
'\n\t\t\t\t\tif (!arguments.length) return nodeWidth;',
'\n\t\t\t\t\tnodeWidth = +_;',
'\n\t\t\t\t\treturn sankey;',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tsankey.nodePadding = function(_) {',
'\n\t\t\t\t\tif (!arguments.length) return nodePadding;',
'\n\t\t\t\t\tnodePadding = +_;',
'\n\t\t\t\t\treturn sankey;',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tsankey.nodes = function(_) {',
'\n\t\t\t\t\tif (!arguments.length) return nodes;',
'\n\t\t\t\t\tnodes = _;',
'\n\t\t\t\t\treturn sankey;',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tsankey.links = function(_) {',
'\n\t\t\t\t\tif (!arguments.length) return links;',
'\n\t\t\t\t\tlinks = _;',
'\n\t\t\t\t\treturn sankey;',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tsankey.size = function(_) {',
'\n\t\t\t\t\tif (!arguments.length) return size;',
'\n\t\t\t\t\tsize = _;',
'\n\t\t\t\t\treturn sankey;',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tsankey.layout = function(iterations) {',
'\n\t\t\t\t\tcomputeNodeLinks();',
'\n\t\t\t\t\tcomputeNodeValues();',
'\n\t\t\t\t\tcomputeNodeBreadths();',
'\n\t\t\t\t\tcomputeNodeDepths(iterations);',
'\n\t\t\t\t\tcomputeLinkDepths();',
'\n\t\t\t\t\treturn sankey;',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tsankey.relayout = function() {',
'\n\t\t\t\t\tcomputeLinkDepths();',
'\n\t\t\t\t\treturn sankey;',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\tsankey.link = function() {',
'\n\t\t\t\t\tvar curvature = .5;',
'\n\t\t\t\t\tfunction link(d) {',
'\n\t\t\t\t\t\tvar x0 = d.source.x + d.source.dx,',
'\n\t\t\t\t\t\tx1 = d.target.x,',
'\n\t\t\t\t\t\txi = d3.interpolateNumber(x0, x1),',
'\n\t\t\t\t\t\tx2 = xi(curvature),',
'\n\t\t\t\t\t\tx3 = xi(1 - curvature),',
'\n\t\t\t\t\t\ty0 = d.source.y + d.sy + d.dy / 2,',
'\n\t\t\t\t\t\ty1 = d.target.y + d.ty + d.dy / 2;',
'\n\t\t\t\t\t\treturn "M" + x0 + "," + y0 + "C" + x2 + "," + y0 + " " + x3 + "," + y1 + " " + x1 + "," + y1;',
'\n\t\t\t\t\t}',
'\n',
'\n\t\t\t\t\tlink.curvature = function(_) {',
'\n\t\t\t\t\t\tif (!arguments.length) return curvature;',
'\n\t\t\t\t\t\tcurvature = +_;',
'\n\t\t\t\t\t\treturn link;',
'\n\t\t\t\t\t};',
'\n',
'\n\t\t\t\t\treturn link;',
'\n\t\t\t\t};',
'\n',
'\n\t\t\t\t// Populate the sourceLinks and targetLinks for each node.',
'\n\t\t\t\t// Also, if the source and target are not objects, assume they are indices.',
'\n\t\t\t\tfunction computeNodeLinks() {',
'\n\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\tnode.sourceLinks = [];',
'\n\t\t\t\t\t\tnode.targetLinks = [];',
'\n\t\t\t\t\t});',
'\n\t\t\t\t\tlinks.forEach(function(link) {',
'\n\t\t\t\t\t\tvar source = link.source,',
'\n\t\t\t\t\t\ttarget = link.target;',
'\n\t\t\t\t\t\tif (typeof source === "number") source = link.source = nodes[link.source];',
'\n\t\t\t\t\t\tif (typeof target === "number") target = link.target = nodes[link.target];',
'\n\t\t\t\t\t\tsource.sourceLinks.push(link);',
'\n\t\t\t\t\t\ttarget.targetLinks.push(link);',
'\n\t\t\t\t\t});',
'\n\t\t\t\t}',
'\n\t\t\t\t// Compute the value (size) of each node by summing the associated links.',
'\n\t\t\t\tfunction computeNodeValues() {',
'\n\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\tnode.value = Math.max(d3.sum(node.sourceLinks, value), d3.sum(node.targetLinks, value));',
'\n\t\t\t\t\t});',
'\n\t\t\t\t}',
'\n\t\t\t\t// Iteratively assign the breadth (x-position) for each node.',
'\n\t\t\t\t// Nodes are assigned the maximum breadth of incoming neighbors plus one;',
'\n\t\t\t\t// nodes with no incoming links are assigned breadth zero, while',
'\n\t\t\t\t// nodes with no outgoing links are assigned the maximum breadth.',
'\n\t\t\t\tfunction computeNodeBreadths() {',
'\n\t\t\t\t\tvar remainingNodes = nodes,',
'\n\t\t\t\t\tnextNodes,',
'\n\t\t\t\t\tx = 0;',
'\n\t\t\t\t\twhile (remainingNodes.length) {',
'\n\t\t\t\t\t\tnextNodes = [];',
'\n\t\t\t\t\t\tremainingNodes.forEach(function(node) {',
'\n\t\t\t\t\t\t\tnode.x = x;',
'\n\t\t\t\t\t\t\tnode.dx = nodeWidth;',
'\n\t\t\t\t\t\t\tnode.sourceLinks.forEach(function(link) {',
'\n\t\t\t\t\t\t\t\tnextNodes.push(link.target);',
'\n\t\t\t\t\t\t\t});',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t\tremainingNodes = nextNodes;',
'\n\t\t\t\t\t\t++x;',
'\n\t\t\t\t\t}',
'\n\t\t\t\t\t//',
'\n\t\t\t\t\tmoveSinksRight(x);',
'\n\t\t\t\t\tscaleNodeBreadths((size[0] - nodeWidth) / (x - 1));',
'\n\t\t\t\t}',
'\n\t\t\t\tfunction moveSourcesRight() {',
'\n\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\tif (!node.targetLinks.length) {',
'\n\t\t\t\t\t\t\tnode.x = d3.min(node.sourceLinks, function(d) { return d.target.x; }) - 1;',
'\n\t\t\t\t\t\t}',
'\n\t\t\t\t\t});',
'\n\t\t\t\t}',
'\n\t\t\t\tfunction moveSinksRight(x) {',
'\n\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\tif (!node.sourceLinks.length) {',
'\n\t\t\t\t\t\t\tnode.x = x - 1;',
'\n\t\t\t\t\t\t}',
'\n\t\t\t\t\t});',
'\n\t\t\t\t}',
'\n\t\t\t\tfunction scaleNodeBreadths(kx) {',
'\n\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\tnode.x *= kx;',
'\n\t\t\t\t\t});',
'\n\t\t\t\t}',
'\n\t\t\t\tfunction computeNodeDepths(iterations) {',
'\n\t\t\t\t\tvar nodesByBreadth = d3.nest()',
'\n\t\t\t\t\t.key(function(d) { return d.x; })',
'\n\t\t\t\t\t.sortKeys(d3.ascending)',
'\n\t\t\t\t\t.entries(nodes)',
'\n\t\t\t\t\t.map(function(d) { return d.values; });',
'\n\t\t\t\t\t//',
'\n\t\t\t\t\tinitializeNodeDepth();',
'\n\t\t\t\t\tresolveCollisions();',
'\n\t\t\t\t\tfor (var alpha = 1; iterations > 0; --iterations) {',
'\n\t\t\t\t\t\trelaxRightToLeft(alpha *= .99);',
'\n\t\t\t\t\t\tresolveCollisions();',
'\n\t\t\t\t\t\trelaxLeftToRight(alpha);',
'\n\t\t\t\t\t\tresolveCollisions();',
'\n\t\t\t\t\t}',
'\n\t\t\t\t\tfunction initializeNodeDepth() {',
'\n\t\t\t\t\t\tvar ky = d3.min(nodesByBreadth, function(nodes) {',
'\n\t\t\t\t\t\t\treturn (size[1] - (nodes.length - 1) * nodePadding) / d3.sum(nodes, value);',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t\tnodesByBreadth.forEach(function(nodes) {',
'\n\t\t\t\t\t\t\tnodes.forEach(function(node, i) {',
'\n\t\t\t\t\t\t\t\tnode.y = i;',
'\n\t\t\t\t\t\t\t\tnode.dy = node.value * ky;',
'\n\t\t\t\t\t\t\t});',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t\tlinks.forEach(function(link) {',
'\n\t\t\t\t\t\t\tlink.dy = link.value * ky;',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t}',
'\n\t\t\t\t\tfunction relaxLeftToRight(alpha) {',
'\n\t\t\t\t\t\tnodesByBreadth.forEach(function(nodes, breadth) {',
'\n\t\t\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\t\t\tif (node.targetLinks.length) {',
'\n\t\t\t\t\t\t\t\t\tvar y = d3.sum(node.targetLinks, weightedSource) / d3.sum(node.targetLinks, value);',
'\n\t\t\t\t\t\t\t\t\tnode.y += (y - center(node)) * alpha;',
'\n\t\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t\t});',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t\tfunction weightedSource(link) {',
'\n\t\t\t\t\t\t\treturn center(link.source) * link.value;',
'\n\t\t\t\t\t\t}',
'\n\t\t\t\t\t}',
'\n\t\t\t\t\tfunction relaxRightToLeft(alpha) {',
'\n\t\t\t\t\t\tnodesByBreadth.slice().reverse().forEach(function(nodes) {',
'\n\t\t\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\t\t\tif (node.sourceLinks.length) {',
'\n\t\t\t\t\t\t\t\t\tvar y = d3.sum(node.sourceLinks, weightedTarget) / d3.sum(node.sourceLinks, value);',
'\n\t\t\t\t\t\t\t\t\tnode.y += (y - center(node)) * alpha;',
'\n\t\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t\t});',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t\tfunction weightedTarget(link) {',
'\n\t\t\t\t\t\t\treturn center(link.target) * link.value;',
'\n\t\t\t\t\t\t}',
'\n\t\t\t\t\t}',
'\n\t\t\t\t\tfunction resolveCollisions() {',
'\n\t\t\t\t\t\tnodesByBreadth.forEach(function(nodes) {',
'\n\t\t\t\t\t\t\tvar node,',
'\n\t\t\t\t\t\t\tdy,',
'\n\t\t\t\t\t\t\ty0 = 0,',
'\n\t\t\t\t\t\t\tn = nodes.length,',
'\n\t\t\t\t\t\t\ti;',
'\n\t\t\t\t\t\t\t// Push any overlapping nodes down.',
'\n\t\t\t\t\t\t\tnodes.sort(ascendingDepth);',
'\n\t\t\t\t\t\t\tfor (i = 0; i < n; ++i) {',
'\n\t\t\t\t\t\t\t\tnode = nodes[i];',
'\n\t\t\t\t\t\t\t\tdy = y0 - node.y;',
'\n\t\t\t\t\t\t\t\tif (dy > 0) node.y += dy;',
'\n\t\t\t\t\t\t\t\t\ty0 = node.y + node.dy + nodePadding;',
'\n\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t\t// If the bottommost node goes outside the bounds, push it back up.',
'\n\t\t\t\t\t\t\tdy = y0 - nodePadding - size[1];',
'\n\t\t\t\t\t\t\tif (dy > 0) {',
'\n\t\t\t\t\t\t\t\ty0 = node.y -= dy;',
'\n\t\t\t\t\t\t\t\t// Push any overlapping nodes back up.',
'\n\t\t\t\t\t\t\t\tfor (i = n - 2; i >= 0; --i) {',
'\n\t\t\t\t\t\t\t\t\tnode = nodes[i];',
'\n\t\t\t\t\t\t\t\t\tdy = node.y + node.dy + nodePadding - y0;',
'\n\t\t\t\t\t\t\t\t\tif (dy > 0) node.y -= dy;',
'\n\t\t\t\t\t\t\t\t\ty0 = node.y;',
'\n\t\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t}',
'\n\t\t\t\t\tfunction ascendingDepth(a, b) {',
'\n\t\t\t\t\t\treturn a.y - b.y;',
'\n\t\t\t\t\t}',
'\n\t\t\t\t}',
'\n\t\t\t\tfunction computeLinkDepths() {',
'\n\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\tnode.sourceLinks.sort(ascendingTargetDepth);',
'\n\t\t\t\t\t\tnode.targetLinks.sort(ascendingSourceDepth);',
'\n\t\t\t\t\t});',
'\n\t\t\t\t\tnodes.forEach(function(node) {',
'\n\t\t\t\t\t\tvar sy = 0, ty = 0;',
'\n\t\t\t\t\t\tnode.sourceLinks.forEach(function(link) {',
'\n\t\t\t\t\t\t\tlink.sy = sy;',
'\n\t\t\t\t\t\t\tsy += link.dy;',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t\tnode.targetLinks.forEach(function(link) {',
'\n\t\t\t\t\t\t\tlink.ty = ty;',
'\n\t\t\t\t\t\t\tty += link.dy;',
'\n\t\t\t\t\t\t});',
'\n\t\t\t\t\t});',
'\n\t\t\t\t\tfunction ascendingSourceDepth(a, b) {',
'\n\t\t\t\t\t\treturn a.source.y - b.source.y;',
'\n\t\t\t\t\t}',
'\n',
'\n\t\t\t\t\tfunction ascendingTargetDepth(a, b) {',
'\n\t\t\t\t\t\treturn a.target.y - b.target.y;',
'\n\t\t\t\t\t}',
'\n\t\t\t\t}',
'\n\t\t\t\tfunction center(node) {',
'\n\t\t\t\t\treturn node.y + node.dy / 2;',
'\n\t\t\t\t}',
'\n\t\t\t\tfunction value(link) {',
'\n\t\t\t\t\treturn link.value;',
'\n\t\t\t\t}',
'\n\t\t\t\treturn sankey;',
'\n\t\t\t};',
'\n\t\t</script>',
'\n',
'\n\t\t<title>', page_title, '</title>',
'\n\t</head>',
'\n',
'\n\t<body>',
'\n\t\t<div style = "text-align: center;"><h1>', graph_title, '</h1></div>',
'\n\t\t<div>',
'\n\t\t\t<div id = "sankey" class = "rChart rCharts_d3_sankey" align = "center"></div>',
'\n\t\t\t<div id = "events">',
'\n\t\t\t\t<script>',
'\n\t\t\t\t\t$(function() {',
'\n\t\t\t\t\t\tvar events = ', events, ';',
'\n\t\t\t\t\t\tfor (i = 0; i < events.length; i++) {',
'\n\t\t\t\t\t\t\t$("#events").append(events[i]);',
'\n\t\t\t\t\t\t}',
'\n\t\t\t\t\t\t$("#sankey").css("height", ($(window).height() * .80) + "px");',
'\n\t\t\t\t\t\t$("#events").css("height", ($(window).height() * .80) + "px");',
'\n\t\t\t\t\t});',
'\n\t\t\t\t</script>',
'\n\t\t\t</div>',
'\n\t\t</div>',
'\n\t\t<div id = "controls" align = "center">',
'\n\t\t\t<button type = "button" class = "btn" id = "start">',
'\n\t\t\t\t<i class = "fa fa-play"></i>',
'\n\t\t\t</button>',
'\n\t\t\t<button type = "button" class = "btn" id = "stop">',
'\n\t\t\t\t<i class = "fa fa-stop"></i>',
'\n\t\t\t</button>',
'\n\t\t\t<button type = "button" class = "btn" id = "step-back">',
'\n\t\t\t\t<i class = "fa fa-step-backward"></i>',
'\n\t\t\t</button>',
'\n\t\t\t<button type = "button" class = "btn" id = "step-forward">',
'\n\t\t\t\t<i class = "fa fa-step-forward"></i>',
'\n\t\t\t</button>',
'\n\t\t\t<div class = "divider"></div>',
'\n\t\t\t<button type = "button" class = "btn" id = "speed-down">',
'\n\t\t\t\t<i class = "fa fa-minus"></i>',
'\n\t\t\t</button>',
'\n\t\t\t<button type = "button" class = "btn" id = "speed-up">',
'\n\t\t\t\t<i class = "fa fa-plus"></i>',
'\n\t\t\t</button>',
'\n\t\t\t<div class = "divider"></div>',
'\n\t\t\t<button type = "button" class = "btn" id = "reset">',
'\n\t\t\t\t<i class = "fa fa-rotate-left"></i>',
'\n\t\t\t</button>',
'\n\t\t</div>',
'\n\t\t<script>',
main,
'\n\t\t</script>',
'\n\t\t<script>',
after,
'\n\t\t</script>',
'\n\t\t<script>',
gif,
'\n\t\t</script>',
'\n\t\t<footer>',
'\n\t\t\t<p>Very special thanks goes out to <a href = "https://github.com/ramnathv">Ramnath Vaidyanathan</a> and <a href = "https://github.com/timelyportfolio">@timelyportfolio</a> for their amazing work on getting d3 graphics to work with R.</p>',
'\n\t\t</footer>',
'\n\t</body>',
'\n</html>', sep = "")
if (is.na(destfile)) {
return(html)
} else {
writeLines(text = html, con = destfile)
}
}
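# Hedged usage sketch (added for illustration; not part of the original file).
# The data frame below is made up; any data with source/target/value/date
# columns in these formats should work, assuming the dplyr and plyr packages
# are installed. Wrapped in if (FALSE) so sourcing this file stays side-effect free.
if (FALSE) {
  txns <- data.frame(
    source = c("Alice", "Bob", "Alice"),
    target = c("Bob", "Carol", "Carol"),
    value  = c(100, 250, 75),
    date   = c("01/05/21", "01/06/21", "01/07/21"),
    stringsAsFactors = FALSE
  )
  generate_html(txns, targets = "Alice",
                graph_title = "Example transactions",
                destfile = "example_sankey.html")
}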
|
/sankey.R
|
no_license
|
dondealban/sankey-2
|
R
| false | false | 45,468 |
r
|
# Paste these two commands into the Terminal (not the R console), replacing the
# placeholder GitHub user email/name with your own, then run them:
# git config --global user.email "you@example.com"
# git config --global user.name "Your Name"
#package install
install.packages(c("usethis", "remotes"))
remotes::install_github("rstudio/rmarkdown")
# Or install via the RStudio menu: [Tools] - [Install Packages...] - Packages: postcards
library(postcards)
create_postcard()
# creates the 'index.Rmd' R Markdown document
install.packages("distill")
library(distill)
create_website(dir=".",title="iyo-distill",gh_pages=TRUE)
library(distill)
create_post("AmesHousing", slug = "HW_AmesHousing")
library(distill)
create_post("HousePrice", slug = "HW_HousePrice")
|
/Lec1.R
|
no_license
|
Hannah646/skku
|
R
| false | false | 639 |
r
|
library(readr)
data(iris)
#Randomizing the observations.
gp <- runif(150,0,1)
iris <- iris[order(gp),]
#Implementing K-means algorithm
set.seed(9000)
call_kmeans <- function(data,k,iter){
tol <- 0.01
sse <- 100000
#Initializing the first k points as the centroids
centroids <- data[1:k,]
i <- 1
while(sse>tol & i<=iter){
i <- i+1
#Calculating euclidean distance from the centers
distances <- dist_fun(data,centroids)
    #Renaming the distance matrix columns by centroid number
colnames(distances) <- c(1:k)
    #Finding which cluster each observation belongs to
min_distance <- colnames(distances)[apply(distances,1,which.min)]
    #Grouping observations by cluster and computing new centroids as the per-cluster means
new_centroids <- aggregate(.~min_distance,data,FUN=mean)
    #Calculating the total Euclidean shift between the previous and current centroids (used as the convergence check)
sse <- sum(sqrt(rowSums((new_centroids[,-1] - centroids)^2)))
#Updating new centroid values
centroids <- new_centroids[,2:ncol(new_centroids)]
}
#Return the cluster to which each observation belongs to.
min_distance
}
dist_fun <- function(x1,x2){
dist_matrix <- matrix(rep(0),nrow(x1),nrow(x2))
for(i in 1:nrow(x1)){
for( j in 1:nrow(x2)){
dist_matrix[i,j] <- as.matrix(sqrt(sum((x1[i,] - x2[j,])^2)))
}
}
dist_matrix
}
dist_fun_manhattan <- function(x1,x2){
dist_matrix <- matrix(rep(0),nrow(x1),nrow(x2))
for(i in 1:nrow(x1)){
for( j in 1:nrow(x2)){
dist_matrix[i,j] <- as.matrix((sum(abs(x1[i,] - x2[j,]))))
}
}
dist_matrix
}
results <- call_kmeans(iris[,c(1:4)],3,200)
#Evaluating k-means against the true species labels
table(results,as.numeric(factor(iris$Species)))
#Since k-means is unsupervised, the cluster labels need not match the factor levels of Species; in this case they happen to match.
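#Optional sanity check (added for illustration; wrapped in if (FALSE) so it does
#not run on source). The built-in kmeans() may number the clusters differently.
if (FALSE) {
  builtin <- kmeans(iris[, 1:4], centers = 3, nstart = 20)
  table(results, builtin$cluster)
}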
|
/kmeans.R
|
no_license
|
Vivek-95/K-means-Algorithm
|
R
| false | false | 1,894 |
r
|
# type-stable indexing of vector and matrix type objects
# @param x an R object typically a vector or matrix
# @param i optional index; if NULL, x is returned unchanged
# @param row indicating if rows or cols should be indexed
# only relevant if x has two or three dimensions
p <- function(x, i = NULL, row = TRUE) {
if (isTRUE(length(dim(x)) > 3L)) {
stop2("'p' can only handle objects up to 3 dimensions.")
}
if (!length(i)) {
out <- x
} else if (length(dim(x)) == 2L) {
if (row) {
out <- x[i, , drop = FALSE]
} else {
out <- x[, i, drop = FALSE]
}
} else if (length(dim(x)) == 3L) {
if (row) {
out <- x[i, , , drop = FALSE]
} else {
out <- x[, i, , drop = FALSE]
}
} else {
out <- x[i]
}
out
}
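# Hedged usage sketch (added; not in the original source). Object names are
# illustrative only; wrapped in if (FALSE) so sourcing the file has no side effects.
if (FALSE) {
  m <- matrix(1:6, nrow = 3)
  p(m, 2)               # second row, kept as a 1 x 2 matrix
  p(m, 1, row = FALSE)  # first column, kept as a 3 x 1 matrix
  p(1:5, 3)             # plain vector indexing returns 3
  p(m)                  # i = NULL returns m unchanged
}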
# extract parts of an object with selective dropping of dimensions
# @param x,...,drop same as in x[..., drop]
# @param drop_dim optional numeric or logical vector controlling
# which dimensions to drop. Will overwrite argument 'drop'.
extract <- function(x, ..., drop = FALSE, drop_dim = NULL) {
if (!length(dim(x))) {
return(x[...])
}
if (length(drop_dim)) {
drop <- FALSE
} else {
drop <- as_one_logical(drop)
}
out <- x[..., drop = drop]
if (drop || !length(drop_dim) || any(dim(out) == 0L)) {
return(out)
}
if (is.numeric(drop_dim)) {
drop_dim <- seq_along(dim(x)) %in% drop_dim
}
if (!is.logical(drop_dim)) {
stop2("'drop_dim' needs to be logical or numeric.")
}
keep <- dim(out) > 1L | !drop_dim
new_dim <- dim(out)[keep]
if (length(new_dim) == 1L) {
# use vectors instead of 1D arrays
new_dim <- NULL
}
dim(out) <- new_dim
out
}
# safely extract columns without dropping other dimensions
# @param x an array
# @param i column index
extract_col <- function(x, i) {
ldim <- length(dim(x))
if (ldim < 2L) {
return(x)
}
commas <- collapse(rep(", ", ldim - 2))
expr <- paste0("extract(x, , i", commas, ", drop_dim = 2)")
eval2(expr)
}
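# Hedged usage sketch (added; not in the original source).
if (FALSE) {
  a <- array(1:24, dim = c(2, 3, 4))
  extract(a, , 2, , drop_dim = 2)  # drop only the 2nd margin -> 2 x 4 matrix
  extract_col(a, 2)                # same result via the column helper
}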
seq_rows <- function(x) {
seq_len(NROW(x))
}
seq_cols <- function(x) {
seq_len(NCOL(x))
}
seq_dim <- function(x, dim) {
dim <- as_one_numeric(dim)
if (dim == 1) {
len <- NROW(x)
} else if (dim == 2) {
len <- NCOL(x)
} else {
len <- dim(x)[dim]
}
if (length(len) == 1L && !isNA(len)) {
out <- seq_len(len)
} else {
out <- integer(0)
}
out
}
# match rows in x with rows in y
match_rows <- function(x, y, ...) {
x <- as.data.frame(x)
y <- as.data.frame(y)
x <- do_call("paste", c(x, sep = "\r"))
y <- do_call("paste", c(y, sep = "\r"))
match(x, y, ...)
}
# find elements of 'x' matching sub-elements passed via 'ls' and '...'
find_elements <- function(x, ..., ls = list(), fun = '%in%') {
x <- as.list(x)
if (!length(x)) {
return(logical(0))
}
out <- rep(TRUE, length(x))
ls <- c(ls, list(...))
if (!length(ls)) {
return(out)
}
if (is.null(names(ls))) {
stop("Argument 'ls' must be named.")
}
for (name in names(ls)) {
tmp <- lapply(x, "[[", name)
out <- out & do_call(fun, list(tmp, ls[[name]]))
}
out
}
# find rows of 'x' matching columns passed via 'ls' and '...'
# similar to 'find_elements' but for matrix like objects
find_rows <- function(x, ..., ls = list(), fun = '%in%') {
x <- as.data.frame(x)
if (!nrow(x)) {
return(logical(0))
}
out <- rep(TRUE, nrow(x))
ls <- c(ls, list(...))
if (!length(ls)) {
return(out)
}
if (is.null(names(ls))) {
stop("Argument 'ls' must be named.")
}
for (name in names(ls)) {
out <- out & do_call(fun, list(x[[name]], ls[[name]]))
}
out
}
# subset 'x' using arguments passed via 'ls' and '...'
subset2 <- function(x, ..., ls = list(), fun = '%in%') {
x[find_rows(x, ..., ls = ls, fun = fun), , drop = FALSE]
}
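# Hedged usage sketch (added; not in the original source).
if (FALSE) {
  df <- data.frame(g = c("a", "a", "b"), t = c(1, 2, 1))
  find_rows(df, g = "a")       # TRUE TRUE FALSE
  subset2(df, g = "a", t = 1)  # keeps only the first row
}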
# convert array to list of elements with reduced dimension
# @param x an array of dimension d
# @return a list of arrays of dimension d-1
array2list <- function(x) {
if (is.null(dim(x))) {
return(as.list(x))
}
ndim <- length(dim(x))
out <- list(length = dim(x)[ndim])
ind <- collapse(rep(",", ndim - 1))
for (i in seq_len(dim(x)[ndim])) {
out[[i]] <- eval(parse(text = paste0("x[", ind, i, "]")))
if (length(dim(x)) > 2) {
# avoid accidental dropping of other dimensions
dim(out[[i]]) <- dim(x)[-ndim]
}
}
names(out) <- dimnames(x)[[ndim]]
out
}
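# Hedged usage sketch (added; not in the original source).
if (FALSE) {
  a <- array(1:12, dim = c(2, 3, 2), dimnames = list(NULL, NULL, c("A", "B")))
  array2list(a)  # list of two 2 x 3 matrices named "A" and "B"
}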
# move elements to the start of a named object
move2start <- function(x, first) {
x[c(first, setdiff(names(x), first))]
}
# wrapper around replicate but without simplifying
repl <- function(expr, n) {
replicate(n, expr, simplify = FALSE)
}
# find the first element in A that is greater than target
# @param A a matrix
# @param target a vector of length nrow(A)
# @param i column of A being checked first
# @return a vector of the same length as target containing the
# column ids where A[,i] was first greater than target
first_greater <- function(A, target, i = 1) {
ifelse(target <= A[, i] | ncol(A) == i, i, first_greater(A, target, i + 1))
}
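# Hedged usage sketch (added; not in the original source).
if (FALSE) {
  A <- rbind(c(0.2, 0.5, 1), c(0.3, 0.6, 1))
  first_greater(A, target = c(0.25, 0.9))  # returns c(2, 3)
}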
# check if an object is NULL
isNULL <- function(x) {
is.null(x) || ifelse(is.vector(x), all(sapply(x, is.null)), FALSE)
}
# recursively removes NULL entries from an object
rmNULL <- function(x, recursive = TRUE) {
x <- Filter(Negate(isNULL), x)
if (recursive) {
x <- lapply(x, function(x) if (is.list(x)) rmNULL(x) else x)
}
x
}
# find the first argument that is not NULL
first_not_null <- function(...) {
dots <- list(...)
out <- NULL
i <- 1L
while (isNULL(out) && i <= length(dots)) {
if (!isNULL(dots[[i]])) {
out <- dots[[i]]
}
i <- i + 1L
}
out
}
isNA <- function(x) {
length(x) == 1L && is.na(x)
}
is_equal <- function(x, y, ...) {
isTRUE(all.equal(x, y, ...))
}
# check if 'x' will behave like a factor in design matrices
is_like_factor <- function(x) {
is.factor(x) || is.character(x) || is.logical(x)
}
# as.factor but allows to pass levels
as_factor <- function(x, levels = NULL) {
if (is.null(levels)) {
out <- as.factor(x)
} else {
out <- factor(x, levels = levels)
}
out
}
# coerce 'x' to a single logical value
as_one_logical <- function(x, allow_na = FALSE) {
s <- substitute(x)
x <- as.logical(x)
if (length(x) != 1L || anyNA(x) && !allow_na) {
s <- deparse_combine(s, max_char = 100L)
stop2("Cannot coerce ", s, " to a single logical value.")
}
x
}
# coerce 'x' to a single numeric value
as_one_numeric <- function(x, allow_na = FALSE) {
s <- substitute(x)
x <- SW(as.numeric(x))
if (length(x) != 1L || anyNA(x) && !allow_na) {
s <- deparse_combine(s, max_char = 100L)
stop2("Cannot coerce ", s, " to a single numeric value.")
}
x
}
# coerce 'x' to a single character string
as_one_character <- function(x, allow_na = FALSE) {
s <- substitute(x)
x <- as.character(x)
if (length(x) != 1L || anyNA(x) && !allow_na) {
s <- deparse_combine(s, max_char = 100L)
stop2("Cannot coerce ", s, " to a single character value.")
}
x
}
has_rows <- function(x) {
isTRUE(nrow(x) > 0L)
}
has_cols <- function(x) {
isTRUE(ncol(x) > 0L)
}
# expand arguments to the same length
# @param ... arguments to expand
# @param length optional expansion length
# otherwise taken to be the largest supplied length
# @return a data.frame with one variable per element in '...'
expand <- function(..., dots = list(), length = NULL) {
dots <- c(dots, list(...))
max_dim <- NULL
if (is.null(length)) {
lengths <- lengths(dots)
length <- max(lengths)
max_dim <- dim(dots[[match(length, lengths)]])
}
out <- as.data.frame(lapply(dots, rep, length.out = length))
structure(out, max_dim = max_dim)
}
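# Hedged usage sketch (added; not in the original source).
if (FALSE) {
  expand(a = 1:2, b = 0)  # data.frame with a = c(1, 2) and b = c(0, 0)
}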
# structure but ignore NULL
structure_not_null <- function(.Data, ...) {
if (!is.null(.Data)) {
.Data <- structure(.Data, ...)
}
.Data
}
# remove specified attributes
rm_attr <- function(x, attr) {
attributes(x)[attr] <- NULL
x
}
# unidimensional subsetting while keeping attributes
subset_keep_attr <- function(x, y) {
att <- attributes(x)
x <- x[y]
att$names <- names(x)
attributes(x) <- att
x
}
# check if 'x' is a whole number (integer)
is_wholenumber <- function(x, tol = .Machine$double.eps) {
if (is.numeric(x)) {
out <- abs(x - round(x)) < tol
} else {
out <- rep(FALSE, length(x))
}
dim(out) <- dim(x)
out
}
# helper function to check symmetry of a matrix
is_symmetric <- function(x, tol = sqrt(.Machine$double.eps)) {
isSymmetric(x, tol = tol, check.attributes = FALSE)
}
# unlist lapply output
ulapply <- function(X, FUN, ..., recursive = TRUE, use.names = TRUE) {
unlist(lapply(X, FUN, ...), recursive, use.names)
}
# rbind lapply output
rblapply <- function(X, FUN, ...) {
do_call(rbind, lapply(X, FUN, ...))
}
# cbind lapply output
cblapply <- function(X, FUN, ...) {
do_call(cbind, lapply(X, FUN, ...))
}
# find variables in a character string or expression
all_vars <- function(expr, ...) {
if (is.character(expr)) {
expr <- parse(text = expr)
}
all.vars(expr, ...)
}
# append list(...) to x
lc <- function(x, ...) {
dots <- rmNULL(list(...), recursive = FALSE)
c(x, dots)
}
'c<-' <- function(x, value) {
c(x, value)
}
'lc<-' <- function(x, value) {
lc(x, value)
}
collapse <- function(..., sep = "") {
paste(..., sep = sep, collapse = "")
}
collapse_comma <- function(...) {
paste0("'", ..., "'", collapse = ", ")
}
# add characters to an existing string
'str_add<-' <- function(x, start = FALSE, value) {
if (start) paste0(value, x) else paste0(x, value)
}
# add list of characters to an existing list
'str_add_list<-' <- function(x, start = FALSE, value) {
stopifnot(is.list(x), is.list(value))
out <- if (start) list(value, x) else list(x, value)
collapse_lists(ls = out)
}
# type-stable if clause for strings with default else output
str_if <- function(cond, yes, no = "") {
cond <- as_one_logical(cond)
if (cond) as.character(yes) else as.character(no)
}
# select elements which match a regex pattern
str_subset <- function(x, pattern, ...) {
x[grepl(pattern, x, ...)]
}
# similar to glue::glue but specialized for generating Stan code
glue <- function(..., sep = "", collapse = NULL, envir = parent.frame(),
open = "{", close = "}", na = "NA") {
dots <- list(...)
dots <- dots[lengths(dots) > 0L]
args <- list(
.x = NULL, .sep = sep, .envir = envir, .open = open,
.close = close, .na = na, .trim = FALSE,
.transformer = zero_length_transformer
)
out <- do_call(glue::glue_data, c(dots, args))
if (!is.null(collapse)) {
collapse <- as_one_character(collapse)
out <- paste0(out, collapse = collapse)
}
out
}
# used in 'glue' to handle zero-length inputs
zero_length_transformer <- function(text, envir) {
out <- glue::identity_transformer(text, envir)
if (!length(out)) {
out <- ""
}
out
}
# collapse strings evaluated with glue
cglue <- function(..., envir = parent.frame()) {
glue(..., envir = envir, collapse = "")
}
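# Hedged usage sketch (added; not in the original source); requires the glue package.
if (FALSE) {
  x <- c("alpha", "beta")
  glue("real {x};")   # "real alpha;" "real beta;"
  cglue("real {x}; ") # both strings collapsed into a single string
}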
# like stats:::na.omit.data.frame but allows ignoring selected variables
# keeps NAs in variables with attribute keep_na = TRUE
na.omit2 <- function(object, ...) {
stopifnot(is.data.frame(object))
omit <- logical(nrow(object))
for (j in seq_along(object)) {
x <- object[[j]]
keep_na <- isTRUE(attr(x, "keep_na", TRUE))
if (!is.atomic(x) || keep_na) {
next
}
x <- is.na(x)
d <- dim(x)
if (is.null(d) || length(d) != 2L) {
omit <- omit | x
} else {
for (ii in seq_len(d[2L])) {
omit <- omit | x[, ii]
}
}
}
if (any(omit > 0L)) {
out <- object[!omit, , drop = FALSE]
temp <- setNames(seq(omit)[omit], attr(object, "row.names")[omit])
attr(temp, "class") <- "omit"
attr(out, "na.action") <- temp
warning2("Rows containing NAs were excluded from the model.")
} else {
out <- object
}
out
}
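# Hedged usage sketch (added; not in the original source).
if (FALSE) {
  df <- data.frame(y = c(1, NA, 3), x = c(NA, 2, 3))
  attr(df$y, "keep_na") <- TRUE
  na.omit2(df)  # drops only row 1 (NA in 'x'); the NA in 'y' is kept
}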
# check if a certain package is installed
# @param package package name
# @param version optional minimal version number to require
require_package <- function(package, version = NULL) {
if (!requireNamespace(package, quietly = TRUE)) {
stop2("Please install the '", package, "' package.")
}
if (!is.null(version)) {
version <- as.package_version(version)
if (utils::packageVersion(package) < version) {
stop2("Please install package '", package,
"' version ", version, " or higher.")
}
}
invisible(TRUE)
}
# rename specified patterns in a character vector
# @param x a character vector to be renamed
# @param pattern the regular expressions in x to be replaced
# @param replacement the replacements
# @param fixed same as for 'gsub'
# @param check_dup: logical; check for duplications in x after renaming
# @param ... passed to 'gsub'
# @return renamed character vector of the same length as x
rename <- function(x, pattern = NULL, replacement = NULL,
fixed = TRUE, check_dup = FALSE, ...) {
pattern <- as.character(pattern)
replacement <- as.character(replacement)
if (!length(pattern) && !length(replacement)) {
    # default renaming to avoid special characters in coefficient names
pattern <- c(
" ", "(", ")", "[", "]", ",", "\"", "'",
"?", "+", "-", "*", "/", "^", "="
)
replacement <- c(rep("", 9), "P", "M", "MU", "D", "E", "EQ")
}
if (length(replacement) == 1L) {
replacement <- rep(replacement, length(pattern))
}
stopifnot(length(pattern) == length(replacement))
# avoid zero-length pattern error
has_chars <- nzchar(pattern)
pattern <- pattern[has_chars]
replacement <- replacement[has_chars]
out <- x
for (i in seq_along(pattern)) {
out <- gsub(pattern[i], replacement[i], out, fixed = fixed, ...)
}
dup <- duplicated(out)
if (check_dup && any(dup)) {
dup <- x[out %in% out[dup]]
stop2("Internal renaming led to duplicated names. \n",
"Occured for: ", collapse_comma(dup))
}
out
}
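# Hedged usage sketch (added; not in the original source).
if (FALSE) {
  rename("sigma[1]")           # default rules strip brackets -> "sigma1"
  rename("x^2", "^", "_pow_")  # custom pattern/replacement -> "x_pow_2"
}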
# collapse strings having the same name in different lists
# @param ... named lists
# @param ls a list of named lists
# @return a named list containing the collapsed strings
collapse_lists <- function(..., ls = list()) {
ls <- c(list(...), ls)
elements <- unique(unlist(lapply(ls, names)))
args <- c(FUN = collapse, lapply(ls, "[", elements), SIMPLIFY = FALSE)
out <- do_call(mapply, args)
names(out) <- elements
out
}
# create a named list using object names
nlist <- function(...) {
m <- match.call()
dots <- list(...)
no_names <- is.null(names(dots))
has_name <- if (no_names) FALSE else nzchar(names(dots))
if (all(has_name)) return(dots)
nms <- as.character(m)[-1]
if (no_names) {
names(dots) <- nms
} else {
names(dots)[!has_name] <- nms[!has_name]
}
dots
}
# initialize a named list
# @param names names of the elements
# @param values optional values of the elements
named_list <- function(names, values = NULL) {
if (!is.null(values)) {
if (length(values) <= 1L) {
values <- replicate(length(names), values)
}
values <- as.list(values)
stopifnot(length(values) == length(names))
} else {
values <- vector("list", length(names))
}
setNames(values, names)
}
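# Hedged usage sketch (added; not in the original source).
if (FALSE) {
  a <- 1; b <- 2
  nlist(a, b, c = 3)                    # list(a = 1, b = 2, c = 3)
  named_list(c("x", "y"), values = 0)   # list(x = 0, y = 0)
}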
#' Execute a Function Call
#'
#' Execute a function call similar to \code{\link{do.call}}, but without
#' deparsing function arguments.
#'
#' @param what Either a function or a non-empty character string naming the
#' function to be called.
#' @param args A list of arguments to the function call. The names attribute of
#' \code{args} gives the argument names.
#' @param pkg Optional name of the package in which to search for the
#' function if \code{what} is a character string.
#'
#' @return The result of the (evaluated) function call.
#'
#' @keywords internal
#' @export
do_call <- function(what, args, pkg = NULL) {
call <- ""
if (length(args)) {
if (!is.list(args)) {
stop2("'args' must be a list.")
}
fun_args <- names(args)
if (is.null(fun_args)) {
fun_args <- rep("", length(args))
} else {
nzc <- nzchar(fun_args)
fun_args[nzc] <- paste0("`", fun_args[nzc], "` = ")
}
names(args) <- paste0(".x", seq_along(args))
call <- paste0(fun_args, names(args), collapse = ",")
} else {
args <- list()
}
if (is.function(what)) {
args$.fun <- what
what <- ".fun"
} else {
what <- paste0("`", as_one_character(what), "`")
if (!is.null(pkg)) {
what <- paste0(as_one_character(pkg), "::", what)
}
}
call <- paste0(what, "(", call, ")")
eval2(call, envir = args, enclos = parent.frame())
}
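# Illustrative sketch (not part of the original file): both function objects
# and (optionally namespaced) function names are supported.
if (FALSE) {
  do_call(sum, list(1:3))                                   # 6
  do_call("paste0", list("a", "b"))                         # "ab"
  do_call("packageVersion", list("stats"), pkg = "utils")
}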
empty_data_frame <- function() {
as.data.frame(matrix(nrow = 0, ncol = 0))
}
# replace elements in x with elements in value
# @param x named list-like object
# @param value another named list-like object
# @param dont_replace names of elements that cannot be replaced
'replace_args<-' <- function(x, dont_replace = NULL, value) {
value_name <- deparse_combine(substitute(value), max_char = 100L)
value <- as.list(value)
if (length(value) && is.null(names(value))) {
stop2("Argument '", value_name, "' must be named.")
}
invalid <- names(value)[names(value) %in% dont_replace]
if (length(invalid)) {
invalid <- collapse_comma(invalid)
stop2("Argument(s) ", invalid, " cannot be replaced.")
}
x[names(value)] <- value
x
}
# deparse 'x' if it is not a string
deparse_no_string <- function(x) {
if (!is.character(x)) {
x <- deparse(x)
}
x
}
# combine deparse lines into one string
deparse_combine <- function(x, max_char = NULL) {
out <- collapse(deparse(x))
if (isTRUE(max_char > 0)) {
out <- substr(out, 1L, max_char)
}
out
}
# like 'eval' but parses characters before evaluation
eval2 <- function(expr, envir = parent.frame(), ...) {
if (is.character(expr)) {
expr <- parse(text = expr)
}
eval(expr, envir, ...)
}
# evaluate an expression without printing output or messages
# @param expr expression to be evaluated
# @param type type of output to be suppressed (see ?sink)
# @param try wrap evaluation of expr in 'try' and do not
#   suppress outputs if evaluation fails?
# @param silent actually evaluate silently?
eval_silent <- function(expr, type = "output", try = FALSE,
silent = TRUE, ...) {
try <- as_one_logical(try)
silent <- as_one_logical(silent)
type <- match.arg(type, c("output", "message"))
expr <- substitute(expr)
envir <- parent.frame()
if (silent) {
if (try && type == "message") {
try_out <- try(utils::capture.output(
out <- eval(expr, envir), type = type, ...
))
if (is(try_out, "try-error")) {
# try again without suppressing error messages
out <- eval(expr, envir)
}
} else {
utils::capture.output(out <- eval(expr, envir), type = type, ...)
}
} else {
out <- eval(expr, envir)
}
out
}
# find the name that 'x' had in a specific environment
substitute_name <- function(x, envir = parent.frame(), nchar = 50) {
out <- substitute(x)
out <- eval2(paste0("substitute(", out, ")"), envir = envir)
substr(collapse(deparse(out)), 1, nchar)
}
# recursive sorting of dependencies
# @param x named list of dependencies per element
# @param sorted already sorted element names
# @return a vector of sorted element names
sort_dependencies <- function(x, sorted = NULL) {
if (!length(x)) {
return(NULL)
}
if (length(names(x)) != length(x)) {
stop2("Argument 'x' must be named.")
}
take <- !ulapply(x, function(dep) any(!dep %in% sorted))
new <- setdiff(names(x)[take], sorted)
out <- union(sorted, new)
if (length(new)) {
out <- union(out, sort_dependencies(x, sorted = out))
} else if (!all(names(x) %in% out)) {
stop2("Cannot handle circular dependency structures.")
}
out
}
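# Illustrative example (hypothetical dependency structure): elements without
# unmet dependencies are placed first.
if (FALSE) {
  deps <- list(b = "a", a = character(0), c = c("a", "b"))
  sort_dependencies(deps)   # "a" "b" "c"
}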
stop2 <- function(...) {
stop(..., call. = FALSE)
}
warning2 <- function(...) {
warning(..., call. = FALSE)
}
# get first occurrence of 'x' in '...' objects
# @param x The name of the required element
# @param ... named R objects that may contain 'x'
get_arg <- function(x, ...) {
dots <- list(...)
i <- 1
out <- NULL
while (i <= length(dots) && is.null(out)) {
if (!is.null(dots[[i]][[x]])) {
out <- dots[[i]][[x]]
} else {
i <- i + 1
}
}
out
}
SW <- function(expr) {
base::suppressWarnings(expr)
}
# get pattern matches in text as vector
# @param simplify return an atomic vector of matches?
# @param first only return the first match in each string?
# @return character vector containing matches
get_matches <- function(pattern, text, simplify = TRUE,
first = FALSE, ...) {
x <- regmatches(text, gregexpr(pattern, text, ...))
if (first) {
x <- lapply(x, function(t) if (length(t)) t[1] else t)
}
if (simplify) {
if (first) {
x <- lapply(x, function(t) if (length(t)) t else "")
}
x <- unlist(x)
}
x
}
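# Illustrative example (not from the original source):
if (FALSE) {
  get_matches("[[:digit:]]+", "a1b22")                 # "1"  "22"
  get_matches("[[:digit:]]+", "a1b22", first = TRUE)   # "1"
}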
# find matches in the parse tree of an expression
# @param pattern pattern to be matched
# @param expr expression to be searched in
# @return character vector containing matches
get_matches_expr <- function(pattern, expr, ...) {
if (is.character(expr)) {
expr <- parse(text = expr)
}
out <- NULL
for (i in seq_along(expr)) {
sexpr <- try(expr[[i]], silent = TRUE)
if (!is(sexpr, "try-error")) {
sexpr_char <- deparse_combine(sexpr)
out <- c(out, get_matches(pattern, sexpr_char, ...))
}
if (is.call(sexpr) || is.expression(sexpr)) {
out <- c(out, get_matches_expr(pattern, sexpr, ...))
}
}
unique(out)
}
# like 'grepl' but handles (parse trees of) expressions
grepl_expr <- function(pattern, expr, ...) {
as.logical(ulapply(expr, function(e)
length(get_matches_expr(pattern, e, ...)) > 0L))
}
# combine character vectors into a joint regular 'or' expression
# @param x a character vector
# @param escape escape all special characters in 'x'?
regex_or <- function(x, escape = FALSE) {
if (escape) {
x <- escape_all(x)
}
paste0("(", paste0("(", x, ")", collapse = "|"), ")")
}
# escape dots in character strings
escape_dot <- function(x) {
gsub(".", "\\.", x, fixed = TRUE)
}
# escape all special characters in character strings
escape_all <- function(x) {
specials <- c(".", "*", "+", "?", "^", "$", "(", ")", "[", "]", "|")
for (s in specials) {
x <- gsub(s, paste0("\\", s), x, fixed = TRUE)
}
x
}
# add an underscore to non-empty character strings
# @param x a character vector
# @param pos position of the underscore
usc <- function(x, pos = c("prefix", "suffix")) {
pos <- match.arg(pos)
x <- as.character(x)
if (!length(x)) x <- ""
if (pos == "prefix") {
x <- ifelse(nzchar(x), paste0("_", x), "")
} else {
x <- ifelse(nzchar(x), paste0(x, "_"), "")
}
x
}
# round using the largest remainder method
round_largest_remainder <- function(x) {
x <- as.numeric(x)
total <- round(sum(x))
out <- floor(x)
diff <- x - out
J <- order(diff, decreasing = TRUE)
I <- seq_len(total - floor(sum(out)))
out[J[I]] <- out[J[I]] + 1
out
}
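# Illustrative example (not in the original file): unlike round(), the rounded
# values preserve the (rounded) total of the input.
if (FALSE) {
  round_largest_remainder(c(1.4, 1.4, 1.2))  # 2 1 1 (sums to 4)
  round(c(1.4, 1.4, 1.2))                    # 1 1 1 (sums to 3)
}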
# add leading and trailing whitespaces
# @param x object accepted by paste
# @param nsp number of whitespaces to add
wsp <- function(x = "", nsp = 1) {
sp <- collapse(rep(" ", nsp))
if (length(x)) {
out <- ifelse(nzchar(x), paste0(sp, x, sp), sp)
} else {
out <- NULL
}
out
}
# remove whitespaces from strings
rm_wsp <- function(x) {
gsub("[ \t\r\n]+", "", x, perl = TRUE)
}
# limit the number of characters of a vector
# @param x a character vector
# @param chars maximum number of characters to show
# @param lsuffix number of characters to keep at the end of the strings
# @return possibly truncated character vector
limit_chars <- function(x, chars = NULL, lsuffix = 4) {
stopifnot(is.character(x))
if (!is.null(chars)) {
chars_x <- nchar(x) - lsuffix
suffix <- substr(x, chars_x + 1, chars_x + lsuffix)
x <- substr(x, 1, chars_x)
x <- ifelse(chars_x <= chars, x, paste0(substr(x, 1, chars - 3), "..."))
x <- paste0(x, suffix)
}
x
}
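# Illustrative example (not part of the original file): the last 'lsuffix'
# characters are preserved while the middle is truncated.
if (FALSE) {
  limit_chars("abcdefghijkl", chars = 5)   # "ab...ijkl"
  limit_chars("abcdefghijkl")              # unchanged when 'chars' is NULL
}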
# ensure that deprecated arguments still work
# @param arg input to the new argument
# @param alias input to the deprecated argument
# @param default the default value of alias
# @param warn should a warning be printed if alias is specified?
use_alias <- function(arg, alias = NULL, default = NULL, warn = TRUE) {
arg_name <- Reduce(paste, deparse(substitute(arg)))
alias_name <- Reduce(paste, deparse(substitute(alias)))
if (!is_equal(alias, default)) {
arg <- alias
if (grepl("^dots\\$", alias_name)) {
alias_name <- gsub("^dots\\$", "", alias_name)
} else if (grepl("^dots\\[\\[", alias_name)) {
alias_name <- gsub("^dots\\[\\[\"|\"\\]\\]$", "", alias_name)
}
if (warn) {
warning2("Argument '", alias_name, "' is deprecated. ",
"Please use argument '", arg_name, "' instead.")
}
}
arg
}
warn_deprecated <- function(new, old = as.character(sys.call(sys.parent()))[1]) {
msg <- paste0("Function '", old, "' is deprecated.")
if (!missing(new)) {
msg <- paste0(msg, " Please use '", new, "' instead.")
}
warning2(msg)
invisible(NULL)
}
viridis6 <- function() {
c("#440154", "#414487", "#2A788E", "#22A884", "#7AD151", "#FDE725")
}
expect_match2 <- function(object, regexp, ..., all = TRUE) {
testthat::expect_match(object, regexp, fixed = TRUE, ..., all = all)
}
# startup messages for brms
.onAttach <- function(libname, pkgname) {
version <- utils::packageVersion("brms")
packageStartupMessage(
"Loading 'brms' package (version ", version, "). Useful instructions\n",
"can be found by typing help('brms'). A more detailed introduction\n",
"to the package is available through vignette('brms_overview')."
)
invisible(NULL)
}
.onLoad <- function(libname, pkgname) {
backports::import(pkgname)
}
|
/brms_simulator/misc.R
|
no_license
|
humanfactors/shiny-rt
|
R
| false | false | 25,861 |
r
|
# type-stable indexing of vector and matrix type objects
# @param x an R object typically a vector or matrix
# @param i optional index; if NULL, x is returned unchanged
# @param row indicating if rows or cols should be indexed
# only relevant if x has two or three dimensions
p <- function(x, i = NULL, row = TRUE) {
if (isTRUE(length(dim(x)) > 3L)) {
stop2("'p' can only handle objects up to 3 dimensions.")
}
if (!length(i)) {
out <- x
} else if (length(dim(x)) == 2L) {
if (row) {
out <- x[i, , drop = FALSE]
} else {
out <- x[, i, drop = FALSE]
}
} else if (length(dim(x)) == 3L) {
if (row) {
out <- x[i, , , drop = FALSE]
} else {
out <- x[, i, , drop = FALSE]
}
} else {
out <- x[i]
}
out
}
# extract parts of an object with selective dropping of dimensions
# @param x,...,drop same as in x[..., drop]
# @param drop_dim optional numeric or logical vector controlling
#   which dimensions to drop; overrides argument 'drop'
extract <- function(x, ..., drop = FALSE, drop_dim = NULL) {
if (!length(dim(x))) {
return(x[...])
}
if (length(drop_dim)) {
drop <- FALSE
} else {
drop <- as_one_logical(drop)
}
out <- x[..., drop = drop]
if (drop || !length(drop_dim) || any(dim(out) == 0L)) {
return(out)
}
if (is.numeric(drop_dim)) {
drop_dim <- seq_along(dim(x)) %in% drop_dim
}
if (!is.logical(drop_dim)) {
stop2("'drop_dim' needs to be logical or numeric.")
}
keep <- dim(out) > 1L | !drop_dim
new_dim <- dim(out)[keep]
if (length(new_dim) == 1L) {
# use vectors instead of 1D arrays
new_dim <- NULL
}
dim(out) <- new_dim
out
}
# safely extract columns without dropping other dimensions
# @param x an array
# @param i column index
extract_col <- function(x, i) {
ldim <- length(dim(x))
if (ldim < 2L) {
return(x)
}
commas <- collapse(rep(", ", ldim - 2))
expr <- paste0("extract(x, , i", commas, ", drop_dim = 2)")
eval2(expr)
}
seq_rows <- function(x) {
seq_len(NROW(x))
}
seq_cols <- function(x) {
seq_len(NCOL(x))
}
seq_dim <- function(x, dim) {
dim <- as_one_numeric(dim)
if (dim == 1) {
len <- NROW(x)
} else if (dim == 2) {
len <- NCOL(x)
} else {
len <- dim(x)[dim]
}
if (length(len) == 1L && !isNA(len)) {
out <- seq_len(len)
} else {
out <- integer(0)
}
out
}
# match rows in x with rows in y
match_rows <- function(x, y, ...) {
x <- as.data.frame(x)
y <- as.data.frame(y)
x <- do_call("paste", c(x, sep = "\r"))
y <- do_call("paste", c(y, sep = "\r"))
match(x, y, ...)
}
# find elements of 'x' matching sub-elements passed via 'ls' and '...'
find_elements <- function(x, ..., ls = list(), fun = '%in%') {
x <- as.list(x)
if (!length(x)) {
return(logical(0))
}
out <- rep(TRUE, length(x))
ls <- c(ls, list(...))
if (!length(ls)) {
return(out)
}
if (is.null(names(ls))) {
stop("Argument 'ls' must be named.")
}
for (name in names(ls)) {
tmp <- lapply(x, "[[", name)
out <- out & do_call(fun, list(tmp, ls[[name]]))
}
out
}
# find rows of 'x' matching columns passed via 'ls' and '...'
# similar to 'find_elements' but for matrix like objects
find_rows <- function(x, ..., ls = list(), fun = '%in%') {
x <- as.data.frame(x)
if (!nrow(x)) {
return(logical(0))
}
out <- rep(TRUE, nrow(x))
ls <- c(ls, list(...))
if (!length(ls)) {
return(out)
}
if (is.null(names(ls))) {
stop("Argument 'ls' must be named.")
}
for (name in names(ls)) {
out <- out & do_call(fun, list(x[[name]], ls[[name]]))
}
out
}
# subset 'x' using arguments passed via 'ls' and '...'
subset2 <- function(x, ..., ls = list(), fun = '%in%') {
x[find_rows(x, ..., ls = ls, fun = fun), , drop = FALSE]
}
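# Illustrative example (hypothetical data): keep rows whose columns match the
# supplied values.
if (FALSE) {
  df <- data.frame(g = c("a", "b", "a"), v = 1:3)
  find_rows(df, g = "a")   # TRUE FALSE TRUE
  subset2(df, g = "a")     # rows 1 and 3
}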
# convert array to list of elements with reduced dimension
# @param x an array of dimension d
# @return a list of arrays of dimension d-1
array2list <- function(x) {
if (is.null(dim(x))) {
return(as.list(x))
}
ndim <- length(dim(x))
out <- list(length = dim(x)[ndim])
ind <- collapse(rep(",", ndim - 1))
for (i in seq_len(dim(x)[ndim])) {
out[[i]] <- eval(parse(text = paste0("x[", ind, i, "]")))
if (length(dim(x)) > 2) {
# avoid accidental dropping of other dimensions
dim(out[[i]]) <- dim(x)[-ndim]
}
}
names(out) <- dimnames(x)[[ndim]]
out
}
# move elements to the start of a named object
move2start <- function(x, first) {
x[c(first, setdiff(names(x), first))]
}
# wrapper around replicate but without simplifying
repl <- function(expr, n) {
replicate(n, expr, simplify = FALSE)
}
# find the first element in A that is greater than target
# @param A a matrix
# @param target a vector of length nrow(A)
# @param i column of A being checked first
# @return a vector of the same length as target containing the
# column ids where A[,i] was first greater than target
first_greater <- function(A, target, i = 1) {
ifelse(target <= A[, i] | ncol(A) == i, i, first_greater(A, target, i + 1))
}
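# Illustrative example (hypothetical inputs): per row of A, return the first
# column whose value is >= the corresponding target (or the last column).
if (FALSE) {
  A <- rbind(c(1, 3, 5), c(2, 4, 6))
  first_greater(A, target = c(2.5, 6))  # 2 3
}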
# check if an object is NULL
isNULL <- function(x) {
is.null(x) || ifelse(is.vector(x), all(sapply(x, is.null)), FALSE)
}
# recursively removes NULL entries from an object
rmNULL <- function(x, recursive = TRUE) {
x <- Filter(Negate(isNULL), x)
if (recursive) {
x <- lapply(x, function(x) if (is.list(x)) rmNULL(x) else x)
}
x
}
# find the first argument that is not NULL
first_not_null <- function(...) {
dots <- list(...)
out <- NULL
i <- 1L
while (isNULL(out) && i <= length(dots)) {
if (!isNULL(dots[[i]])) {
out <- dots[[i]]
}
i <- i + 1L
}
out
}
isNA <- function(x) {
length(x) == 1L && is.na(x)
}
is_equal <- function(x, y, ...) {
isTRUE(all.equal(x, y, ...))
}
# check if 'x' will behave like a factor in design matrices
is_like_factor <- function(x) {
is.factor(x) || is.character(x) || is.logical(x)
}
# as.factor but allows levels to be passed explicitly
as_factor <- function(x, levels = NULL) {
if (is.null(levels)) {
out <- as.factor(x)
} else {
out <- factor(x, levels = levels)
}
out
}
# coerce 'x' to a single logical value
as_one_logical <- function(x, allow_na = FALSE) {
s <- substitute(x)
x <- as.logical(x)
if (length(x) != 1L || anyNA(x) && !allow_na) {
s <- deparse_combine(s, max_char = 100L)
stop2("Cannot coerce ", s, " to a single logical value.")
}
x
}
# coerce 'x' to a single numeric value
as_one_numeric <- function(x, allow_na = FALSE) {
s <- substitute(x)
x <- SW(as.numeric(x))
if (length(x) != 1L || anyNA(x) && !allow_na) {
s <- deparse_combine(s, max_char = 100L)
stop2("Cannot coerce ", s, " to a single numeric value.")
}
x
}
# coerce 'x' to a single character string
as_one_character <- function(x, allow_na = FALSE) {
s <- substitute(x)
x <- as.character(x)
if (length(x) != 1L || anyNA(x) && !allow_na) {
s <- deparse_combine(s, max_char = 100L)
stop2("Cannot coerce ", s, " to a single character value.")
}
x
}
has_rows <- function(x) {
isTRUE(nrow(x) > 0L)
}
has_cols <- function(x) {
isTRUE(ncol(x) > 0L)
}
# expand arguments to the same length
# @param ... arguments to expand
# @param length optional expansion length
# otherwise taken to be the largest supplied length
# @return a data.frame with one variable per element in '...'
expand <- function(..., dots = list(), length = NULL) {
dots <- c(dots, list(...))
max_dim <- NULL
if (is.null(length)) {
lengths <- lengths(dots)
length <- max(lengths)
max_dim <- dim(dots[[match(length, lengths)]])
}
out <- as.data.frame(lapply(dots, rep, length.out = length))
structure(out, max_dim = max_dim)
}
# structure but ignore NULL
structure_not_null <- function(.Data, ...) {
if (!is.null(.Data)) {
.Data <- structure(.Data, ...)
}
.Data
}
# remove specified attributes
rm_attr <- function(x, attr) {
attributes(x)[attr] <- NULL
x
}
# unidimensional subsetting while keeping attributes
subset_keep_attr <- function(x, y) {
att <- attributes(x)
x <- x[y]
att$names <- names(x)
attributes(x) <- att
x
}
# check if 'x' is a whole number (integer)
is_wholenumber <- function(x, tol = .Machine$double.eps) {
if (is.numeric(x)) {
out <- abs(x - round(x)) < tol
} else {
out <- rep(FALSE, length(x))
}
dim(out) <- dim(x)
out
}
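# Illustrative example (not from the original source): non-numeric input yields
# FALSE rather than an error.
if (FALSE) {
  is_wholenumber(c(1, 1.5, 2 + 1e-10))  # TRUE FALSE FALSE
  is_wholenumber("1")                   # FALSE
}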
# helper function to check symmetry of a matrix
is_symmetric <- function(x, tol = sqrt(.Machine$double.eps)) {
isSymmetric(x, tol = tol, check.attributes = FALSE)
}
# unlist lapply output
ulapply <- function(X, FUN, ..., recursive = TRUE, use.names = TRUE) {
unlist(lapply(X, FUN, ...), recursive, use.names)
}
# rbind lapply output
rblapply <- function(X, FUN, ...) {
do_call(rbind, lapply(X, FUN, ...))
}
# cbind lapply output
cblapply <- function(X, FUN, ...) {
do_call(cbind, lapply(X, FUN, ...))
}
# find variables in a character string or expression
all_vars <- function(expr, ...) {
if (is.character(expr)) {
expr <- parse(text = expr)
}
all.vars(expr, ...)
}
# append list(...) to x
lc <- function(x, ...) {
dots <- rmNULL(list(...), recursive = FALSE)
c(x, dots)
}
'c<-' <- function(x, value) {
c(x, value)
}
'lc<-' <- function(x, value) {
lc(x, value)
}
collapse <- function(..., sep = "") {
paste(..., sep = sep, collapse = "")
}
collapse_comma <- function(...) {
paste0("'", ..., "'", collapse = ", ")
}
# add characters to an existing string
'str_add<-' <- function(x, start = FALSE, value) {
if (start) paste0(value, x) else paste0(x, value)
}
# add list of characters to an existing list
'str_add_list<-' <- function(x, start = FALSE, value) {
stopifnot(is.list(x), is.list(value))
out <- if (start) list(value, x) else list(x, value)
collapse_lists(ls = out)
}
# type-stable if clause for strings with default else output
str_if <- function(cond, yes, no = "") {
cond <- as_one_logical(cond)
if (cond) as.character(yes) else as.character(no)
}
# select elements which match a regex pattern
str_subset <- function(x, pattern, ...) {
x[grepl(pattern, x, ...)]
}
# similar to glue::glue but specialized for generating Stan code
glue <- function(..., sep = "", collapse = NULL, envir = parent.frame(),
open = "{", close = "}", na = "NA") {
dots <- list(...)
dots <- dots[lengths(dots) > 0L]
args <- list(
.x = NULL, .sep = sep, .envir = envir, .open = open,
.close = close, .na = na, .trim = FALSE,
.transformer = zero_length_transformer
)
out <- do_call(glue::glue_data, c(dots, args))
if (!is.null(collapse)) {
collapse <- as_one_character(collapse)
out <- paste0(out, collapse = collapse)
}
out
}
# used in 'glue' to handle zero-length inputs
zero_length_transformer <- function(text, envir) {
out <- glue::identity_transformer(text, envir)
if (!length(out)) {
out <- ""
}
out
}
# collapse strings evaluated with glue
cglue <- function(..., envir = parent.frame()) {
glue(..., envir = envir, collapse = "")
}
# like stats:::na.omit.data.frame but allows selected variables to be ignored
# keeps NAs in variables with attribute keep_na = TRUE
na.omit2 <- function(object, ...) {
stopifnot(is.data.frame(object))
omit <- logical(nrow(object))
for (j in seq_along(object)) {
x <- object[[j]]
keep_na <- isTRUE(attr(x, "keep_na", TRUE))
if (!is.atomic(x) || keep_na) {
next
}
x <- is.na(x)
d <- dim(x)
if (is.null(d) || length(d) != 2L) {
omit <- omit | x
} else {
for (ii in seq_len(d[2L])) {
omit <- omit | x[, ii]
}
}
}
if (any(omit > 0L)) {
out <- object[!omit, , drop = FALSE]
temp <- setNames(seq(omit)[omit], attr(object, "row.names")[omit])
attr(temp, "class") <- "omit"
attr(out, "na.action") <- temp
warning2("Rows containing NAs were excluded from the model.")
} else {
out <- object
}
out
}
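# Illustrative example (hypothetical data): NAs in a variable flagged with
# attribute 'keep_na' do not lead to row removal.
if (FALSE) {
  df <- data.frame(x = c(1, NA), y = c(NA, 2))
  attr(df$y, "keep_na") <- TRUE
  na.omit2(df)  # drops row 2 (NA in x), keeps row 1 despite the NA in y
}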
# check if a certain package is installed
# @param package package name
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/DSE10YR.R
\name{VAR}
\alias{VAR}
\title{Value at Risk of company}
\usage{
VAR(return5, W, P, mu1, sd1)
}
\arguments{
\item{return5}{The vector containing the daily returns of the company in the year(s)}
\item{W}{The amount invested}
\item{P}{The confidence level}
\item{mu1}{The mean return on the company's stock}
\item{sd1}{The standard deviation of the company's stock}
}
\value{
The total amount risked at that confidence interval on the specified sum invested
}
\description{
This function returns the Value at Risk of the company.
}
\details{
This function takes the daily returns, mean and standard deviation and returns the total monetary unit amount at risk
of loss on the specified sum invested.
}
\author{
Syed M. Fuad
}
\seealso{
\code{qnorm}
}
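\examples{
## Illustrative sketch only (hypothetical inputs); requires the package
## providing VAR():
## ret <- rnorm(250, 0, 0.02)
## VAR(ret, W = 10000, P = 0.95, mu1 = mean(ret), sd1 = sd(ret))
}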
|
/VAR.Rd
|
no_license
|
ebna/DSE10YR
|
R
| false | false | 851 |
rd
|
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/sensitivity.analysis.R
\name{get.coef.var}
\alias{get.coef.var}
\title{Get coefficient of variance}
\usage{
get.coef.var(set)
}
\arguments{
\item{set}{numeric vector of trait values}
}
\value{
coefficient of variance
}
\description{
Given a set of numbers (a numeric vector), this returns the set's coefficient of variance.
}
|
/modules/uncertainty/man/get.coef.var.Rd
|
permissive
|
davidjpmoore/pecan
|
R
| false | false | 412 |
rd
|
|
# Function to simulate SMART design II with survival outcomes
simSMART = function (n, tbetas, pi.x, pi.z1.R, pi.z1.NR, pi.z2.R, pi.z2.NR,
Tcheck, bshape1, bscale1, bshape2, bscale2, adt, decimal)
{
b.theta <- matrix(tbetas[(1):(np.t)],ncol=1)
b.gamma <- matrix(tbetas[(np.t+1):(np.t+np.g)],ncol=1)
b.eta <- matrix(tbetas[(np.t+np.g+1):(np.t+np.g+np.e)],ncol=1)
b.mu <- matrix(tbetas[(np.t+np.g+np.e+1):(np.t+np.g+np.e+np.m)],ncol=1)
V <- rbinom(n, 1, 0.75)#rnorm(n,2,1)
X <- rbinom(n, 1, pi.x)
dat=cbind('1'=1, V,X)
thetaZs = as.matrix(subset(dat,select=thetaZ))
gammaZs = as.matrix(subset(dat,select=gammaZ))
#Simulate Tr, time to response
theta=exp(thetaZs%*%b.theta)
u <- runif(n)
  Tr <- (-log(u)/(theta))^(1/bshape1)*bscale1 # time to response
#Death time before response or check-up time
gamma <- exp(gammaZs%*%b.gamma)
v <- runif(n)
Td <- (-log(v)/(gamma))^(1/bshape2)*bscale2
#Response status
minT = pmin(Tr, Td, Tcheck)
R = 1*(Tr==minT)
NR = 1*(Tcheck==minT)
D = 1*(Td==minT) # death before response/no response (By YC)
#Second treatment
Zr <- Znr <- rep(NA, n) #NA for subjects that die before response OR no-response by time of check-up
Zr[which(X==0 & R == 1)] <- rbinom(length(which(X==0 & R == 1)), 1, pi.z1.R)
Zr[which(X==1 & R == 1)] <- rbinom(length(which(X==1 & R == 1)), 1, pi.z2.R)
Znr[which(X==0 & NR == 1)] <- rbinom(length(which(X==0 & NR == 1)), 1, pi.z1.NR)
Znr[which(X==1 & NR == 1)] <- rbinom(length(which(X==1 & NR == 1)), 1, pi.z2.NR)
#Z[which(R==0)]=0
X1Zr1=1*(Zr==1 & X==1)
X1Zr0=1*(Zr==0 & X==1)
X1Zr1V1=1*(Zr==1 & X==1 & V==1)
#XZnr=Znr*X
dat = cbind(dat,Zr,Znr,X1Zr1,X1Zr0, X1Zr1V1)
#Simulate new Td for subject survive after response/check-up time
etaZs = as.matrix(subset(dat,select=etaZ))
muZs = as.matrix(subset(dat,select=muZ))
eta <- gamma*exp(etaZs%*%b.eta)
if(ncol(muZs)==0) {mu <- gamma}else{mu=gamma*exp(muZs%*%b.mu)} # gamma
Td.after.response <- ((-log(v)-(gamma-eta)*(Tr/bscale2)^bshape2)/(eta))^(1/bshape2)*bscale2
#Tr[R==1] < Td.after.response)[R==1] #check to make sure responded subject's death time is after response time
Td.after.noresponse <- ((-log(v)-(gamma-mu)*(Tcheck/bscale2)^bshape2)/(mu))^(1/bshape2)*bscale2
#Tcheck < Td.after.noresponse[NR==1] #check to make sure non-responded subject's death time is after check-up time
Td[which(R == 1)] <- Td.after.response[which(R == 1)]
Td[which(NR == 1)] <- Td.after.noresponse[which(NR == 1)]
#cbind(minT,Td,R,NR,D)
#Simulate censoring time
tc = runif(n,adt[1],adt[2])
T1 = pmin(minT,tc) # see whether censoring time is earlier than response/non-response/death time
R = R*(minT <= tc)
NR = NR*(minT <= tc)
D = D*(minT <= tc)
T2 = pmin(Td, tc) # see whether censoring time is earlier than death time
C2 = (Td <= tc)
simudata = data.frame(cbind(dat,T1, R, NR, D, T2, C2))
names(simudata)=c(colnames(dat),'T1','R','NR','D','T2','C2')
row.names(simudata) <- NULL
ceiling_dec <- function(x, level) round(x + 5*10^(-level-1), level)
simudata$T1 <- ceiling_dec(simudata$T1, decimal)
simudata$T2 <- ceiling_dec(simudata$T2, decimal)
simudata
}
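# Illustrative call sketch (not part of the original script). simSMART() relies
# on objects defined elsewhere (np.t, np.g, np.e, np.m and the covariate-name
# vectors thetaZ, gammaZ, etaZ, muZ); all values below are hypothetical.
if (FALSE) {
  np.t <- 3; np.g <- 3; np.e <- 2; np.m <- 1
  thetaZ <- c("1", "V", "X"); gammaZ <- c("1", "V", "X")
  etaZ <- c("X1Zr1", "X1Zr0"); muZ <- c("Znr")
  simdat <- simSMART(n = 500, tbetas = rep(0.1, np.t + np.g + np.e + np.m),
                     pi.x = 0.5, pi.z1.R = 0.5, pi.z1.NR = 0.5,
                     pi.z2.R = 0.5, pi.z2.NR = 0.5, Tcheck = 1,
                     bshape1 = 1, bscale1 = 1, bshape2 = 1, bscale2 = 1,
                     adt = c(2, 4), decimal = 2)
  head(simdat)
}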
|
/simSMART.R
|
no_license
|
ycchao/code_Joint_model_SMART
|
R
| false | false | 3,214 |
r
|
|
model
{
for (i in 1:n)
{
# Linear predictors for individual traits
height[i] <- ter[1,1,species[i]] +
ter[1,2,species[i]]*NH4[i] + ter[1,3,species[i]]*P[i] +
ter[1,4,species[i]]*K[i] + ter[1,5,species[i]]*SALINITY[i] +
ter[1,6,species[i]]*SILT[i] + ter[1,7,species[i]]*PH[i] +
ter[1,8,species[i]]*URP[i] + ter[1,9,species[i]]*HH[i]
sla[i] <- ter[2,1,species[i]] +
ter[2,2,species[i]]*NH4[i] + ter[2,3,species[i]]*P[i] +
ter[2,4,species[i]]*K[i] + ter[2,5,species[i]]*SALINITY[i] +
ter[2,6,species[i]]*SILT[i] + ter[2,7,species[i]]*PH[i] +
ter[2,8,species[i]]*URP[i] + ter[2,9,species[i]]*HH[i]
wd[i] <- ter[3,1,species[i]] +
ter[3,2,species[i]]*NH4[i] + ter[3,3,species[i]]*P[i] +
ter[3,4,species[i]]*K[i] + ter[3,5,species[i]]*SALINITY[i] +
ter[3,6,species[i]]*SILT[i] + ter[3,7,species[i]]*PH[i] +
ter[3,8,species[i]]*URP[i] + ter[3,9,species[i]]*HH[i]
sc[i] <- ter[4,1,species[i]] +
ter[4,2,species[i]]*NH4[i] + ter[4,3,species[i]]*P[i] +
ter[4,4,species[i]]*K[i] + ter[4,5,species[i]]*SALINITY[i] +
ter[4,6,species[i]]*SILT[i] + ter[4,7,species[i]]*PH[i] +
ter[4,8,species[i]]*URP[i] + ter[4,9,species[i]]*HH[i]
# Definition of the likelihood
mu[i,1]<-height[i]
mu[i,2]<-sla[i]
mu[i,3]<-wd[i]
mu[i,4]<-sc[i]
traits[i,]~dmnorm(mu[i,], omega[species[i],,])
}
# species-specific TER model
%s
# species-species TTR model
%s
# data # n, nt, ne, traits, NH4, P, K, SALINITY, SILT, PH, URP, HH, ns, species, ttrI
}
|
/base.ter.ttr.R
|
permissive
|
boydorr/trait-analysis
|
R
| false | false | 1,647 |
r
|
|
rm(list = ls())
setwd("E://project//urine//addnew//thrid//gender.pred.age.roc1")
library(randomForest)
library('pROC')
library('ggplot2')
prof <- as.data.frame(read.csv('file:///E:/project/urine/new/hclust-barplot/data/newL610000.txt',head=T,row.names = 1,sep='\t'))
map <- as.data.frame(read.csv('file:///E:/project/urine/new/hclust-barplot/data/map.age.txt',head=T,row.names = 1,sep='\t'))
prof <- as.data.frame(t(prof[,rownames(map)]))
prof.F <- as.data.frame(prof[map$Gender=="F",])
prof.M <- as.data.frame(prof[map$Gender=="M",])
map.F <- as.data.frame(map[map$Gender=="F",])
map.M <- as.data.frame(map[map$Gender=="M",])
prof.F$Group <- map.F$Age
#########
data <- prof.F
set.seed(1)
sub <- sample(1:nrow(data),round(nrow(data)*4/5))
train_data <- data[sub,-ncol(data)] # take 4/5 of the data as the training set
test_data <- data[-sub,-ncol(data)] # take 1/5 of the data as the test set
train_Group <- data[sub,ncol(data)]
test_Group <- data[-sub,ncol(data)]
colnames(train_data) <- make.names(colnames(train_data))
colnames(test_data) <- make.names(colnames(test_data))
set.seed(1)
ntree_fit <- randomForest(train_Group~.,data=train_data,ntree=10000)
pdf("train.ErrorRate.age.pdf")
plot(ntree_fit)
dev.off()
pdf("train.ImportanceFeatures.age.pdf")
varImpPlot(ntree_fit)
dev.off()
sig <- ntree_fit$importance
write.table(sig,file="sig.age.txt",sep="\t")
pred <- as.data.frame(predict(ntree_fit,test_data))
pred$real <- test_Group
write.table(pred,file="pred.age.txt",sep="\t")
fig1.cor <- cor.test(pred[,1],pred[,2],method = "spearman")
colnames(pred) <- c("predict","real")
fig1 <- ggplot(pred,aes(x=predict , y=real))+geom_point() +
annotate("text",label=paste("pvalue=",fig1.cor$p.value," ","r=",fig1.cor$estimate) ,x=65,y=40)+
geom_smooth(method = "glm")
pdf("pred.age.line.pdf",useDingbats=F)
fig1
dev.off()
|
/Figure_6.R
|
no_license
|
RChGO/UrinaryMicrobiota
|
R
| false | false | 1,883 |
r
|
#' Run a Python REPL
#'
#' This function provides a Python REPL in the \R session, which can be used
#' to interactively run Python code. All code executed within the REPL is
#' run within the Python main module, and any generated Python objects will
#' persist in the Python session after the REPL is detached.
#'
#' When working with R and Python scripts interactively, one can activate
#' the Python REPL with `repl_python()`, run Python code, and later run `exit`
#' to return to the \R console.
#'
#' @param module An (optional) Python module to be imported before
#' the REPL is launched.
#'
#' @param quiet Boolean; should the startup banner be suppressed when
#'   launching the REPL? If `TRUE`, the banner is not printed.
#'
#' @param input Python code to be run within the REPL. Setting this can be
#' useful if you'd like to drive the Python REPL programmatically.
#'
#' @seealso [py], for accessing objects created using the Python REPL.
#'
#' @section Magics: A handful of magics are supported in `repl_python()`:
#'
#' Lines prefixed with `!` are executed as system commands:
#' - `!cmd --arg1 --arg2`: Execute arbitrary system commands
#'
#' Magics start with a `%` prefix. Supported magics include:
#' - `%conda ...` executes a conda command in the active conda environment
#' - `%pip ...` executes pip for the active python.
#' - `%load`, `%loadpy`, `%run` executes a python file.
#' - `%system`, `!!` executes a system command and capture output
#' - `%env`: read current environment variables.
#' - `%env name`: read environment variable 'name'.
#' - `%env name=val`, `%env name val`: set environment variable 'name' to 'val'.
#' `val` elements in `{}` are interpolated using f-strings (required Python >= 3.6).
#' - `%cd <dir>` change working directory.
#' - `%cd -`: change to previous working directory (as set by `%cd`).
#' - `%cd -3`: change to 3rd most recent working directory (as set by `%cd`).
#' - `%cd -foo/bar`: change to most recent working directory matching `"foo/bar"` regex
#' (in history of directories set via `%cd`).
#' - `%pwd`: print current working directory.
#' - `%dhist`: print working directory history.
#'
#' Additionally, the output of system commands can be captured in a variable, e.g.:
#' - `x = !ls`
#'
#' where `x` will be a list of strings, consisting of
#' stdout output split in `"\n"` (stderr is not captured).
#'
#'
#' @section Example:
#' ````
#'
#' # enter the Python REPL, create a dictionary, and exit
#' repl_python()
#' dictionary = {'alpha': 1, 'beta': 2}
#' exit
#'
#' # access the created dictionary from R
#' py$dictionary
#' # $alpha
#' # [1] 1
#' #
#' # $beta
#' # [1] 2
#'
#' ````
#'
#' @importFrom utils packageVersion
#' @export
repl_python <- function(
module = NULL,
quiet = getOption("reticulate.repl.quiet", default = FALSE),
input = NULL)
{
# load module if requested
if (is.character(module))
import(module)
# run hooks for initialize, teardown
initialize <- getOption("reticulate.repl.initialize")
if (is.function(initialize)) {
initialize()
}
teardown <- getOption("reticulate.repl.teardown")
if (is.function(teardown)) {
on.exit(teardown(), add = TRUE)
}
# split provided code on newlines
if (!is.null(input))
input <- unlist(strsplit(input, "\n", fixed = TRUE))
# import other required modules for the REPL
sys <- import("sys", convert = TRUE)
codeop <- import("codeop", convert = TRUE)
# check to see if the current environment supports history
# (check for case where working directory not writable)
use_history <-
!"--vanilla" %in% commandArgs() &&
!"--no-save" %in% commandArgs() &&
!is.null(getwd()) &&
tryCatch(
{ utils::savehistory(tempfile()); TRUE },
error = function(e) FALSE
)
if (use_history) {
# if we have history, save and then restore the current
# R history
utils::savehistory()
on.exit(utils::loadhistory(), add = TRUE)
# file to be used for command history during session
histfile <- getOption("reticulate.repl.histfile")
if (is.null(histfile))
histfile <- file.path(tempdir(), ".reticulatehistory")
# load history (create empty file if none exists yet)
if (!file.exists(histfile))
file.create(histfile)
utils::loadhistory(histfile)
}
# buffer of pending console input (we don't evaluate code
# until the user has submitted a complete Python statement)
#
# we return an environment of functions bound in a local environment
# so that hook can manipulate the buffer if required
buffer <- stack(mode = "character")
# command compiler (used to check if we've received a complete piece
# of Python input)
compiler <- codeop$CommandCompiler()
  # record whether the user has requested a quit
quit_requested <- FALSE
# inform others that the reticulate REPL is active
.globals$py_repl_active <- TRUE
on.exit(.globals$py_repl_active <- FALSE, add = TRUE)
# handle errors produced during REPL actions
handle_error <- function(output) {
failed <- inherits(output, "error")
if (failed) {
error_message <- py_last_error()$message
if (identical(.Platform$GUI, "RStudio") &&
requireNamespace("cli", quietly = TRUE))
error_message <- make_filepaths_clickable(error_message)
message(error_message, appendLF = !endsWith(error_message, "\n"))
}
failed
}
handle_interrupt <- function(condition) {
# swallow interrupts -- don't allow interrupted Python code to
# exit the REPL; we should only exit when an interrupt is sent
# when no Python code is executing
}
repl <- function() {
# flush stdout, stderr on each REPL iteration
on.exit(py_flush_output(), add = TRUE)
# read input (either from user or from code)
prompt <- if (buffer$empty()) ">>> " else "... "
if (is.null(input)) {
contents <- readline(prompt = prompt)
} else if (length(input)) {
contents <- input[[1L]]
input <<- tail(input, n = -1L)
writeLines(paste(prompt, contents), con = stdout())
} else {
quit_requested <<- TRUE
return()
}
# NULL implies the user sent EOF -- time to leave
if (is.null(contents)) {
writeLines("exit", con = stdout())
quit_requested <<- TRUE
return()
}
# trim whitespace for handling of special commands
trimmed <- gsub("^\\s*|\\s*$", "", contents)
# run hook provided by front-end (in case special actions
# need to be taken in response to console input)
hook <- getOption("reticulate.repl.hook")
if (is.function(hook)) {
status <- tryCatch(hook(buffer, contents, trimmed), error = identity)
# report errors to the user
if (inherits(status, "error")) {
message(paste("Error:", conditionMessage(status)))
return()
}
# a TRUE return implies the hook handled this input
if (isTRUE(status))
return()
}
# run hook provided by front-end, to notify that we're now busy
hook <- getOption("reticulate.repl.busy")
if (is.function(hook)) {
# run once now to indicate we're about to run
status <- tryCatch(hook(TRUE), error = identity)
if (inherits(status, "error"))
warning(status)
# run again on exit to indicate we're done
on.exit({
status <- tryCatch(hook(FALSE), error = identity)
if (inherits(status, "error"))
warning(status)
}, add = TRUE)
}
# special handling for top-level commands (when buffer is empty)
if (buffer$empty()) {
# handle user requests to quit
if (trimmed %in% c("quit", "exit")) {
quit_requested <<- TRUE
return()
}
# special handling for help requests prefixed with '?'
if (regexpr("?", trimmed, fixed = TRUE) == 1) {
code <- sprintf("help(\"%s\")", substring(trimmed, 2))
py_run_string(code)
return()
}
# similar handling for help requests postfixed with '?'
if (grepl("(^[\\#].*)[?]\\s*$", trimmed, perl = TRUE)) {
replaced <- sub("[?]\\s*$", "", trimmed)
code <- sprintf("help(\"%s\")", replaced)
py_run_string(code)
return()
}
if (getOption("reticulate.repl.use_magics", TRUE)) {
# expand any "!!" as system commands that capture output
trimmed <- gsub("!!", "%system ", trimmed)
# user intends to capture output from system command in var
# e.g.: x = !ls
if (grepl("^[[:alnum:]_.]\\s*=\\s*!", trimmed))
trimmed <- sub("=\\s*!", "= %system ", trimmed)
# magic
if (grepl("^%", trimmed)) {
py$`_` <- .globals$py_last_value <- invoke_magic(trimmed)
return()
}
# system
if (grepl("^!", trimmed)) {
system(str_drop_prefix(trimmed, "!"))
return()
}
      # capture output from magic command in var
      # e.g.: x = %env USER
      if (grepl("^[[:alnum:]_.]+\\s*=\\s*%", trimmed)) {
s <- str_split1_on_first(trimmed, "\\s*=\\s*")
target <- s[[1]]
magic <- str_drop_prefix(s[2L], "%")
py$`_` <- .globals$py_last_value <- invoke_magic(magic)
py_run_string(sprintf("%s = _", target), local = FALSE, convert = FALSE)
return()
}
}
# if the user submitted a blank line at the top level,
# ignore it (note that we intentionally submit whitespace-only
# lines that might terminate a block)
if (!nzchar(trimmed))
return()
}
# update history file
if (use_history)
cat(contents, file = histfile, sep = "\n", append = TRUE)
# trim whitespace if the buffer is empty (this effectively allows leading
# whitespace in top-level Python commands)
if (buffer$empty()) contents <- trimmed
# update buffer
previous <- buffer$data()
buffer$push(contents)
# generate code to be sent to command interpreter
code <- paste(buffer$data(), collapse = "\n")
ready <- tryCatch(compiler(code), condition = identity)
# a NULL return implies that we can accept more input
if (is.null(ready))
return()
# on error, attempt to submit the previous buffer and then handle
# the newest line of code independently. this allows us to handle
# python constructs such as:
#
# def foo():
# return 42
# foo()
#
# try:
# print 1
# except:
# print 2
# print 3
#
# which would otherwise fail
if (length(previous) && inherits(ready, "error")) {
# submit previous code
pasted <- paste(previous, collapse = "\n")
tryCatch(
py_compile_eval(pasted, capture = FALSE),
error = handle_error,
interrupt = handle_interrupt
)
# now, handle the newest line of code submitted
buffer$set(contents)
code <- contents
ready <- tryCatch(compiler(code), condition = identity)
# a NULL return implies that we can accept more input
if (is.null(ready))
return()
}
# otherwise, we should have received a code output object
# so we can just run the code submitted thus far
buffer$clear()
tryCatch(
py_compile_eval(code, capture = FALSE),
error = handle_error,
interrupt = handle_interrupt
)
}
# notify the user we're entering the REPL (when requested)
if (!quiet) {
version <- paste(
sys$version_info$major,
sys$version_info$minor,
sys$version_info$micro,
sep = "."
)
# NOTE: we used to use sys.executable but that would report
# the R process rather than the Python process
config <- py_config()
executable <- config$python
fmt <- c(
"Python %s (%s)",
"Reticulate %s REPL -- A Python interpreter in R.",
"Enter 'exit' or 'quit' to exit the REPL and return to R."
)
msg <- sprintf(
paste(fmt, collapse = "\n"),
version,
executable,
utils::packageVersion("reticulate")
)
message(msg)
}
# enter the REPL loop
repeat {
if (quit_requested)
break
tryCatch(repl(), interrupt = identity)
}
}
# Check Whether the Python REPL is Active
#
# Check to see whether the Python REPL is active. This is primarily
# for use by R front-ends, which might want to toggle or affect
# the state of the Python REPL while it is running.
py_repl_active <- function() {
.globals$py_repl_active
}
invoke_magic <- function(command) {
stopifnot(is.character(command), length(command) == 1)
command <- str_drop_prefix(command, "%")
m <- str_split1_on_first(command, "\\s+")
cmd <- m[1]
args <- m[-1]
if (cmd == "pwd") {
if (length(args))
stop("%pwd magic takes no arguments, received: ", command)
dir <- getwd()
cat(dir, "\n")
return(invisible(dir))
}
# in IPython, this is stored in __main__._dh as a python list
# we avoid polluting `__main__` and also lazily create the history,
# also, this can only track changes made from `repl_python()`.
get_dhist <- function() {
dh <- .globals$magics_state$wd_history
if (is.null(dh)) {
.globals$magics_state <- new.env(parent = emptyenv())
dh <- import("collections")$deque(list(getwd()), 200L)
.globals$magics_state$wd_history <- dh
}
dh
}
if (cmd == "cd") {
hist <- get_dhist()
if (length(args) != 1)
stop("%cd magic takes 1 argument, received: ", command)
dir <- gsub("[\"']", "", args)
# strings auto complete as fs locations in RStudio IDE, so as a convenience
# we accept quoted file paths and unquote them here.
setwd2 <- function(dir) {
old_wd <- setwd(dir)
new_wd <- getwd()
cat(new_wd, "\n", sep = "")
hist$append(new_wd)
invisible(old_wd)
}
if (startsWith(args, "-")) {
if (args == "-") {
dir <- hist[-2L]
} else if (grepl("-[0-9]+$", args)) {
dir <- hist[as.integer(args)]
} else {
# partial matching by regex
hist <- import_builtins()$list(hist)
re <- str_drop_prefix(args, "-")
if (is_windows())
re <- gsub("[/]", "\\", re, fixed = TRUE)
dir <- grep(re, hist, perl = TRUE, value = TRUE)
if (!length(dir))
stop("No matching directory found in history for ", dQuote(re), ".",
"\nSee history with %dhist")
dir <- dir[[length(dir)]] # pick most recent match
}
# not implemented, -b bookmarks, -q quiet
} else
dir <- args
return(setwd2(dir))
}
if (cmd == "dhist") {
hist <- get_dhist()
hist <- import_builtins()$list(hist)
cat("Directory history:\n- ")
cat(hist, sep = "\n- ")
cat("\n")
return(invisible(hist))
}
if (cmd == "conda") {
info <- get_python_conda_info(py_exe())
return(conda_run2(cmd_line = paste("conda", args),
conda = info$conda,
envname = info$root))
}
if (cmd == "pip") {
if (is_conda_python(py_exe())) {
info <- get_python_conda_info(py_exe())
return(conda_run2(cmd_line = paste("pip", args),
conda = info$conda,
envname = info$root))
} else {
args <- shQuote(strsplit(args, "\\s+")[[1]])
system2(py_exe(), c("-m", "pip", args))
}
return()
}
if (cmd == "env") {
if (!length(args))
return(print(Sys.getenv()))
if (grepl("=|\\s", args)) # user setting var
args <- str_split1_on_first(args, "=|\\s+")
else {
print(val <- Sys.getenv(args))
return(val)
}
new_val <- args[[2]]
if (grepl("\\{.*\\}", new_val) && py_version() >= "3.6") {
#interpolate as f-strings
new_val <- py_eval(sprintf('f"%s"', new_val))
}
names(new_val) <- args[[1]]
do.call(Sys.setenv, as.list(new_val))
cat(sprintf("env: %s=%s\n", names(new_val), new_val))
return(invisible(new_val))
# not implemented: bash-style $var expansion
}
if (cmd %in% c("load", "loadpy", "run")) {
# only supports sourcing a python file in __main__
# not implemented:
# -r line ranges, -s specific symbols,
# reexecution of symbols from history,
# reexecution of namespace objects annotated by ipython shell with original source
# ipython extensions
file <- gsub("[\"']", "", args)
if (!file.exists(file))
stop("Python file not found: ", file)
py_run_file(file, local = FALSE, convert = FALSE)
return()
}
if (cmd %in% c("system", "sx")) {
if (is_windows())
return(shell(args, intern = TRUE))
else
return(as.list(system(args, intern = TRUE)))
}
stop("Magic not implemented: ", command)
}
#' IPython console
#'
#' Launch IPython console app.
#'
#' See https://ipython.readthedocs.io/ for features.
#'
#' @keywords internal
ipython <- function() {
ensure_python_initialized("IPython")
# set flag for frontend
.globals$py_repl_active <- TRUE
on.exit({
.globals$py_repl_active <- FALSE
}, add = TRUE)
# don't pollute R history w/ python commands
# (IPython keeps track of its own history)
use_history <-
!"--vanilla" %in% commandArgs() &&
!"--no-save" %in% commandArgs() &&
!is.null(getwd()) &&
tryCatch(
{ utils::savehistory(tempfile()); TRUE },
error = function(e) FALSE
)
if (use_history) {
# if we have history, save and then restore the current
# R history
utils::savehistory()
on.exit(utils::loadhistory(), add = TRUE)
}
# Not implemented,
# - custom startup banner
# RStudio IDE support for:
# - image display
# - composition of multi-line code blocks
import("rpytools.ipython")$start_ipython()
}
|
/R/repl.R
|
permissive
|
rstudio/reticulate
|
R
| false | false | 17,823 |
r
|
#' Run a Python REPL
#'
#' This function provides a Python REPL in the \R session, which can be used
#' to interactively run Python code. All code executed within the REPL is
#' run within the Python main module, and any generated Python objects will
#' persist in the Python session after the REPL is detached.
#'
#' When working with R and Python scripts interactively, one can activate
#' the Python REPL with `repl_python()`, run Python code, and later run `exit`
#' to return to the \R console.
#'
#' @param module An (optional) Python module to be imported before
#' the REPL is launched.
#'
#' @param quiet Boolean; print a startup banner when launching the REPL? If
#' `TRUE`, the banner will be suppressed.
#'
#' @param input Python code to be run within the REPL. Setting this can be
#' useful if you'd like to drive the Python REPL programmatically.
#'
#' @seealso [py], for accessing objects created using the Python REPL.
#'
#' @section Magics: A handful of magics are supported in `repl_python()`:
#'
#' Lines prefixed with `!` are executed as system commands:
#' - `!cmd --arg1 --arg2`: Execute arbitrary system commands
#'
#' Magics start with a `%` prefix. Supported magics include:
#' - `%conda ...` executes a conda command in the active conda environment
#' - `%pip ...` executes pip for the active python.
#' - `%load`, `%loadpy`, `%run` execute a python file.
#' - `%system`, `!!` executes a system command and captures its output
#' - `%env`: read current environment variables.
#' - `%env name`: read environment variable 'name'.
#' - `%env name=val`, `%env name val`: set environment variable 'name' to 'val'.
#' `val` elements in `{}` are interpolated using f-strings (requires Python >= 3.6).
#' - `%cd <dir>` change working directory.
#' - `%cd -`: change to previous working directory (as set by `%cd`).
#' - `%cd -3`: change to 3rd most recent working directory (as set by `%cd`).
#' - `%cd -foo/bar`: change to most recent working directory matching `"foo/bar"` regex
#' (in history of directories set via `%cd`).
#' - `%pwd`: print current working directory.
#' - `%dhist`: print working directory history.
#'
#' Additionally, the output of system commands can be captured in a variable, e.g.:
#' - `x = !ls`
#'
#' where `x` will be a list of strings, consisting of
#' stdout output split in `"\n"` (stderr is not captured).
#'
#'
#' @section Example:
#' ````
#'
#' # enter the Python REPL, create a dictionary, and exit
#' repl_python()
#' dictionary = {'alpha': 1, 'beta': 2}
#' exit
#'
#' # access the created dictionary from R
#' py$dictionary
#' # $alpha
#' # [1] 1
#' #
#' # $beta
#' # [1] 2
#'
#' ````
#'
#' @importFrom utils packageVersion
#' @export
repl_python <- function(
module = NULL,
quiet = getOption("reticulate.repl.quiet", default = FALSE),
input = NULL)
{
# load module if requested
if (is.character(module))
import(module)
# run hooks for initialize, teardown
initialize <- getOption("reticulate.repl.initialize")
if (is.function(initialize)) {
initialize()
}
teardown <- getOption("reticulate.repl.teardown")
if (is.function(teardown)) {
on.exit(teardown(), add = TRUE)
}
# split provided code on newlines
if (!is.null(input))
input <- unlist(strsplit(input, "\n", fixed = TRUE))
# import other required modules for the REPL
sys <- import("sys", convert = TRUE)
codeop <- import("codeop", convert = TRUE)
# check to see if the current environment supports history
# (check for case where working directory not writable)
use_history <-
!"--vanilla" %in% commandArgs() &&
!"--no-save" %in% commandArgs() &&
!is.null(getwd()) &&
tryCatch(
{ utils::savehistory(tempfile()); TRUE },
error = function(e) FALSE
)
if (use_history) {
# if we have history, save and then restore the current
# R history
utils::savehistory()
on.exit(utils::loadhistory(), add = TRUE)
# file to be used for command history during session
histfile <- getOption("reticulate.repl.histfile")
if (is.null(histfile))
histfile <- file.path(tempdir(), ".reticulatehistory")
# load history (create empty file if none exists yet)
if (!file.exists(histfile))
file.create(histfile)
utils::loadhistory(histfile)
}
# buffer of pending console input (we don't evaluate code
# until the user has submitted a complete Python statement)
#
# we return an environment of functions bound in a local environment
# so that hook can manipulate the buffer if required
buffer <- stack(mode = "character")
# command compiler (used to check if we've received a complete piece
# of Python input)
compiler <- codeop$CommandCompiler()
# record whether the user has requested a quit
quit_requested <- FALSE
# inform others that the reticulate REPL is active
.globals$py_repl_active <- TRUE
on.exit(.globals$py_repl_active <- FALSE, add = TRUE)
# handle errors produced during REPL actions
handle_error <- function(output) {
failed <- inherits(output, "error")
if (failed) {
error_message <- py_last_error()$message
if (identical(.Platform$GUI, "RStudio") &&
requireNamespace("cli", quietly = TRUE))
error_message <- make_filepaths_clickable(error_message)
message(error_message, appendLF = !endsWith(error_message, "\n"))
}
failed
}
handle_interrupt <- function(condition) {
# swallow interrupts -- don't allow interrupted Python code to
# exit the REPL; we should only exit when an interrupt is sent
# when no Python code is executing
}
repl <- function() {
# flush stdout, stderr on each REPL iteration
on.exit(py_flush_output(), add = TRUE)
# read input (either from user or from code)
prompt <- if (buffer$empty()) ">>> " else "... "
if (is.null(input)) {
contents <- readline(prompt = prompt)
} else if (length(input)) {
contents <- input[[1L]]
input <<- tail(input, n = -1L)
writeLines(paste(prompt, contents), con = stdout())
} else {
quit_requested <<- TRUE
return()
}
# NULL implies the user sent EOF -- time to leave
if (is.null(contents)) {
writeLines("exit", con = stdout())
quit_requested <<- TRUE
return()
}
# trim whitespace for handling of special commands
trimmed <- gsub("^\\s*|\\s*$", "", contents)
# run hook provided by front-end (in case special actions
# need to be taken in response to console input)
hook <- getOption("reticulate.repl.hook")
if (is.function(hook)) {
status <- tryCatch(hook(buffer, contents, trimmed), error = identity)
# report errors to the user
if (inherits(status, "error")) {
message(paste("Error:", conditionMessage(status)))
return()
}
# a TRUE return implies the hook handled this input
if (isTRUE(status))
return()
}
# run hook provided by front-end, to notify that we're now busy
hook <- getOption("reticulate.repl.busy")
if (is.function(hook)) {
# run once now to indicate we're about to run
status <- tryCatch(hook(TRUE), error = identity)
if (inherits(status, "error"))
warning(status)
# run again on exit to indicate we're done
on.exit({
status <- tryCatch(hook(FALSE), error = identity)
if (inherits(status, "error"))
warning(status)
}, add = TRUE)
}
# special handling for top-level commands (when buffer is empty)
if (buffer$empty()) {
# handle user requests to quit
if (trimmed %in% c("quit", "exit")) {
quit_requested <<- TRUE
return()
}
# special handling for help requests prefixed with '?'
if (regexpr("?", trimmed, fixed = TRUE) == 1) {
code <- sprintf("help(\"%s\")", substring(trimmed, 2))
py_run_string(code)
return()
}
# similar handling for help requests postfixed with '?'
if (grepl("(^[\\#].*)[?]\\s*$", trimmed, perl = TRUE)) {
replaced <- sub("[?]\\s*$", "", trimmed)
code <- sprintf("help(\"%s\")", replaced)
py_run_string(code)
return()
}
if (getOption("reticulate.repl.use_magics", TRUE)) {
# expand any "!!" as system commands that capture output
trimmed <- gsub("!!", "%system ", trimmed)
# user intends to capture output from system command in var
# e.g.: x = !ls
if (grepl("^[[:alnum:]_.]\\s*=\\s*!", trimmed))
trimmed <- sub("=\\s*!", "= %system ", trimmed)
# magic
if (grepl("^%", trimmed)) {
py$`_` <- .globals$py_last_value <- invoke_magic(trimmed)
return()
}
# system
if (grepl("^!", trimmed)) {
system(str_drop_prefix(trimmed, "!"))
return()
}
# capture output from magic command in var
# # e.g.: x = %env USER
if (grepl("^[[:alnum:]_.]\\s*=\\s*%", trimmed)) {
s <- str_split1_on_first(trimmed, "\\s*=\\s*")
target <- s[[1]]
magic <- str_drop_prefix(s[2L], "%")
py$`_` <- .globals$py_last_value <- invoke_magic(magic)
py_run_string(sprintf("%s = _", target), local = FALSE, convert = FALSE)
return()
}
}
# if the user submitted a blank line at the top level,
# ignore it (note that we intentionally submit whitespace-only
# lines that might terminate a block)
if (!nzchar(trimmed))
return()
}
# update history file
if (use_history)
cat(contents, file = histfile, sep = "\n", append = TRUE)
# trim whitespace if the buffer is empty (this effectively allows leading
# whitespace in top-level Python commands)
if (buffer$empty()) contents <- trimmed
# update buffer
previous <- buffer$data()
buffer$push(contents)
# generate code to be sent to command interpreter
code <- paste(buffer$data(), collapse = "\n")
ready <- tryCatch(compiler(code), condition = identity)
# a NULL return implies that we can accept more input
if (is.null(ready))
return()
# on error, attempt to submit the previous buffer and then handle
# the newest line of code independently. this allows us to handle
# python constructs such as:
#
# def foo():
# return 42
# foo()
#
# try:
# print 1
# except:
# print 2
# print 3
#
# which would otherwise fail
if (length(previous) && inherits(ready, "error")) {
# submit previous code
pasted <- paste(previous, collapse = "\n")
tryCatch(
py_compile_eval(pasted, capture = FALSE),
error = handle_error,
interrupt = handle_interrupt
)
# now, handle the newest line of code submitted
buffer$set(contents)
code <- contents
ready <- tryCatch(compiler(code), condition = identity)
# a NULL return implies that we can accept more input
if (is.null(ready))
return()
}
# otherwise, we should have received a code output object
# so we can just run the code submitted thus far
buffer$clear()
tryCatch(
py_compile_eval(code, capture = FALSE),
error = handle_error,
interrupt = handle_interrupt
)
}
# notify the user we're entering the REPL (when requested)
if (!quiet) {
version <- paste(
sys$version_info$major,
sys$version_info$minor,
sys$version_info$micro,
sep = "."
)
# NOTE: we used to use sys.executable but that would report
# the R process rather than the Python process
config <- py_config()
executable <- config$python
fmt <- c(
"Python %s (%s)",
"Reticulate %s REPL -- A Python interpreter in R.",
"Enter 'exit' or 'quit' to exit the REPL and return to R."
)
msg <- sprintf(
paste(fmt, collapse = "\n"),
version,
executable,
utils::packageVersion("reticulate")
)
message(msg)
}
# enter the REPL loop
repeat {
if (quit_requested)
break
tryCatch(repl(), interrupt = identity)
}
}
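# Illustrative sketch (not part of the original source): driving the REPL
# programmatically through the `input` argument documented above. The Python
# statements and variable names are made up for the example; it assumes
# reticulate and a working Python installation. Guarded by `if (FALSE)` so it
# never runs when this file is sourced.
if (FALSE) {
  library(reticulate)
  repl_python(input = c(
    "import math",
    "radius = 2",
    "area = math.pi * radius ** 2",
    "exit"
  ), quiet = TRUE)
  # objects created in the Python main module persist after the REPL exits
  print(py$area)
}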
# Check Whether the Python REPL is Active
#
# Check to see whether the Python REPL is active. This is primarily
# for use by R front-ends, which might want to toggle or affect
# the state of the Python REPL while it is running.
py_repl_active <- function() {
.globals$py_repl_active
}
invoke_magic <- function(command) {
stopifnot(is.character(command), length(command) == 1)
command <- str_drop_prefix(command, "%")
m <- str_split1_on_first(command, "\\s+")
cmd <- m[1]
args <- m[-1]
if (cmd == "pwd") {
if (length(args))
stop("%pwd magic takes no arguments, received: ", command)
dir <- getwd()
cat(dir, "\n")
return(invisible(dir))
}
# in IPython, this is stored in __main__._dh as a python list
# we avoid polluting `__main__` and also lazily create the history,
# also, this can only track changes made from `repl_python()`.
get_dhist <- function() {
dh <- .globals$magics_state$wd_history
if (is.null(dh)) {
.globals$magics_state <- new.env(parent = emptyenv())
dh <- import("collections")$deque(list(getwd()), 200L)
.globals$magics_state$wd_history <- dh
}
dh
}
if (cmd == "cd") {
hist <- get_dhist()
if (length(args) != 1)
stop("%cd magic takes 1 argument, received: ", command)
dir <- gsub("[\"']", "", args)
# strings auto complete as fs locations in RStudio IDE, so as a convenience
# we accept quoted file paths and unquote them here.
setwd2 <- function(dir) {
old_wd <- setwd(dir)
new_wd <- getwd()
cat(new_wd, "\n", sep = "")
hist$append(new_wd)
invisible(old_wd)
}
if (startsWith(args, "-")) {
if (args == "-") {
dir <- hist[-2L]
} else if (grepl("-[0-9]+$", args)) {
dir <- hist[as.integer(args)]
} else {
# partial matching by regex
hist <- import_builtins()$list(hist)
re <- str_drop_prefix(args, "-")
if (is_windows())
re <- gsub("[/]", "\\", re, fixed = TRUE)
dir <- grep(re, hist, perl = TRUE, value = TRUE)
if (!length(dir))
stop("No matching directory found in history for ", dQuote(re), ".",
"\nSee history with %dhist")
dir <- dir[[length(dir)]] # pick most recent match
}
# not implemented, -b bookmarks, -q quiet
} else
dir <- args
return(setwd2(dir))
}
if (cmd == "dhist") {
hist <- get_dhist()
hist <- import_builtins()$list(hist)
cat("Directory history:\n- ")
cat(hist, sep = "\n- ")
cat("\n")
return(invisible(hist))
}
if (cmd == "conda") {
info <- get_python_conda_info(py_exe())
return(conda_run2(cmd_line = paste("conda", args),
conda = info$conda,
envname = info$root))
}
if (cmd == "pip") {
if (is_conda_python(py_exe())) {
info <- get_python_conda_info(py_exe())
return(conda_run2(cmd_line = paste("pip", args),
conda = info$conda,
envname = info$root))
} else {
args <- shQuote(strsplit(args, "\\s+")[[1]])
system2(py_exe(), c("-m", "pip", args))
}
return()
}
if (cmd == "env") {
if (!length(args))
return(print(Sys.getenv()))
if (grepl("=|\\s", args)) # user setting var
args <- str_split1_on_first(args, "=|\\s+")
else {
print(val <- Sys.getenv(args))
return(val)
}
new_val <- args[[2]]
if (grepl("\\{.*\\}", new_val) && py_version() >= "3.6") {
#interpolate as f-strings
new_val <- py_eval(sprintf('f"%s"', new_val))
}
names(new_val) <- args[[1]]
do.call(Sys.setenv, as.list(new_val))
cat(sprintf("env: %s=%s\n", names(new_val), new_val))
return(invisible(new_val))
# not implemented: bash-style $var expansion
}
if (cmd %in% c("load", "loadpy", "run")) {
# only supports sourcing a python file in __main__
# not implemented:
# -r line ranges, -s specific symbols,
# reexecution of symbols from history,
# reexecution of namespace objects annotated by ipython shell with original source
# ipython extensions
file <- gsub("[\"']", "", args)
if (!file.exists(file))
stop("Python file not found: ", file)
py_run_file(file, local = FALSE, convert = FALSE)
return()
}
if (cmd %in% c("system", "sx")) {
if (is_windows())
return(shell(args, intern = TRUE))
else
return(as.list(system(args, intern = TRUE)))
}
stop("Magic not implemented: ", command)
}
#' IPython console
#'
#' Launch IPython console app.
#'
#' See https://ipython.readthedocs.io/ for features.
#'
#' @keywords internal
ipython <- function() {
ensure_python_initialized("IPython")
# set flag for frontend
.globals$py_repl_active <- TRUE
on.exit({
.globals$py_repl_active <- FALSE
}, add = TRUE)
# don't pollute R history w/ python commands
# (IPython keeps track of its own history)
use_history <-
!"--vanilla" %in% commandArgs() &&
!"--no-save" %in% commandArgs() &&
!is.null(getwd()) &&
tryCatch(
{ utils::savehistory(tempfile()); TRUE },
error = function(e) FALSE
)
if (use_history) {
# if we have history, save and then restore the current
# R history
utils::savehistory()
on.exit(utils::loadhistory(), add = TRUE)
}
# Not implemented,
# - custom startup banner
# RStudio IDE support for:
# - image display
# - composition of multi-line code blocks
import("rpytools.ipython")$start_ipython()
}
|
meta <- read.table("../data/meta.dat", header=T)
attach(meta)
boxplot(factor~scale)
m=lm(( factor )~topic+country+scale+position, data=meta[-c(28,64,37,2,8,11),])
library(MCMCpack) # MCMCregress() comes from MCMCpack
p=MCMCregress(factor~topic+country+scale+position, data=meta[-c(28,64,37,2,8,11),], B0=10)
# Pasted console output (lm() coefficients and MCMCregress() posterior summary), kept for reference:
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 0.44098 0.21457 2.055 0.04454 *
# topicefficacy 0.14195 0.12485 1.137 0.26040
# topicjob 0.19598 0.18607 1.053 0.29674
# topicwomen 0.44791 0.17093 2.620 0.01128 *
# countryCH 0.30985 0.14954 2.072 0.04288 *
# countryCZ 0.28359 0.15377 1.844 0.07043 .
# countryDK 0.32762 0.17281 1.896 0.06315 .
# countryEE 0.53318 0.19331 2.758 0.00784 **
# countryGR 0.31759 0.15377 2.065 0.04352 *
# countrySI 0.08676 0.09965 0.871 0.38763
# scaledirect 0.11833 0.09187 1.288 0.20303
# scaletruefalse 0.41716 0.14268 2.924 0.00498 **
# positionnegative -0.48415 0.07244 -6.683 1.15e-08 ***
# positionrarely -0.07305 0.14240 -0.513 0.60998
# positionusually -0.09638 0.14240 -0.677 0.50129
# Mean SD Naive SE Time-series SE
# (Intercept) 0.44386 0.21968 0.0021968 0.0022186
# topicefficacy 0.14068 0.12861 0.0012861 0.0012180
# topicjob 0.19162 0.19000 0.0019000 0.0018765
# topicwomen 0.44437 0.17531 0.0017531 0.0014626
# countryCH 0.30983 0.15206 0.0015206 0.0017332
# countryCZ 0.28275 0.15528 0.0015528 0.0015975
# countryDK 0.32490 0.17606 0.0017606 0.0020739
# countryEE 0.53021 0.19620 0.0019620 0.0020899
# countryGR 0.31806 0.15467 0.0015467 0.0015706
# countrySI 0.08696 0.10131 0.0010131 0.0011451
# scaledirect 0.11865 0.09463 0.0009463 0.0010534
# scaletruefalse 0.41894 0.14622 0.0014622 0.0014927
# positionnegative -0.48418 0.07301 0.0007301 0.0008210
# positionrarely -0.07176 0.14547 0.0014547 0.0013554
# positionusually -0.09638 0.14611 0.0014611 0.0013895
# sigma2 0.03496 0.00684 0.0000684 0.0000836
dm <- model.matrix(factor ~ topic + country + scale + position, data=meta)
meta$negative <- dm[,13]
m=lm(( factor )~ topic + country + scale + negative, data=meta[-c(28,64,37,2,8,11),])
me$id <- as.numeric(paste(as.numeric(me$country),me$trait,me$method,sep=""))
me.wide <- reshape(me, v.names="methodeffect", idvar="id", timevar="analysis", direction="wide")
opar <- par(no.readonly = TRUE)
par(mar=c(3,2,0,1))
meta <- read.csv("../output/meta3.csv", header=T,sep="\t")
boxplot(d[,1]~high, horizontal=T, boxwex=.5, names=c("Low","High"), frame.plot=F)
par(opar)
|
/input/meta.R
|
no_license
|
daob/ess-research
|
R
| false | false | 2,739 |
r
|
meta <- read.table("../data/meta.dat", header=T)
attach(meta)
boxplot(factor~scale)
m=lm(( factor )~topic+country+scale+position, data=meta[-c(28,64,37,2,8,11),])
library(MCMCpack) # MCMCregress() comes from MCMCpack
p=MCMCregress(factor~topic+country+scale+position, data=meta[-c(28,64,37,2,8,11),], B0=10)
# Pasted console output (lm() coefficients and MCMCregress() posterior summary), kept for reference:
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 0.44098 0.21457 2.055 0.04454 *
# topicefficacy 0.14195 0.12485 1.137 0.26040
# topicjob 0.19598 0.18607 1.053 0.29674
# topicwomen 0.44791 0.17093 2.620 0.01128 *
# countryCH 0.30985 0.14954 2.072 0.04288 *
# countryCZ 0.28359 0.15377 1.844 0.07043 .
# countryDK 0.32762 0.17281 1.896 0.06315 .
# countryEE 0.53318 0.19331 2.758 0.00784 **
# countryGR 0.31759 0.15377 2.065 0.04352 *
# countrySI 0.08676 0.09965 0.871 0.38763
# scaledirect 0.11833 0.09187 1.288 0.20303
# scaletruefalse 0.41716 0.14268 2.924 0.00498 **
# positionnegative -0.48415 0.07244 -6.683 1.15e-08 ***
# positionrarely -0.07305 0.14240 -0.513 0.60998
# positionusually -0.09638 0.14240 -0.677 0.50129
# Mean SD Naive SE Time-series SE
# (Intercept) 0.44386 0.21968 0.0021968 0.0022186
# topicefficacy 0.14068 0.12861 0.0012861 0.0012180
# topicjob 0.19162 0.19000 0.0019000 0.0018765
# topicwomen 0.44437 0.17531 0.0017531 0.0014626
# countryCH 0.30983 0.15206 0.0015206 0.0017332
# countryCZ 0.28275 0.15528 0.0015528 0.0015975
# countryDK 0.32490 0.17606 0.0017606 0.0020739
# countryEE 0.53021 0.19620 0.0019620 0.0020899
# countryGR 0.31806 0.15467 0.0015467 0.0015706
# countrySI 0.08696 0.10131 0.0010131 0.0011451
# scaledirect 0.11865 0.09463 0.0009463 0.0010534
# scaletruefalse 0.41894 0.14622 0.0014622 0.0014927
# positionnegative -0.48418 0.07301 0.0007301 0.0008210
# positionrarely -0.07176 0.14547 0.0014547 0.0013554
# positionusually -0.09638 0.14611 0.0014611 0.0013895
# sigma2 0.03496 0.00684 0.0000684 0.0000836
dm <- model.matrix(factor ~ topic + country + scale + position, data=meta)
meta$negative <- dm[,13]
m=lm(( factor )~ topic + country + scale + negative, data=meta[-c(28,64,37,2,8,11),])
me$id <- as.numeric(paste(as.numeric(me$country),me$trait,me$method,sep=""))
me.wide <- reshape(me, v.names="methodeffect", idvar="id", timevar="analysis", direction="wide")
opar <- par(no.readonly = TRUE)
par(mar=c(3,2,0,1))
meta <- read.csv("../output/meta3.csv", header=T,sep="\t")
boxplot(d[,1]~high, horizontal=T, boxwex=.5, names=c("Low","High"), frame.plot=F)
par(opar)
|
# create gnuplot handle
h1 <- Gpinit()
# set gnuplot's additional search directories, to the extdata directory from Rgnuplot (default)
Gpsetloadpath(h1)
# change gnuplot's working directory to be the same as R's working directory (default)
Gpsetwd(h1)
# load the gnuplot script, e.g. Gpcmd(h1, "set terminal png; set output 'errorbar3.png'; load 'fitlaser.gnu'")
Gpcmd(h1, "load \"fitlaser.gnu\"")
# pause R and gnuplot
Gppause()
# close gnuplot handle
h1 <- Gpclose(h1)
|
/demo/fitlaser.R
|
no_license
|
cran/Rgnuplot
|
R
| false | false | 465 |
r
|
# create gnuplot handle
h1 <- Gpinit()
# set gnuplot's additional search directories, to the extdata directory from Rgnuplot (default)
Gpsetloadpath(h1)
# change gnuplot's working directory to be the same as R's working directory (default)
Gpsetwd(h1)
# load the gnuplot script, e.g. Gpcmd(h1, "set terminal png; set output 'errorbar3.png'; load 'fitlaser.gnu'")
Gpcmd(h1, "load \"fitlaser.gnu\"")
# pause R and gnuplot
Gppause()
# close gnuplot handle
h1 <- Gpclose(h1)
|
#' Eigenvector from the eigenvalues
#'
#' @param x Numeric vector of the eigenvalues
#' @param A Square symmetric matrix whose eigenvalues are given in `x`
#'
#' @return A matrix whose column i holds the squared components of the
#'   normalised eigenvector associated with the eigenvalue `x[i]`
#' @export
#'
#' @examples
#' C <- matrix(c(1,2,3,2,5,6,3,6,10), 3, 3)
#' D <- matrix(c(1, 1, -1, 1, 3, 1, -1, 1, 3), 3, 3)
#' eigenvector(c(14.9330343736593, 1, 0.0669656263407531), C)
eigenvector <- function(x, A) {
  if (!(is.numeric(x) & is.vector(x))) {
    stop("It should be a numeric vector.")
  }
  n <- length(x)
  if (!is.matrix(A) || nrow(A) != ncol(A)) {
    stop("A should be a square matrix.")
  }
  if (nrow(A) != n) {
    stop("The eigenvalues must be of the same length as the diagonal of the matrix")
  }
  # Eigenvector-eigenvalue identity (symmetric A with distinct eigenvalues):
  # |v[i, j]|^2 * prod_{k != i} (x[i] - x[k]) = prod_k (x[i] - lambda_k(M_j)),
  # where M_j is A with row and column j removed.
  # Right-hand side: products over the eigenvalues of each minor M_j.
  rhs <- matrix(NA_real_, nrow = n, ncol = n)
  for (j in seq_len(n)) {
    M <- A[-j, -j, drop = FALSE]
    e <- eigen(M, symmetric = TRUE, only.values = TRUE)
    for (i in seq_len(n)) {
      rhs[i, j] <- prod(x[i] - e$values)
    }
  }
  # Left-hand side factor: product of eigenvalue gaps for each eigenvalue.
  lhs <- vapply(seq_len(n), function(i) prod(x[i] - x[-i]), numeric(1))
  # rhs[i, j] / lhs[i] gives |v[i, j]|^2; transpose so that column i holds the
  # squared components of the eigenvector for x[i] (signs are not recoverable).
  t(rhs / lhs)
}
|
/R/eigenvector.R
|
permissive
|
llrs/eigen
|
R
| false | false | 1,166 |
r
|
#' Eigenvector from the eigenvalues
#'
#' @param x Numeric vector of the eigenvalues
#' @param A Square symmetric matrix whose eigenvalues are given in `x`
#'
#' @return A matrix whose column i holds the squared components of the
#'   normalised eigenvector associated with the eigenvalue `x[i]`
#' @export
#'
#' @examples
#' C <- matrix(c(1,2,3,2,5,6,3,6,10), 3, 3)
#' D <- matrix(c(1, 1, -1, 1, 3, 1, -1, 1, 3), 3, 3)
#' eigenvector(c(14.9330343736593, 1, 0.0669656263407531), C)
eigenvector <- function(x, A) {
  if (!(is.numeric(x) & is.vector(x))) {
    stop("It should be a numeric vector.")
  }
  n <- length(x)
  if (!is.matrix(A) || nrow(A) != ncol(A)) {
    stop("A should be a square matrix.")
  }
  if (nrow(A) != n) {
    stop("The eigenvalues must be of the same length as the diagonal of the matrix")
  }
  # Eigenvector-eigenvalue identity (symmetric A with distinct eigenvalues):
  # |v[i, j]|^2 * prod_{k != i} (x[i] - x[k]) = prod_k (x[i] - lambda_k(M_j)),
  # where M_j is A with row and column j removed.
  # Right-hand side: products over the eigenvalues of each minor M_j.
  rhs <- matrix(NA_real_, nrow = n, ncol = n)
  for (j in seq_len(n)) {
    M <- A[-j, -j, drop = FALSE]
    e <- eigen(M, symmetric = TRUE, only.values = TRUE)
    for (i in seq_len(n)) {
      rhs[i, j] <- prod(x[i] - e$values)
    }
  }
  # Left-hand side factor: product of eigenvalue gaps for each eigenvalue.
  lhs <- vapply(seq_len(n), function(i) prod(x[i] - x[-i]), numeric(1))
  # rhs[i, j] / lhs[i] gives |v[i, j]|^2; transpose so that column i holds the
  # squared components of the eigenvector for x[i] (signs are not recoverable).
  t(rhs / lhs)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeCSVComment.R
\name{writeCSVComment}
\alias{writeCSVComment}
\title{Write a csv file with comment}
\usage{
writeCSVComment(data, file, metadata = "", comment = "#")
}
\arguments{
\item{data}{A data.frame}
\item{file}{A path for destination file}
\item{metadata}{A character string representing R codes as a preprocessing}
\item{comment}{A string used to identify comments}
}
\description{
Write a csv file with comment
}
|
/man/writeCSVComment.Rd
|
no_license
|
cardiomoon/rrtable
|
R
| false | true | 506 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeCSVComment.R
\name{writeCSVComment}
\alias{writeCSVComment}
\title{Write a csv file with comment}
\usage{
writeCSVComment(data, file, metadata = "", comment = "#")
}
\arguments{
\item{data}{A data.frame}
\item{file}{A path for destination file}
\item{metadata}{A character string representing R codes as a preprocessing}
\item{comment}{A string used to identify comments}
}
\description{
Write a csv file with comment
}
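% Example section added as a sketch: it assumes writeCSVComment() writes the
% metadata string as comment line(s) prefixed with the chosen comment
% character, as the argument descriptions above suggest.
\examples{
\dontrun{
writeCSVComment(iris, file = "iris.csv", metadata = "data(iris)", comment = "#")
}
}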
|
# library(readxl)
library(here)
library(httr)
library(lubridate)
library(purrr)
library(broom)
library(tidyr)
library(zoo)
library(ggplot2); library(ggrepel); library(gganimate)
library(gifski)
library(dplyr); library(tibble)
add_numbers <- function(df){
df.split <- split(df, df$geoId)
df.arr_by_date <- lapply(df.split, arrange, dateRep)
df.split.nos <- lapply(df.arr_by_date, function(x) tibble::add_column(x, nos = 1:nrow(x)))
dplyr::bind_rows(df.split.nos)
}
# get data ----------------------------------------------------------------
url <- paste("https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-",format(Sys.time(), "%Y-%m-%d"), ".xlsx", sep = "")
# url <- "https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-2020-04-12.xlsx"
httr::GET(url, authenticate(":", ":", type="ntlm"), write_disk(tf <- tempfile(fileext = ".xlsx")))
df <- readxl::read_xlsx(tf)
# plot 1 ------------------------------------------------------------------
# number of deaths
zemlje <- c("HR", "SI", "AT", "SE")
g1 <- df %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(
cum_deaths = cumsum(deaths)
) %>%
filter(geoId %in% zemlje, cum_deaths > 0) %>%
ggplot(aes(x = dateRep, y = cum_deaths)) +
geom_point(alpha = .3) +
scale_y_continuous(trans = "log2") +
geom_smooth(method = "lm") +
facet_grid(cols = vars(geoId), scales = "free_y") +
labs(title = "smrti",
subtitle = format(Sys.time(), "%Y-%m-%d"), x = "", y = "")
# ggsave("figs_out/deaths.svg", plot = g1, device = "cairo_pdf")
Cairo::Cairo(width = 8, height = 6, file="figs_out/deaths.svg", type="svg", units = "in")
g1
dev.off()
# plot 2 ------------------------------------------------------------------
# trend of newly confirmed cases - overall vs the last 2 weeks
df_wks <- df %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(cum_cases = cumsum(cases),
week = as.numeric(strftime(dateRep, format = "%V"))) %>%
filter(week %in% c(max(week) - 1, max(week))) %>%
add_numbers() %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
ungroup()
# model
dbl_df <- df %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(cum_cases = cumsum(cases),
weeks = strftime(dateRep, format = "%V")) %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
add_numbers() %>%
split(.$geoId) %>%
map(~ lm(log2(cum_cases) ~ nos, data = .x)) %>%
map_dfr(tidy, .id = "geoId") %>%
filter(term == "nos") %>%
mutate(doubling = 1/estimate)
dbl_wks <- split(df_wks, list(df_wks$geoId, df_wks$week)) %>%
map(~ lm(log2(cum_cases) ~ nos, data = .x)) %>%
map_dfr(tidy, .id = "cntr.week") %>%
separate(col = cntr.week, into = c("geoId", "week")) %>%
filter(term == "nos") %>%
mutate(doubling = 1/estimate)
g2 <- df %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(cum_cases = cumsum(cases),
weeks = strftime(dateRep, format = "%V")) %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
ggplot(aes(x = dateRep, y = cum_cases)) +
geom_point(alpha = .3) +
scale_y_continuous(trans = "log2") +
geom_smooth(method = "lm") +
geom_smooth(data = df_wks, aes(colour = as.character(week)), method = "lm") +
geom_text_repel(data = dbl_df, aes(label=round(doubling,1)),
x = -Inf, y = Inf, # hjust=-.5, vjust=2,
inherit.aes = FALSE, colour = "blue") +
geom_text_repel(data = dbl_wks, aes(label=round(doubling,1), colour = as.character(week)),
x = Inf, y = Inf, # hjust=-.5, vjust=3,
inherit.aes = FALSE) +
facet_grid(cols = vars(geoId)) +
labs(title = "kumulativni slučajevi",
subtitle = format(Sys.time(), "%Y-%m-%d"), x = "", y = "") +
guides(colour=FALSE)
# ggsave("figs_out/dynamics.svg", plot = g2)
Cairo::Cairo(width = 8, height = 6, file="figs_out/dynamics.svg", type="svg", units = "in")
g2
dev.off()
# plot 3 ------------------------------------------------------------------
# Number of cases - average trend since the first recorded case, through the origin
df.cum_cases_days <- df %>%
arrange(geoId, dateRep) %>%
group_by(geoId) %>%
mutate(cum_cases = cumsum(cases)) %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
ungroup() %>%
split(.$geoId) %>%
lapply(function(x) add_column(x, days = 1:nrow(x)))
model_n_extract <- function(df, remove_intercept) {
m <- lm(log2(cum_cases) ~ days, data = df)
if (remove_intercept) m <- lm(log2(cum_cases) ~ days - 1, data = df)
m %>%
tidy %>%
filter(term == "days") %>%
pull(estimate) %>%
magrittr::raise_to_power(-1) %>%
round(2)
}
# model_n_extract(df.cum_cases_days$AT, remove_intercept = TRUE)
doubling_average <- df.cum_cases_days %>%
sapply(model_n_extract, remove_intercept = TRUE) %>%
enframe(name = "geoId")
g3 <- df.cum_cases_days %>%
bind_rows() %>%
ggplot(aes(x = days, y = cum_cases)) +
geom_abline(intercept = 0, slope = 1/5, size = .2, colour = "grey") + #, linetype = 2) +
geom_abline(intercept = 0, slope = 1/4, size = .2, colour = "grey") + #, linetype = 2) +
geom_abline(intercept = 0, slope = 1/3, size = .2, colour = "grey") + #, linetype = 2) +
geom_abline(intercept = 0, slope = 1/2, size = .2, colour = "grey") + #, linetype = 2) +
geom_abline(intercept = 0, slope = 1/1, size = .2, colour = "grey") + #, linetype = 2) +
geom_point() +
geom_smooth(method = "lm", se = FALSE, formula=y~x-1) +
scale_y_continuous(trans = "log2") +
geom_text(data = doubling_average, aes(label=value),
x = -Inf, y = Inf, hjust=-.2, vjust=1.8,
inherit.aes = FALSE, colour = "blue") +
facet_grid(cols = vars(geoId)) +
labs(title = "Duplanje",
subtitle = "- prosječni broj dana za duplanje slučajeva od prvog slučaja")
# ggsave("figs_out/doubling.svg", plot = g3)
Cairo::Cairo(width = 8, height = 6, file="figs_out/doubling.svg", type="svg", units = "in")
g3
dev.off()
# plot 4 ------------------------------------------------------------------
# Number of cases vs growth rate (as Vlejd puts it)
# in this example, precipitation
ylim.sec <- c(-4, 18) # in this example, temperature
df_lag <- df %>%
arrange(geoId, dateRep) %>%
group_by(geoId) %>%
mutate(
cum_cases = cumsum(cases),
lag_cases = cases/lag(cases, 1),
lag_cases = case_when(is.infinite(lag_cases) ~ NA_real_,
TRUE ~ lag_cases),
lag_cases_smooth3 = zoo::rollapply(lag_cases, 3, mean, align = 'right', fill = NA),
) %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
filter(geoId != "AT") %>%
select(dateRep, geoId, cases, cum_cases, lag_cases, lag_cases_smooth3)
ylim.prim <- c(0, max(df_lag$cum_cases))
ylim.sec <- c(0, max(df_lag$lag_cases_smooth3, na.rm = TRUE))
b <- diff(ylim.prim)/diff(ylim.sec)
a <- b*(ylim.prim[1] - ylim.sec[1])
g4 <- ggplot(df_lag, aes(dateRep, cum_cases)) +
geom_col(colour = "darkgrey", fill = NA) +
geom_hline(aes(yintercept = a + b), linetype = 2, colour = "grey") +
geom_line(aes(y = a + lag_cases_smooth3*b), color = "red") +
scale_y_continuous("cum_cases", sec.axis = sec_axis(~ (. - a)/b, name = "porast")) +
facet_grid(cols = vars(geoId))
# scale_x_continuous("Month", breaks = 1:12)
# ggsave("figs_out/rate.svg", plot = g4)
Cairo::Cairo(width = 8, height = 6, file="figs_out/rate.svg", type="svg", units = "in")
g4
dev.off()
# plot 5 ------------------------------------------------------------------
# Cumulative cases vs new cases - animation modelled on https://youtu.be/54XLXg4fYsc
vizz_anim <- df %>%
add_numbers() %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(cum_cases = cumsum(cases)) %>%
filter(cum_cases > 0) %>%
mutate(
ma3 = rollapply(cases, 3, mean, align = 'right', fill = NA),
ma5 = rollapply(cases, 5, mean, align = 'right', fill = NA),
ma7 = rollapply(cases, 7, mean, align = 'right', fill = NA)
) %>%
ungroup() %>%
filter(geoId %in% zemlje) %>%
filter(dateRep > "2020-02-26") %>%
ggplot(aes(x = cum_cases, y = ma5, colour = geoId)) +
geom_line(aes(y = ma5, colour = geoId)) +
# geom_line(aes(y = ma7, colour = geoId), size = .1, linetype = 2) +
scale_x_continuous(limits = c(10, NA), trans = "log10") +
scale_y_continuous(limits = c(10, NA), trans = "log10") +
transition_reveal(along = dateRep)
anim_save("figs_out/beating.gif", vizz_anim, fps = 20, duration = 30, end_pause = 200)
|
/R/github_vizz.R
|
no_license
|
ipuzek/covid_SE_europe
|
R
| false | false | 8,806 |
r
|
# library(readxl)
library(here)
library(httr)
library(lubridate)
library(purrr)
library(broom)
library(tidyr)
library(zoo)
library(ggplot2); library(ggrepel); library(gganimate)
library(gifski)
library(dplyr); library(tibble)
add_numbers <- function(df){
df.split <- split(df, df$geoId)
df.arr_by_date <- lapply(df.split, arrange, dateRep)
df.split.nos <- lapply(df.arr_by_date, function(x) tibble::add_column(x, nos = 1:nrow(x)))
dplyr::bind_rows(df.split.nos)
}
# get data ----------------------------------------------------------------
url <- paste("https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-",format(Sys.time(), "%Y-%m-%d"), ".xlsx", sep = "")
# url <- "https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-2020-04-12.xlsx"
httr::GET(url, authenticate(":", ":", type="ntlm"), write_disk(tf <- tempfile(fileext = ".xlsx")))
df <- readxl::read_xlsx(tf)
# plot 1 ------------------------------------------------------------------
# number of deaths
zemlje <- c("HR", "SI", "AT", "SE")
g1 <- df %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(
cum_deaths = cumsum(deaths)
) %>%
filter(geoId %in% zemlje, cum_deaths > 0) %>%
ggplot(aes(x = dateRep, y = cum_deaths)) +
geom_point(alpha = .3) +
scale_y_continuous(trans = "log2") +
geom_smooth(method = "lm") +
facet_grid(cols = vars(geoId), scales = "free_y") +
labs(title = "smrti",
subtitle = format(Sys.time(), "%Y-%m-%d"), x = "", y = "")
# ggsave("figs_out/deaths.svg", plot = g1, device = "cairo_pdf")
Cairo::Cairo(width = 8, height = 6, file="figs_out/deaths.svg", type="svg", units = "in")
g1
dev.off()
# plot 2 ------------------------------------------------------------------
# trend of newly confirmed cases - overall vs the last 2 weeks
df_wks <- df %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(cum_cases = cumsum(cases),
week = as.numeric(strftime(dateRep, format = "%V"))) %>%
filter(week %in% c(max(week) - 1, max(week))) %>%
add_numbers() %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
ungroup()
# model
dbl_df <- df %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(cum_cases = cumsum(cases),
weeks = strftime(dateRep, format = "%V")) %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
add_numbers() %>%
split(.$geoId) %>%
map(~ lm(log2(cum_cases) ~ nos, data = .x)) %>%
map_dfr(tidy, .id = "geoId") %>%
filter(term == "nos") %>%
mutate(doubling = 1/estimate)
dbl_wks <- split(df_wks, list(df_wks$geoId, df_wks$week)) %>%
map(~ lm(log2(cum_cases) ~ nos, data = .x)) %>%
map_dfr(tidy, .id = "cntr.week") %>%
separate(col = cntr.week, into = c("geoId", "week")) %>%
filter(term == "nos") %>%
mutate(doubling = 1/estimate)
g2 <- df %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(cum_cases = cumsum(cases),
weeks = strftime(dateRep, format = "%V")) %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
ggplot(aes(x = dateRep, y = cum_cases)) +
geom_point(alpha = .3) +
scale_y_continuous(trans = "log2") +
geom_smooth(method = "lm") +
geom_smooth(data = df_wks, aes(colour = as.character(week)), method = "lm") +
geom_text_repel(data = dbl_df, aes(label=round(doubling,1)),
x = -Inf, y = Inf, # hjust=-.5, vjust=2,
inherit.aes = FALSE, colour = "blue") +
geom_text_repel(data = dbl_wks, aes(label=round(doubling,1), colour = as.character(week)),
x = Inf, y = Inf, # hjust=-.5, vjust=3,
inherit.aes = FALSE) +
facet_grid(cols = vars(geoId)) +
labs(title = "kumulativni slučajevi",
subtitle = format(Sys.time(), "%Y-%m-%d"), x = "", y = "") +
guides(colour=FALSE)
# ggsave("figs_out/dynamics.svg", plot = g2)
Cairo::Cairo(width = 8, height = 6, file="figs_out/dynamics.svg", type="svg", units = "in")
g2
dev.off()
# plot 3 ------------------------------------------------------------------
# Number of cases - average trend since the first recorded case, through the origin
df.cum_cases_days <- df %>%
arrange(geoId, dateRep) %>%
group_by(geoId) %>%
mutate(cum_cases = cumsum(cases)) %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
ungroup() %>%
split(.$geoId) %>%
lapply(function(x) add_column(x, days = 1:nrow(x)))
model_n_extract <- function(df, remove_intercept) {
m <- lm(log2(cum_cases) ~ days, data = df)
if (remove_intercept) m <- lm(log2(cum_cases) ~ days - 1, data = df)
m %>%
tidy %>%
filter(term == "days") %>%
pull(estimate) %>%
magrittr::raise_to_power(-1) %>%
round(2)
}
# model_n_extract(df.cum_cases_days$AT, remove_intercept = TRUE)
doubling_average <- df.cum_cases_days %>%
sapply(model_n_extract, remove_intercept = TRUE) %>%
enframe(name = "geoId")
g3 <- df.cum_cases_days %>%
bind_rows() %>%
ggplot(aes(x = days, y = cum_cases)) +
geom_abline(intercept = 0, slope = 1/5, size = .2, colour = "grey") + #, linetype = 2) +
geom_abline(intercept = 0, slope = 1/4, size = .2, colour = "grey") + #, linetype = 2) +
geom_abline(intercept = 0, slope = 1/3, size = .2, colour = "grey") + #, linetype = 2) +
geom_abline(intercept = 0, slope = 1/2, size = .2, colour = "grey") + #, linetype = 2) +
geom_abline(intercept = 0, slope = 1/1, size = .2, colour = "grey") + #, linetype = 2) +
geom_point() +
geom_smooth(method = "lm", se = FALSE, formula=y~x-1) +
scale_y_continuous(trans = "log2") +
geom_text(data = doubling_average, aes(label=value),
x = -Inf, y = Inf, hjust=-.2, vjust=1.8,
inherit.aes = FALSE, colour = "blue") +
facet_grid(cols = vars(geoId)) +
labs(title = "Duplanje",
subtitle = "- prosječni broj dana za duplanje slučajeva od prvog slučaja")
# ggsave("figs_out/doubling.svg", plot = g3)
Cairo::Cairo(width = 8, height = 6, file="figs_out/doubling.svg", type="svg", units = "in")
g3
dev.off()
# plot 4 ------------------------------------------------------------------
# Number of cases vs growth rate (as Vlejd puts it)
# in this example, precipitation
ylim.sec <- c(-4, 18) # in this example, temperature
df_lag <- df %>%
arrange(geoId, dateRep) %>%
group_by(geoId) %>%
mutate(
cum_cases = cumsum(cases),
lag_cases = cases/lag(cases, 1),
lag_cases = case_when(is.infinite(lag_cases) ~ NA_real_,
TRUE ~ lag_cases),
lag_cases_smooth3 = zoo::rollapply(lag_cases, 3, mean, align = 'right', fill = NA),
) %>%
filter(geoId %in% zemlje, cum_cases > 0) %>%
filter(geoId != "AT") %>%
select(dateRep, geoId, cases, cum_cases, lag_cases, lag_cases_smooth3)
ylim.prim <- c(0, max(df_lag$cum_cases))
ylim.sec <- c(0, max(df_lag$lag_cases_smooth3, na.rm = TRUE))
b <- diff(ylim.prim)/diff(ylim.sec)
a <- b*(ylim.prim[1] - ylim.sec[1])
g4 <- ggplot(df_lag, aes(dateRep, cum_cases)) +
geom_col(colour = "darkgrey", fill = NA) +
geom_hline(aes(yintercept = a + b), linetype = 2, colour = "grey") +
geom_line(aes(y = a + lag_cases_smooth3*b), color = "red") +
scale_y_continuous("cum_cases", sec.axis = sec_axis(~ (. - a)/b, name = "porast")) +
facet_grid(cols = vars(geoId))
# scale_x_continuous("Month", breaks = 1:12)
# ggsave("figs_out/rate.svg", plot = g4)
Cairo::Cairo(width = 8, height = 6, file="figs_out/rate.svg", type="svg", units = "in")
g4
dev.off()
# plot 5 ------------------------------------------------------------------
# Cumulative cases vs new cases - animation modelled on https://youtu.be/54XLXg4fYsc
vizz_anim <- df %>%
add_numbers() %>%
group_by(geoId) %>%
arrange(dateRep, .by_group = TRUE) %>%
mutate(cum_cases = cumsum(cases)) %>%
filter(cum_cases > 0) %>%
mutate(
ma3 = rollapply(cases, 3, mean, align = 'right', fill = NA),
ma5 = rollapply(cases, 5, mean, align = 'right', fill = NA),
ma7 = rollapply(cases, 7, mean, align = 'right', fill = NA)
) %>%
ungroup() %>%
filter(geoId %in% zemlje) %>%
filter(dateRep > "2020-02-26") %>%
ggplot(aes(x = cum_cases, y = ma5, colour = geoId)) +
geom_line(aes(y = ma5, colour = geoId)) +
# geom_line(aes(y = ma7, colour = geoId), size = .1, linetype = 2) +
scale_x_continuous(limits = c(10, NA), trans = "log10") +
scale_y_continuous(limits = c(10, NA), trans = "log10") +
transition_reveal(along = dateRep)
anim_save("figs_out/beating.gif", vizz_anim, fps = 20, duration = 30, end_pause = 200)
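# Illustrative sketch (not from the original script): the doubling times shown
# in the plots above are the reciprocal of the slope of a log2-linear fit.
# A minimal standalone version on a made-up cumulative-case series:
toy <- data.frame(nos = 1:10, cum_cases = 5 * 2^((1:10) / 3)) # doubles every 3 days
fit <- lm(log2(cum_cases) ~ nos, data = toy)
doubling_days <- 1 / coef(fit)[["nos"]]
round(doubling_days, 1) # 3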
|
juarez <- "Go!"
|
/3.1 Personalizada.r
|
no_license
|
TopicosSelectos/tutoriales-2019-2-al150422
|
R
| false | false | 15 |
r
|
juarez <- "Go!"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yZ_inspect.R
\name{yZ_inspect}
\alias{yZ_inspect}
\title{Plot y and Z together}
\usage{
yZ_inspect(out, y, zlim, i, thresh = 0.7, col = list(blueToRed(7),
greys(10))[[1]], prop_lower_panel = 0.3, is.postpred = FALSE,
decimals_W = 1, na.color = "transparent", fy = function(lami) {
abline(h = cumsum(table(lami)) + 0.5, lwd = 3, col = "yellow", lty = 1)
}, fZ = function(Z) abline(v = 1:NCOL(Z) + 0.5, h = 1:NROW(Z) + 0.5,
col = "grey"), ...)
}
\arguments{
\item{fy}{function to execute after making y image}
\item{fZ}{function to execute after making Z image}
}
\description{
Plot y and Z together
}
|
/cytof/src/model3/cytof3/man/yZ_inspect.Rd
|
no_license
|
luiarthur/ucsc_litreview
|
R
| false | true | 693 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yZ_inspect.R
\name{yZ_inspect}
\alias{yZ_inspect}
\title{Plot y and Z together}
\usage{
yZ_inspect(out, y, zlim, i, thresh = 0.7, col = list(blueToRed(7),
greys(10))[[1]], prop_lower_panel = 0.3, is.postpred = FALSE,
decimals_W = 1, na.color = "transparent", fy = function(lami) {
abline(h = cumsum(table(lami)) + 0.5, lwd = 3, col = "yellow", lty = 1)
}, fZ = function(Z) abline(v = 1:NCOL(Z) + 0.5, h = 1:NROW(Z) + 0.5,
col = "grey"), ...)
}
\arguments{
\item{fy}{function to execute after making y image}
\item{fZ}{function to execute after making Z image}
}
\description{
Plot y and Z together
}
|
# Takes a matrix and caches it in x. x can then be accessed
# through the returned list with get() or changed with set().
# getInv() and setInv() are used to get and set x's inverse matrix.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInv <- function(invM) inv <<- invM
getInv <- function() inv
list(set = set, get = get,
setInv = setInv,
getInv = getInv)
}
|
/makeCacheMatrix.R
|
no_license
|
gerardmac/Task2
|
R
| false | false | 479 |
r
|
# Takes a matrix and caches it in x. x can then be accessed
# through the returned list with get() or changed with set().
# getInv() and setInv() are used to get and set x's inverse matrix.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInv <- function(invM) inv <<- invM
getInv <- function() inv
list(set = set, get = get,
setInv = setInv,
getInv = getInv)
}
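# Illustrative usage sketch (not part of the original file). The companion
# cacheSolve() from the same assignment is not shown here, so this only
# exercises the getters and setters directly:
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cm$get()                   # the cached matrix
cm$setInv(solve(cm$get())) # store its inverse
cm$getInv()                # retrieve the cached inverse
cm$set(diag(3))            # replacing the matrix resets the cached inverse
is.null(cm$getInv())       # TRUE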
|
# Data prep util
# Small functions that are specifically used to clean readdata
# input refers to the vector to be operated on. In this case, it should represent the baseprice
# column
cleanBaseprices <-function(input, basewindow = 8){
y <- rle(input)
# If the current length is not BASE_WINDOW in size, use the previous value of baseprice
baseprice_run <- c()
for(i in 1:length(y$length)){
if(i == 1){
carryForwardValue <- input[1]
}
currentRun <- y$length[i]
if(currentRun >= basewindow){
carryForwardValue <- y$value[i]
}
baseprice_run <- append(baseprice_run, rep(carryForwardValue, y$length[i]))
}
stopifnot(length(baseprice_run) == length(input))
return(baseprice_run)
}
cleanUp <- function(inputDS){
if(all(is.na(inputDS$baseunits))){
inputDS <- inputDS[0,]
}
return(inputDS)
}
|
/dfe-ckf/src/main/resources/scripts/R/cleanBaseprices.R
|
no_license
|
ankursa/Projects
|
R
| false | false | 874 |
r
|
# Data prep util
# Small functions that are specifically used to clean readdata
# input refers to the vector to be operated on. In this case, it should represent the baseprice
# column
cleanBaseprices <-function(input, basewindow = 8){
y <- rle(input)
# If the current length is not BASE_WINDOW in size, use the previous value of baseprice
baseprice_run <- c()
for(i in 1:length(y$length)){
if(i == 1){
carryForwardValue <- input[1]
}
currentRun <- y$length[i]
if(currentRun >= basewindow){
carryForwardValue <- y$value[i]
}
baseprice_run <- append(baseprice_run, rep(carryForwardValue, y$length[i]))
}
stopifnot(length(baseprice_run) == length(input))
return(baseprice_run)
}
cleanUp <- function(inputDS){
if(all(is.na(inputDS$baseunits))){
inputDS <- inputDS[0,]
}
return(inputDS)
}
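# Illustrative sketch (not part of the original file): with the default
# basewindow of 8, a short-lived price change is ignored and the previous base
# price is carried forward, while a run of at least 8 periods becomes the new
# base price.
price <- c(rep(10, 8), rep(8, 3), rep(10, 2), rep(9, 8))
cleanBaseprices(price)
# -> 10 for the first 13 periods (the 3-period dip to 8 is ignored), then 9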
|
context("CVX")
test_that("kantorovich_CVX default distance", {
mu <- c(1/7,2/7,4/7)
nu <- c(1/4,1/4,1/2)
x <- kantorovich_CVX(mu, nu)
expect_equal(x, 0.107142857142857)
})
test_that("kantorovich_CVX - specified distance", {
mu <- c(1/4, 3/4, 0, 0)
nu <- c(0, 1/2, 1/2, 0)
dist <- structure(c(0, 1/3, 2/3, 1, 1/3, 0, 1/3, 2/3,
2/3, 1/3, 0, 1/3, 1, 2/3, 1/3, 0), .Dim = c(4L, 4L))
# should find 1/4 and two solutions
x <- kantorovich_CVX(mu, nu, dist=dist)
expect_equal(x, 1/4)
x <- kantorovich_CVX(mu, nu, dist=dist, solution = TRUE, solver = "GLPK")
sols <- list(structure(c(0, 0, 0, 0, 0.25, 0.25, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0),
.Dim = c(4L, 4L),
.Dimnames = list(c("1", "2", "3", "4"), c("1", "2", "3", "4"))),
structure(c(0, 0, 0, 0, 0, 0.5, 0, 0, 0.25, 0.25, 0, 0, 0, 0, 0, 0), .Dim = c(4L, 4L),
.Dimnames = list(c("1", "2", "3", "4"), c("1", "2", "3", "4"))))
expect_true(all(abs(attr(x, "solution") - sols[[1]]) <= 1e-15) | all(abs(attr(x, "solution") - sols[[2]]) <= 1e-15))
})
test_that("kantorovich_CVX - nonsymmetric dist", {
mu <- c(1,2,4)/7
nu <- c(3,1,5)/9
D <- matrix(
c(
c(0, 1, 3),
c(1, 0, 4),
c(2, 4, 0)
),
byrow = TRUE, nrow = 3)
x <- kantorovich_CVX(mu, nu, dist=D)
expect_equal(x, 13/63)
x1 <- kantorovich_CVX(mu, nu, dist=D, solution=TRUE, solver = "GLPK")
x2 <- kantorovich(mu, nu, dist=D, details=TRUE)
expect_true(all.equal(attr(x1, "solution"), attr(x2, "joinings")[[1]], tolerance=1e-15, check.attributes=FALSE))
})
|
/tests/testthat/test-CVX.R
|
no_license
|
stla/kantorovich
|
R
| false | false | 1,632 |
r
|
context("CVX")
test_that("kantorovich_CVX default distance", {
mu <- c(1/7,2/7,4/7)
nu <- c(1/4,1/4,1/2)
x <- kantorovich_CVX(mu, nu)
expect_equal(x, 0.107142857142857)
})
test_that("kantorovich_CVX - specified distance", {
mu <- c(1/4, 3/4, 0, 0)
nu <- c(0, 1/2, 1/2, 0)
dist <- structure(c(0, 1/3, 2/3, 1, 1/3, 0, 1/3, 2/3,
2/3, 1/3, 0, 1/3, 1, 2/3, 1/3, 0), .Dim = c(4L, 4L))
# should find 1/4 and two solutions
x <- kantorovich_CVX(mu, nu, dist=dist)
expect_equal(x, 1/4)
x <- kantorovich_CVX(mu, nu, dist=dist, solution = TRUE, solver = "GLPK")
sols <- list(structure(c(0, 0, 0, 0, 0.25, 0.25, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0),
.Dim = c(4L, 4L),
.Dimnames = list(c("1", "2", "3", "4"), c("1", "2", "3", "4"))),
structure(c(0, 0, 0, 0, 0, 0.5, 0, 0, 0.25, 0.25, 0, 0, 0, 0, 0, 0), .Dim = c(4L, 4L),
.Dimnames = list(c("1", "2", "3", "4"), c("1", "2", "3", "4"))))
expect_true(all(abs(attr(x, "solution") - sols[[1]]) <= 1e-15) | all(abs(attr(x, "solution") - sols[[2]]) <= 1e-15))
})
test_that("kantorovich_CVX - nonsymmetric dist", {
mu <- c(1,2,4)/7
nu <- c(3,1,5)/9
D <- matrix(
c(
c(0, 1, 3),
c(1, 0, 4),
c(2, 4, 0)
),
byrow = TRUE, nrow = 3)
x <- kantorovich_CVX(mu, nu, dist=D)
expect_equal(x, 13/63)
x1 <- kantorovich_CVX(mu, nu, dist=D, solution=TRUE, solver = "GLPK")
x2 <- kantorovich(mu, nu, dist=D, details=TRUE)
expect_true(all.equal(attr(x1, "solution"), attr(x2, "joinings")[[1]], tolerance=1e-15, check.attributes=FALSE))
})
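# Additional sanity check added as a sketch: assuming the default cost is the
# 0/1 discrete metric (as the hard-coded value in the first test suggests), the
# Kantorovich distance should equal the total variation distance.
test_that("kantorovich_CVX default distance equals total variation", {
  mu <- c(1/7, 2/7, 4/7)
  nu <- c(1/4, 1/4, 1/2)
  expect_equal(kantorovich_CVX(mu, nu), 0.5 * sum(abs(mu - nu)))
})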
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repo_public.R
\name{repo_find}
\alias{repo_find}
\title{Match items by matching any field}
\usage{
repo_find(what, all = F, show = "ds")
}
\arguments{
\item{what}{Character to be matched against any field (see
Details).}
\item{all}{Show also items tagged with "hide".}
\item{show}{Select columns to show.}
}
\value{
Used for side effects.
}
\description{
Match items by matching any field
}
\details{
This function actually calls print specifying the find
parameters. The find parameter can be any character string to be
matched against any item field, including string-converted size
(like "10x3").
}
\examples{
rp_path <- file.path(tempdir(), "example_repo")
rp <- repo_open(rp_path, TRUE)
rp$put(1, "item1", "Sample item 1", c("tag1", "tag2"))
rp$put(2, "item2", "Sample item 2", c("tag1", "hide"))
rp$put(3, "item3", "Sample item 3", c("tag2", "tag3"))
rp$print()
rp$find("tEm2")
rp$find("ag2", show="t")
## wiping the temp repo
unlink(rp_path, TRUE)
}
|
/man/repo_find.Rd
|
no_license
|
franapoli/repo
|
R
| false | true | 1,040 |
rd
|
gender = c('M','F','F','M','F')
g1 = factor(gender,levels = c('F','M'))
months = c('Jan','Feb','March','April')
months
month.abb[1:12]
month.abb[1:6]
months1 = factor(months)
months1
months2 = factor(months,levels=c('Jan','Feb','March','April'),ordered = T)
months2
months2 = factor(months,levels=month.abb[1:12],ordered = T)
months2
months2 = factor(months,levels=c('April','Jan'),ordered = T)
months2
month5 = c(months,'xyz')
month5
diabetes = c('Type1','Type2','Type1','Type1')
diabetes
cat(diabetes)
gender = c(1,2,1,1,2) # 1-M,2-F
gender
fdiabetes = factor(diabetes)
class(fdiabetes)
fdiabetes
summary(fdiabetes)
summary(diabetes)
fdiabetes = c(fdiabetes,'Type3')
fdiabetes
class(fdiabetes)
#Add another Level Properly
fdiabetes = factor(fdiabetes,levels = c(levels(fdiabetes),'Type3'))
fdiabetes
levels(fdiabetes)
levels(fdiabetes) = c(levels(fdiabetes),'Type4')
fdiabetes
levels(fdiabetes)[5] = 'Type5'
fdiabetes
levels(fdiabetes)
summary(fdiabetes)
fdiabetes[4] = 'Type3'
fdiabetes[5] = 'Type6'
sum(is.na(fdiabetes))
table(fdiabetes,exclude = NULL)
table(fdiabetes,exclude = NA)
is.na(fdiabetes)
fdiabetes2 = na.omit(fdiabetes)
fdiabetes2
gender = c(1,2,1,1,2)
fgender = factor(gender)
fgender
fgender = factor(gender,levels=c(1,2),labels = c('M','F'))
fgender
#ordinal
clsposn = c(1,2,3,1)
class(clsposn)
summary(clsposn)
mode(clsposn)
fclsposn = factor(clsposn)
fclsposn
summary(fclsposn)
levels(fclsposn)
fclsposn2 = factor(clsposn,levels = c(1,2,3,4,5),ordered = T,labels = c('First','Second','Third','Fourth','Fifth'))
fclsposn2
summary(fclsposn2)
levels(fclsposn2)
fclsposn3 = factor(fclsposn2,levels(fclsposn2)[c(5,4,3,2,1)])
fclsposn3
levels(fclsposn2)
levels(fclsposn3)
fclsposn2
fclsposn4 = factor(fclsposn2,levels = rev(levels(fclsposn2)))
fclsposn4
rev(levels(fclsposn2))
status = c('Poor','Improved','Excellent')
fstatus = factor(status)
fstatus
# unordered Factors can be releveled - Making Poor First
fstatus2 = relevel(fstatus,'Poor')
fstatus2
fstatus3 = factor(status,ordered = T,levels = c('Poor','Improved','Excellent'))
fstatus3
summary(fstatus3)
plot(fstatus3)
plot(status)
plot(table(status))
|
/factors1.R
|
no_license
|
Hitesh-123/Datahandling
|
R
| false | false | 2,125 |
r
|
require("XML")
require("plyr")
require("ggplot2")
require("gridExtra")
library(data.table)
xmlfile=xmlParse("/home/eduardo/trips.xml")
pointAttribs <- xpathSApply(doc=xmlfile, path="/scsimulator_matrix/trip", xmlAttrs)
# TRANSPOSE XPATH LIST TO DF
df <- data.frame(t(pointAttribs))
# CONVERT TO NUMERIC
df[c('start', 'count')] <- sapply(df[c('start', 'count')], function(x) as.numeric(as.character(x)))
horas <- c(0,3600,7200,10800,14400,18000,21600,25200,28800,32400,36000,39600,43200,46800,50400,54000,57600,61200,64800,68400,72000,75600,79200,82800,86400)
time <- aggregate(df$count, list(cut(df$start, breaks=horas)), sum)
time$horas <- c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23)
time$x <- time$x/1000
ps <- data.frame(xspline(time[,1:2], shape=-0.2, lwd=2, draw=F))
sum(time$x)
theme_set(theme_gray(base_size = 18))
png('trip_count.png')
ggplot(data=time, aes(x=horas, y=x, group=1)) +
geom_bar(stat="identity", fill="#56B4E9") +
xlab("Hora do Dia") + ylab("Número de Viagens (x 1000)")
dev.off()
|
/simulator_analyses/sbrc/trips_count.R
|
no_license
|
ezambomsantana/doutorado
|
R
| false | false | 1,040 |
r
|
#' @importFrom stringr str_c str_detect
#' @importFrom ncvreg std
#' @importFrom mvtnorm rmvnorm
#' @importFrom stats rnorm rpois rbinom rnbinom
#' @rdname sim.data
#' @export
sim.data.FA = function(n, ptot, pnonzero, nstudies, sd_raneff = 0,
family = "binomial", B = NULL, r = 2,
corr = NULL, seed, imbalance = 0, beta = NULL,
pnonzerovar = 0, sd_x = 1){
set.seed(seed = seed)
# set variables
p = ptot
p1 = pnonzero
d = nstudies
ok = NULL
family_info = family_export(family)
fam_fun = family_info$family_fun
link = family_info$link
link_int = family_info$link_int # Recoded link as integer
family = family_info$family
slopes = TRUE
if(pnonzero + pnonzerovar > ptot) stop("pnonzero + pnonzerovar > ptot")
# create fixed effects covariate matrix
if(is.null(corr)){
mat = matrix(rnorm(n*p, mean = 0, sd = sd_x), nrow = n) # 11/15 switching to var = 1, then scaling below
#mat = matrix(rbinom(n*p, p = 0.5, size = 1), nrow = n) # now switching back to normal to have more resolution to show prediction performance
}else if(is.matrix(corr)){
if((nrow(corr) != p) | (ncol(corr) != p)){
stop("corr must be either a single numeric value or a matrix of dimension ptot x ptot")
}
sigma = corr
mat = rmvnorm(n = n , mean = rep(0,p), sigma = sigma)
}else{
cor = matrix(corr, p, p)
diag(cor) = (sd_x)^2
sigma = cor # 0.5*cor
mat = rmvnorm(n = n , mean = rep(0,p), sigma = sigma)
}
# add intercept
if(sd_x == 1){
mat = std(mat)
}
X = cbind(rep(1, n), mat)
colnames(X) = c("(Intercept)",str_c("X", 1:(ncol(X)-1)))
# create raneff matrix (assuming only 1 random effect with nstudies levels for now)
drep = factor(rep(1:d, each = n/d))
if(imbalance == 1){
first = rep(1, floor(n/3)) ## change to 1/2?
second = rep(2:d, each = ceiling((2*n/3)/(d-1)))
if(length(first) + length(second) < n){
drep = factor(c(first, second, rep(d, length(drep) - length(first) - length(second))))
}else if(length(first) + length(second) > n){
drep = factor(c(first, second))
drep = drep[1:n]
}else{
drep = factor(c(first, second))
}
}
Z = model.matrix(~drep-1, contrasts.arg=list(drep=diag(nlevels(drep))))
if(slopes == T) Z = model.matrix(~drep:X-1, contrasts.arg=list(drep=diag(nlevels(drep))))
if(is.null(beta)){
if(pnonzerovar > 0){
beta <- c(0, rep(2, p1), rep(0, pnonzerovar))
X0 = X[,1:(p1+pnonzerovar+1)]
}else{
beta <- c(0, rep(2, p1))
X0 = X[,1:(p1+1)]
}
}else{
if(length(beta) < p1+pnonzerovar+1) beta = c(beta, rep(0, pnonzerovar))
X0 = X[,1:(p1+pnonzerovar+1)]
}
if(slopes == T) Z0 = model.matrix(~drep:X0-1, contrasts.arg=list(drep=diag(nlevels(drep))))
# Random effect covariance matrix
if(is.null(B)){
B = matrix(rnorm((ncol(Z0)/d)*r), nrow = ncol(Z0)/d, ncol = r)
}else{
if((ncol(B) != r) | (nrow(B) != ncol(Z0)/d)){
stop("dimensions of B not approrpiate, should have ", ncol(Z0)/d," rows and ", r, " columns")
}
}
Sigma = B %*% t(B) + diag(rep(sd_raneff^2, ncol(Z0)/d))
z1 = as.numeric(rmvnorm(d, mean = rep(0,ncol(Z0)/d),
sigma = Sigma))
eta = X0 %*% matrix(beta, ncol = 1) + Z0 %*% matrix(z1, ncol = 1)
mu = invlink(link_int, eta)
# simulate random effect and then y
if(family == "poisson"){
y = rpois(n, lambda = mu)
}else if(family == "binomial"){
y = rep(NA, n)
for(ii in 1:n){
y[ii] = rbinom(n = 1, size = 1, prob = mu[ii])
}
if(any(is.na(y))){
ok = which(is.na(y))
stop("y resulted in NA values")
}
}else if(family == "gaussian"){
y = rep(NA, n)
for(ii in 1:n){
y[ii] = rnorm(n = 1, mean = mu[ii], sd = 0.5)
}
if(any(is.na(y))){
ok = which(is.na(y))
stop("y resulted in NA values")
}
}else if(family == "negbin"){
# Default variance: theta = 2.0, phi = 1/2.0 = 0.5
# mu + mu^2 / theta = mu + mu^2 * phi
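    # Worked example of the formula above (added for illustration, not from the
    # original source): with mu = 4 and theta = 2, Var(y) = 4 + 4^2/2 = 12,
    # i.e. overdispersed relative to the Poisson variance of 4.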
y = rep(NA, n)
for(ii in 1:n){
y[ii] = rnbinom(n = 1, size = 2.0, mu = mu[ii])
}
if(any(is.na(y))){
ok = which(is.na(y))
stop("y resulted in NA values")
}
}else{
print(family)
stop("Family not specifed properly")
}
if(slopes == TRUE) Z = model.matrix(~drep:X-1, contrasts.arg=list(drep=diag(nlevels(drep))))
colnames(Z) = str_c(rep(colnames(X), each = d), ":", rep(1:d, times = length(colnames(X))))
if(!is.null(ok)){
dat = list(y = y[-ok], X = X[-ok,], Z = Z[-ok,],
pnonzero = pnonzero, z1 = matrix(z1, nrow = d), group = drep[-ok],
X0 = X0, B = B)
}else{
dat = list(y = y, X = X, Z = Z, pnonzero = pnonzero, z1 = matrix(z1, nrow = d),
group = drep, X0 = X0, B = B)
}
return(dat)
}
|
/R/sim_generation_FA.R
|
no_license
|
hheiling/glmmPen
|
R
| false | false | 4,924 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hybrid_recursive_mc.R
\name{hybrid_recursive_mc}
\alias{hybrid_recursive_mc}
\title{Hybrid-recursive Outlier Removal Procedure with Moving Criterion}
\usage{
hybrid_recursive_mc(exp_cell)
}
\arguments{
\item{exp_cell}{Numeric vector on which the outlier removal method takes
place. If experimental cell has 4 trials or less it will result in
\code{NA}.}
}
\value{
A vector with the mean of \code{exp_cell} after removing outliers,
percent of trials removed, and total number of trials in \code{exp_cell} before
outlier removal.
}
\description{
Hybrid-recursive outlier removal procedure with moving
criterion according to Van Selst & Jolicoeur (1994).
}
\references{
Grange, J.A. (2015). trimr: An implementation of common response
time trimming methods. R Package Version 1.0.0.
\url{https://cran.r-project.org/package=trimr}
Van Selst, M., & Jolicoeur, P. (1994). A solution to the effect of sample
size on outlier elimination. \emph{The quarterly journal of experimental
psychology, 47}(3), 631-650.
}
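% The following usage sketch was added for clarity and is not part of the
% original documentation; the response-time vector is simulated.
\examples{
rt <- rnorm(50, mean = 600, sd = 100) # simulated RTs for one experimental cell
hybrid_recursive_mc(rt)
}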
|
/man/hybrid_recursive_mc.Rd
|
no_license
|
cran/prepdat
|
R
| false | true | 1,125 |
rd
|
\name{lorenzattractor}
\alias{lorenzattractor}
\title{Simulate the Lorenz Attractor}
\description{
An implementation of the Lorenz dynamical system,
which describes the motion of a hypothetical particle that will
neither converge to a steady state nor diverge to infinity,
but rather stay in a bounded, 'chaotically' defined
region, i.e., an attractor.
}
\usage{
lorenzattractor(numsteps, dt, sigma, r, b, plots)
}
\arguments{
\item{numsteps}{The number of simulated points}
\item{dt}{System parameter}
\item{sigma}{System parameter}
\item{r}{System parameter}
\item{b}{System parameter}
\item{plots}{If TRUE, it plots the Lorenz obtained}
}
\value{
It returns a matrix with the 3 dimensions of the Lorenz
}
\references{
Lorenz, Edward Norton (1963). Deterministic nonperiodic flow.
Journal of the Atmospheric Sciences 20(2) 130-141.
}
\author{Moreno I. Coco (moreno.cocoi@gmail.com) }
\examples{
## initialize the parameters
numsteps = 2 ^ 11; dt = .01; sigma = 10; r = 28; b = 8/3;
plots = TRUE
res = lorenzattractor(numsteps, dt, sigma, r, b, plots)
}
\keyword{ts}
|
/man/lorenzattractor.Rd
|
no_license
|
morenococo/crqa
|
R
| false | false | 1,134 |
rd
|
# RCG BA subgroup work on Sampling PLan for Small Pelagic
# Nuno Prista (SLU, Sweden), 2019
# func "checkData" is a development of original work done by Alastair Pout (Marine Scotland, UK) during project fishpi2
rm(list=ls())
library(data.table)
library(fishPiCodes)
library(foreign)
# ========================
# reads in data
# ========================
# read file names
file_ltu <- "data\\original\\LTU_2019_29_04.csv"
# read data
dt_ltu<-fread(file_ltu, stringsAsFactors=FALSE, verbose=FALSE, fill=TRUE, sep=";", na.strings="NULL")
# ========================
# check and fix data
# ========================
source("funs\\func_checkData.r")
checkData(x = as.data.frame(dt_ltu), ignore_stop=TRUE)
# minor: rename columns
dt_ltu$vslLenCls <- dt_ltu$lenghtsegmnt; dt_ltu$lenghtsegmnt<-NULL
dt_ltu$landCat <- dt_ltu$LangCAT01; dt_ltu$LangCAT01<-NULL
# minor: issues with formatting
dt_ltu$sppCode <- as.integer(dt_ltu$sppCode)
dt_ltu$landWt<-as.numeric(dt_ltu$landWt)
# major: issues with metiers [non existing in CL]
dt_ltu$foCatEu6[dt_ltu$foCatEu6=="GNS_SPF_16-31_0_0"]<-"GNS_SPF_16-109_0_0"
# check ERROR more than one departure date, trips:
# LTU20170000159 LTU20170000579 LTU20170000739 LTU20170001189
# fix:
dt_ltu$depDate[dt_ltu$fishTripId=="LTU20170000043"]<-"2017-01-16"
dt_ltu$depDate[dt_ltu$fishTripId=="LTU20170000155"]<-"2017-05-06"
dt_ltu$depDate[dt_ltu$fishTripId=="LTU20170000159"]<-"2017-05-16"
dt_ltu$depDate[dt_ltu$fishTripId=="LTU20170000579"]<-"2017-03-23"
dt_ltu$depDate[dt_ltu$fishTripId=="LTU20170000739"]<-"2017-05-06"
dt_ltu$depDate[dt_ltu$fishTripId=="LTU20170001189"]<-"2017-12-21"
# major: "27.3.d.28" area not in the code list
sum(dt_ltu$area=="27.3.d.28") # n = 449
# fix
dt_ltu$area[dt_ltu$area=="27.3.d.28"]<-"27.3.d.28.2" # according to CL
dt_ltu<-data.table(checkData(x = as.data.frame(dt_ltu)))
# ========================
# save data
# ========================
save(dt_ltu, file="data\\prepared\\LTU.Rdata")
|
/SmallPelag/DataPrep/001_read_and_prepare_data - LTU.r
|
no_license
|
ices-eg/RCG_BA
|
R
| false | false | 2,039 |
r
|
library(dataone)
### Name: getDataPackage
### Title: Download data from the DataONE Federation as a DataPackage.
### Aliases: getDataPackage getDataPackage,D1Client-method
### ** Examples
## Not run:
##D library(dataone)
##D d1c <- D1Client("PROD", "urn:node:KNB")
##D pid <- "solson.5.1"
##D pkg <- getDataPackage(d1c, pid)
## End(Not run)
|
/data/genthat_extracted_code/dataone/examples/getDataPackage.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 349 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biosets.R
\name{plates_read}
\alias{plates_read}
\title{Read sets (plates) and calculate concentrations and variability.}
\usage{
plates_read(plates, cal_names, cal_values, exclude_cals = list(),
additional_vars = c("var_name"), additional_sep = "_", sep = ",",
dec = "AUTO", path = ".", file_name = "plate_#NUM#.csv",
model_func = bioset::fit_lnln, plot_func = bioset::plot_lnln,
interpolate_func = bioset::interpolate_lnln, write_data = TRUE,
use_written_data = FALSE)
}
\arguments{
\item{plates}{The number of plates (e.g. \code{3} attempts to read \code{plate_1.csv}, \code{plate_2.csv}, \code{plate_3.csv}), see \code{file_name}.}
\item{cal_names}{A vector of strings containing the names of the samples used
as calibrators.}
\item{cal_values}{A numeric vector with the known concentrations of those
samples (must be in the same order).}
\item{exclude_cals}{A list of calibrators to exclude, e.g.:
\code{list(plate1 = c("CAL1"))}.}
\item{additional_vars}{Vector of strings containing the names for the
additional columns.}
\item{additional_sep}{String / RegExp that separates additional vars, e.g.:
\code{"ID_blue_cold"} with \code{additional_sep = "_"} will be separated
into three columns containing \code{"ID"}, \code{"blue"} and \code{"cold"}.
If the separated data would exceed the columns in \code{additional_vars}
the last column will contain a string with separator (e.g.: \code{"blue_cold"}).
If data is missing \code{NA} is inserted.}
\item{sep}{Separator used in the csv-file, either "," or ";" (see
\code{\link[utils:read.csv]{utils::read.csv()}}).}
\item{dec}{The character used for decimal points (see \code{\link[utils:read.csv]{utils::read.csv()}}).
"AUTO" will result in "." if \code{sep} is "," and "," for ";".}
\item{path}{The path to the file (no trailing "/" or "\\" !).}
\item{file_name}{Naming scheme for the files. The default is
\code{plate_#NUM#.csv}, where \code{#NUM#} gets replaced by the number of the plates,
see \code{plates}. The filename must contain \code{#NUM#}.}
\item{model_func}{A function generating a model to fit the calibrators,
e.g. \code{\link[=fit_linear]{fit_linear()}}, \code{\link[=fit_lnln]{fit_lnln()}}.}
\item{plot_func}{Function used to display the fitted line.}
\item{interpolate_func}{A function used to interpolate the concentrations of
the other samples, based on the model, e.g.
\code{\link[=interpolate_linear]{interpolate_linear()}}, \code{\link[=interpolate_lnln]{interpolate_lnln()}}.}
\item{write_data}{Write the calculated data into \code{data_all.csv} and
\code{data_samples.csv}?}
\item{use_written_data}{Try to read \code{data_all.csv} and \code{data_read.csv}
instead of raw data. Useful if you have to re-run the script, but the raw
data does not change.}
}
\value{
A list of params.
}
\description{
Writes the processed data into two files: \code{data_samples.csv},
\code{data_all.csv} and returns a list containing:
\itemize{
\item all: all rows including duplicate and calibrators.
\item samples: samples only, no calibrators, no duplicates.
\item plateNUMBER:
\itemize{
\item plot: plot of the calibrators
\item model: model used to fit a line to the calibrators
}
}
}
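% The following usage sketch was added for clarity and is not part of the
% original documentation; the plate files, calibrator names and values are
% assumptions.
\examples{
\dontrun{
# expects plate_1.csv and plate_2.csv in the working directory
plates_read(plates = 2,
            cal_names = c("CAL1", "CAL2", "CAL3", "CAL4"),
            cal_values = c(1, 5, 25, 125))
}
}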
|
/man/plates_read.Rd
|
no_license
|
randomchars42/eenv
|
R
| false | true | 3,246 |
rd
|
################################################################################
# Setup
library(ggplot2)
library(tess3r)
library(reshape2)
library(dplyr)
library(tikzDevice)
library(tools)
source("PlotParams.R")
################################################################################
# load res and data
load(paste0(res.dir,"tess3project.obj.res"))
################################################################################
# Plot
med = seq_along(tess3project.obj)
for(i in seq_along(tess3project.obj)) {
med[i] <- median(tess3project.obj[[i]]$rmse)
}
pl <- ggplot(data.frame(rmse = med, K = seq_along(tess3project.obj))) + geom_point(aes(x = K, y = rmse)) +
geom_line(aes(x = K, y = rmse)) +
labs(y = "RMSE", x = "Number of ancestral population ($K$)") +
theme_gray() +
theme(legend.position = "none")
tikzDevice::tikz(paste0(fig.dir,"KSelection.tex"), width = slide$width * 0.8,height = slide$heigth * 0.8,standAlone = TRUE)
pl
dev.off()
texi2dvi(paste0(fig.dir,"KSelection.tex"),pdf = TRUE)
|
/2Article/Slides/Jobim2016/KSelection.R
|
permissive
|
cayek/Thesis
|
R
| false | false | 1,039 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bcor.R
\name{bcor}
\alias{bcor}
\title{bcor: Bayesian Estimation of The Correlation Matrix}
\usage{
bcor(data, iter, burn, seed, CI, S0, nu0, mu0)
}
\arguments{
\item{data}{N by P data matrix.}
\item{iter}{Number of iterations for the Gibbs sampler.}
\item{burn}{Number of samples to burn in.}
\item{seed}{Seed for the Gibbs sampler}
\item{CI}{Credible interval quantile, as a decimal (ie, for 95 percent, 0.95).}
\item{S0}{Prior variance covariance matrix.}
\item{nu0}{Prior degrees of freedom for inverse Wishart prior distribution.}
\item{mu0}{Prior means for each column.}
}
\value{
Returns median posterior estimates of the correlation matrix.
}
\description{
This function estimates the correlation matrix via Gibbs sampling from its posterior distribution.
}
\examples{
set.seed(999)
library(MASS)
your_data=mvrnorm(n=15,mu=c(0,0),Sigma=matrix(c(4,3,3,9),nrow=2,ncol=2))
Mu0=c(0,0)
Sigma0=matrix(c(1,0.6,0.6,4),nrow=2,ncol=2)
Nu0=1
bcor(data=your_data,iter=5000,burn=2500,seed=999,CI=0.95,
mu0=Mu0,S0=Sigma0,nu0=Nu0)
}
|
/man/bcor.Rd
|
permissive
|
cran/brxx
|
R
| false | true | 1,120 |
rd
|
#"Spatial Visualizations & Feature Creation"
All_in_one=read.csv("Final_All_in_one.csv", stringsAsFactors = F)
# Creating some visualisations regarding the deliveries area
library(ggmap)
library(ggplot2)
library(dplyr)
library(maps)
library(mapdata)
coords_del <- as.data.frame(cbind(lon=All_in_one$lng,lat=All_in_one$lat))
library(sp)
#change the deliveries coordinates into a SpatialPointsDataFrame
coords_1 <- cbind(Longitude = as.numeric(as.character(All_in_one$lng)), Latitude = as.numeric(as.character(All_in_one$lat)))
deliveries.pts <- SpatialPointsDataFrame(coords_del, All_in_one[,-(35:34)], proj4string = CRS("+init=epsg:4326"))
#plot just the delivery points
plot(deliveries.pts, pch = ".", col = "darkred")
library(MASS)
library(survival)
library(fitdistrplus)
library(qmap)
library(ggplot2)
library(ggmap)
#plot the hybrid Google Maps basemap
map <- ggmap(get_googlemap('London', zoom = 8, maptype = 'hybrid',scale = 2),
size = c(600, 600),extent='normal', darken = 0)
#plot the delivery points on top
map + geom_point(data = All_in_one, aes(x = All_in_one$lng, y = All_in_one$lat), color="red", size=0.2, alpha=0.5)
#finding data(long, lat,population) for worldwide cities
data("world.cities")
#filter only for UK
UK <- world.cities %>% filter(country.etc == "UK")
#transforming our variables to match them with UK cities
coords_del$new_lng=substr(coords_del$lon,1,nchar(coords_del$lon)-5 )
coords_del$new_lat=substr(coords_del$lat,1,nchar(coords_del$lat)-5 )
coords_del=coords_del[,-1]
coords_del=coords_del[,-1]
library(sqldf)
#match UK cities based on lat and long
UK_del=sqldf("select distinct * from coords_del a inner join UK b
on a.new_lat=b.lat and a.new_lng=b.long")
#removing the duplicated cities
UK_del=UK_del[-21,]
UK_del=UK_del[-3,]
UK_cities=read.csv("UK_cities.csv")
library(acs)
library(geosphere)
# Calculates the geodesic distance between two points specified by radian latitude/longitude using the
# Haversine formula (hf)
gcd.hf <- function(long1, lat1, long2, lat2) {
R <- 6371 # Earth mean radius [km]
delta.long <- (long2 - long1)
delta.lat <- (lat2 - lat1)
a <- sin(delta.lat/2)^2 + cos(lat1) * cos(lat2) * sin(delta.long/2)^2
c <- 2 * asin(min(1,sqrt(a)))
d = R * c
return(d) # Distance in km
}
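# Illustrative check of gcd.hf (added sketch, not part of the original analysis):
# as the comment above notes, the inputs must be in radians, so degree
# coordinates need converting first. London (-0.1278, 51.5074) to Paris
# (2.3522, 48.8566) should come out at roughly 340 km.
deg2rad <- function(deg) deg * pi / 180
gcd.hf(deg2rad(-0.1278), deg2rad(51.5074), deg2rad(2.3522), deg2rad(48.8566))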
#Calculating the delivery distance from each city centre
for(i in 1 :nrow(All_in_one)){
All_in_one$Dist_Crowborough[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[1],UK_del$lat[1])
All_in_one$Dist_Royal_Tunbridge_Wells[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[2],UK_del$lat[2])
All_in_one$Dist_Bexhill[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[3],UK_del$lat[3])
All_in_one$Dist_Hastings[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[4],UK_del$lat[4])
All_in_one$Dist_Ashford[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[5],UK_del$lat[5])
All_in_one$Dist_Uckfield[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[6],UK_del$lat[6])
All_in_one$Dist_Halstead[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[7],UK_del$lat[7])
All_in_one$Dist_Sevenoaks[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[8],UK_del$lat[8])
All_in_one$Dist_Tonbridge[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[9],UK_del$lat[9])
All_in_one$Dist_East_Grinstead[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[10],UK_del$lat[10])
All_in_one$Dist_Gillingham[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[11],UK_del$lat[11])
All_in_one$Dist_Snodland[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[12],UK_del$lat[12])
All_in_one$Dist_Aylesford_East_Malling[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[13],UK_del$lat[13])
All_in_one$Dist_Strood[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[14],UK_del$lat[14])
All_in_one$Dist_Maidstone[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[15],UK_del$lat[15])
All_in_one$Dist_Sheerness[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[16],UK_del$lat[16])
All_in_one$Dist_Sittingbourne[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[17],UK_del$lat[17])
All_in_one$Dist_Rochester[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[18],UK_del$lat[18])
All_in_one$Dist_Dartford[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[19],UK_del$lat[19])
All_in_one$Dist_Broadstairs[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[20],UK_del$lat[20])
All_in_one$Dist_Margate[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[21],UK_del$lat[21])
All_in_one$Dist_Whitstable[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[22],UK_del$lat[22])
All_in_one$Dist_Canterbury[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[23],UK_del$lat[23])
All_in_one$Dist_Folkestone[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[24],UK_del$lat[24])
All_in_one$Dist_Ramsgate[i]=gcd.hf(All_in_one$lng[i],All_in_one$lat[i],UK_del$long[25],UK_del$lat[25])
}
#creating a variable "minimum distance", which returns the minimun distance from the closest city centre
for(i in 1 :nrow(All_in_one)){
All_in_one$Min_Dist[i]=with(All_in_one, pmin(Dist_Crowborough[i],Dist_Royal_Tunbridge_Wells[i],Dist_Bexhill[i],
Dist_Hastings[i],Dist_Ashford[i],Dist_Uckfield[i],Dist_Halstead[i],
Dist_Sevenoaks[i],Dist_Tonbridge[i],Dist_East_Grinstead[i],Dist_Gillingham[i],
Dist_Snodland[i],Dist_Aylesford_East_Malling[i],Dist_Strood[i],Dist_Maidstone[i],
Dist_Sheerness[i],Dist_Sittingbourne[i],Dist_Rochester[i],Dist_Dartford[i],Dist_Broadstairs[i],
Dist_Margate[i],Dist_Whitstable[i],Dist_Canterbury[i],Dist_Folkestone[i],Dist_Ramsgate[i]))
}
#Plot the distances from the city center
library(graphics)
library(plotly)
library(mime)
library(crosstalk)
closeness <- density(All_in_one$Min_Dist)
p <- plot_ly(x = ~closeness$x, y = ~closeness$y, type = 'scatter', mode = 'lines', fill = 'tozeroy') %>%
layout(xaxis = list(title = 'Distance from City Centre'),
yaxis = list(title = 'Closeness'))
p
closeness_to_city=(All_in_one$Min_Dist)
quantile(closeness_to_city)
for(i in 1 :nrow(All_in_one)){
if(All_in_one$Min_Dist[i]<=50){
All_in_one$Density_profile[i] ="High Closeness"
}else if(All_in_one$Min_Dist[i]<=150){
All_in_one$Density_profile[i]="Medium Closeness"
}else if(All_in_one$Min_Dist[i]<=200){
All_in_one$Density_profile[i]="Low Closeness"
}else
All_in_one$Density_profile[i]="Outskirts"
}
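# Added sketch (not part of the original script): an equivalent, more compact
# categorisation using cut() with the same breakpoints as the loop above,
# kept in a standalone vector so Density_profile and the CSV output are untouched.
density_profile_cut <- as.character(cut(All_in_one$Min_Dist,
                                        breaks = c(-Inf, 50, 150, 200, Inf),
                                        labels = c("High Closeness", "Medium Closeness",
                                                   "Low Closeness", "Outskirts")))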
library(plotly)
high <- All_in_one[which(All_in_one$Density_profile == "High Closeness"),]
density1 <- density(high$Min_Dist)
medium <- All_in_one[which(All_in_one$Density_profile =="Medium Closeness"),]
density2 <- density(medium$Min_Dist)
low <- All_in_one[which(All_in_one$Density_profile == "Low Closeness"),]
density3 <- density(low$Min_Dist)
out <- All_in_one[which(All_in_one$Density_profile == "Outskirts"),]
density4 <- density(out$Min_Dist)
p <- plot_ly(x = ~density1$x, y = ~density1$y, type = 'scatter', mode = 'lines', name = 'High Closeness', fill = 'tozeroy') %>%
add_trace(x = ~density2$x, y = ~density2$y, name = 'Medium Closeness', fill = 'tozeroy') %>%
add_trace(x = ~density3$x, y = ~density3$y, name = 'Low Closeness', fill = 'tozeroy') %>%
add_trace(x = ~density4$x, y = ~density4$y, name = 'Outskirts', fill = 'tozeroy') %>%
layout(xaxis = list(title = 'Distance from City Centre'),
yaxis = list(title = 'Closeness'))
p#print plot
write.csv(All_in_one,"Final_All_in_one.csv")
|
/Feature Engineering(part 2).R
|
no_license
|
jopras/ST_project
|
R
| false | false | 7,825 |
r
|
# R Script to get financial data from yahoo
# Libraries ---------------------------------------------------------------
require(quantmod)
# Setting WD --------------------------------------------------------------
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
# Get Stock Info And Write ------------------------------------------------
wantedStocks <- c("VTI")
for (i in 1:length(wantedStocks)){
getSymbols(wantedStocks[i], from=as.Date("08-01-01", format="%y-%m-%d"))
saveRDS(
get(wantedStocks[i]),
file = paste(wantedStocks[i], ".rds", sep="")
)
}
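# Usage sketch (added for illustration, not part of the original script):
# a saved series can be read back later with readRDS.
vti_check <- readRDS(paste(wantedStocks[1], ".rds", sep=""))
head(vti_check)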
|
/Stock_Data/Download_Stocks.R
|
no_license
|
Fchang012/NerualNet_Exploration
|
R
| false | false | 585 |
r
|
library(rtweet)
library(stringi)
library(dplyr)
library(pbapply)
friends = get_friends("noamross")
followers = get_followers("noamross")
tweeps_id = unique(c(friends$ids$value, followers$id$value))
tweeps_info = lookup_users(friends$ids$value)
nyc_regex = "(NYC|New York|Gotham|Brooklyn|Kings|Staten|Prospect|Crown Heights|Park Slope|Manhattan|Bronx|Stuy|Queens\\b|BK\\b|\\bNY\\b)"
ny_tweeps_info = tweeps_info$users %>%
filter(stri_detect_regex(name, nyc_regex, case_insensitive=TRUE) |
stri_detect_regex(screen_name, nyc_regex, case_insensitive=TRUE) |
stri_detect_regex(location, nyc_regex, case_insensitive=TRUE) |
stri_detect_regex(description, nyc_regex, case_insensitive=TRUE))
ny_tweeps_info %>% select(name, screen_name, description, location) %>% arrange(name) %>% print(n=250)
ny_tweeps_friends = pblapply(ny_tweeps_info$user_id, function(x) {res <- get_friends(x); Sys.sleep(61); res})
disease_regex = "(disease|parasite|outbreak|spillover|infect|patho(gen|ology)|\\svet(\\s|er)|vir(al|us))"
disease_tweeps_info = tweeps_info$users %>%
filter(stri_detect_regex(description, disease_regex))
disease_tweeps_info %>% select(name, screen_name, description, location) %>% arrange(location) %>% print(n=150)
|
/2016-09-03-search-twitter-followers.R
|
no_license
|
noamross/notebook
|
R
| false | false | 1,243 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/402.p-Confidence_p-Bias_BASE_All_Graph.R
\name{PlotpCOpBIAll}
\alias{PlotpCOpBIAll}
\title{Plots p-confidence and p-bias for a given n and alpha level for
6 base methods (Wald, Wald-T, Likelihood, Score, Logit-Wald, ArcSine)}
\usage{
PlotpCOpBIAll(n, alp)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
}
\description{
Plots p-confidence and p-bias for a given n and alpha level for
6 base methods (Wald, Wald-T, Likelihood, Score, Logit-Wald, ArcSine)
}
\details{
p-confidence and p-bias plots for 6 base methods (Wald, Wald-T, Likelihood, Score, Logit-Wald, ArcSine)
}
\examples{
\dontrun{
n=5; alp=0.05
PlotpCOpBIAll(n,alp)
}
}
\references{
[1] 2005 Vos PW and Hudson S.
Evaluation Criteria for Discrete Confidence Intervals: Beyond Coverage and Length.
The American Statistician: 59; 137 - 142.
}
\seealso{
Other p-confidence and p-bias of base methods: \code{\link{PlotpCOpBIAS}},
\code{\link{PlotpCOpBIBA}}, \code{\link{PlotpCOpBIEX}},
\code{\link{PlotpCOpBILR}}, \code{\link{PlotpCOpBILT}},
\code{\link{PlotpCOpBISC}}, \code{\link{PlotpCOpBITW}},
\code{\link{PlotpCOpBIWD}}, \code{\link{pCOpBIAS}},
\code{\link{pCOpBIAll}}, \code{\link{pCOpBIBA}},
\code{\link{pCOpBIEX}}, \code{\link{pCOpBILR}},
\code{\link{pCOpBILT}}, \code{\link{pCOpBISC}},
\code{\link{pCOpBITW}}, \code{\link{pCOpBIWD}}
}
|
/man/PlotpCOpBIAll.Rd
|
no_license
|
ElsevierSoftwareX/SOFTX-D-16-00020
|
R
| false | true | 1,450 |
rd
|
################## SNAP DPSA Tradeoff Analysis Framework #######################
# This wrapper enables the use of the Master simulation framework developed by Cody to test a variety of management interventions.
#Version 1.0 will be for one species under multiple management options, with potential for life history uncertainty
rm(list=ls())
#setwd("~/Dropbox/SNAP/SNAP Working Group Repository/SNAPwg")
sapply(list.files(pattern="[.]R$", path="Functions", full.names=TRUE), source)
library(animation)
library(caTools)
library(plyr)
library(ggplot2)
library(gridExtra)
library(lattice)
library(ggthemes)
source("lenwei.R")
source("VisualizeMovement.R")
source("movArray.R")
source("InitialPop.R")
source("Recruitment.R")
source("samplingFunc.R")
# Storage Settings --------------------------------------------------------
Site<- 'Kenya'
Species<- 'Lobster'
RunName<- 'Stochastic Test Run 1'
Fishery<- paste(Site,Species)
FisheryPlace<- paste(Site,Species,sep='/')
RunName<- paste(FisheryPlace,RunName,sep='/')
dir.create(RunName)
FigureFolder<- paste(RunName,'/Figures',sep='')
ResultFolder<- paste(RunName,'/Results',sep='')
dir.create(FigureFolder)
dir.create(ResultFolder)
# Load Fishery ------------------------------------------------------------
OriginalWorkingDir<- getwd()
setwd(FisheryPlace)
Life<-read.csv("LifeHistory.csv") # life history characteristics
SimCTL<-read.csv("GrandSimCtlKEN.csv",header=F) # simulation controls
Fleets<-read.csv("Fleets.csv",header=F) # fleet characteristics
season<-read.csv("season.csv",header=F) # fishing seasons by fleet
Samp <- read.csv("SamplingParams.csv") # sampling controls for management
NoTakeZoneNULL<-read.csv("notakezoneKENnull.csv",header=F) # marine protected areas (0=open access, 1=MPA, 2=TURF?)
NoTakeZoneImp<-read.csv("notakezoneKEN.csv",header=F) # marine protected areas (0=open access, 1=MPA, 2=TURF?)
habitat<-read.csv("habitatKEN.csv",header=F)
ManageStrats<- read.csv('ManagementStrategies.csv')
setwd(OriginalWorkingDir)
# Tuning ----------------------------------------------------------
# Example tuning call, kept commented out: it needs Management (defined below) and a specific
# strategy row (e.g. ManageStrats[1,]) before it can run.
# Master(Life,SimCTL,Fleets,season,Samp,ManageStrats[1,],Management,NoTakeZoneNULL,NoTakeZoneImp,habitat,Graphs=F,GraphsFish=F,PrintLifeHistory=F)
# Set Management ----------------------------------------------------------
Management<- NULL
Management$SizeLimit<- 70
Management$NTZ<- NoTakeZoneImp
shortseason<- season
shortseason[5:8,2]<- NA
Management$Season<- shortseason
Management$Quota<- 10000
Management$Effort<- 500
Management$Gear<- 1
Management$Tax<- 1.3
# Run Tradeoffs -----------------------------------------------------------
ManageSims<- list()
ManageResults<-as.data.frame(matrix(NA,nrow=dim(ManageStrats)[1],ncol=6))
colnames(ManageResults) <- c('ManagementPlan','Catch','FishingCost','FishingProfit','ManagementCost','SpawningBiomass')
for (i in 1:dim(ManageStrats)[1]) #Can replace this with mclapply later if this takes too long, easier to debug this way
{
ManageSims[[i]]<-Master(Life,SimCTL,Fleets,season,Samp,ManageStrats[i,],Management,NoTakeZoneNULL,NoTakeZoneImp,habitat,Graphs=F,GraphsFish=F,PrintLifeHistory=F)
# Test<-Master(Life,SimCTL,Fleets,season,Samp,ManageStrats[i,],Management,NoTakeZoneNULL,NoTakeZoneImp,habitat,Graphs=F,GraphsFish=F,PrintLifeHistory=F)
show(paste(round(100*(i/dim(ManageStrats)[1])),'% done with management iterations',sep=''))
}
TimeLength<- dim(ManageSims[[1]][[1]]$CatchByFisher)[2] #time run for fishing scenarios
SimLength<- SimCTL[grep('simTime',SimCTL[,2]),1] #Complete time with burn in
BurnIn<- SimCTL[grep('burn',SimCTL[,2]),1] #Burn in period
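# Hedged illustration (not part of the original analysis): the SimLength and BurnIn lookups above
# match a keyword in the second column of the control file and return the value in the first
# column. A toy control table (hypothetical values) showing the same pattern:
exampleCTL<- data.frame(Value=c(240,60),Key=c('simTime','burn'))
exampleCTL[grep('burn',exampleCTL[,2]),1] #returns 60, the burn-in length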
MSE <- ldply(ManageSims, function(mod)
{
data.frame(ldply(mod,function(mod2)
{
data.frame(rep(as.character(mod2$Fishery$ManagementPlan[1]),TimeLength),
rep(mod2$Fishery$Iteration,TimeLength),
1:TimeLength,
colSums(mod2$CatchByFisher,na.rm=T),
colSums(mod2$ProfitByFisher,na.rm=T),
colSums(mod2$CostByFisher,na.rm=T),
(mod2$CostOfManagement[BurnIn:SimLength]),
mod2$SpawningBiomass[BurnIn:SimLength],
rep(mod2$Fishery$mat50,TimeLength),
rep(sum(mod2$CostOfManagement[BurnIn:SimLength],na.rm=T),TimeLength)
)
}
))
})
colnames(MSE)<- c('ManagementPlan','Iteration','TimeStep','Catch','Profits','FishingCosts',
'ManagementCosts','SpawningBiomass','mat50','TotalManagementCosts')
MSE$ManagementCosts[is.na(MSE$ManagementCosts)]<- 0
MSE$Year<- floor(MSE$TimeStep/12)
MSE$RunName<- paste(MSE$ManagementPlan,MSE$Iteration,sep='-')
MSE_Totals<- ddply(MSE,c('ManagementPlan','Iteration'),summarize,TotalManagementCosts=sum(ManagementCosts,na.rm=T),TotalProfits=sum(Profits,na.rm=T),
NPV=sum(Profits*(1+0.05)^-TimeStep),FinalSSB=mean(SpawningBiomass[Year==max(Year)]),FinalProfits=Profits[TimeStep==max(TimeStep)])
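# Hedged note (illustration only): the NPV column above discounts each time step's profits at an
# assumed 5% rate per step, i.e. NPV = sum over t of Profits_t*(1+0.05)^-t. Toy sanity check:
exampleProfits<- c(100,100,100) #hypothetical profits for three time steps
sum(exampleProfits*(1+0.05)^-(1:3)) #~272.3, slightly below the undiscounted 300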
MSE_ByMonth<- ddply(MSE,c('ManagementPlan','TimeStep'),summarize,Costs=mean(ManagementCosts,na.rm=T),Profits=mean(Profits,na.rm=T),
NPV=mean(Profits*(1+0.05)^-TimeStep),SSB=mean(SpawningBiomass))
MSE_ByYear<- ddply(MSE,c('ManagementPlan','Iteration','Year'),summarize,ManagementCosts=sum(ManagementCosts,na.rm=T),Profits=sum(Profits,na.rm=T),
PV=sum(Profits*(1+0.05)^-TimeStep),SSB=mean(SpawningBiomass))
MSE_ByYear<- ddply(MSE_ByYear,c('ManagementPlan','Year'),summarize,ManagementCosts=mean(ManagementCosts,na.rm=T),Profits=mean(Profits,na.rm=T),
PV=mean(PV),SSB=mean(SSB))
pdf(file=paste(FigureFolder,'Final Profit and Final Biomass Tradeoff No Cost.pdf',sep='/'),width=8,height=6)
print(ggplot(subset(MSE,TimeStep==max(TimeStep)),aes(Profits,SpawningBiomass))+
geom_point(aes(color=ManagementPlan),size=10,alpha=0.6)+
scale_size_continuous(range = c(5, 15))+
xlab('Final Fishing Profits')+
ylab('Final Spawning Biomass'))
dev.off()
pdf(file=paste(FigureFolder,'Final Profit and Final Biomass Tradeoff.pdf',sep='/'),width=8,height=6)
print(ggplot(subset(MSE,TimeStep==max(TimeStep)),aes(Profits,SpawningBiomass))+
geom_point(aes(color=ManagementPlan,size=TotalManagementCosts),alpha=0.6)+
scale_size_continuous(range = c(5, 15))+
xlab('Final Fishing Profits')+
ylab('Final Spawning Biomass'))
dev.off()
pdf(file=paste(FigureFolder,'Cumulative Tradeoffs.pdf',sep='/'),width=8,height=6)
print(ggplot(MSE_Totals,aes(TotalProfits,FinalSSB))+
geom_point(aes(color=ManagementPlan),size=10,alpha=0.6)+
xlab('Cumulative Fishing Profits')+
ylab('Final Spawning Biomass'))
dev.off()
pdf(file=paste(FigureFolder,'Cumulative Tradeoffs 2.pdf',sep='/'),width=8,height=6)
print(ggplot(MSE_Totals,aes(TotalProfits,FinalSSB))+
geom_point(aes(color=ManagementPlan,size=TotalManagementCosts),alpha=0.6)+
xlab('Cumulative Fishing Profits')+
ylab('Final Spawning Biomass')+
scale_size_continuous(range = c(5, 15)))
dev.off()
pdf(file=paste(FigureFolder,'Cumulative Tradeoffs 3.pdf',sep='/'),width=8,height=6)
print(ggplot(MSE_Totals,aes(NPV,FinalSSB))+
geom_point(aes(color=ManagementPlan,size=TotalManagementCosts),alpha=0.6)+
xlab('Net Present Fishing Profits')+
ylab('Final Spawning Biomass')+
scale_size_continuous(range = c(5, 15)))
dev.off()
pdf(file=paste(FigureFolder,'TimeTrend.pdf',sep='/'),width=8,height=6)
ProfitTrend=(ggplot(subset(MSE_ByYear,Year<= (max(Year)-1)),aes(Year,Profits))+
geom_line(aes(color=ManagementPlan),size=1,alpha=0.6)+
xlab('Year')+
ylab('Profits')
+theme(legend.position="none"))
CostsTrend=(ggplot(subset(MSE_ByYear,Year<= (max(Year)-1)),aes(Year,ManagementCosts))+
geom_line(aes(color=ManagementPlan),size=1,alpha=0.6)+
xlab('Year')+
ylab('Management Costs')
+theme(legend.position="none"))
SSBTrend=(ggplot(subset(MSE_ByYear,Year<= (max(Year)-1)),aes(Year,SSB))+
geom_line(aes(color=ManagementPlan),size=1,alpha=0.6)+
xlab('Year')+
ylab('SSB') )
print(grid.arrange(ProfitTrend,CostsTrend,SSBTrend,ncol=2))
dev.off()
pdf(file=paste(FigureFolder,'Profit SSB Phase Space.pdf',sep='/'),width=8,height=6)
print(ggplot(subset(MSE_ByYear,Year<=22),aes(SSB,Profits))+
geom_line(aes(color=Year),size=1,alpha=1)+
xlab('SSB')+
ylab('Profits')+facet_wrap(~ManagementPlan))
dev.off()
save.image(file=paste(ResultFolder,'FinalResults.rdata',sep='/'))
|
/Kenya/Lobster/TradeoffAnalysis_KenyaLobster.R
|
no_license
|
SNAPwg/SNAP
|
R
| false | false | 8,686 |
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
#Required packages
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("genefilter", version = "3.8")
if (!require("clusterSim")) install.packages("clusterSim")
library(clusterSim)
if (!require("tidyverse")) install.packages("tidyverse")
library(tidyverse)
if (!require("psych")) install.packages("psych")
if (!require("ggplot2")) install.packages("ggplot2")
library(ggplot2)
if (!require("gplots")) install.packages("gplots")
library(gplots)
library(shiny)
library(shinydashboard)
if (!require("zip")) install.packages("zip")
library(zip)
shinyServer <- function(input, output) {
#--------------------------------------------------------------------------------------Home---------------------------------------------------------------------------------------------
output$contents <- renderTable({
req(input$file1)
tryCatch(
{
df <- read.csv(input$file1$datapath)
data <- data.frame(df[,-1], row.names=df[,1])
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
data[1:5,1:7]
})
#------------------------------------------------------------------------------------Normalization---------------------------------------------------------------------------------------
output$contents1 <- renderTable({
req(input$file1)
tryCatch(
{
df <- read.csv(input$file1$datapath)
data <- data.frame(df[,-1], row.names=df[,1])
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
if (input$checkbox1 == "TRUE"){
length_data <- nrow(data) #number of rows in data
for_nomr=data[3:length_data,] #collect only the region of data that contains scores
for(i in 1:length(for_nomr)) { #for every column in for_nomr dataframe (actually is for every subject for every timepoint)
for_nomr[,i] <-as.numeric(as.character(for_nomr[,i]))} #convert every score to numeric
#Normalization function
#formula: ((score-mean of column)/sqrt(sum((score-mean of column)^2)))
normalized=data.Normalization (for_nomr,type="n12",normalization="column")
#other option: a2=data.Normalization (for_nomr,type="n12",normalization="row")
#we cannot do it by row, because we get warnings
for(i in 1:length(normalized)) { #for every column in normalized dataframe (actually is for every subject for every timepoint)
normalized[,i] <-as.factor(normalized[,i])} #convert every score to factor
first_two_rows <- data[1:2,] #get the Subject and Timepoint rows from data
new_data <- rbind(first_two_rows, normalized) #make them one dataframe
write.csv(new_data, file = "Normalized.csv", row.names = FALSE)
return(new_data[1:5,1:7])
}
else {
return(data[1:5,1:7] )}
}, include.rownames=TRUE)
output$`download Norm` <- downloadHandler(
filename = function() {
paste("Normalized","csv",sep = ".")
},
content = function(file){
file.copy("Normalized.csv", file)
}
)
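# Hedged illustration of the "n12" normalization used above (kept as comments so nothing extra
# runs inside the server): data.Normalization with type="n12" centres a column and divides by the
# square root of its sum of squared deviations, x' = (x-mean(x))/sqrt(sum((x-mean(x))^2)).
# A toy check on a hypothetical column vector:
# example_col <- c(2, 4, 6, 8)
# manual_n12 <- (example_col - mean(example_col))/sqrt(sum((example_col - mean(example_col))^2))
# pkg_n12 <- data.Normalization(data.frame(x = example_col), type = "n12", normalization = "column")$x
# all.equal(manual_n12, pkg_n12) # expected TRUE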
#-------------------------------------------------------------------------------------ANOVA-----------------------------------------------------------------------------------------------
output$contents2 <- renderTable( digits = 10,{
req(input$file1)
tryCatch(
{
df <- read.csv(input$file1$datapath)
data <- data.frame(df[,-1], row.names=df[,1])
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
if (input$checkbox2 == "TRUE"){
#Norm2
length_data <- nrow(data) #number of rows in data
for_nomr=data[3:length_data,] #collect only the region of data that contains scores
for(i in 1:length(for_nomr)) { #for every column in for_nomr dataframe (actually is for every subject for every timepoint)
for_nomr[,i] <-as.numeric(as.character(for_nomr[,i]))} #convert every score to numeric
#Normalization function
#formula: ((score-mean of column)/sqrt(sum((score-mean of column)^2)))
normalized=data.Normalization (for_nomr,type="n12",normalization="column")
#other option: a2=data.Normalization (for_nomr,type="n12",normalization="row")
#we can not do it by row, beacause we get warnings
for(i in 1:length(normalized)) { #for every column in normalized dataframe (actually is for every subject for every timepoint)
normalized[,i] <-as.factor(normalized[,i])} #convert every score to factor
first_two_rows <- data[1:2,] #get the Subject and Timepoint rows from data
new_data <- rbind(first_two_rows, normalized) #make them one dataframe
#write.csv(new_data, file = "Normalized.csv", row.names = FALSE)
###Repeated measures ANOVA###
significant_meta=data.frame(matrix(nrow = nrow(new_data) , ncol = 2)) #empty dataframe to save the significant metabolites with their pvalues
for (m in 3:nrow(new_data)) { #for every metabolite for the whole datafile of normalized metabolites
#start counting from 3rd row, because in 1st row are the Subjects and in 2nd are the Timepoints
idx=m #idx is the actual number of row in new_data dataframe
Score <- idx
metabolite = data.frame(y=new_data[Score,]) #create a new dataframe for the scores of each metabolite (of each row)
metabolite[nrow(metabolite)+1,] = new_data[1,] # +Subject row
metabolite[nrow(metabolite)+1,] = new_data[2,] # +Timepoint row
metabolite_rev=t(metabolite) #reverse the dataframe, in order to have them in columns
# t() command creates table
metabolite_rev=as.data.frame(metabolite_rev) #make it dataframe
names(metabolite_rev)[1] <- "Score" #the colname of the score column was the name of the metabolite. So, in order to be easier to work with, we rename the colname as "Score"
metabolite_rev$Score <- as.numeric(as.character(metabolite_rev$Score)) #convert the Score column to numeric
#ANOVA model
model = aov(Score~Timepoint+Error(Subject/Timepoint),metabolite_rev)
#The summary(model) is a list of elements. In order to be able to pick one particular element, we need to unlist them
sum_model = unlist(summary(model))
if (sum_model["Error: Subject:Timepoint.Pr(>F)1"]<0.05) { #check if the metabolite that is under investigation has a significant p-value
significant_meta[m,1] <- idx #keep the idx number of it (the row number in the new_data dataframe)
significant_meta[m,2] <- sum_model["Error: Subject:Timepoint.Pr(>F)1"] #keep the corresponding pvalue
}
}
#significant_meta has all idxs of the significant metabolites and for no significant has NAs
new_sign <- na.omit(significant_meta) #remove the rows with NAs (the idxs of no significant metabolites)
#new_sign has the idxs only of the significant metabolites
all_significant=data.frame(matrix(nrow = nrow(new_sign) , ncol = 2)) #empty dataframe to save the actual names of significant metabolites with their pvalues
for (s in 1:nrow(new_sign)) { #for every row in new_sign dataframe (for every significant metabolite)
idx2=new_sign[s,1] #idx2 is the number of row in new_sign dataframe
sign_metabolite = data.frame(y=new_data[idx2,]) #create a new dataframe for each significant metabolite (for every row in new_data dataframe and each row is chosen by the idx2 number)
all_significant[s,1] <- row.names(sign_metabolite) #keep the name of the significant metabolite
all_significant[s,2] <- new_sign[s,2]} #keep its corresponding pvalue
colnames(all_significant)[1] <- "Sign_metabolite" #change the name of the 1st column
colnames(all_significant)[2] <- "p-value" #change the name of the 2nd column
write.csv(all_significant, file = "Significant_ANOVA.csv", row.names = FALSE) #make a .csv file with the significant metabolites and their pvalues
return(all_significant)
}
else {
text_if_not_anova <- paste("Check Repeated Measures ANOVA to view the significant metabolites")
return(text_if_not_anova)}
})
output$`download Anova` <- downloadHandler(
filename = function() {
paste("Significant_ANOVA","csv",sep = ".")
},
content = function(file){
file.copy("Significant_ANOVA.csv", file)
})
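# Hedged sketch of the p-value extraction used in the ANOVA loop above (comments only, so it does
# not execute inside the server). With Score, Timepoint and Subject in long format, the
# within-subject effect is fitted and its p-value read from the unlisted summary:
# model <- aov(Score~Timepoint+Error(Subject/Timepoint), metabolite_rev)
# unlist(summary(model))["Error: Subject:Timepoint.Pr(>F)1"] # compared against the 0.05 cutoff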
#-----------------------------------------------------------------------------------ANOVA Post Hoc---------------------------------------------------------------------------------
output$contents3 <- renderTable({
req(input$file1)
tryCatch(
{
df <- read.csv(input$file1$datapath)
data <- data.frame(df[,-1], row.names=df[,1])
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
if (input$checkbox4 == "TRUE"){
data <- data.frame(df[,-1], row.names=df[,1])
#This will create a new directory in your working directory with the name "Anova"
#At the end, everything is going to be inside the Anova directory
anova <- paste("Anova_with_post_hoc") #set the name
dir.create(anova) #create directory
setwd(anova) #set the new directory as working directory
main <- getwd()
###Normalization###
length_data <- nrow(data) #number of rows in data
for_nomr=data[3:length_data,] #collect only the region of data that contains scores
for(i in 1:length(for_nomr)) { #for every column in for_nomr dataframe (actually is for every subject for every timepoint)
for_nomr[,i] <-as.numeric(as.character(for_nomr[,i]))} #convert every score to numeric
#Normalization function
#formula: ((score-mean of column)/sqrt(sum((score-mean of column)^2)))
normalized=data.Normalization (for_nomr,type="n12",normalization="column")
#other option: a2=data.Normalization (for_nomr,type="n12",normalization="row")
#we cannot do it by row, because we get warnings
for(i in 1:length(normalized)) { #for every column in normalized dataframe (actually is for every subject for every timepoint)
normalized[,i] <-as.factor(normalized[,i])} #convert every score to factor
first_two_rows <- data[1:2,] #get the Subject and Timepoint rows from data
new_data <- rbind(first_two_rows, normalized) #make them one dataframe
#write.csv(new_data, file = "Normalized.csv", row.names = FALSE) #make a .csv file with the normalized values of raw data
###Repeated measures ANOVA###
significant_meta=data.frame(matrix(nrow = nrow(new_data) , ncol = 2)) #empty dataframe to save the significant metabolites with their pvalues
for (m in 3:nrow(new_data)) { #for every metabolite for the whole datafile of normalized metabolites
#start counting from 3rd row, because in 1st row are the Subjects and in 2nd are the Timepoints
idx=m #idx is the actual number of row in new_data dataframe
Score <- idx
metabolite = data.frame(y=new_data[Score,]) #create a new dataframe for the scores of each metabolite (of each row)
metabolite[nrow(metabolite)+1,] = new_data[1,] # +Subject row
metabolite[nrow(metabolite)+1,] = new_data[2,] # +Timepoint row
metabolite_rev=t(metabolite) #reverse the dataframe, in order to have them in columns
# t() command creates table
metabolite_rev=as.data.frame(metabolite_rev) #make it dataframe
names(metabolite_rev)[1] <- "Score" #the colname of the score column was the name of the metabolite. So, in order to be easier to work with, we rename the colname as "Score"
metabolite_rev$Score <- as.numeric(as.character(metabolite_rev$Score)) #convert the Score column to numeric
#ANOVA model
model = aov(Score~Timepoint+Error(Subject/Timepoint),metabolite_rev)
#The summary(model) is a list of elements. In order to be able to pick one particular element, we need to unlist them
sum_model = unlist(summary(model))
if (sum_model["Error: Subject:Timepoint.Pr(>F)1"]<0.01) { #check if the metabolite that is under investigation has a significant p-value
significant_meta[m,1] <- idx #keep the idx number of it (the row number in the new_data dataframe)
significant_meta[m,2] <- sum_model["Error: Subject:Timepoint.Pr(>F)1"] #keep the corresponding pvalue
}
}
#significant_meta has all idxs of the significant metabolites and for no significant has NAs
new_sign <- na.omit(significant_meta) #remove the rows with NAs (the idxs of no significant metabolites)
#new_sign has the idxs only of the significant metabolites
all_significant=data.frame(matrix(nrow = nrow(new_sign) , ncol = 2)) #empty dataframe to save the actual names of significant metabolites with their pvalues
for (s in 1:nrow(new_sign)) { #for every row in new_sign dataframe (for every significant metabolite)
idx2=new_sign[s,1] #idx2 is the number of row in new_sign dataframe
sign_metabolite = data.frame(y=new_data[idx2,]) #create a new dataframe for each significant metabolite (for every row in new_data dataframe and each row is chosen by the idx2 number)
all_significant[s,1] <- row.names(sign_metabolite) #keep the name of the significant metabolite
all_significant[s,2] <- new_sign[s,2]} #keep its corresponding pvalue
colnames(all_significant)[1] <- "Sign_metabolite" #change the name of the 1st column
colnames(all_significant)[2] <- "p-value" #change the name of the 2nd column
#write.csv(all_significant, file = "Significant.csv", row.names = FALSE) #make a .csv file with the significant metabolites and their pvalues
#run a post hoc to get the combinations
model_fph <- aov(Score~Timepoint+Subject,metabolite_rev)
post_hoc <-TukeyHSD(model_fph,"Timepoint",conf.level=0.99, data=metabolite_rev)
#post_hoc_anova <- unlist(post_hoc)
#list with all possible combinations
list <- post_hoc$Timepoint
#dataframe with all possible combinations
list_df <- as.data.frame(list)
#how many combinations we have
nrow(list_df)
#all possible combinations
all_comb <- row.names(list_df)
all_comb <- as.matrix(all_comb)
#create a dataframe with all possible combinations (with valid names)
all_comb_valid_names=data.frame(matrix(nrow = nrow(all_comb) , ncol = 1))
for (n in 1:nrow(all_comb)) {
validn <- gsub("-", "_", all_comb[n,1])
all_comb_valid_names[n,1] <- validn
names(all_comb_valid_names)[1] <- "Combinations"}
#This will create a new directory in your working directory with the name "Post_hoc_test"
#At the end, everything is going to be inside the Anova directory
post_hoc_test <- paste("Post_hoc_test") #set the name
dir.create(post_hoc_test) #create directory
setwd(post_hoc_test) #set the new directory as working directory
#create .csv file for each combination
for (i in 1:nrow(all_comb_valid_names)) {
write.csv(all_comb_valid_names$Combinations[i], file=paste0(all_comb_valid_names$Combinations[i],".csv"), quote = FALSE)
}
#Post Hoc test
for (fph in 1:nrow(new_sign)) { #for every significant metabolite (for every row in new_sign dataframe ---- remember the new_sign dataframe contains the idx for each metabolite (the number of row in data dataframe))
#we need to get the original values of each metabolite, before the normalization
idx3=new_sign[fph,1] #idx3 is the number of row in new_sign dataframe
metabolite_fph = data.frame(y=new_data[idx3,]) #create a new dataframe for each significant metabolite (for every row in data dataframe and each row is chosen by the idx3 number)
metabolite_fph[nrow(metabolite_fph)+1,] = new_data[1,] # +Subject row
metabolite_fph[nrow(metabolite_fph)+1,] = new_data[2,] # +Timepoint row
#reverse the dataframe, in order to have them in columns
metabolite_fph_rev=t(metabolite_fph)
# t() command creates table
metabolite_fph_rev=as.data.frame(metabolite_fph_rev) #make it dataframe
names(metabolite_fph_rev)[1] <- "Score" #the colname of the score column was the name of the metabolite. So, in order to be easier to work with, we rename the colname as "Score"
metabolite_fph_rev$Score <- as.numeric(as.character(metabolite_fph_rev$Score)) #convert the Score column to numeric
#necessary model for post hoc test
model_fph <- aov(Score~Timepoint+Subject,metabolite_fph_rev)
#post hoc test
post_hoc <-TukeyHSD(model_fph,"Timepoint",data=metabolite_fph_rev)
#get the column with the pvalues
df_for_pvalue <- as.data.frame(post_hoc$Timepoint)
padj <- df_for_pvalue[4]
#get the significant combinations
sign_timepoints <- data.frame(matrix(nrow = nrow(padj) , ncol = 2))
for (ph in 1:nrow(padj)) {
if (padj[ph,1] < 0.05) {
sign_timepoints[ph,1] <- row.names(padj)[ph]
sign_timepoints[ph,2] <- padj[ph,1]
}
#exclude the NAs
significant_timepoints <- na.omit(sign_timepoints)
#only the significant combinations (without the p-value)
comb <- significant_timepoints[,1]
#significant combinations
comb <- as.matrix(comb)
#create a dataframe with significant combinations (with valid names)
comb_valid_names=data.frame(matrix(nrow = nrow(comb) , ncol = 1))
}
for (n in 1:nrow(comb)) {
validn_comp <- gsub("-", "_", comb[n,1])
comb_valid_names[n,1] <- validn_comp
names(comb_valid_names)[1] <- "significant_Combinations"}
#get the list with the .csv files that we created
temp <- list.files(pattern="*.csv")
#keep the names without the ".csv"
names_of_the_csv <- data.frame(matrix(nrow = length(temp) , ncol = 1))
for (name in 1:length(temp)) {
names_of_the_csv[name,1] <- tools::file_path_sans_ext(basename(temp[name]))
}
#get only the first column of sign_timepoints
first_of_sign_timepoints <- comb_valid_names[,1]
#write into corresponding .csv the name of the metabolite
for (csv in 1:(nrow(names_of_the_csv)-1)) {
if (names_of_the_csv[csv,1]%in%comb_valid_names[,1] == TRUE) {
write.table( row.names(metabolite_fph)[1],
file=temp[csv],
append = T,
sep=',',
row.names=F,
col.names=F )
}}
}
zip("Anovaph.zip", c(temp),recurse = TRUE)
ziplist <- zip_list("Anovaph.zip")
return(ziplist[1])
}
else {
text3 <- paste("Check Repeated Measures ANOVA Post-Hoc to view the Post-Hoc")
return(text3)}
})
output$`download panova` <- downloadHandler(
filename = function() {
paste("Anovaph","zip",sep = ".")
},
content = function(file){
file.copy("Anovaph.zip", file)
},
contentType = "application/zip"
)
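# Hedged sketch of the Tukey post-hoc step above (comments only): TukeyHSD on the
# Score~Timepoint+Subject fit returns, per factor, a matrix whose rownames are the pairwise
# Timepoint comparisons and whose 4th column ("p adj") holds the adjusted p-values:
# ph <- TukeyHSD(aov(Score~Timepoint+Subject, metabolite_fph_rev), "Timepoint")
# padj <- as.data.frame(ph$Timepoint)[4] # adjusted p-values, one row per comparison
# rownames(padj) # e.g. "T2-T1"; the loop later swaps "-" for "_" to build file names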
#-----------------------------------------------------------------------------------ANOVA Plots-------------------------------------------------------------------------------------------
output$contents4 <- renderTable({
req(input$file1)
tryCatch(
{
df <- read.csv(input$file1$datapath)
data <- data.frame(df[,-1], row.names=df[,1])
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
if (input$checkbox5 == "TRUE"){
data <- data.frame(df[,-1], row.names=df[,1])
#This will create a new directory in your working directory with the name "Anova"
#At the end, everything is going to be inside the Anova directory
anova <- paste("Anova_with_post_hoc") #set the name
dir.create(anova) #create directory
setwd(anova) #set the new directory as working directory
main <- getwd()
###Normalization###
length_data <- nrow(data) #number of rows in data
for_nomr=data[3:length_data,] #collect only the region of data that contains scores
for(i in 1:length(for_nomr)) { #for every column in for_nomr dataframe (actually is for every subject for every timepoint)
for_nomr[,i] <-as.numeric(as.character(for_nomr[,i]))} #convert every score to numeric
#Normalization function
#formula: ((score-mean of column)/sqrt(sum((score-mean of column)^2)))
normalized=data.Normalization (for_nomr,type="n12",normalization="column")
#other option: a2=data.Normalization (for_nomr,type="n12",normalization="row")
#we cannot do it by row, because we get warnings
for(i in 1:length(normalized)) { #for every column in normalized dataframe (actually is for every subject for every timepoint)
normalized[,i] <-as.factor(normalized[,i])} #convert every score to factor
first_two_rows <- data[1:2,] #get the Subject and Timepoint rows from data
new_data <- rbind(first_two_rows, normalized) #make them one dataframe
#write.csv(new_data, file = "Normalized.csv", row.names = FALSE) #make a .csv file with the normalized values of raw data
###Repeated measures ANOVA###
significant_meta=data.frame(matrix(nrow = nrow(new_data) , ncol = 2)) #empty dataframe to save the significant metabolites with their pvalues
for (m in 3:nrow(new_data)) { #for every metabolite for the whole datafile of normalized metabolites
#start counting from 3rd row, because in 1st row are the Subjects and in 2nd are the Timepoints
idx=m #idx is the actual number of row in new_data dataframe
Score <- idx
metabolite = data.frame(y=new_data[Score,]) #create a new dataframe for the scores of each metabolite (of each row)
metabolite[nrow(metabolite)+1,] = new_data[1,] # +Subject row
metabolite[nrow(metabolite)+1,] = new_data[2,] # +Timepoint row
metabolite_rev=t(metabolite) #reverse the dataframe, in order to have them in columns
# t() command creates table
metabolite_rev=as.data.frame(metabolite_rev) #make it dataframe
names(metabolite_rev)[1] <- "Score" #the colname of the score column was the name of the metabolite. So, in order to be easier to work with, we rename the colname as "Score"
metabolite_rev$Score <- as.numeric(as.character(metabolite_rev$Score)) #convert the Score column to numeric
#ANOVA model
model = aov(Score~Timepoint+Error(Subject/Timepoint),metabolite_rev)
#The summary(model) is a list of elements. In order to be able to pick one particular element, we need to unlist them
sum_model = unlist(summary(model))
if (sum_model["Error: Subject:Timepoint.Pr(>F)1"]<0.01) { #check if the metabolite that is under investigation has a significant p-value
significant_meta[m,1] <- idx #keep the idx number of it (the row number in the new_data dataframe)
significant_meta[m,2] <- sum_model["Error: Subject:Timepoint.Pr(>F)1"] #keep the corresponding pvalue
}
}
#significant_meta has all idxs of the significant metabolites and for no significant has NAs
new_sign <- na.omit(significant_meta) #remove the rows with NAs (the idxs of no significant metabolites)
#new_sign has the idxs only of the significant metabolites
all_significant=data.frame(matrix(nrow = nrow(new_sign) , ncol = 2)) #empty dataframe to save the actual names of significant metabolites with their pvalues
for (s in 1:nrow(new_sign)) { #for every row in new_sign dataframe (for every significant metabolite)
idx2=new_sign[s,1] #idx2 is the number of row in new_sign dataframe
sign_metabolite = data.frame(y=new_data[idx2,]) #create a new dataframe for each significant metabolite (for every row in new_data dataframe and each row is chosen by the idx2 number)
all_significant[s,1] <- row.names(sign_metabolite) #keep the name of the significant metabolite
all_significant[s,2] <- new_sign[s,2]} #keep its corresponding pvalue
colnames(all_significant)[1] <- "Sign_metabolite" #change the name of the 1st column
colnames(all_significant)[2] <- "p-value" #change the name of the 2nd column
#This will create a new directory in your working directory with the name "Plots"
#At the end, everything is going to be inside the Anova directory
plots <- paste("Plots") #set the name
dir.create(plots) #create directory
setwd(plots) #set the new directory as working directory
###Graphs###
for (o in 1:nrow(new_sign)) { #for every significant metabolite (for every row in new_sign dataframe ---- remember the new_sign dataframe contains the idx for each metabolite (the number of row in data dataframe))
#we need to get the original values of each metabolite, before the normalization
idx3=new_sign[o,1] #idx3 is the number of row in new_sign dataframe
metabolite_original = data.frame(y=data[idx3,]) #create a new dataframe for each significant metabolite (for every row in data dataframe and each row is chosen by the idx3 number)
metabolite_original[nrow(metabolite_original)+1,] = data[1,] # +Subject row
metabolite_original[nrow(metabolite_original)+1,] = data[2,] # +Timepoint row
#reverse the dataframe, in order to have them in columns
metabolite_original_rev=t(metabolite_original)
# t() command creates table
metabolite_original_rev=as.data.frame(metabolite_original_rev) #make it dataframe
names(metabolite_original_rev)[1] <- "Score" #the colname of the score column was the name of the metabolite. So, in order to be easier to work with, we rename the colname as "Score"
metabolite_original_rev$Score <- as.numeric(as.character(metabolite_original_rev$Score)) #convert the Score column to numeric
#We need to transform the scores because the values are too big. Otherwise, the graphs do not seem to be informative.
#Log transformation of score (use log10 to transform the values)
transformations <- as.data.frame(log10(metabolite_original_rev$Score))
#Bind Subject and Timepoint rows
first_two_rows_again <- as.data.frame(t(metabolite_original[2:3,])) #pick the Subject and Timepoint rows
trans_data <- cbind(first_two_rows_again, transformations) #make them one dataframe
names(trans_data)[3] <- "LogScore" #rename the column with the log10 scores
subject_num <- length(levels(trans_data$Subject)) #number of different levels of Subject factor, here is 38
timepoints_num <- length(levels(trans_data$Timepoint)) #number of different levels of Timepoint factor, here is 6
newdir <- paste0(row.names(metabolite_original)[1]) #pick the name of the metabolite
dir.create(newdir) #and create a new directory with that name
cwd <- getwd() #keep "in mind" your MAIN working directory
setwd(newdir) # "go inside" the directory that you just created and make it your temporary working directory
mypath <- file.path(getwd() ,paste("Bp_",row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath, width=862, height=392) #format of saving graph
mytitle = paste(row.names(metabolite_original)[1],": Log10 transformed intensity score distribution across all timepoints and subjects") #main title of the graph
ytitle = paste(row.names(metabolite_original)[1]) #label of y-axis
#make the graph
boxplot(trans_data$LogScore,
horizontal = TRUE,
main= mytitle,
las=2,
xlab= "Log10 transformed intensity score", ylab= ytitle,
col = "blue")
dev.off() #necessary command to be able to open the .png file that we just created
mypath2 <- file.path(getwd() ,paste("Bp_timepoints_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath2, width=862, height=392) #format of saving graph
mytitle2 = paste(row.names(metabolite_original)[1],": Boxplot comparing Log10 transformed intensity scores of six Timepoints") #main title of the graph
#make the graph
boxplot(trans_data$LogScore~trans_data$Timepoint,
main=mytitle2,
col= rainbow(timepoints_num),
las=2,
par(mar = c(4, 8, 1, 1)+ 0.4),
xlab= "Log10 transformed intensity scores",
horizontal = TRUE)
dev.off() #necessary command to be able to open the .png file that we just created
mypath3 <- file.path(getwd() ,paste("Bp_subjects_timepoints_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath3, width=862, height=1000) #format of saving graph
mytitle3 = paste( row.names(metabolite_original)[1],": Boxplot comparing Log10 transformed intensity scores of all subjects for all timepoints") #main title of the graph
#make the graph
boxplot(trans_data$LogScore~trans_data$Subject,
main=mytitle3,
xlab= "Log10 transformed intensity scores",
col= rainbow(subject_num),
horizontal = TRUE,
las=2)
dev.off() #necessary command to be able to open the .png file that we just created
mypath4 <- file.path(getwd() ,paste("Mean_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath4, width=862, height=392) #format of saving graph
mytitle4 = paste( row.names(metabolite_original)[1],": Mean Log10 transformed intensity score across Timepoints") #main title of the graph
#make the graph
plotmeans(trans_data$LogScore~trans_data$Timepoint,
digits=2,
ccol="red",
mean.labels=T,
main=mytitle4,
xlab= "Timepoints",
ylab= "Mean Log10 transformed intensity score")
dev.off() #necessary command to be able to open the .png file that we just created
means<- round(tapply(trans_data$LogScore, trans_data$Timepoint, mean), digits=2) #mean for each timepoint
mypath5 <- file.path(getwd() ,paste("Bp_timepoints_meandotted_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath5, width=862, height=392) #format of saving graph
mytitle5 = paste(row.names(metabolite_original)[1],": Boxplot comparing Log10 transformed intensity scores of six Timepoints (blackdotted mean)") #main title of the graph
#make the graph
boxplot(trans_data$LogScore ~ trans_data$Timepoint,
main=mytitle5,
xlab="Timepoints", ylab="Log10 transformed intensity scores", col=rainbow(timepoints_num))
points(means, col="black", pch=16, cex=1.5) #add dots with means
dev.off() #necessary command to be able to open the .png file that we just created
mypath8 <- file.path(getwd() ,paste("Vp_withbp_coloured_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath8, width=862, height=900) #format of saving graph
title_violin2 = paste(row.names(metabolite_original)[1],": Violin plots (with boxplots) comparing Log10 transformed intensity scores of six Timepoints") #main title of the graph
#make the graph
print(ggplot(trans_data, aes(x=trans_data$Timepoint, y=trans_data$LogScore, fill=trans_data$Timepoint)) +
geom_violin(trim=FALSE, show.legend=FALSE)+
geom_boxplot(width=0.1, fill="white")+
labs(title=title_violin2,x="Timepoints", y = "Log10 transformed intensity scores")+
coord_flip()+
scale_fill_brewer(palette="Dark2") + theme_minimal())
dev.off() #necessary command to be able to open the .png file that we just created
setwd(cwd)
}
plots_list <- list.files()
zipr("Plots.zip", c(plots_list), recurse = TRUE)
text <- paste("We created the plots for ",nrow(all_significant), " significant metabolites")
#ziplist1 <- zip_list("Plots.zip")
return(text)
}
else {
text3 <- paste("Check Repeated Measures ANOVA Plots to view the plots")
return(text3)}
} )
output$`download plots` <- downloadHandler(
filename = function() {
paste("Plots","zip",sep = ".")
},
content = function(file){
file.copy("Plots.zip", file)
},
contentType = "application/zip"
)
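# Hedged note on the log10 transform used for the plots above (comments only): raw intensity
# scores can span several orders of magnitude, and log10 maps them onto a comparable scale,
# e.g. log10(c(1e3, 1e6, 1e9)) gives 3, 6 and 9.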
#-----------------------------------------------------------------------------------Friedman Test-------------------------------------------------------------------------------------------
output$contents5 <- renderTable( digits = 10,{
req(input$file1)
tryCatch(
{
df <- read.csv(input$file1$datapath)
data <- data.frame(df[,-1], row.names=df[,1])
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
if (input$checkbox3 == "TRUE"){
#Set your working directory (where our datafile is)
#Load data
#without_missing.csv is the original Dummy file that Rick gave us, WITHOUT QC and SUBJECTS WITH MISSING VALUES
dt <- df
#Make first column the rownames of "data"
data <- data.frame(dt[,-1], row.names=dt[,1])
#This will create a new directory in your working directory with the name "Friedman"
#At the end, everything will be inside the Friedman directory
friedman <- paste("Friedman_test") #set the name
dir.create(friedman) #create directory
setwd(friedman) #set the new directory as working directory
###Normalization###
length_data <- nrow(data) #number of rows in data
for_nomr=data[3:length_data,] #collect only the region of data that contains scores
for(i in 1:length(for_nomr)) { #for every column in for_nomr dataframe (actually is for every subject for every timepoint)
for_nomr[,i] <-as.numeric(as.character(for_nomr[,i]))} #convert every score to numeric
#Normalization function
#formula: ((score-mean of column)/sqrt(sum((score-mean of column)^2)))
normalized=data.Normalization (for_nomr,type="n12",normalization="column")
#other option: a2=data.Normalization (for_nomr,type="n12",normalization="row")
#we cannot do it by row, because we get warnings
for(i in 1:length(normalized)) { #for every column in normalized dataframe (actually is for every subject for every timepoint)
normalized[,i] <-as.factor(normalized[,i])} #convert every score to factor
first_two_rows <- data[1:2,] #get the Subject and Timepoint rows from data
new_data <- rbind(first_two_rows, normalized) #make them one dataframe
###Friedman's Test###
significant_meta=data.frame(matrix(nrow = nrow(new_data) , ncol = 2)) #empty dataframe to save the significant metabolites with their pvalues
for (m in 3:nrow(new_data)) { #for every metabolite for the whole datafile of normalized metabolites
#start counting from 3rd row, because in 1st row are the Subjects and in 2nd are the Timepoints
idx=m #idx is the actual number of row in new_data dataframe
Score <- idx
metabolite = data.frame(y=new_data[Score,]) #create a new dataframe for the scores of each metabolite (of each row)
metabolite[nrow(metabolite)+1,] = new_data[1,] # +Subject row
metabolite[nrow(metabolite)+1,] = new_data[2,] # +Timepoint row
metabolite_rev=t(metabolite) #reverse the dataframe, in order to have them in columns
# t() command creates table
metabolite_rev=as.data.frame(metabolite_rev) #make it dataframe
names(metabolite_rev)[1] <- "Score" #the colname of the score column was the name of the metabolite. So, in order to be easier to work with, we rename the colname as "Score"
metabolite_rev$Score <- as.numeric(as.character(metabolite_rev$Score)) #convert the Score column to numeric
#FRIEDMAN model
model = friedman.test(Score~Timepoint | Subject, data = metabolite_rev)
#The summary(model) is a list of elements. In order to be able to pick one particular element, we need to unlist them
sum_model = unlist(model)
if (sum_model["p.value"]<0.01) { # check if this particular metabolite significant p-value
significant_meta[m,1] <- idx # keep the idx number of it (the row number in the new_data dataframe)
significant_meta[m,2] <- sum_model["p.value"] # keep the corresponding p-value
}
}
#significant_meta has all idxs of the significant metabolites and for no significant has NAs
new_sign <- na.omit(significant_meta, cols="X2") #remove the rows with NAs (the idxs of no significant metabolites)
#new_sign has the idxs only of the significant metabolites
#new_sign
all_significant_f=data.frame(matrix(nrow = nrow(new_sign) , ncol = 2)) #empty dataframe to save the actual names of significant metabolites with their pvalues
for (s in 1:nrow(new_sign)) { #for every row in new_sign dataframe (for every significant metabolite)
idx2=new_sign[s,1] #idx2 is the number of row in new_sign dataframe
sign_metabolite = data.frame(y=new_data[idx2,]) #create a new dataframe for each significant metabolite (for every row in new_data dataframe and each row is chosen by the idx2 number)
all_significant_f[s,1] <- row.names(sign_metabolite) #keep the name of the significant metabolite
all_significant_f[s,2] <- new_sign[s,2]} #keep its corresponding pvalue
colnames(all_significant_f)[1] <- "Sign_metabolite" #change the name of the 1st column
colnames(all_significant_f)[2] <- "p-value" #change the name of the 2nd column
write.csv(all_significant_f, file = "Friedman_significant.csv", row.names = FALSE) #make a .csv file with the significant metabolites and their pvalues
return(all_significant_f)
}
else {
text_if_not_f <- paste("Check Friedman Test to view the significant metabolites")
return(text_if_not_f)}
})
output$`download friedman` <- downloadHandler(
filename = function() {
paste("Friedman_significant","csv",sep = ".")
},
content = function(file){
file.copy("Friedman_significant.csv", file)
})
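# Hedged sketch of the Friedman test used above (comments only): with Score, Timepoint and Subject
# in long format, the unreplicated block design is tested and the p-value read off the fit:
# fr <- friedman.test(Score~Timepoint | Subject, data = metabolite_rev)
# fr$p.value # compared against the 0.01 cutoff in the loop above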
#------------------------------------------------------------------------------------Friedman Test Plots -----------------------------------------------------------------------------------
output$contents6 <- renderTable({
req(input$file1)
tryCatch(
{
df <- read.csv(input$file1$datapath)
data <- data.frame(df[,-1], row.names=df[,1])
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
if (input$checkbox6 == "TRUE"){
#Set your working directory (where our datafile is)
#Load data
#without_missing.csv is the original Dummy file that Rick gave us, WITHOUT QC and SUBJECTS WITH MISSING VALUES
dt <- df
#Make first column the rownames of "data"
data <- data.frame(dt[,-1], row.names=dt[,1])
#This will create a new directory in your working directory with the name "Friedman"
#At the end, everything will be inside the Friedman directory
friedman <- paste("Friedman_test") #set the name
dir.create(friedman) #create directory
setwd(friedman) #set the new directory as working directory
###Normalization###
length_data <- nrow(data) #number of rows in data
for_nomr=data[3:length_data,] #collect only the region of data that contains scores
for(i in 1:length(for_nomr)) { #for every column in for_nomr dataframe (actually is for every subject for every timepoint)
for_nomr[,i] <-as.numeric(as.character(for_nomr[,i]))} #convert every score to numeric
#Normalization function
#formula: ((score-mean of column)/sqrt(sum((score-mean of column)^2)))
normalized=data.Normalization (for_nomr,type="n12",normalization="column")
#other option: a2=data.Normalization (for_nomr,type="n12",normalization="row")
#we cannot do it by row, because we get warnings
for(i in 1:length(normalized)) { #for every column in normalized dataframe (actually is for every subject for every timepoint)
normalized[,i] <-as.factor(normalized[,i])} #convert every score to factor
first_two_rows <- data[1:2,] #get the Subject and Timepoint rows from data
new_data <- rbind(first_two_rows, normalized) #make them one dataframe
###Friedman's Test###
significant_meta=data.frame(matrix(nrow = nrow(new_data) , ncol = 2)) #empty dataframe to save the significant metabolites with their pvalues
for (m in 3:nrow(new_data)) { #for every metabolite for the whole datafile of normalized metabolites
#start counting from 3rd row, because in 1st row are the Subjects and in 2nd are the Timepoints
idx=m #idx is the actual number of row in new_data dataframe
Score <- idx
metabolite = data.frame(y=new_data[Score,]) #create a new dataframe for the scores of each metabolite (of each row)
metabolite[nrow(metabolite)+1,] = new_data[1,] # +Subject row
metabolite[nrow(metabolite)+1,] = new_data[2,] # +Timepoint row
metabolite_rev=t(metabolite) #reverse the dataframe, in order to have them in columns
# t() command creates table
metabolite_rev=as.data.frame(metabolite_rev) #make it dataframe
names(metabolite_rev)[1] <- "Score" #the colname of the score column was the name of the metabolite. So, in order to be easier to work with, we rename the colname as "Score"
metabolite_rev$Score <- as.numeric(as.character(metabolite_rev$Score)) #convert the Score column to numeric
#FRIEDMAN model
model = friedman.test(Score~Timepoint | Subject, data = metabolite_rev)
#The summary(model) is a list of elements. In order to be able to pick one particular element, we need to unlist them
sum_model = unlist(model)
if (sum_model["p.value"]<0.01) { # check if this particular metabolite significant p-value
significant_meta[m,1] <- idx # keep the idx number of it (the row number in the new_data dataframe)
significant_meta[m,2] <- sum_model["p.value"] # keep the corresponding p-value
}
}
#significant_meta has all idxs of the significant metabolites and for no significant has NAs
new_sign <- na.omit(significant_meta, cols="X2") #remove the rows with NAs (the idxs of no significant metabolites)
#new_sign has the idxs only of the significant metabolites
#new_sign
all_significant_f=data.frame(matrix(nrow = nrow(new_sign) , ncol = 2)) #empty dataframe to save the actual names of significant metabolites with their pvalues
for (s in 1:nrow(new_sign)) { #for every row in new_sign dataframe (for every significant metabolite)
idx2=new_sign[s,1] #idx2 is the number of row in new_sign dataframe
sign_metabolite = data.frame(y=new_data[idx2,]) #create a new dataframe for each significant metabolite (for every row in new_data dataframe and each row is chosen by the idx2 number)
all_significant_f[s,1] <- row.names(sign_metabolite) #keep the name of the significant metabolite
all_significant_f[s,2] <- new_sign[s,2]} #keep its corresponding pvalue
colnames(all_significant_f)[1] <- "Sign_metabolite" #change the name of the 1st column
colnames(all_significant_f)[2] <- "p-value" #change the name of the 2nd column
write.csv(all_significant_f, file = "Friedman_significant.csv", row.names = FALSE) #make a .csv file with the significant metabolites and their pvalues
#This will create a new directory in your working directory with the name "Plots"
#At the end, everything is going to be inside the Friedman_test directory
freidplots <- paste("freidPlots") #set the name
dir.create(freidplots) #create directory
setwd(freidplots) #set the new directory as working directory
###Graphs###
for (o in 1:nrow(new_sign)) { #for every significant metabolite (for every row in new_sign dataframe ---- remember the new_sign dataframe contains the idx for each metabolite (the number of row in data dataframe))
#we need to get the original values of each metabolite, before the normalization
idx3=new_sign[o,1] #idx3 is the number of row in new_sign dataframe
metabolite_original = data.frame(y=data[idx3,]) #create a new dataframe for each significant metabolite (for every row in data dataframe; each row is chosen by the idx3 number)
metabolite_original[nrow(metabolite_original)+1,] = data[1,] # +Subject row
metabolite_original[nrow(metabolite_original)+1,] = data[2,] # +Timepoint row
#reverse the dataframe, in order to have them in columns
metabolite_original_rev=t(metabolite_original)
# t() command creates table
metabolite_original_rev=as.data.frame(metabolite_original_rev) #make it dataframe
names(metabolite_original_rev)[1] <- "Score" #the colname of the score column was the name of the metabolite. So, in order to be easier to work with, we rename the colname as "Score"
metabolite_original_rev$Score <- as.numeric(as.character(metabolite_original_rev$Score)) #convert the Score column to numeric
#We need to transform the scores because the raw intensity values are very large; otherwise the graphs are not very informative.
#Log transformation of score (use log10 to transform the values)
transformations <- as.data.frame(log10(metabolite_original_rev$Score))
#Bind Subject and Timepoint rows
first_two_rows_again <- as.data.frame(t(metabolite_original[2:3,])) #pick the Subject and Timepoint rows
trans_data <- cbind(first_two_rows_again, transformations) #make them one dataframe
names(trans_data)[3] <- "LogScore" #rename the column with the log10 scores
subject_num <- length(levels(trans_data$Subject)) #number of different levels of Subject factor, here is 38
timepoints_num <- length(levels(trans_data$Timepoint)) #number of different levels of Timepoint factor, here is 6
newdir <- paste0(row.names(metabolite_original)[1]) #pick the name of the metabolite
dir.create(newdir) #and create a new directory with that name
cwd <- getwd() #keep "in mind" your MAIN working directory
setwd(newdir) # "go inside" the directory that you just created and make it your temporary working directory
mypath <- file.path(getwd() ,paste("Bp_",row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath, width=862, height=392) #format of saving graph
mytitle = paste(row.names(metabolite_original)[1],": Log10 transformed intensity score distribution across all timepoints and subjects") #main title of the graph
ytitle = paste(row.names(metabolite_original)[1]) #label of y-axis
#make the graph
boxplot(trans_data$LogScore,
horizontal = TRUE,
main= mytitle,
las=2,
xlab= "Log10 transformed intensity score", ylab= ytitle,
col = "blue")
dev.off() #necessary command to be able to open the .png file that we just created
mypath2 <- file.path(getwd() ,paste("Bp_timepoints_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath2, width=862, height=392) #format of saving graph
mytitle2 = paste(row.names(metabolite_original)[1],": Boxplot comparing Log10 transformed intensity scores of six Timepoints") #main title of the graph
#make the graph
boxplot(trans_data$LogScore~trans_data$Timepoint,
main=mytitle2,
col= rainbow(timepoints_num),
las=2,
par(mar = c(4, 8, 1, 1)+ 0.4),
xlab= "Log10 transformed intensity scores",
horizontal = TRUE)
dev.off() #necessary command to be able to open the .png file that we just created
mypath3 <- file.path(getwd() ,paste("Bp_subjects_timepoints_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath3, width=862, height=1000) #format of saving graph
mytitle3 = paste( row.names(metabolite_original)[1],": Boxplot comparing Log10 transformed intensity scores of all subjects for all timepoints") #main title of the graph
#make the graph
boxplot(trans_data$LogScore~trans_data$Subject,
main=mytitle3,
xlab= "Log10 transformed intensity scores",
col= rainbow(subject_num),
horizontal = TRUE,
las=2)
dev.off() #necessary command to be able to open the .png file that we just created
mypath4 <- file.path(getwd() ,paste("Mean_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath4, width=862, height=392) #format of saving graph
mytitle4 = paste( row.names(metabolite_original)[1],": Mean Log10 transformed intensity score across Timepoints") #main title of the graph
#make the graph
plotmeans(trans_data$LogScore~trans_data$Timepoint,
digits=2,
ccol="red",
mean.labels=T,
main=mytitle4,
xlab= "Timepoints",
ylab= "Mean Log10 transformed intensity score")
dev.off() #necessary command to be able to open the .png file that we just created
means<- round(tapply(trans_data$LogScore, trans_data$Timepoint, mean), digits=2) #mean for each timepoint
mypath5 <- file.path(getwd() ,paste("Bp_timepoints_meandotted_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath5, width=862, height=392) #format of saving graph
mytitle5 = paste(row.names(metabolite_original)[1],": Boxplot comparing Log10 transformed intensity scores of six Timepoints (blackdotted mean)") #main title of the graph
#make the graph
boxplot(trans_data$LogScore ~ trans_data$Timepoint,
main=mytitle5,
xlab="Timepoints", ylab="Log10 transformed intensity scores", col=rainbow(timepoints_num))
points(means, col="black", pch=16, cex=1.5) #add dots with means
dev.off() #necessary command to be able to open the .png file that we just created
mypath8 <- file.path(getwd() ,paste("Vp_withbp_coloured_", row.names(metabolite_original)[1], ".png", sep = "")) #path to save the graph
png(file=mypath8, width=862, height=900) #format of saving graph
title_violin2 = paste(row.names(metabolite_original)[1],": Violin plots (with boxplots) comparing Log10 transformed intensity scores of six Timepoints") #main title of the graph
#make the graph
print(ggplot(trans_data, aes(x = Timepoint, y = LogScore, fill = Timepoint)) + #refer to the columns directly inside aes()
geom_violin(trim = FALSE, show.legend = FALSE) +
geom_boxplot(width = 0.1, fill = "white") +
labs(title = title_violin2, x = "Timepoints", y = "Log10 transformed intensity scores") +
coord_flip() +
scale_fill_brewer(palette = "Dark2") + theme_minimal())
dev.off() #necessary command to be able to open the .png file that we just created
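#A commonly used alternative for writing ggplot figures -- shown only as a commented
#sketch, not a required change -- is ggsave(), which manages the graphics device itself
#(the sizes below are approximate equivalents of 862 x 900 px at 100 dpi):
#  p <- ggplot(trans_data, aes(x = Timepoint, y = LogScore, fill = Timepoint)) +
#    geom_violin(trim = FALSE, show.legend = FALSE) + theme_minimal()
#  ggsave(mypath8, plot = p, width = 8.6, height = 9, dpi = 100)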
setwd(cwd)
}
plots_list_f <- list.files()
zipr("fPlots.zip", c(plots_list_f), recurse = TRUE)
text6 <- paste("We created the plots for ",nrow(all_significant_f), " significant metabolites")
return(text6)
}
else {
text7 <- paste("Check Friedman Test Plots to view the plots")
return(text7)}
} )
output$`download fplots` <- downloadHandler(
filename = function() {
paste("fPlots","zip",sep = ".")
},
content = function(file){
file.copy("fPlots.zip", file)
},
contentType = "application/zip"
)
}
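#Note on file handling: the handlers above write their CSV/ZIP output into whatever the
#process's current working directory happens to be and navigate with setwd(). A frequently
#used alternative -- sketched here as a comment only, with a hypothetical output location --
#is to build explicit paths so the working directory never needs to change:
#  out_dir <- file.path(tempdir(), "freidPlots")
#  dir.create(out_dir, recursive = TRUE, showWarnings = FALSE)
#  png(file.path(out_dir, "example.png")); plot(1:10); dev.off()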
|
/server.R
|
no_license
|
module6group-msc/Shiny-web-application-for-mass-spectrometry-based-metabolomics-data-analysis
|
R
| false | false | 56,505 |
r
|
|
# This script validates the EpiSurv information and merges it into the master sheet
source("read_data.R")
source("extractors.R")
source("validators.R")
source("merge_db.R")
library(dplyr)
library(lubridate)
episurv_path <- "../EpisurvData"
# read in MasseyQuestion and EntericDiseaseReport files
massey <- read_massey(episurv_path)
edr <- read_edr(episurv_path)
# don't add already-known records to the database
massey <- massey[!validate_episurv(massey$episurvnum),]
edr <- edr[!validate_episurv(edr$EpiSurvNumber),]
# TODO: Run a validation report on the data we're merging
# TODO: validate against sample table??
# read in existing master sheet
master <- read.csv(file.path(episurv_path, "master_20150115.csv"), stringsAsFactors=F, colClasses="character")
# remove the 'tfhj' row, and any rows missing both EpiSurvNumber and Hospital.Numbers
master <- master %>% filter(EpiSurvNumber != "tfhj")
master <- master %>% filter(!(EpiSurvNumber == "" & Hospital.Numbers == ""))
# remap massey column names
massey <- massey %>% transmute(EpiSurvNumber=episurvnum,
Hospital.Numbers=hospitalnu,
other.hos.nos=hospitaln1,
other.hos.nos.2=hospitaln2,
PhoneId="",
Date.notified="",
DateCall="",
Chicken=chicken,
Beef=beef,
Pork=pork,
Lamb=lamb,
DeliHam=delimeats,
Bacon="",
Venison=huntedmeat,
MeatMemo="",
UnpasturisedMilk=rawmilk)
# TODO: ideally this would be done in the database
edr <- edr %>% mutate(Age5yrbrac = extract_age_bracket(edr$AgeInYears))
# converts dates
convert_dates <- function(db, date_col) {
for (d in intersect(date_col, names(db))) {
db[,d] <- extract_date(db[,d])
}
db
}
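# Usage sketch for convert_dates(), kept as a comment because extract_date() lives in
# extractors.R (not shown here), so the accepted input formats are an assumption; the toy
# column values below are made up:
#   toy <- data.frame(OnsetDt = c("01/02/2014", "15/03/2014"),
#                     Other = c("a", "b"), stringsAsFactors = FALSE)
#   toy <- convert_dates(toy, c("OnsetDt", "ReportDate"))
#   # only the date columns actually present in `toy` (here just OnsetDt) are touched,
#   # because intersect(date_col, names(db)) skips the rest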
# convert date columns
date_col <- c("ReportDate",
"OnsetDt",
paste0("DateConsumed",1:8),
"OthRecDate",
paste0("PoolDate", 1:3),
paste0("RiverSeaDate", 1:3),
"DtArrived",
"DtLastDeparted",
"DtLastEntered",
"DtSecDeparted",
"DtSecEntered",
"DtThirdDeparted",
"DtThirdEntered",
"Date.notified",
"DateCall")
master <- convert_dates(master, date_col)
edr <- convert_dates(edr, date_col)
massey <- convert_dates(massey, date_col)
# OK, now go through and merge the data sets
# check column names
cols_common_master_edr <- intersect(names(master), names(edr))
cols_master_not_edr <- setdiff(names(master), names(edr))
cols_edr_not_master <- setdiff(names(edr), names(master))
cols_common_massey_master <- intersect(names(master), names(massey))
cols_master_not_massey <- setdiff(names(master), names(massey))
cols_massey_not_master <- setdiff(names(massey), names(master))
cat("Columns in the master set that aren't covered:\n")
print(setdiff(cols_master_not_massey, names(edr)))
cat("Columns in EDR that aren't covered in master sheet:\n")
print(setdiff(names(edr), c(cols_master_not_massey, "EpiSurvNumber")))
# check matching to EpiSurvNumber
master_ids <- master %>% select(EpiSurvNumber)
edr_ids <- edr %>% select(EpiSurvNumber)
rows <- nrow(edr_ids %>% anti_join(master_ids))
if (rows > 0) {
cat("WARNING:", rows, "rows in Enteric Disease Report", attr(edr, "db_name"), "that aren't in master sheet\n")
}
# check matching to hospital number
master_hos <- master %>% select(Hospital.Numbers)
massey_hos <- massey %>% select(Hospital.Numbers)
rows <- nrow(massey_hos %>% anti_join(master_hos))
if (rows > 0) {
cat("WARNING:", rows, "rows in Massey questions", attr(edr, "db_name"), "that aren't in master sheet\n")
}
# check matching to EpiSurvNumber
master_en <- master %>% select(EpiSurvNumber)
massey_en <- massey %>% select(EpiSurvNumber)
rows <- nrow(massey_en %>% anti_join(master_en))
if (rows > 0) {
cat("WARNING:", rows, "rows in Massey questions", attr(edr, "db_name"), "that aren't in master sheet\n")
}
# check matching to EpiSurvNumber
edr_en <- edr %>% select(EpiSurvNumber)
massey_en <- massey %>% select(EpiSurvNumber)
rows <- nrow(massey_en %>% anti_join(edr_en))
if (rows > 0) {
cat("WARNING:", rows, "rows in Massey questions", attr(edr, "db_name"), "that aren't in EDR sheet\n")
}
rows <- nrow(edr_en %>% anti_join(massey_en))
if (rows > 0) {
cat("WARNING:", rows, "rows in EDR", attr(edr, "db_name"), "that aren't in Massey Questions sheet\n")
}
# check matching EpiSurv+Hospital
master_en <- master %>% select(EpiSurvNumber, Hospital.Numbers)
massey_en <- massey %>% select(EpiSurvNumber, Hospital.Numbers)
# TODO: seems to be a bunch of hospital number disagreement :(
nomatch <- massey %>% inner_join(master, by="EpiSurvNumber") %>% filter(Hospital.Numbers.x != Hospital.Numbers.y)
nomatch <- nomatch %>% mutate(Hospital.Numbers.y = substring(Hospital.Numbers.y,1,15))
nomatch %>% select(EpiSurvNumber, Hospital.Numbers.x, Hospital.Numbers.y, other.hos.nos.x, other.hos.nos.y)
# match these against hospital numbers extracted from episurv data
hosp_num <- extract_hospital_from_comment(edr$Comments)
edr_hn <- data.frame(EpiSurvNumber = edr$EpiSurvNumber, hosp_num)
#nomatch %>% left_join(edr_hn, by="EpiSurvNumber") %>% select(EpiSurvNumber, Hospital.Numbers.x, hosp_num, Hospital.Numbers.y, other.hos.nos.x, other.hos.nos.y)
#nomatch <- joint %>% filter(Hospital.Numbers.x != Hospital.Numbers.y)
#nomatch %>% select(EpiSurvNumber, Hospital.Numbers.x, Hospital.Numbers.y)
#master %>% filter(Hospital.Numbers %in% nomatch$Hospital.Numbers.y)
# Now, do the merging
# for now we assume the episurv info is GOLD STANDARD and copy across all of that (even if it overrides what
# is already in place)
master_new <- master
master_new <- merge_db(master_new, edr, "EpiSurvNumber")
master_new <- merge_db(master_new, massey, "EpiSurvNumber", add_new=FALSE)
master_new <- merge_db(master_new, massey, "Hospital.Numbers", add_new=FALSE)
# write the new master sheet out
write.csv(master_new, file.path(episurv_path, "master_new.csv"), row.names=F, na="")
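# NOTE: merge_db() comes from merge_db.R, which is not included in this file. The function
# below is a hypothetical minimal sketch (an assumption about its behaviour, not the real
# implementation) of what the calls above rely on: rows are matched on `key`, non-missing /
# non-empty values from `new` overwrite `master`, and unmatched rows of `new` are appended
# only when add_new = TRUE.
merge_db_sketch <- function(master, new, key, add_new = TRUE) {
  common <- intersect(names(master), names(new))   # only shared columns are merged
  idx <- match(new[[key]], master[[key]])          # master row index for each new row
  matched <- which(!is.na(idx))
  for (col in setdiff(common, key)) {
    vals <- new[[col]][matched]
    overwrite <- !is.na(vals) & as.character(vals) != ""   # "" and NA carry no information
    master[[col]][idx[matched][overwrite]] <- vals[overwrite]
  }
  if (add_new && any(is.na(idx))) {
    master <- dplyr::bind_rows(master, new[is.na(idx), common, drop = FALSE])
  }
  master
}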
|
/merge_episurv.R
|
no_license
|
jmarshallnz/db_export
|
R
| false | false | 6,297 |
r
|
## reading data
household <- read.table("/home/eman/Coursera/Exploratory Data Analysis/Project # 1/household_power_consumption.txt", sep=";")
head(household)
dim(household) # 2075260 9
names(household)
household$V1
names(household) <- c("date", "time", "global_active_power", "global_reactive_power", "voltage", "global_intensity", "sub_metering_1", "sub_metering_2", "sub_metering_3")
household$date[1:10]
household$time[1:10]
## check missing values
sum(household == '?') # number of missing values = 155874
## subset the data points (2007-02-01 to 2007-02-02)
household$DateTime<-paste(household$date, household$time)
head(household$DateTime)
household$DateTime<-strptime(household$DateTime, "%d/%m/%Y %H:%M:%S")
start<-which(household$DateTime==strptime("2007-02-01", "%Y-%m-%d"))
end<-which(household$DateTime==strptime("2007-02-02 23:59:00", "%Y-%m-%d %H:%M:%S"))
subset_data <- household[start:end,]
head(subset_data)
dim(subset_data)
## -------------------------------------------------------
## plot#4
par(mfrow = c(2, 2), mar = c(4, 4, 4, 4))
plot(subset_data$DateTime, as.numeric(as.character(subset_data$global_active_power)), xlab = '', ylab = 'Global Active Power (in kilowatt)', type = 'l')
plot(subset_data$DateTime, as.numeric(as.character(subset_data$voltage)), xlab = "Datetime", ylab = 'Voltage', type = 'l')
plot(subset_data$DateTime, as.numeric(as.character(subset_data$sub_metering_1)), xlab = '', ylab = 'Energy sub metering', type = 'l', col = 'black')
## lines() draws into the current panel, so par(new=TRUE) is not needed here;
## leaving it set would make the next plot() overwrite this panel instead of moving on
lines(subset_data$DateTime, as.numeric(as.character(subset_data$sub_metering_2)), col = 'red')
lines(subset_data$DateTime, as.numeric(as.character(subset_data$sub_metering_3)), col = 'blue')
legend('topright', legend=c("sub_metering_1", "sub_metering_2", "sub_metering_3"),
col=c("black", "red", "blue"), lty = c(1,1,1))
plot(subset_data$DateTime, as.numeric(as.character(subset_data$global_reactive_power)), xlab = "", ylab = 'global_reactive_power', type = 'l')
## plotting ..
dev.copy(png, file = "/home/eman/Coursera/Exploratory Data Analysis/Project # 1/plot4.png", bg = "black")
dev.off()
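## NOTE: the dev.copy() pattern above copies the screen device into a png device that may
## have a different size, which can shift or clip the legend. A hypothetical alternative
## sketch (not part of the original submission) is to draw straight into the png device:
plot_to_png <- function(path, draw, width = 480, height = 480) {
  png(path, width = width, height = height)  # open the file device first
  on.exit(dev.off(), add = TRUE)             # make sure the device is always closed
  draw()                                     # run the plotting code against the png device
}
## Hypothetical usage: plot_to_png("plot4.png", function() plot(1:10, type = "l"))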
|
/Exploratory_Data_Analysis/Project-1/plot4.R
|
no_license
|
EAboelhamd/datasciencecoursera
|
R
| false | false | 2,211 |
r
|
# LIAM SPURR (based off of MatrixEQTL sample commands)
# HORVATH LAB
# 24MAY18
# RUN_MATRIX_EQTL.R
# install missing required packages and load packages
load_package <- function(x) {
if (!require(x, character.only = TRUE)) {
install.packages(x, dep = TRUE)
if(!require(x, character.only = TRUE)) stop(paste0("Package: ", x, " not found"))
}
}
load_package("MatrixEQTL")
# get arguments from the command line
args = commandArgs(trailingOnly=TRUE)
# Linear model to use, modelANOVA, modelLINEAR, or modelLINEAR_CROSS
useModel = modelLINEAR;
# Genotype and gene location file names
SNP_file_name = args[1]
snps_location_file_name = args[2]
# Gene expression file name
expression_file_name = args[3]
gene_location_file_name = args[4]
# Covariates file name
# Covariates can be added in by initiating the variable to args[n], incrementing each subsequent args[m] = args[m + 1]
covariates_file_name = character()
# Output file name
output_file_name_cis = args[5]
output_file_name_tra = args[6]
# Only associations significant at this level will be saved
pvOutputThreshold_cis = 1e-5;
pvOutputThreshold_tra = 1e-5;
# Error covariance matrix
# Set to numeric() for identity.
errorCovariance = numeric();
# Distance for local gene-SNP pairs
cisDist = 1e6;
## Load genotype data
snps = SlicedData$new();
snps$fileDelimiter = "\t"; # the TAB character
snps$fileOmitCharacters = "NA"; # denote missing values;
snps$fileSkipRows = 1; # one row of column labels
snps$fileSkipColumns = 1; # one column of row labels
snps$fileSliceSize = 2000; # read file in slices of 2,000 rows
snps$LoadFile(SNP_file_name);
## Load gene expression data
gene = SlicedData$new();
gene$fileDelimiter = "\t"; # the TAB character
gene$fileOmitCharacters = "NA"; # denote missing values;
gene$fileSkipRows = 1; # one row of column labels
gene$fileSkipColumns = 1; # one column of row labels
gene$fileSliceSize = 2000; # read file in slices of 2,000 rows
gene$LoadFile(expression_file_name);
## Load covariates
cvrt = SlicedData$new();
cvrt$fileDelimiter = "\t"; # the TAB character
cvrt$fileOmitCharacters = "NA"; # denote missing values;
cvrt$fileSkipRows = 1; # one row of column labels
cvrt$fileSkipColumns = 1; # one column of row labels
if(length(covariates_file_name) > 0) {
cvrt$LoadFile(covariates_file_name);
}
## Run the analysis
snpspos = read.table(snps_location_file_name, header = TRUE, stringsAsFactors = FALSE)
genepos = read.table(gene_location_file_name, header = TRUE, stringsAsFactors = FALSE)
me = Matrix_eQTL_main(
snps = snps,
gene = gene,
cvrt = cvrt,
output_file_name = output_file_name_tra,
pvOutputThreshold = pvOutputThreshold_tra,
useModel = useModel,
errorCovariance = errorCovariance,
verbose = TRUE,
output_file_name.cis = output_file_name_cis,
pvOutputThreshold.cis = pvOutputThreshold_cis,
snpspos = snpspos,
genepos = genepos,
cisDist = cisDist,
pvalue.hist = "qqplot",
min.pv.by.genesnp = FALSE,
noFDRsaveMemory = FALSE);
# Produce and save the qq plot of the p-values
tiff(filename = args[7])
plot(me)
dev.off()
cat('Analysis done in: ', me$time.in.sec, ' seconds', '\n')
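# Example invocation (the file names below are hypothetical placeholders, not taken from
# any particular dataset):
#   Rscript run_matrix_ReQTL.R \
#     genotypes.txt snp_locations.txt expression.txt gene_locations.txt \
#     cis_reqtls.txt trans_reqtls.txt qq_plot.tiff
# The seven positional arguments map, in order, to args[1]..args[7] as read above.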
|
/run_matrix_ReQTL.R
|
no_license
|
jindouzi94/ReQTL
|
R
| false | false | 3,219 |
r
|