| column | type | values |
|---|---|---|
| content | large_string | lengths 0 to 6.46M |
| path | large_string | lengths 3 to 331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5 to 125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4 to 6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0 to 6.46M |
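A minimal sketch (not part of the dump itself) of how one record of such a dataset might be inspected in R, assuming the rows are available locally as a Parquet file (the file name below is hypothetical) and the arrow package is installed:

library(arrow)                                 # read_parquet() for columnar files
records <- read_parquet("code_files.parquet")  # hypothetical file name
names(records)                                 # content, path, license_type, repo_name, ...
cat(records$content[1])                        # print the first stored source file
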
x <- seq(-2,3,by=0.1)
y <- 3*x^4-4*x^3-12*x^2
plot(x,y,type="l",col="red")
title(main="y in red, y' in green, y'' in blue")
F0 <- expression(3*x^4-4*x^3-12*x^2)
F1 <- D(F0,'x')
F2 <- D(F1,'x')
points(x,eval(F1),type="l",col="green")
points(x,eval(F2),type="l",col="blue")
# derivatives: f (the function), f' (first derivative), f'' (second derivative)
f0 <- function(x)
{
f <- 3*x^4-4*x^3-12*x^2
return(f)
}
f1 <- function(x)
{
f <- D(F0,'x')   # symbolic first derivative of F0
x <- x
return(eval(f))  # evaluated at the local x
}
f2 <- function(x)
{
f <- D(D(F0,'x'),'x')   # symbolic second derivative of F0
x <- x
return(eval(f))
}
# parameters: random-start interval [a, b] and convergence tolerance e
a <- -10
b <- 10
e <- 0.01
# Newton's method (applied to f') from a single starting point t0
nt <- function(t0)
{
t <- t0 - f1(t0)/f2(t0)
while(abs(t-t0)>e)
{
t0 <- t
t <- t0 - f1(t0)/f2(t0)
}
return(t0)
}
# Newton's method with random restarts: keep the lowest f0 value over 10 random starts
newton1 <- function()
{
ymin <- 0
xmin <- 0
for(i in 1:10)
{
x <- nt(runif(1,a,b))
y <- f0(x)
if(y < ymin)
{
ymin <- y
xmin <- x
}
}
z <- c(xmin,ymin)
sprintf("%.3f",z)
}
newton1()
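# Optional cross-check (not part of the original script): base R's optimize() should
# locate the same global minimum of f0 on [a, b], near x = 2 where f0(2) = -32.
# optimize(f0, interval = c(a, b))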

path: /optimizing/line search/newton.R | license_type: no_license | repo_name: kurobaneHITOMI/exercise-in-university | language: R | is_vendor: false | is_generated: false | length_bytes: 995 | extension: r

# Data Generation
## Libraries:
library(mvtnorm)  # generates multivariate Gaussian samples and calculates their densities
library(ggplot2)
library(reshape2)
library(magrittr) # provides the %>% pipe used at the end of this script
## Initialization
set.seed(12345)
N <- 500
c0 <- '0'; c1 <- '1'; c2 <- '2' # class labels
mu0 <- c(1.0, 4.0); p0 <- 0.30
mu1 <- c(4.5, 0.5); p1 <- 0.50
mu2 <- c(3.0, -3.0); p2 <- 1 - p0 - p1
sigma <- matrix(c(1, 0, 0, 1), nrow = 2, ncol = 2, byrow = TRUE) # shared covariance matrix
sigma0 <- sigma; sigma1 <- sigma; sigma2 <- sigma
data <- data.frame(x1 = double(), x2 = double(), label = double()) # empty data.frame
## Generate class labels
data[1:N, 'label'] <- sample(c(c0, c1, c2), N, replace = TRUE, prob = c(p0, p1, p2))
## calculate the size of each class
N0 <- sum(data[1:N, 'label'] == c0);
N1 <- sum(data[1:N, 'label'] == c1); N2 <- N - N0 - N1
## Sample from the Gaussian distribution according to the class labels and statistics.
data[data[1:N, 'label'] == c0, c('x1', 'x2')] <- rmvnorm(n = N0, mu0, sigma0)
data[data[1:N, 'label'] == c1, c('x1', 'x2')] <- rmvnorm(n = N1, mu1, sigma1)
data[data[1:N, 'label'] == c2, c('x1', 'x2')] <- rmvnorm(n = N2, mu2, sigma2)
data[data[1:N, 'label'] == c2, 'label'] <- c0
## Take a look at the data set
ggplot(data = data, aes(x = x1, y = x2, color = label, label = ifelse(label == c0, '0', '1'))) +
geom_text(size = 5, alpha = 0.5) +
ggtitle('Data set') +
theme_minimal()
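# Optional sanity check (not in the original script): after class '2' is relabelled as '0',
# the two remaining labels should each account for roughly half the rows (p1 = 0.5 vs. p0 + p2 = 0.5).
# prop.table(table(data$label))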
N <- nrow(data)
train.len <- round(N / 2)
train.index <- sample(N, train.len, replace = FALSE)
train.data <- data[train.index, c('x1', 'x2')]
test.data <- data[-train.index, c('x1', 'x2')]
train.label <- data[train.index, 'label']
test.label <- data[-train.index, 'label']
# Some conversions:
## rename just for convenience
N <- train.len
## convert data and labels to matrices
X1 <- unname(data.matrix(train.data))
T1 <- as.numeric(train.label)
X2 <- unname(data.matrix(test.data))
T2 <- as.numeric(test.label)
# NeuralNetwork() is not defined in this file; it presumably comes from elsewhere in the ControlNet/ml-algorithms repo
nn <- NeuralNetwork()$fit(X1, T1, k = 3, 20, 0.0001, validation_data = list(X2, T2), learning_rate = 0.1)
nn$history$epoch <- 1:20
nn$history %>% melt(id.vars = "epoch") %>% ggplot + geom_line(aes(x = epoch, y = value, color = variable))

path: /neural_network_demo.R | license_type: permissive | repo_name: ControlNet/ml-algorithms | language: R | is_vendor: false | is_generated: false | length_bytes: 2146 | extension: r

pollutantmean <- function(directory, pollutant, id = 1:332) {
# empty vector for values
values_all <- c()
for (index in id) {
# convert id to a zero-padded 3-character string (e.g. 1 -> "001")
index <- paste(paste(rep(0, 3-nchar(index)), collapse=""), index, sep="")
# read data from the corresponding CSV file
data_all <- read.csv(paste(directory, "/", index, ".csv", sep = ""))
# keep the non-missing values of the requested pollutant
values_all <- c(values_all, data_all[pollutant][!is.na(data_all[pollutant])])
}
# compute mean
mean(values_all)
}
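# A compact equivalent (not part of the original assignment solution), assuming the same
# directory layout of zero-padded CSV files:
# files <- sprintf("%s/%03d.csv", directory, id)
# mean(unlist(lapply(files, function(f) read.csv(f)[[pollutant]])), na.rm = TRUE)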
# pollutantmean("specdata", "sulfate", 1:10)
# pollutantmean("specdata", "nitrate", 70:72)
# pollutantmean("specdata", "nitrate", 23)

path: /Data-Science-Specialization-Coursera/Course-2-R-programming/Programming-Assignment-1/pollutantmean.R | license_type: no_license | repo_name: km1414/Courses | language: R | is_vendor: false | is_generated: false | length_bytes: 722 | extension: r

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotTest.R
\name{plotTest}
\alias{plotTest}
\title{Calculate if single, observed metrics deviate beyond expectations}
\usage{
plotTest(results.table, concat.by)
}
\arguments{
\item{results.table}{Data frame of observed metrics with expected mean, SD and CI bound
in. See example}
\item{concat.by}{Whether to concatenate results by richness, plot or both. If
richness, observed scores are compared to all randomized scores where the plot had
the corresponding richness. If plot, observed scores (e.g. those from plot 1)
are compared to all randomized plot 1 scores. If both, both are run and each is
saved as a separate data frame in a single list.}
}
\value{
A data frame of 0s, 1s, and 2s.
}
\description{
Given a table of results, where means, SDs, and CIs are bound to the observed scores at
the corresponding richness or plot, this function calculates whether each observed
score is significantly less or more than expected at that plot or richness.
}
\details{
Given a table of results, where means, SDs, and CIs are bound to the observed
scores at the corresponding richness or plot, this function returns 0, 1, or 2,
corresponding to not significant, significantly clustered, and significantly
overdispersed. Previously the metrics being passed to the function needed to be
explicitly specified, but the function now attempts to determine the names of the
metrics via the results.table input.
}
\examples{
#simulate tree with birth-death process
tree <- geiger::sim.bdtree(b=0.1, d=0, stop="taxa", n=50)
#simulate a log normal abundance distribution
sim.abundances <- round(rlnorm(5000, meanlog=2, sdlog=1)) + 1
#simulate a community of varying richness
cdm <- simulateComm(tree, richness.vector=10:25, abundances=sim.abundances)
#the code below is not run in this example because of timing constraints on CRAN
#run the metrics and nulls combo function
rawResults <- metricsNnulls(tree=tree, picante.cdm=cdm, randomizations=2, cores="seq",
nulls=c("richness","frequency"), metrics=c("richness","NAW_MPD"))
#reduce the randomizations to a more manageable format
reduced <- reduceRandomizations(rawResults)
#calculate the observed metrics from the input CDM
observed <- observedMetrics(tree, cdm, metrics=c("richness","NAW_MPD"))
#summarize the means, SD and CI of the randomizations
summarized <- lapply(reduced, summaries, concat.by="richness")
#merge the observations and the summarized randomizations to facilitate significance
#testing
merged <- lapply(summarized, merge, observed)
#calculate the standardized scores of each observed metric as compared to the richness
#null model randomization.
plotTest(merged$richness, "richness")
#do the same as above but across all null models
#temp <- lapply(1:length(merged), function(x) plotTest(merged[[x]], "richness"))
}
\references{
Miller, E. T., D. R. Farine, and C. H. Trisos. 2016. Phylogenetic community
structure metrics and null models: a review with new methods and software.
Ecography DOI: 10.1111/ecog.02070
}

path: /man/plotTest.Rd | license_type: no_license | repo_name: eliotmiller/metricTester | language: R | is_vendor: false | is_generated: true | length_bytes: 3032 | extension: rd

#'
#' # Analysis of smoking and life expectancy
#'
#' This file is in the 'base' directory for analysis
#'
library(yscs)
data(smoking)
?data
head(smoking)
dim(smoking)
xqplot(smoking)
xqplot(smoking, ptype = 'n')
fit <- lm( LE ~ smoking_female + smoking_male, smoking)
summary(fit)
smoking$both <- with(smoking, smoking_female + smoking_male)
fit2 <- lm( LE ~ both + smoking_female + smoking_male, smoking)
summary(fit2)
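# Note (not part of the original script): 'both' is an exact linear combination of
# smoking_female and smoking_male, so lm() will alias one of the three predictors and
# report its coefficient as NA in summary(fit2).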

path: /smoking/smoking_analysis.R | license_type: no_license | repo_name: gmonette/math4939proj | language: R | is_vendor: false | is_generated: false | length_bytes: 426 | extension: r

#' @include trend-modules.R
#' @describeIn trend-modules Fit a logistic trend using
#' \code{\link[mbte]{tr_logistic}}. Equation used for fitting:
#' \code{value ~ A / (1 + exp(B * (C - time))) + D}. Fit coefficients:
#' \itemize{
#' \item prefix_A
#' \item prefix_rel_A (relative A - normed to signal maximum)
#' \item prefix_B
#' \item prefix_rel_B
#' \item prefix_C
#' \item prefix_rel_C
#' \item prefix_D
#' \item prefix_rel_D
#' }
#'
#' @export
tm_logistic <- function(coef_store = cl_store()) {
structure(
list(
fit_quo = tm_logistic_gen_quo,
store = coef_store
),
# default trend module
class = c("dtm", "list")
)
}
# modify fitting quosure for the logistic trend (store coefficients)
#' @importFrom mbte tr_logistic
#' @importFrom rlang ":=" quo
tm_logistic_gen_quo <- function(id, coef_store) {
# add symbols for coefficient store
sym_A <- gen_prefixed_sym(id, "A")
sym_rel_A <- gen_prefixed_sym(id, "rel_A")
sym_B <- gen_prefixed_sym(id, "B")
sym_rel_B <- gen_prefixed_sym(id, "rel_B")
sym_C <- gen_prefixed_sym(id, "C")
sym_rel_C <- gen_prefixed_sym(id, "rel_C")
sym_D <- gen_prefixed_sym(id, "D")
sym_rel_D <- gen_prefixed_sym(id, "rel_D")
quo({
# initialize fit parameters to `NA`
A <- B <- C <- D <- NA
safe_fit <- function() {
signal_max <- max(.signal[[.value_sym]])
# ensure that the fit-coefficients are added to `coef_store` in any case
# (even if errors are encountered)
on.exit({
coef_store$add_row(
row_nr = .row_nr,
!!sym_A := A,
!!sym_rel_A := A / signal_max,
!!sym_B := B,
!!sym_rel_B := B / signal_max,
!!sym_C := C,
!!sym_rel_C := C / signal_max,
!!sym_D := D,
!!sym_rel_D := D / signal_max
)
})
fit <- !!tr_logistic()
# Override defaults with actual fit coefficients (if fit is successful)
if (inherits(fit, "nls")) {
coefs <- coefficients(fit)
A <- coefs["A"]
B <- coefs["B"]
C <- coefs["C"]
D <- coefs["D"]
}
# return fit
fit
}
safe_fit()
})
}
# Fix R CMD CHECK issues: the variables below are provided by mbte_fit() using
# masking.
globalVariables(c(".signal", ".time_sym", ".value_sym", ".row_nr"))

path: /R/logistic.R | license_type: no_license | repo_name: mkerschbaumer/mbte.vis | language: R | is_vendor: false | is_generated: false | length_bytes: 2424 | extension: r

rm(list=ls())
.libPaths("/global/home/users/cbrook/R/x86_64-pc-linux-gnu-library/3.6")
#setwd("/Users/caraebrook/Documents/R/R_repositories/Berkeley-Reopening/Dec-2020/all-runs/Re-Run-12-24/FigS1/")
#no group, no test, no trace
library(data.table)
library(plyr)
library(dplyr)
library(EpiEstim)
library(deSolve)
library(matrixStats)
library(fitdistrplus)
#load parameters including pre-run titer trajectories for each individual
load("titer.dat.20K.Rdata")
#load("titer.dat.2K.Rdata")
load("virus.par.12.15.Rdata")
load("pop.par.base.Rdata")
get.real.cases <- function(pop.dat, event.dat, titer.dat1, within.host.theta, group.limit){
#if no cases caused, then ignore
if((pop.dat$original_potential_cases_caused_UCB>0) & (pop.dat$num_infection_events>0)){
#then allocate all the cases to the events
#distribute cases at random amongst the events
event.names <- 1:as.numeric(pop.dat$num_infection_events)
actual.events <- sample(x=event.names, size=as.numeric(pop.dat$original_potential_cases_caused_UCB), replace = T)
event.data <- cbind.data.frame(actual.events, event.dat[actual.events])
names(event.data) <- c("event", "gentime")
#and add the titer at the time of the event
gen.tmp = as.list(event.data$gentime)
event.data$titer <- c(unlist(lapply(gen.tmp, grab.titer, dat.vir =titer.dat1)))
#now that you have the titer, calculate the probability of transmission given that viral load,
#based on the probability model for the URT in Ke et al. 2020
# in Ke et al. 2020, theta is fixed at 0.05 (could be modulated and/or fit to data)
#draw Km from a normal distribution centered at the midpoint between the two values explored in Ke et al. 2020 (10^3 and 10^4)
event.data$Km <- rnorm(nrow(event.data),mean=5500, sd=1000)
event.data$prob_exposure = within.host.theta*(event.data$titer/(event.data$titer + event.data$Km))
event.data$prob_exposure[event.data$prob_exposure<0] <- 0
#the probability is small: ~5% for a typical contact if theta = 0.05 as in Ke et al.;
#for the theta = 0.7 used here, it can reach roughly 50% depending on the titer at contact
#does the infection happen? make it a probabilistic outcome of the titer:
#roll the dice to see whether this exposure causes an infection
tmp.prob <- as.list(event.data$prob_exposure)
event.data$InfectionYN = c(unlist(lapply(tmp.prob, test.titer)))
#then total the events that actually happen to incorporate into the original data
pop.dat$post_titer_potential_cases_caused_UCB <- sum(event.data$InfectionYN)
#and then, if there is a group size limit, impose it here
if((group.limit>0) & (pop.dat$obs_dist_limits==TRUE)){
#gives you the number of successful transmissions per event
event.sum <- ddply(event.data, .(event),summarize, N=sum(InfectionYN))
event.sum$over_lim = event.sum$N-group.limit
event.sum$over_lim[event.sum$over_lim<0] <- 0
#truncate # of events for the IDs listed above to the group limit.
event.data.list = dlply(subset(event.data, InfectionYN==1), .(event))
new.event.list <- lapply(event.data.list, impose.group, group.limit=group.limit)
#new.event.data <- do.call("rbind", new.event.list)
new.event.data <-data.table::rbindlist(new.event.list)
pop.dat$potential_cases_caused = sum(new.event.data$InfectionYN)
#in this case, return the generation time table after the group intervention
if(pop.dat$potential_cases_caused >0){
dat.gen.tab <- cbind.data.frame(rep(unique(pop.dat$employ_ids), nrow(new.event.data)), new.event.data$gentime)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}else{
dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}
}else{
pop.dat$potential_cases_caused <- pop.dat$post_titer_potential_cases_caused_UCB
if(pop.dat$potential_cases_caused >0){
event.data.out = subset(event.data, InfectionYN==1)
dat.gen.tab <- cbind.data.frame(rep(unique(pop.dat$employ_ids), nrow(event.data.out)), event.data.out$gentime)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}else{
dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}
}
}else{
#none take place
#return the original data with 0s
pop.dat$post_titer_potential_cases_caused_UCB <- 0
pop.dat$potential_cases_caused <- 0
#and return a table of generation times with nothing
dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}
return(list(pop.dat, dat.gen.tab))
}
test.titer <- function(prob1){
Y_N =sample(c(0,1), size=1, prob = c(1-prob1, prob1))
return(Y_N)
}
impose.group <- function(event.dat1, group.limit){
tot.transmissions = nrow(event.dat1)
if(tot.transmissions>group.limit){
choose.events <- sample(x=1:tot.transmissions, size=group.limit, replace = F)
event.dat2 = event.dat1[choose.events,]
return(event.dat2)
}else{
return(event.dat1)
}
}
get.event.time <- function(dat, genTime){
event.times = genTime(as.numeric(dat$num_infection_events))
return(event.times)
}
grab.titer <- function(dat1, dat.vir){
titer.out <- dat.vir$V[dat.vir$time>dat1][1]
return(titer.out)
}
normal_fn <- function(meanpar=NULL, sdpar=NULL){
out <- purrr::partial(rnorm,
mean = meanpar,
sd = sdpar)
return(out)
}
poisson_fn <- function(lambda=NULL){
out <- purrr::partial(rpois,
lambda = lambda)
return(out)
}
lognormal_fn <- function(meanlogpar=NULL, sdlogpar=NULL){
out <- purrr::partial(rlnorm,
meanlog = meanlogpar,
sdlog = sdlogpar)
return(out)
}
add.risk.cat <- function(dat, pop_dat){
dat = data.table(dat)
daily_new <- dat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
pop_dat$add <- 0
for (i in 1:length(daily_new$day)){
pop_dat$add[pop_dat$day==daily_new$day[i]] <- daily_new$daily_isolations[i]
}
out.vect <- as.data.frame(pop_dat$add)
return( out.vect)
}
add.risk.cat.exp <- function(dat, pop_dat, input_par){
dat = data.table(dat)
daily_new <- dat[, day := ceiling(exposure_time)
][, .(daily_exposures = .N), by = day
]
pop_dat$add <- 0
for (i in 1:length(daily_new$day)){
pop_dat$add[pop_dat$day==daily_new$day[i]] <- daily_new$daily_exposures[i]
}
out.vect <- as.data.frame(pop_dat$add)
#then add deaths based on each pop cat
pop.cat = unique(dat$employ_cat)
out.vect2 <- as.data.frame(as.numeric(input_par$par1[input_par$parameter=="CFR" & input_par$population==pop.cat])*out.vect)
# dat.out = cbind.data.frame(out.vect, out.vect2)
return(list(out.vect, out.vect2))
#return(dat.out)
}
cross.infect <- function(dat, all.sus, input.par){
pop.par = subset(input.par, population == unique(dat$infector_cat))
#first, elim any populations for which there are no longer remaining susceptibles
rem.cat = unique(all.sus$employ_cat)
all.cat = unique(pop.par$par2[pop.par$parameter=="meta-pop"])
missed.cat = setdiff(all.cat, rem.cat)
pop.par$sub = 0
for (i in 1: length(missed.cat)) {
pop.par$sub[pop.par$parameter=="meta-pop" & pop.par$par2==missed.cat[i]] <- 1
}
pop.par = subset(pop.par, sub==0)
#then allocate the population of the new cases based on the proportion within and without
tot.cases = nrow(dat)
#then reallocate the probabilities across the categories that still have susceptibles
possible.cat = unique(pop.par$par2[pop.par$parameter=="meta-pop"])
old.cat = as.numeric(unique(input.par$par2[input.par$parameter=="meta-pop"]))
old.prob = as.numeric(input.par$par1[input.par$parameter=="meta-pop"])[1:length(old.cat)]
if(length(possible.cat)<length(old.cat)){
if(length(possible.cat)==1){
dat$new_cat = possible.cat
}else{
#if you've run out of probabilities, just reallocate proportionally
new.prob = rep((1/length(possible.cat)), length(possible.cat))
dat$new_cat = sample(x=possible.cat, size = tot.cases, replace = TRUE, prob = new.prob)
}
}else{
dat$new_cat = sample(x=old.cat, size = tot.cases, replace = TRUE, prob = old.prob)
}
return(dat)
}
assign.ID = function(sus.dat.sub, dat.new.sub){
#at the very end of the time series, you may run out of susceptibles in the right category, in which case, these just become lost infections
if(nrow(dat.new.sub)<=length(sus.dat.sub$employ_ids)){
dat.new.sub$new_infected = sample(sus.dat.sub$employ_ids, size=nrow(dat.new.sub), replace=FALSE)
}else{
new.count = length(sus.dat.sub$employ_ids)
new.missed = nrow(dat.new.sub) - new.count
row.tmp = seq(1, nrow(dat.new.sub),1)
row.take = sample(row.tmp, size = new.count, replace = FALSE)
dat.new.sub <- dat.new.sub[row.take,]
dat.new.sub$new_infected = sample(sus.dat.sub$employ_ids, size=nrow(dat.new.sub), replace=FALSE)
}
return(dat.new.sub)
}
assign.infections <- function(pop.mat, gen_list, timestep, input.par){
# assign new exposures (and times) based on 'actual cases caused' above
# and move those that have transmitted to isolated/recovered state
#(asymptomatics will be missed in iso time unless tested)
# timestep.prev = unique(pop.mat$timestep)
#first, pair each case with its generation times
new.mat <- dplyr::select(pop.mat, employ_ids, employ_cat, state, exposure_time, actual_cases_caused, time_isolation)
new.mat <- new.mat[!is.na(new.mat$actual_cases_caused) & new.mat$state==1,]
#only matters if it actually causes cases.
new.mat.zero = subset(new.mat, actual_cases_caused<1)
new.mat <- subset(new.mat, actual_cases_caused>0)
if(nrow(new.mat)>0){
new.mat.list <- dlply(new.mat, .(employ_ids))
#print("1")
new.mat.list <- lapply(new.mat.list, make.rows)
#the new new mat - no longer includes those which cased 0 actual cases
#should always have at least one row because of the if-statement above
#new.mat <- do.call("rbind", new.mat.list)
new.mat <- data.table::rbindlist(new.mat.list)
#now attach a generation time with each of these cases and a random sample from the susceptibles
new.mat$generation_time <- NA
index.ids = unique(new.mat$employ_ids)
for(i in 1:length(index.ids )){
tmp = nrow(new.mat[new.mat$employ_ids == index.ids[i],])
#print(index.ids[[i]])
new.mat$generation_time[new.mat$employ_ids == index.ids[i]] <- gen_list[[index.ids[i]]]$generation_time[1:tmp]
}
#now, attach a place to infect (susceptible)
#bias the sampling based on the proportion of infections within and without of your direct cohort
#first, pair the remaining susceptibles with their category
all.sus <- cbind.data.frame(pop.mat$employ_ids[pop.mat$state==0],pop.mat$employ_cat[pop.mat$state==0])
names(all.sus) = c("employ_ids", "employ_cat")
new.list = dlply(new.mat, .(employ_ids))
#cross infect by cat
#print("2")
new.list.out <- lapply(new.list, cross.infect, all.sus=all.sus, input.par=input.par)
new.mat = data.table::rbindlist(new.list.out)
#new.mat = do.call("rbind", new.list.out)
rownames(new.mat) <- c()
#then, assign names by category of new infections
id.cat = data.frame(sort(unique(new.mat$new_cat)))
all.sus = arrange(all.sus, employ_cat)
names(id.cat) <- "employ_cat"
tmp.sus = merge(x=all.sus, y=id.cat)
tmp.sus.split = dlply(tmp.sus, .(employ_cat))
new.mat.split <- dlply(new.mat, .(new_cat))
#print("3")
dat.new.split.out = mapply(FUN=assign.ID, sus.dat.sub= tmp.sus.split, dat.new.sub= new.mat.split, SIMPLIFY = FALSE)
new.mat = data.table::rbindlist(dat.new.split.out)
#new.mat = do.call("rbind", dat.new.split.out)
new.mat$new_exposure_time = new.mat$exposure_time + new.mat$generation_time
#and merge into pop.mat
#new.merge <- dplyr::select(new.mat, new_infected, employ_ids, infector_iso_time, new_exposure_time)
#names(new.merge) <- c("employ_ids", "infector", "infector_iso_time", "exposure_time")
#now put them into pop.mat
for(i in 1:nrow(new.mat)){
#identify infector and iso time
pop.mat$infector[pop.mat$employ_ids==new.mat$new_infected[i] ] <- new.mat$employ_ids[i]
pop.mat$infector_iso_time[pop.mat$employ_ids==new.mat$new_infected[i]] <- new.mat$infector_iso_time[i]
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==TRUE] <- pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==TRUE] + new.mat$infector_iso_time[i]
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==FALSE] <- Inf #if traced==FALSE, this is never tracked
#and exposure time
pop.mat$exposure_time[pop.mat$employ_ids==new.mat$new_infected[i]] <- new.mat$new_exposure_time[i]
#pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]]
#pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]]
pop.mat$case_source[pop.mat$employ_ids==new.mat$new_infected[i]] <- "UCB" #transmission within berkeley
#change state - only if exposure time is already achieved
pop.mat$state[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$exposure_time<=timestep] <- 1
#otherwise, they still stay susceptible - but you mark them
pop.mat$state[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$exposure_time>timestep] <- 3
} #else, just return pop mat
}
#now, make those that already transmitted recovered/isolated
pop.mat$state[(pop.mat$state==1 & !is.na(pop.mat$actual_cases_caused))] <- 5
#and, if any of the old "pre-exposed" have reached their exposure time, you can
#and return pop.mat
return(pop.mat)
}
assign.last.infections <- function(pop.mat, gen_list, remaining.susceptibles, timestep){
# assign new exposures (and times) based on 'actual cases caused' above
# and move those that have transmitted to isolated/recovered state
#(asymptomatics will be missed in iso time unless tested)
timestep.prev = unique(pop.mat$timestep)
if(remaining.susceptibles>0){
#first, pair each case with its generation times
new.mat <- dplyr::select(pop.mat, employ_ids, state, exposure_time, actual_cases_caused, time_isolation)#, time_of_tracing_iso)
new.mat <- new.mat[ new.mat$state==1 & !is.na(new.mat$actual_cases_caused) ,]
#get rid of those that cause no cases
new.mat <- new.mat[new.mat$actual_cases_caused>0,]
#sum(new.mat$actual_cases_caused)>remaining susceptibles
#so need to pick these at random to generate new infections instead
all.possible = c(rep(new.mat$employ_ids, times=new.mat$actual_cases_caused))
last.infector.ids = sample(all.possible, size=remaining.susceptibles, replace=FALSE)
last.infector.ids = data.frame(last.infector.ids)
names( last.infector.ids) ="employ_ids"
new.dat = ddply(last.infector.ids,.(employ_ids), summarise, actual_cases_caused=length(employ_ids))
#and new.mat becomes just these
new.dat$state <- new.dat$time_isolation <- new.dat$exposure_time <- NA
for (i in 1:nrow(new.mat)){
new.dat$state[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$state[i]
new.dat$time_isolation[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$time_isolation[i]
new.dat$exposure_time[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$exposure_time[i]
}
#then, new dat takes the place of new mat
new.dat.list <- dlply(new.dat, .(employ_ids))
new.dat.list <- lapply(new.dat.list, make.rows)
new.dat <- data.table::rbindlist(new.dat.list)
#new.dat <- do.call("rbind", new.dat.list)
#now attach a generation time with each of these cases and a random sample from the susceptibles
new.dat$generation_time <- NA
index.ids = unique(new.dat$employ_ids)
for(i in 1:length(index.ids )){
#print(index.ids[[i]])
#fill the generation times into new.dat so that new_exposure_time below is defined
new.dat$generation_time[new.dat$employ_ids == index.ids[i]] <- gen_list[[index.ids[i]]]$generation_time[1:length(new.dat$generation_time[new.dat$employ_ids == index.ids[i]])]
}
#now, attach a place to infect (susceptible) -- should be enough
all.sus <- pop.mat$employ_ids[pop.mat$state==0]
new.dat$new_infected <- sample(all.sus, size=nrow(new.dat), replace=FALSE)
new.dat$new_exposure_time = new.dat$exposure_time + new.dat$generation_time
#now put them into pop.mat
for(i in 1:nrow(new.dat)){
#identify infector and iso time
pop.mat$infector[pop.mat$employ_ids==new.dat$new_infected[i] ] <- new.dat$employ_ids[i]
pop.mat$infector_iso_time[pop.mat$employ_ids==new.dat$new_infected[i]] <- new.dat$infector_iso_time[i]
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==TRUE] <- pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==TRUE] + new.dat$infector_iso_time[i]
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==FALSE] <- Inf #if traced==FALSE, this is never tracked
#and exposure time
pop.mat$exposure_time[pop.mat$employ_ids==new.dat$new_infected[i]] <- new.dat$new_exposure_time[i]
#pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]]
#pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]]
pop.mat$case_source[pop.mat$employ_ids==new.dat$new_infected[i]] <- "UCB" #transmission within berkeley
#change state - only if exposure time is already achieved
pop.mat$state[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$exposure_time<=timestep] <- 1
#otherwise, they still stay susceptible - but you mark them
pop.mat$state[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$exposure_time>timestep] <- 3
} #else, just return pop mat
}
#other
#now, make those that already transmitted recovered/isolated
pop.mat$state[(pop.mat$state==1 & !is.na(pop.mat$actual_cases_caused))] <- 5
#and, if any of the old "pre-exposed" have reached their exposure time, you can
#then, those active infections will cause no more new cases
pop.mat$actual_cases_caused[pop.mat$state==1] <- 0
#and return pop.mat
return(pop.mat)
}
get.actual.cases = function(pop.dat, dat.gen, timestep){
sub.gen =subset(dat.gen, employ_ids==unique(pop.dat$employ_ids))
#count the number of exposure time + generation time combos that take place before the iso time
sub.gen$new_exposures = sub.gen$generation_time + pop.dat$exposure_time
n.out = length(sub.gen$new_exposures[sub.gen$new_exposures<=pop.dat$time_isolation])
return(n.out)
}
get.symptom.onset <- function(dat, dat.vir, LOD){
#get titer limit
symptom.lim <- as.numeric(unique(dat$titer_lim_for_symptoms))
#get the timing in the trajectory that first crosses this limit
dat$time_of_symptom_onset <- min(dat.vir$time[dat.vir$V>symptom.lim])
dat$time_test_sensitive_start <- min(dat.vir$time[dat.vir$V>LOD])
dat$time_test_sensitive_end <- max(dat.vir$time[dat.vir$V>LOD])
#min() returns Inf and max() returns -Inf (with warnings) if the titer never crosses the threshold
return(dat)
}
make.rows <- function(dat){
n = dat$actual_cases_caused
new.dat <- data.frame(matrix(NA, nrow=n, ncol=5) )
names(new.dat) <- c("employ_ids", "exposure_time", "actual_cases_caused", "infector_iso_time", "infector_cat")#, "time_of_test_sensitivity")#, "time_of_tracing_iso")
new.dat$employ_ids <- rep(dat$employ_ids, nrow(new.dat))
new.dat$infector_iso_time <- rep(dat$time_isolation, nrow(new.dat))
new.dat$infector_cat <- rep(dat$employ_cat, nrow(new.dat))
new.dat$exposure_time <- rep(dat$exposure_time, nrow(new.dat))
#new.dat$time_of_tracing_iso <- rep(dat$time_of_tracing_iso, nrow(new.dat))
if(nrow(new.dat)>0){
new.dat$actual_cases_caused <- 1
return(new.dat)
} #else, return nothing
}
delayfn_surv <- function(delay_mean, delay_sd){
out <- purrr::partial(rnorm,
mean = delay_mean,
sd = delay_sd)
return(out)
}#symptomatic surveillance/TAT delay
generationTime_fn <- function(serial_dist=NULL, serial_shape = NULL, serial_scale = NULL) {
if(serial_dist=="weibull"){
out <- purrr::partial(rweibull,
shape = serial_shape,
scale = serial_scale)
}else if(serial_dist=="gamma"){
out <- purrr::partial(rgamma,
shape = serial_shape,
scale = serial_scale)
}
return(out)
} #weibull or gamma serial interval as the case may be
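# For reference (not part of the original code): generationTime_fn() pre-fills the shape and
# scale via purrr::partial, so e.g. genTime <- generationTime_fn("weibull", 2, 5); genTime(10)
# would draw ten generation times from that Weibull.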
inc_fn <- function(n_inc_samp = NULL, meanInc=NULL, sdInc=NULL) {
out= purrr::partial(rlnorm,
meanlog = log(meanInc),
sdlog = log(sdInc))
#out[out < 1] <- 1
return(out)
} #lognormal incubation time draw
R0_fn <- function(meanR0=NULL, sdR0=NULL){
out <- purrr::partial(rlnorm,
meanlog = log(meanR0),
sdlog = log(sdR0))
return(out)
} #lognormal R0
R0_fn_nb <- function(muR0=NULL, sizeR0=NULL){
out <- purrr::partial(rnbinom,
mu = muR0,
size = sizeR0)
return(out)
} #nb R0
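# For reference (not part of the original code): with rnbinom(mu, size), the variance is
# mu + mu^2/size, so a smaller 'size' yields a more overdispersed (superspreading-prone)
# distribution of individual R0 values.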
initiate.pop <- function(start.ID.employ, pop.UCB, n.init.exposed, pop.ID, within.host.theta, input.par, R0fn, eventFn, titer.dat, LOD, virus.par){
#sample serial interval
genTime = generationTime_fn(serial_dist = virus.par$distribution[virus.par$parameter=="generation_time"],
serial_shape= virus.par$par1[virus.par$parameter=="generation_time"],
serial_scale= virus.par$par2[virus.par$parameter=="generation_time"])
pop.par = subset(input.par, population==pop.ID)
#make table one
pop.mat = cbind.data.frame(matrix(NA, nrow=pop.UCB, ncol =27))
names(pop.mat) <- c( "employ_ids","employ_cat", "state", "traced", "testing", "obs_dist_limits", "exposure_time", "total_potential_cases_caused", "original_potential_cases_caused_UCB", "num_infection_events", "post_titer_potential_cases_caused_UCB", "potential_cases_caused", "actual_cases_caused", "case_source", "infector", "time_test_sensitive_start", "time_test_sensitive_end", "infector_iso_time", "time_of_tracing_iso", "time_of_next_test", "time_of_testing_iso", "titer_lim_for_symptoms", "time_of_symptom_onset", "time_of_symptom_iso", "time_isolation", "reason_isolated", "timestep")
#and fill in all you can
pop.mat$testing = pop.par$par1[pop.par$parameter=="test-on"]
pop.mat$timestep = 0
pop.mat$employ_cat = pop.ID
#assign them all an employer ID
pop.mat$employ_ids = start.ID.employ:(pop.UCB+start.ID.employ-1)
#assign a "first test" depending on how many days of testing per week...
test_rotation = as.character(pop.par$par1[pop.par$parameter=="test-rotation"] )
n.test.day.per.wk = as.numeric(pop.par$par1[pop.par$parameter=="n-test-days-per-week"])
#then, if this is bigger than a weekly regime, half of them must happen one week and half the other
if ((test_rotation=="biweekly" & n.test.day.per.wk==2) | (test_rotation=="weekly" & n.test.day.per.wk==2)){
pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB)
}else if((test_rotation=="biweekly" & n.test.day.per.wk==5) | (test_rotation=="weekly" & n.test.day.per.wk==5)){
pop.mat$time_of_next_test = rep(c(3,4,5,6,7), length=pop.UCB)
}else if((test_rotation=="biweekly" & n.test.day.per.wk==7) | (test_rotation=="weekly" & n.test.day.per.wk==7)){
pop.mat$time_of_next_test = rep(c(1,2,3,4,5,6,7), length=pop.UCB)
}else if(test_rotation=="two-week" & n.test.day.per.wk==7){
pop.mat$time_of_next_test = rep(c(1,2,3,4,5,6,7,8,9,10,11,12,13,14), length=pop.UCB)
}else if(test_rotation=="two-week" & n.test.day.per.wk==5){
pop.mat$time_of_next_test = rep(c(3,4,5,6,7,10,11,12,13,14), length=pop.UCB)
}else if(test_rotation=="two-week" & n.test.day.per.wk==2){
pop.mat$time_of_next_test = rep(c(3,7, 10,14), length=pop.UCB)
}else if (test_rotation=="two-week-ThFri"){
pop.mat$time_of_next_test = rep(c(seq((7-n.test.day.per.wk+1),7,1),seq((14-n.test.day.per.wk+1),14,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-daily"){
pop.mat$time_of_next_test = rep(c(seq(1,14,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-MonTues"){
pop.mat$time_of_next_test = rep(c(seq(3,4,1),seq(10,11,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-TuesWed"){
pop.mat$time_of_next_test = rep(c(seq(4,5,1),seq(11,12,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-MonFri"){
pop.mat$time_of_next_test = rep(c(3,7,10,14), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-MonWed"){
pop.mat$time_of_next_test = rep(c(3,5,10,12), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="one-week-ThFri"){
pop.mat$time_of_next_test = rep(seq((7-n.test.day.per.wk+1),7,1), length=pop.UCB)
}else if (test_rotation=="one-week-MonTues"){
pop.mat$time_of_next_test = rep(seq(3,4,1), length=pop.UCB)
}else if (test_rotation=="one-week-MonFri"){
pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB)
}else if(test_rotation=="one-week-daily"){
pop.mat$time_of_next_test = rep(c(seq(1,7,1)), length=pop.UCB)
}else if(test_rotation=="none"){
pop.mat$time_of_next_test=Inf
}else if(test_rotation=="thrice-weekly-MonTues"){
pop.mat$time_of_next_test = rep(c(3,4,10,11,17,18), length=pop.UCB)
}else if (test_rotation=="two_day"){
pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB)
}else if (test_rotation=="four_week"){
pop.mat$time_of_next_test = rep(c(seq((7-n.test.day.per.wk+1),7,1), seq((14-n.test.day.per.wk+1),14,1), seq((21-n.test.day.per.wk+1),21,1), seq((28-n.test.day.per.wk+1),28,1)), length=pop.UCB)
}
pop.mat$time_of_next_test = sample(pop.mat$time_of_next_test, size=length(pop.mat$time_of_next_test), replace = FALSE) #scramble
prop.traced = as.numeric(pop.par$par1[pop.par$parameter=="prop.trace"])
#for all, based on proportions, give whether traced or not
pop.mat$traced = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$traced), replace=TRUE, prob=c(prop.traced, 1-prop.traced))
#and the same for the proportion observing distancing limits
prop.obs = as.numeric(pop.par$par1[pop.par$parameter=="percent-obs-dist-lim"])
pop.mat$obs_dist_limits = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$obs_dist_limits), replace=TRUE, prob=c(prop.obs, 1-prop.obs))
# and whether asymp or not
#pop.mat$stat_asymp = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$stat_asymp), replace=TRUE, prob=c(prop.asym, 1-prop.asym))
#make initial state variable
pop.mat$state <- rep(as.integer(0),pop.UCB)
#based on the proportion vaccinated, some get moved to recovered (state 5) right away
#for all of our model runs, this is 0, so this gets skipped
if(as.numeric(pop.par$par1[pop.par$parameter=="prop-vaccinated"])>0){
tot.vacc <- round(as.numeric(pop.par$par1[pop.par$parameter=="prop-vaccinated"])*pop.UCB,0)
index.vacc = sample(1:pop.UCB, size=tot.vacc, replace=FALSE)
pop.mat$state[index.vacc] <- 5
}
#then, regardless of vaccination, overwrite susceptibles with those initially exposed
#initially exposed get distributed at random
index.init = sample(as.numeric(rownames(pop.mat[pop.mat$state==0,])), size=n.init.exposed, replace=FALSE)
pop.mat$state[index.init] <- 1
#here, build distributions
#symptomatic isolation delay
delayfn_symp = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="iso-lag"]),
delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="iso-lag"]))
#turnaround testing delay
delayfn_TAT = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="TAT-lag"]),
delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="TAT-lag"]))
#contact tracing lag
delayfn_trace = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="trace-lag"]),
delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="trace-lag"]))
#titer limit for symptoms
titer_lim = lognormal_fn(meanlogpar=log(as.numeric(pop.par$par1[pop.par$parameter=="symptom-lim"])),
sdlogpar = log(as.numeric(pop.par$par2[pop.par$parameter=="symptom-lim"])))
prop.cases.UCB = as.numeric(pop.par$par1[pop.par$parameter=="prop.cases.UCB"])
#now generate potential new infections based on your status
#this gives the weekend average number of infections
#pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE] = floor(R0fn.asym.wk(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE]))*prop.cases.UCB)
#pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==FALSE] = floor(R0fn.wk(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==FALSE]))*prop.cases.UCB)
#and during the week, fewer cases
#pop.mat$wk_tot_potential_cases_caused[pop.mat$stat_asymp==TRUE] = floor(R0fn.asym(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE]))*prop.cases.UCB)
#here are all possible cases
pop.mat$total_potential_cases_caused = R0fn(length(pop.mat$employ_ids))
#here are all possible at UC Berkeley - before the titer cull
pop.mat$original_potential_cases_caused_UCB = floor(pop.mat$total_potential_cases_caused*prop.cases.UCB)
#you should have already brought in a titer trajectory for everyone in your population
#choose a threshold titer for symptom onset
pop.mat$titer_lim_for_symptoms = titer_lim(pop.UCB)
pop.split <- dlply(pop.mat, .(employ_ids))
titer.split <- dlply(titer.dat, .(employ_ids))
#now, based on this, go into each person's virus trajectory and calculate the timing of symptom onset
#while you are at it, you can also look at their titer and the LOD and calculate the start/end times for which they are test sensitive
pop.split.new <- mapply(get.symptom.onset, dat = pop.split, dat.vir=titer.split, MoreArgs = list(LOD=LOD), SIMPLIFY = FALSE)
#when the titer never exceeds the symptom threshold, these infections become "asymptomatic" --
#we can later play with the proportion classified as such by modulating the mean value of the symptom onset limit
pop.mat <- data.table::rbindlist(pop.split.new)
#pop.mat <- do.call("rbind", pop.split.new)
#and the delay to isolation
pop.mat$time_of_symptom_onset[pop.mat$time_of_symptom_onset<0]<-0
pop.mat$time_of_symptom_iso = delayfn_symp(pop.UCB)
pop.mat$time_of_symptom_iso[pop.mat$time_of_symptom_iso<0]<- 0
pop.mat$time_of_symptom_iso <- pop.mat$time_of_symptom_iso + pop.mat$time_of_symptom_onset
pop.mat$time_of_testing_iso = delayfn_TAT(pop.UCB)
pop.mat$time_of_testing_iso[pop.mat$time_of_testing_iso<0] <- 0
pop.mat$time_of_testing_iso <- pop.mat$time_of_testing_iso + pop.mat$time_of_next_test
pop.mat$time_of_tracing_iso = delayfn_trace(pop.UCB)
pop.mat$time_of_tracing_iso[pop.mat$time_of_tracing_iso<0] <- 0
#now, if not traced, never:
pop.mat$time_of_tracing_iso[pop.mat$traced==FALSE] <- Inf
pop.mat$time_of_tracing_iso[pop.mat$state>0] <- Inf # new introductions cannot be traced
pop.mat$infector[pop.mat$state>0] <- 0 # new introductions cannot be traced
pop.mat$infector_iso_time[pop.mat$state>0] <- Inf # new introductions cannot be traced
pop.mat$case_source[pop.mat$state>0] <- "alameda"
#NOW, we generate new cases:
#we break down each infectious individual based on that individual's:
#(a) within-host titer trajectory, (b) the selected value for within-host theta (how viral load translates to infection probability),
#(c) the number of discrete transmission events that we draw for each person, and
#(d) the generation time of those contact events
#(for d, we currently use the Ferretti weibull, but we are hoping that a constant hazard of events
# + the titer trajectory of the pathogen should roughly produce the expected generation time)
#(1) First, for each person, we draw the number of possible cases from R0 - this equates to individual heterogeneity in infectiousness
# (one type of superspreading) and is already captured in the "total_potential_cases_caused" column, which then gets reduced down to the
# proportion in the UCB community in the "original_potential_cases_caused_UCB" column
#(2) Then, we draw a number of contact events, among which the above cases get distributed. (this equates to event-based superspreading
# - fewer event draws and a high number of transmissions from #1 generate the biggest superspreading events). Currently, we draw this
# from a Poisson with lambda=3
#(3) Then, for each "event", we draw a time that this event took place (here, represented from the generation time Weibull, though this could change)
#(4) Then, for each event + time, we go into individual's titer trajectory to determine if each transmission actually
# takes place, based on the person's titer load at the point of infection. Since our initial R0 is 2.5, we fix theta at .7, such that the max
# probability of infection taking place is ~50% at peak viral load. If one 'event' generates multiple cases, each case is treated independently
# with this titer-based transmission probability.
#(5) If there is a group size limit, it gets imposed here. Say that group limit is 6 and one event is supposed to generate 10 cases.
# If this person abides by group limits (there is a parameter for this), we truncate the 10 person event to a 6 person event, and assume
# as a worst-case scenario that all 6 of those people get infected
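# For illustration (not part of the original code): under the per-contact model in
# get.real.cases(), p = within.host.theta * V / (V + Km), so a contact occurring when the
# titer V equals Km transmits with probability theta/2, and p approaches theta as V >> Km.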
#first, draw number of transmission events per person
pop.mat$num_infection_events <- eventFn(pop.UCB)
#then get a list of event times per person for each of these events
pop.list <- dlply(pop.mat, .(employ_ids))
event.times.list <- lapply(pop.list, get.event.time, genTime=genTime)
# now, each person has a number of cases, a number of events, a time for each event,
# and a virus titer trajectory.
# take this information and determine which events actually take place and when they occur
# also, if applicable, here impose the group size limit and record cases both before
# and after that limit occurs
# return the data as well as the edited event times list that replaces each
# failed case generation with NA
double.list <- mapply(FUN=get.real.cases, pop.dat=pop.list, event.dat=event.times.list, titer.dat1 = titer.split, MoreArgs = list(within.host.theta=within.host.theta, group.limit=as.numeric(pop.par$par1[pop.par$parameter=="group-size-limit"])), SIMPLIFY = FALSE)
pop.mat.list <- sapply(double.list, "[",1)
pop.mat <- data.table::rbindlist(pop.mat.list)
#pop.mat <- do.call("rbind", pop.mat.list)
gen_time_list <- sapply(double.list, "[",2)
dat.gen <- data.table::rbindlist(gen_time_list)
#dat.gen = do.call("rbind", gen_time_list)
#now cases from potential get distributed among events
#then we determine how many take place based on titers
#then we remove those that don't take place based on group size limitation
#then, we set an exposure time for those cases that actually occur
pop.mat$exposure_time[pop.mat$state>0] <- 0
#first, assume that isolation time is symptomatic
pop.mat$time_isolation[pop.mat$state==1 ] <- as.numeric(pop.mat$time_of_symptom_iso[pop.mat$state==1 ])
pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)
pop.mat$reason_isolated[pop.mat$state==1 ] <- "symptom_iso"
#now, if testing (and, for other cases, tracing) comes first, we replace it
#test needs to be AFTER start time of test sensitive and before end time of test sensitive
pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- "testing_iso"
pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- pop.mat$time_of_testing_iso[pop.mat$state==1 & pop.mat$time_isolation > pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end]
#and then, if any of these are Inf, change the reason to NA
pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA
#now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
#then export, regroup with other half of population and assign those new infections in the next time step
new.cases = dlply(pop.mat[pop.mat$state==1& pop.mat$potential_cases_caused>0,], .(employ_ids))
#if potential cases were 0, then actual cases are too:
pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused ==0] <- 0
pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused > 0] <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen, timestep)))
#now pop it back out, join with other sub-mats and assign those infections in time and space using your generation time vector.
return(list(pop.mat, dat.gen))
}
epidemic.step = function(pop.mat, timestep, length_timestep, prob.out, gen_list, input.par){
#pop.mat <- as.data.frame(pop.mat)
pop.par = subset(input.par, population ==unique(pop.mat$employ_cat))
#advance timestep
pop.mat$timestep = timestep
#introduce outside infections into susceptible spaces (cannot misplace those "exposed" by UCB above since we are guaranteeing those transmissions to take place)
#could easily modulate this for risk cohorts in future
#check if weekend
# we say days 1 and 2 are testing
# days
###MULTIPLE
# if(timestep ==1 | timestep ==2 | (timestep%%7==1)| (timestep%%7==2)){
# n.outside.exposures = sum(sample(x=c(0,1), size=length(pop.mat$state[pop.mat$state==0]), replace=TRUE, prob = c(1-prob.out.wk, prob.out.wk)))
#}else{
n.outside.exposures = sum(sample(x=c(0,1), size=length(pop.mat$state[pop.mat$state==0]), replace=TRUE, prob = c(1-prob.out, prob.out)))
#}
if(n.outside.exposures>0){
#if you find some, fill them in with an exposure time of now, distributed at random
#could add in higher introduction rate for certain sub-groups in this case
new.case.ids = sample(pop.mat$employ_ids[pop.mat$state==0], size = n.outside.exposures, replace=FALSE)
# print(new.case.ids)
#and assign
for (i in 1:length(new.case.ids)){
#print(i)
#print(new.case.ids[i])
#expose the new cases immediately - but only those that have reached the current timestep already
#those "predestined" for exposure get passed over for now.
pop.mat$state[pop.mat$employ_ids==new.case.ids[i]] <- 1
#pop.mat$state[pop.mat$employ_ids==new.case.ids[i] & pop.mat$stat_asymp==TRUE] <- 2
#exposure time is this timestep
pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] <- timestep #infection kicks off, so you can now calculate the symptom onset time
#pop.mat$time_of_symptom_onset[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$time_of_symptom_onset[pop.mat$employ_ids==new.case.ids[i]] + timestep
#tmp <- pop.mat$time_of_test_positivity[pop.mat$employ_ids==new.case.ids[i]]
#pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.case.ids[i]]
#pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.case.ids[i]]
#infector is outside source that cannot be tracked
pop.mat$infector[pop.mat$employ_ids==new.case.ids[i]] <- 0
pop.mat$infector_iso_time[pop.mat$employ_ids==new.case.ids[i]] <- Inf
pop.mat$case_source[pop.mat$employ_ids==new.case.ids[i]] <- "alameda"
#introduced cases cannot be traced
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.case.ids[i]] <- Inf
}
}
#pop.mat <- subset(pop.mat, !is.na(employ_ids))
#now, for those that are currently exposed (both from outside and UCB),
# compute distributions of iso time
# and new actual cases caused.
#then, we can assign those times and move them to recovered status
pop.mat.old <- pop.mat
pop.mat <- dplyr::select(pop.mat, -(actual_cases_caused))
  #first, go ahead and move test positivity to the appropriate degree
#pop.mat$time_of_test_positivity[pop.mat$state==1 | pop.mat$state==2] <- pop.mat$time_of_test_positivity[pop.mat$state==1 | pop.mat$state==2] + pop.mat$exposure_time[pop.mat$state==1 | pop.mat$state==2]
#print("7")
#first, assume that isolation time is symptomatic
pop.mat$time_isolation[pop.mat$state==1 ] <- as.numeric(pop.mat$time_of_symptom_iso[pop.mat$state==1])
pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)
pop.mat$reason_isolated[pop.mat$state==1 ] <- "symptom_iso"
#now, if tracing comes first, we replace it
#tracing only applicable within our community
#print("8")
pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat) ] <- "tracing_iso"
pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat) ] <- pop.mat$time_of_tracing_iso[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat)]
#or, finally, if testing comes first, we replace it here - IF the infection is test sensitive at the time of testing
#print("9")
pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- "testing_iso"
pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- pop.mat$time_of_testing_iso[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end]
#and then, if any of these are Inf, change the reason to NA
pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA
#first, double-check that nothing was exposed after the isolation time (would be based on tracing only)
#if that ever happens, that person becomes susceptible again because that infection was never generated
#first flag
#then, go in and find that person's infector and reduce their actual cases by one
#based on this case that did not occur
pop.mat$state[pop.mat$exposure_time>pop.mat$time_isolation & pop.mat$state==1 & pop.mat$reason_isolated=="tracing_iso" ] <- 7
pop.mat$reason_isolated[pop.mat$state==7] <- NA
pop.mat$time_isolation[pop.mat$state==7] <- NA
pop.mat$case_source[pop.mat$state==7 ] <- NA
#now remove a case from the infectors that "caused" these events
infector.sub1 = pop.mat[pop.mat$state==7,]
infector.sum1 = ddply(infector.sub1, .(infector), summarize, cases_removed = length(employ_ids)) #save this for the end
pop.mat$infector[pop.mat$state==7] <- NA
pop.mat$infector_iso_time[pop.mat$state==7] <- NA
pop.mat$exposure_time[pop.mat$state==7]<- NA
pop.mat$state[pop.mat$state==7] <- 0
#now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
#then export, and assign those new infections in the next time step
#now, advance forward all of the "time of etc." for susceptibles
#and time of next testing for all
new.cases = dlply(pop.mat[pop.mat$state==1& pop.mat$potential_cases_caused>0,], .(employ_ids))
#if potential cases were 0, then actual cases are too:
#pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused ==0| pop.mat$state==2 & pop.mat$potential_cases_caused ==0] <- 0
#but, if potential cases were greater than 0, then actual might be as well, depending on the isolation times
#dat.gen.new = do.call("rbind", gen_list)
dat.gen.new = data.table::rbindlist(gen_list)
#pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused > 0| pop.mat$state==2 & pop.mat$potential_cases_caused > 0] <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep, weekend.amp=weekend.amp)))
new.actual.cases <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep)))
#these have not kicked off, so let them kick forward
pop.mat$time_of_symptom_onset[pop.mat$state==0 | pop.mat$state==3] <- pop.mat$time_of_symptom_onset[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_of_symptom_iso[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_of_symptom_iso[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_test_sensitive_start[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_test_sensitive_start[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_test_sensitive_end[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_test_sensitive_end[pop.mat$state==0 | pop.mat$state==3] + length_timestep
#tracing only gets started when infector iso time is assigned, so we don't touch it here
#if you are at your current testing date, then next test is bumped into the future.
#Otherwise, you just advance in time until you reach it
#but the lag time is maintained after the new test date, so deal with that first
pop.mat$time_of_testing_iso = pop.mat$time_of_testing_iso - pop.mat$time_of_next_test #now this is just the lag time
#now, compute actual next test day if today is the test day of the runs in question - add different frequencies depending on the type
pop.mat$time_of_next_test[pop.mat$time_of_next_test==timestep] <- timestep + as.numeric(pop.par$par1[pop.par$parameter=="test-freq"])
#now put the lag back on to the new test day for isolation
pop.mat$time_of_testing_iso <- pop.mat$time_of_testing_iso + pop.mat$time_of_next_test
pop.mat$time_of_testing_iso[pop.mat$time_of_next_test==Inf] <- Inf
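  ## worked example (assumed values, for illustration only): suppose today (day 14) is a
  ## scheduled test day, test-freq = 7, and the TAT lag = 1 day. The block above first
  ## strips the lag (time_of_testing_iso - time_of_next_test = 1), advances
  ## time_of_next_test from 14 to 14 + 7 = 21, then re-applies the lag, so
  ## time_of_testing_iso becomes 21 + 1 = 22; untested individuals remain at Inf.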
#and, finally, check in on those that were "pre-exposed" up above.
#move them up to their appropriate status if they should be exposed now
#if they reach it, go ahead and assign their actual cases
#first, eliminate if they should not occur
#first flag
#then, go in and find that person's infector and reduce their actual cases by one
#based on this case that did not occur
pop.mat$state[pop.mat$state==3 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- 8
pop.mat$time_of_tracing_iso[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- pop.mat$time_of_tracing_iso[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] - pop.mat$infector_iso_time[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time]
pop.mat$case_source[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time] <- NA
#now remove a case from the infectors that "caused" these events
infector.sub2 = pop.mat[pop.mat$state==8,]
infector.sum2 = ddply(infector.sub2, .(infector), summarize, cases_removed = length(employ_ids)) #save this for the end
pop.mat$infector[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- NA
pop.mat$state[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- 0
pop.mat$infector_iso_time[pop.mat$state==0] <- NA
pop.mat$exposure_time[pop.mat$state==0] <- NA
if (exists('infector.sum1') & exists('infector.sum2')){
infector.sum <- rbind(infector.sum1, infector.sum2)
}else if(exists('infector.sum1')){
infector.sum <- infector.sum1
}else if(exists('infector.sum2')){
infector.sum <- infector.sum2
}
#then, if they pass that test and still remain 'pre-exposed', check to see if they should be elevated in status to 1 or 2
#(meaning they have reached the exposure time)
#if so, assign them isolation time and actual cases which get allocated in the next round.
#otherwise, they just keep current status as "pre-exposed"
#first make them complete cases, so that R is not angry with new columns being filled in
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep] <- Inf
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep] <- "in progress"
#first, assume that isolation time is symptomatic
#print("1")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] <- "symptom_iso"
# print("2")
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] <- pop.mat$time_of_symptom_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)]
pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)
#now, if tracing comes first, we replace it
#print("3")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)] <- "tracing_iso"
# print("4")
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)] <- pop.mat$time_of_tracing_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)]
#finally, if testing comes first, we replace it
#print("5")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- "testing_iso"
#print("6")
#print(pop.mat[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & complete.cases(pop.mat) ,])
#print(pop.mat)
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- pop.mat$time_of_testing_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)]
#and then, if any of these are Inf, change the reason to NA
pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA
#now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
#then export, regroup with other half of population and assign those new infections in the next time step
new.cases = dlply(pop.mat[pop.mat$state== 3& pop.mat$potential_cases_caused>0 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat) ,], .(employ_ids))
#if potential cases were 0, then actual cases are too:
#pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep | pop.mat$state==4 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep] <- 0
tmp.dat = pop.mat[pop.mat$state==3 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep ,]
new.cases.2 <- dlply(tmp.dat, .(employ_ids))
new.actual.cases.3 <- c(unlist(lapply(new.cases.2, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep)))
#now add the actual cases back in
pop.mat <- cbind.data.frame(pop.mat, pop.mat.old$actual_cases_caused)
names(pop.mat)[length(names(pop.mat))] <- "actual_cases_caused"
#reorder
pop.mat <- dplyr::select(pop.mat, names(pop.mat.old))
#and add in the new actual cases
pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep | pop.mat$state==1 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep ] <- 0
pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep] <- new.actual.cases
pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep ] <- new.actual.cases.3
#and, finally, change state so these cases can get allocated in the next round.
pop.mat$state[pop.mat$state==3 & pop.mat$exposure_time<=timestep ] <- 1
#and remove any avoided cases if there were some
if (exists('infector.sum')){
if(nrow(infector.sum)>0){
for(i in 1:length(infector.sum$infector)){
pop.mat$actual_cases_caused[pop.mat$employ_ids==infector.sum$infector[i]] <- pop.mat$actual_cases_caused[pop.mat$employ_ids==infector.sum$infector[i]] - infector.sum$cases_removed[i]
}
}
}
#and return
return(pop.mat)
}
get.mean.sd <- function(vector, name){
#first, trim to same length
min.length <- min(unlist(lapply(vector, length)))
for (i in 1:length(vector)){
vector[[i]] <- vector[[i]][1:min.length]
}
vec <- unlist(vector, use.names = FALSE)
DIM <- c(length(vector[[1]]),1)
n <- length(vector)
list.mean <- tapply(vec, rep(1:prod(DIM),times = n), mean)
attr(list.mean, "dim") <- DIM
list.mean <- as.data.frame(list.mean)
list.sd <- tapply(vec, rep(1:prod(DIM),times = n), sd)
attr(list.sd, "dim") <- DIM
list.sd <- as.data.frame(list.sd)
list.uci = list.mean + 1.96*list.sd
list.lci = list.mean - 1.96*list.sd
list.lci[list.lci<0] <- 0
list.uci[list.uci<0] <- 0
dat= cbind.data.frame(list.mean, list.lci, list.uci)
names(dat) = paste(c("mean", "lci", "uci"), name, sep="_")
return(dat)
}
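## minimal usage sketch (hypothetical inputs, not part of the pipeline): three replicate
## incidence vectors of unequal length are trimmed to the shortest and summarised as
## mean/lci/uci columns named "mean_exposures", "lci_exposures", "uci_exposures".
# example.reps <- list(rpois(12, lambda = 5), rpois(10, lambda = 5), rpois(11, lambda = 5))
# get.mean.sd(vector = example.reps, name = "exposures")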
get.mean.matrix <- function(mat){
#first, trim to same length
min.length <- min(unlist(lapply(mat, nrow)))
n.cat = ncol(mat[[1]])/3
for (i in 1:length(mat)){
mat[[i]] <- mat[[i]][1:min.length,]
}
list.mean <- Reduce("+",mat) / length(mat)
mat.2 <- do.call("cbind", mat)
#mat.2 <- data.table::rbindlist(mat)
list.sd <- apply(mat.2, 1, sd)
list.uci = list.mean + 1.96*list.sd
list.lci = list.mean - 1.96*list.sd
list.lci[list.lci<0] <- 0
list.uci[list.uci<0] <- 0
dat= cbind.data.frame(list.mean, list.lci, list.uci)
names(dat) = c(paste0("mean_iso_cat_",seq(1,n.cat,1)), paste0("lci_iso_cat_",seq(1,n.cat,1)), paste0("uci_iso_cat_",seq(1,n.cat,1)),
paste0("mean_exp_cat_",seq(1,n.cat,1)), paste0("lci_exp_cat_",seq(1,n.cat,1)), paste0("uci_exp_cat_",seq(1,n.cat,1)),
paste0("mean_deaths_cat_",seq(1,n.cat,1)), paste0("lci_deaths_cat_",seq(1,n.cat,1)), paste0("uci_deaths_cat_",seq(1,n.cat,1)))
return(dat)
}
convert.cat = function(dat){
n.cat = ncol(dat)/9
max.times = nrow(dat)
iso.dat = dat[,1:(n.cat*3)]
exp.dat = dat[,(n.cat*3+1):(n.cat*3*2)]
death.dat = dat[,(n.cat*3*2+1):ncol(dat)]
#then, sep by cat
list.iso <- list.exp <- list.deaths <- list()
for(i in 1:n.cat){
list.iso[[i]] <- cbind.data.frame(iso.dat[,i], iso.dat[,i+n.cat],iso.dat[,i+(n.cat*2)])
list.exp[[i]] <- cbind.data.frame(exp.dat[,i],exp.dat[,i+n.cat],exp.dat[,i+(n.cat*2)])
list.deaths[[i]] <- cbind.data.frame(death.dat[,i], death.dat[,i+n.cat],death.dat[,i+(n.cat*2)])
}
#iso.db <- do.call("rbind", list.iso)
iso.db <- data.table::rbindlist(list.iso)
iso.db$type = rep(1:n.cat, each = max.times)
iso.db$type <- paste0("iso-pop-", iso.db$type)
names(iso.db) <- c("mean", "lci", "uci", "type")
#exp.db <- do.call("rbind", list.exp)
exp.db <- data.table::rbindlist(list.exp)
exp.db$type = rep(1:n.cat, each = max.times)
exp.db$type <- paste0("exp-pop-", exp.db$type)
names(exp.db) <- c("mean", "lci", "uci", "type")
#death.db <- do.call("rbind", list.deaths)
death.db <- data.table::rbindlist(list.deaths)
death.db$type = rep(1:n.cat, each = max.times)
death.db$type <- paste0("death-pop-", death.db$type)
names(death.db) <- c("mean", "lci", "uci", "type")
return(list(iso.db, exp.db, death.db))
}
R.fit.sum <- function(mat.df){
#apply across all columns
mean.all <- apply(mat.df, 2,mean)
sd.all <- apply(mat.df, 2,sd)
lci.all <- mean.all-1.96*sd.all
lci.all[ lci.all < 0] <- 0
uci.all <- mean.all+1.96*sd.all
#and nbinom fit
all.fit <- apply(mat.df, 2, fitdist, distr="nbinom")
#and return
out.dat <- cbind.data.frame(mean.all, lci.all, uci.all)
out.dat$class <- names(mat.df)
#names(out.dat) <- names(mat.df)
#out.dat$estimate <- c("mean", "lci", "uci")
#out.dat[out.dat<0] <- 0
#and add fit
size.out <- list()
mu.out <- list()
for(i in 1:length(all.fit)){
size.out[[i]] <- all.fit[[i]]$estimate[1]
mu.out[[i]] <- all.fit[[i]]$estimate[2]
}
size.out <- c(unlist(size.out))
mu.out <- c(unlist(mu.out))
out.dat$nb_mu <- mu.out
out.dat$nb_size <- size.out
# names(size.out) <- names(mu.out) <- names(out.dat)
# out.dat <- rbind(out.dat, size.out, mu.out)
#
# out.dat$total_potential_cases <- as.numeric(out.dat$total_potential_cases)
# out.dat$UCB_potential_cases <- as.numeric(out.dat$UCB_potential_cases)
# out.dat$UCB_post_group_potential_cases <- as.numeric(out.dat$UCB_post_group_potential_cases)
# out.dat$UCB_post_titer_potential_cases <- as.numeric(out.dat$UCB_post_titer_potential_cases)
# out.dat$UCB_post_isolations_actual_cases <- as.numeric(out.dat$UCB_post_isolations_actual_cases)
#
return(out.dat)
}
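## minimal usage sketch (hypothetical counts, for illustration only): fit a negative
## binomial to each column of per-individual case counts and return mean/lci/uci
## plus the fitted nbinom mu and size for every column.
# toy.R <- data.frame(total_potential_cases = rnbinom(200, mu = 2.5, size = 0.5),
#                     UCB_potential_cases   = rnbinom(200, mu = 1.5, size = 0.5))
# R.fit.sum(toy.R)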
R.fit.sum.lognorm <- function(mat.df){
#apply across all columns
mean.all <- apply(mat.df, 2,mean)
sd.all <- apply(mat.df, 2,sd)
lci.all <- mean.all-1.96*sd.all
lci.all[ lci.all < 0] <- 0
uci.all <- mean.all+1.96*sd.all
#and return
out.dat <- cbind.data.frame(mean.all, lci.all, uci.all)
out.dat$class <- names(mat.df)
#names(out.dat) <- names(mat.df)
#out.dat$estimate <- c("mean", "lci", "uci")
#out.dat[out.dat<0] <- 0
#
return(out.dat)
}
simulate.epidemic <- function(input.pop, n.init.exposed.vector, employ.id.vector, times, virus.par, input.par, burnin,
test.freq, length_timestep, bay.area.prev, initial.R, within.host.theta, titer.dat, LOD, test_rotation_name){
if (virus.par$distribution[virus.par$parameter=="R0"]=="log-normal"){
#sample R0 normal
R0fn = R0_fn(meanR0=virus.par$par1[virus.par$parameter=="R0"],
sdR0=virus.par$par2[virus.par$parameter=="R0"])
}else if(virus.par$distribution[virus.par$parameter=="R0"]=="negbinom"){
#sample R0 normal
R0fn = R0_fn_nb(muR0=virus.par$par1[virus.par$parameter=="R0"],
sizeR0=virus.par$par2[virus.par$parameter=="R0"])
}
#and the number of transmission events, from a negbinom
#remember that fewer events = higher likelihood of a big superspreading event.
#but the vast majority of people have both few events and few cases
eventFn = poisson_fn(lambda =as.numeric(input.par$par1[input.par$parameter=="transmission-events"]))
#and normal distribution of the detection limit
#then, form your new populations
#now split the population based on risk
tot.pop = length(input.pop)
pop.num = 1:tot.pop
titer.dat$cat <- NA
for (i in 1:(length(pop.num)-1)){
titer.dat$cat[titer.dat$employ_ids < employ.id.vector [i+1] & titer.dat$employ_ids >= employ.id.vector [i]] <- pop.num[i]
}
titer.dat$cat[is.na(titer.dat$cat)] <- pop.num[length(pop.num)]
#and split
titer.dat.split <- dlply(titer.dat, .(cat))
#make the proper number of pop.mat depending on the total number of subpopulations
#populate each using the appropriate parameters
out.list = mapply(FUN=initiate.pop, start.ID.employ = as.list(employ.id.vector), pop.UCB=as.list(input.pop), n.init.exposed= as.list(n.init.exposed.vector), pop.ID = as.list(pop.num), titer.dat=titer.dat.split,
MoreArgs= list(input.par=input.par, virus.par=virus.par, R0fn=R0fn, eventFn=eventFn, within.host.theta=within.host.theta, LOD=LOD))
pop.list = out.list[1,]
gen_list_long <- out.list[2,]
#original.r0 <- out.list[3,][[1]]
#gen_list_long_wkend <- out.list[3,]
#pop.mat <- do.call("rbind", pop.list)
pop.mat <- data.table::rbindlist(pop.list)
#gen.dat.all <- do.call("rbind", gen_list_long)
gen.dat.all <- data.table::rbindlist(gen_list_long)
#now, double-check that the generation time dataframe is the same length as the number of unique employ ids
if(sum(setdiff(pop.mat$employ_ids, gen.dat.all$employ_ids))>0){
missing.ids <- setdiff(pop.mat$employ_ids, gen.dat.all$employ_ids)
missing.cases <- list()
for(i in 1:length(missing.ids)){
missing.cases[[i]] <- pop.mat$potential_cases_caused[pop.mat$employ_ids==missing.ids[i]]
}
missing.cases <- c(unlist(missing.cases))
if(sum(missing.cases)>0){
missing.gen <- genTime(missing.cases)
add.dat <- cbind.data.frame(rep(missing.ids, missing.cases), missing.gen)
}else{
missing.gen <- rep(NA, length(missing.cases))
add.dat <- cbind.data.frame(missing.ids, missing.gen)
}
names(add.dat) <- names(gen.dat.all)
gen.dat.all <- rbind(gen.dat.all, add.dat)
gen.dat.all <- arrange(gen.dat.all, employ_ids)
}
gen_list = dlply(gen.dat.all, .(employ_ids))
#gen_list_wk = dlply(gen.dat.all.wk, .(employ_ids))
foi.bay.area = initial.R*bay.area.prev*length_timestep #rate per day at which susceptibles become infected
#foi.wkend = bay.area.R*bay.area.prev*length_timestep*weekend.amp
  prob.outside.exposure =1-(exp(-1*foi.bay.area)) #for each person in Berkeley, this is the probability of getting exposed each day
prob.outside.exposure[prob.outside.exposure<0] <- 0
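  ## worked example (values assumed from the call at the bottom of this script):
  ## initial.R = 2.5, bay.area.prev = 0.001, length_timestep = 1 day gives
  ## foi.bay.area = 2.5 * 0.001 * 1 = 0.0025, so
  ## prob.outside.exposure = 1 - exp(-0.0025) ~= 0.0025, i.e. ~0.25% risk per susceptible per day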
#prob.outside.exposure.wk =1-(exp(-1*foi.wkend))
#could also be a vector
times_vect = seq(length_timestep,times, by = length_timestep)
for(i in 1: length(times_vect)){
#print(i)
timestep = times_vect[i]
#could make other functions here if people mostly infect their own subgroups
#here, we distribute the infections amongst new people and retire the old
pop.mat = assign.infections(pop.mat = pop.mat, gen_list=gen_list, timestep = timestep, input.par = input.par)
#now split it by population to introduce outside exposures
pop.split = dlply(pop.mat, .(employ_cat))
pop.mat.list = lapply(pop.split, FUN=epidemic.step, timestep= timestep, prob.out = prob.outside.exposure, gen_list=gen_list, input.par=input.par, length_timestep = length_timestep)
#then, rejoin
#pop.mat = do.call("rbind", pop.mat.list)#print(i)
pop.mat = data.table::rbindlist(pop.mat.list)
#then, just keep tabs that there are enough susceptibles to fill the new cases in the next step
remaining.susceptibles = length(pop.mat$state[pop.mat$state==0])
future.cases = sum(pop.mat$actual_cases_caused[pop.mat$state==1])
if(future.cases>remaining.susceptibles){ #if there are not enough susceptibles left for all of the assigned cases before you reach the end of the time series, then you go into the next step
#print(i)
pop.mat = assign.last.infections(pop.mat = pop.mat, gen_list = gen_list, remaining.susceptibles = remaining.susceptibles, timestep = timestep)
#print(i)
}
}
#collect all the "R" reduction info:
R.mat <- dplyr::select(pop.mat, total_potential_cases_caused, original_potential_cases_caused_UCB, post_titer_potential_cases_caused_UCB, potential_cases_caused, actual_cases_caused)
names(R.mat) <- c( "total_potential_cases", "UCB_potential_cases", "UCB_post_titer_potential_cases", "UCB_post_group_potential_cases", "UCB_post_isolations_actual_cases")
R.mat <- arrange(R.mat, desc(total_potential_cases))
R.mat$UCB_post_isolations_actual_cases[is.na(R.mat$UCB_post_isolations_actual_cases)] <- 0
#R.mat <- as.matrix(R.mat)
# #new R0
# new.R0 = subset(pop.mat, !is.na(infector))
# new.R0 = ddply(new.R0, .(infector), summarize, cases_caused=length(employ_ids))
# tot.introductions = new.R0$cases_caused[new.R0$infector=="0"]
# new.R0 = subset(new.R0, infector!="0")
#
# maxID = max(pop.mat$employ_ids)
# missing_ids <- (1:maxID)[!(1:maxID %in% new.R0$infector)]
#
# # add in missing days if any are missing
# if (length(missing_ids > 0)) {
# R0comp <- data.table::rbindlist(list(new.R0,
# data.table(infector = missing_ids,
# cases_caused = 0)))
# }
#
# R0comp <- arrange(R0comp, infector)
#
# #now add back in those cases not at UCB...
# #original.r0$actual_cases_caused_UCB <- R0comp$cases_caused
#get prop.asymptomatic at this cutoff
prop.asym <- length(pop.mat$time_of_symptom_onset[pop.mat$time_of_symptom_onset==Inf])/length(pop.mat$time_of_symptom_iso)
#from here, compute Reffective
R.dat = dplyr::select(pop.mat, employ_ids, infector, time_isolation, case_source)
  R.dat = arrange(R.dat, time_isolation) #incidence will just be cases by time isolated
#if not isolated, you don't count for incidence...
R.dat = R.dat[!is.na(R.dat$time_isolation),]
R.dat$time_isolation = ceiling(R.dat$time_isolation)
#could add source. don't for now
R.sum = ddply(R.dat, .(time_isolation), summarise, length(employ_ids))
#R.sum = ddply(R.dat, .(time_isolated, source), summarise, length(employ_ids))
names(R.sum) = c( "day", "incidence")
#plot as incidence
#plot(as.incidence(R.sum$incidence, dates = R.sum$day))
#this will go in as your incidence data
#now add in pairs to estimate the serial interval
#T <- nrow(R.sum)
#t_start <- seq(2, T-13) # starting at 2 as conditional on the past observations
#t_end <- t_start + 13
#
# R.est = estimate_R(R.sum$incidence,
# method="parametric_si",
# config = make_config(list(#t_start = t_start,
# #t_end = t_end,
# mean_si = serial_mean, std_si = serial_sd)))
#
# #plot(R.est, "R")
# #get midpoint and R values and extract
# R.out = cbind.data.frame(get.midpoint(par.low = R.est$R$t_start, par.hi = R.est$R$t_end), R.est$R$`Mean(R)`)
# names(R.out) = c("day", "Reffective")
# #and try it based on pairs
pop.mat = data.table(pop.mat)
#now, get broad incidence data to report
UCB.mat = subset(pop.mat, case_source=="UCB")
alameda.mat = subset(pop.mat, case_source=="alameda")
symp.mat = subset(pop.mat, reason_isolated=="symptom_iso")
trace.mat = subset(pop.mat, reason_isolated=="tracing_iso")
test.mat = subset(pop.mat, reason_isolated=="testing_iso")
daily_exposures <- pop.mat[, day := ceiling(exposure_time) #time_isolated
][, .(daily_exposures = .N), by = day
]
# #daily isolations
daily_isolations <- pop.mat[, day := ceiling(time_isolation) #
][, .(daily_isolations = .N), by = day
]
daily_cal <- UCB.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
daily_alameda <- alameda.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
daily_symp <- symp.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
daily_trace <- trace.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
daily_test <- test.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
# maximum outbreak day
max_day <- ceiling(times)
  # days with 0 cases in 0:max_day
#missing_days <- (0:max_day)[!(0:max_day %in% daily_isolations$day)]
missing_days <- (0:max_day)[!(0:max_day %in% daily_exposures$day)]
# add in missing days if any are missing
  if (length(missing_days) > 0) {
    daily_cases <- data.table::rbindlist(list(daily_exposures,
                                              data.table(day = missing_days,
                                                         daily_exposures = 0)))
  }else{
    daily_cases <- daily_exposures
  }
  #reorder as appropriate
  #daily_cases <- arrange(daily_cases, day)
  # order and sum up (use daily_cases so the zero-exposure days added above are kept)
  daily_cases <- daily_cases[order(day)
                             ][, cumulative := cumsum(daily_exposures)]
  # cut at max_day
daily_cases <- daily_cases[day<=max_day]
  # and isolations
daily_cases$daily_isolations <- 0
for (i in 1:length(daily_isolations$day)){
daily_cases$daily_isolations[daily_cases$day==daily_isolations$day[i]] <- daily_isolations$daily_isolations[i]
}
#and cumulative isolations
daily_cases$cumulative_iso = cumsum(daily_cases$daily_isolations)
# #and cases in UCB vs out
daily_cases$daily_UCB_isolations <- 0
for (i in 1:length(daily_cal$day)){
daily_cases$daily_UCB_isolations[daily_cases$day==daily_cal$day[i]] <- daily_cal$daily_isolations[i]
}
#
# #and cases in UCB vs out
daily_cases$daily_alameda_isolations <- 0
for (i in 1:length(daily_alameda$day)){
daily_cases$daily_alameda_isolations[daily_cases$day==daily_alameda$day[i]] <- daily_alameda$daily_isolations[i]
}
daily_cases$daily_symptomatic_isolations <- 0
for (i in 1:length(daily_symp$day)){
daily_cases$daily_symptomatic_isolations[daily_cases$day==daily_symp$day[i]] <- daily_symp$daily_isolations[i]
}
daily_cases$daily_tracing_isolations <- 0
for (i in 1:length(daily_trace$day)){
daily_cases$daily_tracing_isolations[daily_cases$day==daily_trace$day[i]] <- daily_trace$daily_isolations[i]
}
daily_cases$daily_testing_isolations <- 0
for (i in 1:length(daily_test$day)){
daily_cases$daily_testing_isolations[daily_cases$day==daily_test$day[i]] <- daily_test$daily_isolations[i]
}
#
# #now attach R-effective
# daily_cases$Reffective = NA
#
# for(i in 1:nrow(R.out)){
# daily_cases$Reffective[daily_cases$day==R.out$day[i]] <- R.out$Reffective[i]
# }
#
#add category
pop.mat.cat= dlply(pop.mat, .(employ_cat))
new_col <- lapply(pop.mat.cat, FUN=add.risk.cat, pop_dat=daily_cases)
#and also the daily exposures
new_col2 <- lapply(pop.mat.cat, FUN=add.risk.cat.exp, pop_dat=daily_cases, input_par=input.par)
new_col_exp <- sapply(new_col2, "[", 1)
new_col_deaths <- sapply(new_col2, "[", 2)
#tmp = data.table::rbindlist(new_col)
tmp = as.data.frame(do.call("cbind", new_col))
names(tmp) <- paste0("isolations-employ-cat-", unique(input.par$population))
tmp2 = as.data.frame(do.call("cbind", new_col_exp))
#tmp2 = data.table::rbindlist(new_col_exp)
names(tmp2) <- paste0("exposures-employ-cat-", unique(input.par$population))
tmp3 = as.data.frame(do.call("cbind", new_col_deaths))
#tmp3 = data.table::rbindlist(new_col_deaths)
names(tmp3) <- paste0("deaths-employ-cat-", unique(input.par$population))
#and attach to daily cases
daily_cases <- cbind.data.frame(daily_cases, tmp, tmp2, tmp3)
# #finally, calculate some summary statistics from the epidemic
# tot.exposures = sum(daily_cases$daily_exposures, na.rm=T)
# tot.isolations = sum(daily_cases$daily_isolations, na.rm=T)
# #time.to.control = max(daily_cases$day[!is.na(daily_cases$Reffective)])
# max.exposures.per.day = max(daily_cases$daily_exposures, na.rm=T)
# mean.exposures.per.day = mean(daily_cases$daily_exposures, na.rm=T)
# max.iso.per.day = max(daily_cases$daily_isolations, na.rm=T)
# mean.iso.per.day = mean(daily_cases$daily_isolations, na.rm=T)
# time.of.peak.iso = min(daily_cases$day[daily_cases$daily_isolations==max(daily_cases$daily_isolations, na.rm=T)])
# time.of.peak.exposure = min(daily_cases$day[daily_cases$daily_exposures==max(daily_cases$daily_exposures, na.rm=T)])
#
#and report out the max day before your cases are too few to calculate Reffective
#out.stat <- c(tot.exposures, tot.isolations, max.exposures.per.day, mean.exposures.per.day, max.iso.per.day, mean.iso.per.day, time.of.peak.exposure, time.of.peak.iso)
#names(out.stat) <- c("total_exposures", "total_isolations", "max_exp_per_day", "mean_exp_per_day", "max_iso_per_day", "mean_iso_per_day", "time_peak_exposure", "time_peak_isolation")
pop.mat$LOD <- LOD
#add TAT if this is a single population model, but if it is mixed in a multipop, note that
if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){
pop.mat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
}else{
pop.mat$TAT <- "multiple"
}
pop.mat$test_rotation <- test_rotation_name
return(list(daily_cases,pop.mat, prop.asym, R.mat))
}
replicate.epidemic = function(n.reps, input.pop, n.init.exposed.vector, employ.id.vector, times, virus.par, input.par, burnin, test.freq, length_timestep,
bay.area.prev, initial.R, within.host.theta, test_rotation_name, LOD, titer.dat){
out = replicate(n.reps, simulate.epidemic(virus.par = virus.par,
input.par = input.par,
input.pop=input.pop,
n.init.exposed.vector=n.init.exposed.vector,
times=times,
bay.area.prev = bay.area.prev,
initial.R = initial.R,
within.host.theta = within.host.theta,
burnin = burnin,
length_timestep=length_timestep,
employ.id.vector =employ.id.vector,
LOD = LOD,
titer.dat = titer.dat,
test_rotation_name = test_rotation_name), simplify = "array")
#make list
out.time<- out.daily <- out.cal <- out.iso <- out.cumulative <- out.ala <- out.symp <- out.trace <- out.test <- out.iso <-out.cum.iso <- pop.mat.chain <- out.prop.asym <- R.mat.out <- list()
#and make list of all the categories of sub-pop
out.cat <- list()
for (i in 1:ncol(out)){
#tmp <- data.table::cbindlist(out[,i][[1]])
tmp <- do.call("cbind", out[,i][[1]])
out.time[[i]] <- c(unlist(tmp[,1]))
out.daily[[i]] <- c(unlist(tmp[,2]))
out.cumulative[[i]] <- c(unlist(tmp[,3]))
out.iso[[i]] <- c(unlist(tmp[,4]))
out.cum.iso[[i]] <- c(unlist(tmp[,5]))
out.cal[[i]] <- c(unlist(tmp[,6]))
out.ala[[i]] <- c(unlist(tmp[,7]))
out.symp[[i]] <- c(unlist(tmp[,8]))
out.trace[[i]] <- c(unlist(tmp[,9]))
out.test[[i]] <- c(unlist(tmp[,10]))
#out.R[[i]] <- c(unlist(tmp[,11]))
out.cat[[i]] <- cbind(unlist(tmp[,11:(10+(length(unique(input.par$population)))*3)]))
#and save a chain of pop.mat
tmp2 <- out[,i][[2]]
pop.mat.chain[[i]] <- tmp2
#and the prop.asym
tmp3 <- out[,i][[3]]
out.prop.asym[[i]] <- tmp3
tmp4 <- out[,i][[4]]
rownames(tmp4) <- c()
R.mat.out[[i]] <- tmp4
#unique(input.par$population)
}
#now shorten them all to the same length and get mean + sd
#print(out.time)
mean.time = get.mean.sd(vector= out.time, name = "day")[,1]
#print(out.daily)
mean.daily = get.mean.sd(vector=out.daily, name = "exposures")
#print(out.cumulative)
mean.cumulative= get.mean.sd(vector=out.cumulative, name = "cumulative")
#print(out.cal)
mean.cal = get.mean.sd(vector=out.cal, name="UCB")
#print(out.ala)
mean.ala = get.mean.sd(vector=out.ala, name = "AlamedaCo")
#print(out.low)
mean.symp = get.mean.sd(vector=out.symp, name="symptomatic_iso")
mean.trace = get.mean.sd(vector=out.trace, name="tracing_iso")
mean.test = get.mean.sd(vector=out.test, name="testing_iso")
#print(out.iso)
mean.iso = get.mean.sd(vector=out.iso, name = "isolations")
#print(out.cum.iso)
mean.cum.iso = get.mean.sd(vector=out.cum.iso, name = "cumulative_isolations")
#print(out.sum)
#mean.sum = get.mean.sd.summary(out.sum)
#and the employ-cat
mean.cat = get.mean.matrix(mat=out.cat)
#print(out.hi)
mean.dat = cbind.data.frame(mean.time, mean.daily, mean.cumulative, mean.cal, mean.ala, mean.symp, mean.trace,mean.test, mean.iso, mean.cum.iso, mean.cat)#, mean.R)
names(mean.dat)[1] = "day"
#all of the descriptors can now change within the pop
mean.dat$LOD <- LOD
if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){
mean.dat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
}else{
mean.dat$TAT <- "multiple"
}
mean.dat$test_rotation <- test_rotation_name
#mean.dat$prop_asym = prop.asym
mean.dat$virus_par = unique(virus.par$version)
mean.dat$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
avg.prop.asym <- mean(c(unlist(out.prop.asym)))
mean.dat$prop_asym= avg.prop.asym
#and the long version
mean.daily$type = "all_exposures"
mean.cumulative$type = "cumulative"
mean.cal$type = "UCB"
mean.ala$type = "AlamedaCo"
mean.symp$type = "symptomatic_iso"
mean.trace$type = "tracing_iso"
mean.test$type = "testing_iso"
#mean.R$type = "Reffective"
mean.iso$type= "isolations"
  #don't bother with employ-cat; can add later if needed
mean.cat.long.list = convert.cat(mean.cat)
mean.cat.long = data.table::rbindlist(mean.cat.long.list)
#mean.cat.long = do.call("rbind", mean.cat.long.list)
names(mean.daily) <- names(mean.cumulative) <- names(mean.cal) <- names(mean.ala) <- names(mean.symp) <- names(mean.trace) <- names(mean.test) <- names(mean.iso) <- c("mean", "lci", "uci", "type") #<- names(mean.R)
mean.long <- rbind(mean.daily, mean.cumulative, mean.cal, mean.ala, mean.symp, mean.trace, mean.test, mean.iso, mean.cat.long)#, mean.R)
n.cat = length(input.pop)
mean.long$day = c(rep(mean.time, (8+(3*n.cat))))#, mean.time[-1])
mean.long$LOD <- LOD
if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){
mean.long$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
}else{
mean.long$TAT <- "multiple"
}
mean.long$test_rotation <- test_rotation_name
#mean.long$prop_asym = prop.asym
mean.long$virus_par = unique(virus.par$version)
mean.long$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
mean.long$prop_asym = avg.prop.asym
# mean.sum$sim_cat = sim_cat
# #mean.sum$prop_asym = prop.asym
# mean.sum$virus_par = unique(virus.par$version)
#
# mean.sum$superspread = superspread
# mean.sum$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
#
#
# #and summarize R
# mean.R = summarise.R(out.list.R=out.R, day.vec = mean.dat$day, n.reps=n.reps)
# mean.R$LOD <- LOD
# mean.R$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
# mean.R$test_rotation <- test_rotation_name
# #mean.R$sim_cat = sim_cat
# #mean.R$prop_asym = prop.asym
# mean.R$virus_par = unique(virus.par$version)
#
# mean.R$superspread = superspread
# mean.R$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
#
# mean.R$prop_asym <- avg.prop.asym
#
#
# mean.R.mat = manage.R.matrix(mat.list=R.mat.out)
#and do the best you can with the R-output
#put it all together
#R.mat.use <- do.call("rbind", R.mat.out)
R.mat.use <- data.table::rbindlist(R.mat.out)
R.mat.use <- arrange(R.mat.use, total_potential_cases)
if(virus.par$distribution[virus.par$parameter=="R0"]=="negbinom"){
mean.R.mat = R.fit.sum(R.mat.use)
}else{
mean.R.mat = R.fit.sum.lognorm(R.mat.use)
}
rownames(mean.R.mat) <- c()
mean.R.mat$LOD <- LOD
if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){
mean.R.mat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
}else{
mean.R.mat$TAT <- "multiple"
}
mean.R.mat$test_rotation <- test_rotation_name
mean.R.mat$virus_par = unique(virus.par$version)
mean.R.mat$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
mean.R.mat$prop_asym <- avg.prop.asym
#return these summaries and the list of pop.mats
return(list(mean.dat, mean.long, pop.mat.chain, mean.R.mat))
}
pop.par.base$par1[pop.par.base$parameter=="group-size-limit"] <- 16
pop.par.base$par1[pop.par.base$parameter=="percent-obs-dist-lim"] <- .9
out = replicate.epidemic(n.reps = 100,
virus.par = virus.par,
input.par = pop.par.base,
input.pop=c(20000),#2000
n.init.exposed.vector=c(100),#10
times=365*2,
bay.area.prev = .1/100,
initial.R = 2.5,
within.host.theta = .72,
burnin = 0,
length_timestep=1,
employ.id.vector = c(1),
LOD=(10^1),
titer.dat = titer.dat,
test_rotation_name = "none")
save(out, file = "group-lim-16.Rdata")
## ---- end of script: /all-model-runs/Fig2-Group-Limits/group-limit-16.R (repo: carabrook/Berkeley-COVID-testing, language: R) ----
## ---- next script ----
rm(list=ls())
.libPaths("/global/home/users/cbrook/R/x86_64-pc-linux-gnu-library/3.6")
#setwd("/Users/caraebrook/Documents/R/R_repositories/Berkeley-Reopening/Dec-2020/all-runs/Re-Run-12-24/FigS1/")
#no group, no test, no trace
library(data.table)
library(plyr)
library(dplyr)
library(EpiEstim)
library(deSolve)
library(matrixStats)
library(fitdistrplus)
#load parameters including pre-run titer trajectories for each individual
load("titer.dat.20K.Rdata")
#load("titer.dat.2K.Rdata")
load("virus.par.12.15.Rdata")
load("pop.par.base.Rdata")
get.real.cases <- function(pop.dat, event.dat, titer.dat1, within.host.theta, group.limit){
#if no cases caused, then ignore
if((pop.dat$original_potential_cases_caused_UCB>0) & (pop.dat$num_infection_events>0)){
#then allocate all the cases to the events
#distribute cases at random amongst the events
event.names <- 1:as.numeric(pop.dat$num_infection_events)
actual.events <- sample(x=event.names, size=as.numeric(pop.dat$original_potential_cases_caused_UCB), replace = T)
event.data <- cbind.data.frame(actual.events, event.dat[actual.events])
names(event.data) <- c("event", "gentime")
#and add the titer at the time of the event
gen.tmp = as.list(event.data$gentime)
event.data$titer <- c(unlist(lapply(gen.tmp, grab.titer, dat.vir =titer.dat1)))
#now that you have titer, here calculate the probability of transmission, given a certain viral load,
#based off of the probabiliy model from the URT in Ke et al. 2020
# in Ke et al. 2020, theta is fixed at 0.05 (could be modulated and/or fit to data)
#draw Km from a normal disribution centered at the midpoint between the two values explored in Ke et al. 2020 (10^3 and 10^4)
event.data$Km <- rnorm(nrow(event.data),mean=5500, sd=1000)
event.data$prob_exposure = within.host.theta*(event.data$titer/(event.data$titer + event.data$Km))
event.data$prob_exposure[event.data$prob_exposure<0] <- 0
#probability is small: ~5% for a typical contact if theta = 0.05 as in Ke.
#for theta = .7 here, up to 50% depending on theta
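    ## worked example (assumed values): with within.host.theta = 0.72, titer = 1e4 copies,
    ## and Km = 5500, prob_exposure = 0.72 * 1e4 / (1e4 + 5500) ~= 0.46 for that contact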
#does the infection happen? make it a probabilistic outcome of the titer
#then, you role a dice to see if this exposure causes an infection
tmp.prob <- as.list(event.data$prob_exposure)
event.data$InfectionYN = c(unlist(lapply(tmp.prob, test.titer)))
#then total the events that actually happen to incorporate into the original data
pop.dat$post_titer_potential_cases_caused_UCB <- sum(event.data$InfectionYN)
#and then, if there is a group size limit, impose it here
if((group.limit>0) & (pop.dat$obs_dist_limits==TRUE)){
#gives you the number of successful transmissions per event
event.sum <- ddply(event.data, .(event),summarize, N=sum(InfectionYN))
event.sum$over_lim = event.sum$N-group.limit
event.sum$over_lim[event.sum$over_lim<0] <- 0
#truncate # of events for the IDs listed above to the group limit.
event.data.list = dlply(subset(event.data, InfectionYN==1), .(event))
new.event.list <- lapply(event.data.list, impose.group, group.limit=group.limit)
#new.event.data <- do.call("rbind", new.event.list)
new.event.data <-data.table::rbindlist(new.event.list)
pop.dat$potential_cases_caused = sum(new.event.data$InfectionYN)
#in this case, return the generation time table after the group intervention
if(pop.dat$potential_cases_caused >0){
dat.gen.tab <- cbind.data.frame(rep(unique(pop.dat$employ_ids), nrow(new.event.data)), new.event.data$gentime)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}else{
dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}
}else{
pop.dat$potential_cases_caused <- pop.dat$post_titer_potential_cases_caused_UCB
if(pop.dat$potential_cases_caused >0){
event.data.out = subset(event.data, InfectionYN==1)
dat.gen.tab <- cbind.data.frame(rep(unique(pop.dat$employ_ids), nrow(event.data.out)), event.data.out$gentime)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}else{
dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}
}
}else{
#none take place
#return the original data with 0s
pop.dat$post_titer_potential_cases_caused_UCB <- 0
pop.dat$potential_cases_caused <- 0
#and return a table of generation times with nothing
dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
names(dat.gen.tab) <- c("employ_ids", "generation_time")
}
return(list(pop.dat, dat.gen.tab))
}
test.titer <- function(prob1){
Y_N =sample(c(0,1), size=1, prob = c(1-prob1, prob1))
return(Y_N)
}
impose.group <- function(event.dat1, group.limit){
tot.transmissions = nrow(event.dat1)
if(tot.transmissions>group.limit){
choose.events <- sample(x=1:tot.transmissions, size=group.limit, replace = F)
event.dat2 = event.dat1[choose.events,]
return(event.dat2)
}else{
return(event.dat1)
}
}
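## minimal usage sketch (hypothetical event table): cap a single event at 6 onward transmissions
# toy.event <- data.frame(event = 1, gentime = rexp(10, rate = 1/5), titer = 1e4,
#                         Km = 5500, prob_exposure = 0.5, InfectionYN = 1)
# impose.group(toy.event, group.limit = 6) # returns at most 6 of the 10 rows, chosen at random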
get.event.time <- function(dat, genTime){
event.times = genTime(as.numeric(dat$num_infection_events))
return(event.times)
}
grab.titer <- function(dat1, dat.vir){
titer.out <- dat.vir$V[dat.vir$time>dat1][1]
return(titer.out)
}
normal_fn <- function(meanpar=NULL, sdpar=NULL){
out <- purrr::partial(rnorm,
mean = meanpar,
sd = sdpar)
return(out)
}
poisson_fn <- function(lambda=NULL){
out <- purrr::partial(rpois,
lambda = lambda)
return(out)
}
lognormal_fn <- function(meanlogpar=NULL, sdlogpar=NULL){
out <- purrr::partial(rlnorm,
meanlog = meanlogpar,
sdlog = sdlogpar)
return(out)
}
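## minimal usage sketch: these factories return samplers with their parameters pre-bound
## via purrr::partial, so only the number of draws is supplied later (values assumed)
# draw.delay <- normal_fn(meanpar = 1, sdpar = 0.5)
# draw.delay(3)   # three N(1, 0.5) draws
# draw.events <- poisson_fn(lambda = 3)
# draw.events(5)  # five Poisson(3) draws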
add.risk.cat <- function(dat, pop_dat){
dat = data.table(dat)
daily_new <- dat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
pop_dat$add <- 0
for (i in 1:length(daily_new$day)){
pop_dat$add[pop_dat$day==daily_new$day[i]] <- daily_new$daily_isolations[i]
}
out.vect <- as.data.frame(pop_dat$add)
return( out.vect)
}
add.risk.cat.exp <- function(dat, pop_dat, input_par){
dat = data.table(dat)
daily_new <- dat[, day := ceiling(exposure_time)
][, .(daily_exposures = .N), by = day
]
pop_dat$add <- 0
for (i in 1:length(daily_new$day)){
pop_dat$add[pop_dat$day==daily_new$day[i]] <- daily_new$daily_exposures[i]
}
out.vect <- as.data.frame(pop_dat$add)
#then add deaths based on each pop cat
pop.cat = unique(dat$employ_cat)
out.vect2 <- as.data.frame(as.numeric(input_par$par1[input_par$parameter=="CFR" & input_par$population==pop.cat])*out.vect)
# dat.out = cbind.data.frame(out.vect, out.vect2)
return(list(out.vect, out.vect2))
#return(dat.out)
}
cross.infect <- function(dat, all.sus, input.par){
pop.par = subset(input.par, population == unique(dat$infector_cat))
#first, elim any populations for which there are no longer remaining susceptibles
rem.cat = unique(all.sus$employ_cat)
all.cat = unique(pop.par$par2[pop.par$parameter=="meta-pop"])
missed.cat = setdiff(all.cat, rem.cat)
pop.par$sub = 0
for (i in 1: length(missed.cat)) {
pop.par$sub[pop.par$parameter=="meta-pop" & pop.par$par2==missed.cat[i]] <- 1
}
pop.par = subset(pop.par, sub==0)
#then allocate the population of the new cases based on the proportion within and without
tot.cases = nrow(dat)
#then need to reallocate probabilities comparatively without the remaining
possible.cat = unique(pop.par$par2[pop.par$parameter=="meta-pop"])
old.cat = as.numeric(unique(input.par$par2[input.par$parameter=="meta-pop"]))
old.prob = as.numeric(input.par$par1[input.par$parameter=="meta-pop"])[1:length(old.cat)]
if(length(possible.cat)<length(old.cat)){
if(length(possible.cat)==1){
dat$new_cat = possible.cat
}else{
#if you've run out of probabilities, just, rellocate proportionally
new.prob = rep((1/length(possible.cat)), length(possible.cat))
dat$new_cat = sample(x=possible.cat, size = tot.cases, replace = TRUE, prob = new.prob)
}
}else{
dat$new_cat = sample(x=old.cat, size = tot.cases, replace = TRUE, prob = old.prob)
}
return(dat)
}
assign.ID = function(sus.dat.sub, dat.new.sub){
#at the very end of the time series, you may run out of susceptibles in the right category, in which case, these just become lost infections
if(nrow(dat.new.sub)<=length(sus.dat.sub$employ_ids)){
dat.new.sub$new_infected = sample(sus.dat.sub$employ_ids, size=nrow(dat.new.sub), replace=FALSE)
}else{
new.count = length(sus.dat.sub$employ_ids)
new.missed = nrow(dat.new.sub) - new.count
row.tmp = seq(1, nrow(dat.new.sub),1)
row.take = sample(row.tmp, size = new.count, replace = FALSE)
dat.new.sub <- dat.new.sub[row.take,]
dat.new.sub$new_infected = sample(sus.dat.sub$employ_ids, size=nrow(dat.new.sub), replace=FALSE)
}
return(dat.new.sub)
}
assign.infections <- function(pop.mat, gen_list, timestep, input.par){
# assign new exposures (and times) based on 'actual cases caused' above
# and move those that have transmitted to isolated/recovered state
#(asymptomatics will be missed in iso time unless tested)
# timestep.prev = unique(pop.mat$timestep)
#first, pair each case with its generation times
new.mat <- dplyr::select(pop.mat, employ_ids, employ_cat, state, exposure_time, actual_cases_caused, time_isolation)
new.mat <- new.mat[!is.na(new.mat$actual_cases_caused) & new.mat$state==1,]
#only matters if it actually causes cases.
new.mat.zero = subset(new.mat, actual_cases_caused<1)
new.mat <- subset(new.mat, actual_cases_caused>0)
if(nrow(new.mat)>0){
new.mat.list <- dlply(new.mat, .(employ_ids))
#print("1")
new.mat.list <- lapply(new.mat.list, make.rows)
#the new new mat - no longer includes those which cased 0 actual cases
#should always have at least one row because of the if-statement above
#new.mat <- do.call("rbind", new.mat.list)
new.mat <- data.table::rbindlist(new.mat.list)
#now attach a generation time with each of these cases and a random sample from the susceptibles
new.mat$generation_time <- NA
index.ids = unique(new.mat$employ_ids)
for(i in 1:length(index.ids )){
tmp = nrow(new.mat[new.mat$employ_ids == index.ids[i],])
#print(index.ids[[i]])
new.mat$generation_time[new.mat$employ_ids == index.ids[i]] <- gen_list[[index.ids[i]]]$generation_time[1:tmp]
}
#now, attach a place to infect (susceptible)
#bias the sampling based on the proportion of infections within and without of your direct cohort
#first, pair the remaining susceptibles with their category
all.sus <- cbind.data.frame(pop.mat$employ_ids[pop.mat$state==0],pop.mat$employ_cat[pop.mat$state==0])
names(all.sus) = c("employ_ids", "employ_cat")
new.list = dlply(new.mat, .(employ_ids))
#cross infect by cat
#print("2")
new.list.out <- lapply(new.list, cross.infect, all.sus=all.sus, input.par=input.par)
new.mat = data.table::rbindlist(new.list.out)
#new.mat = do.call("rbind", new.list.out)
rownames(new.mat) <- c()
#then, assign names by category of new infections
id.cat = data.frame(sort(unique(new.mat$new_cat)))
all.sus = arrange(all.sus, employ_cat)
names(id.cat) <- "employ_cat"
tmp.sus = merge(x=all.sus, y=id.cat)
tmp.sus.split = dlply(tmp.sus, .(employ_cat))
new.mat.split <- dlply(new.mat, .(new_cat))
#print("3")
dat.new.split.out = mapply(FUN=assign.ID, sus.dat.sub= tmp.sus.split, dat.new.sub= new.mat.split, SIMPLIFY = FALSE)
new.mat = data.table::rbindlist(dat.new.split.out)
#new.mat = do.call("rbind", dat.new.split.out)
new.mat$new_exposure_time = new.mat$exposure_time + new.mat$generation_time
#and merge into pop.mat
#new.merge <- dplyr::select(new.mat, new_infected, employ_ids, infector_iso_time, new_exposure_time)
#names(new.merge) <- c("employ_ids", "infector", "infector_iso_time", "exposure_time")
#now put them into pop.mat
for(i in 1:nrow(new.mat)){
#identify infector and iso time
pop.mat$infector[pop.mat$employ_ids==new.mat$new_infected[i] ] <- new.mat$employ_ids[i]
pop.mat$infector_iso_time[pop.mat$employ_ids==new.mat$new_infected[i]] <- new.mat$infector_iso_time[i]
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==TRUE] <- pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==TRUE] + new.mat$infector_iso_time[i]
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==FALSE] <- Inf #if traced==FALSE, this is never tracked
#and exposure time
pop.mat$exposure_time[pop.mat$employ_ids==new.mat$new_infected[i]] <- new.mat$new_exposure_time[i]
#pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]]
#pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]]
pop.mat$case_source[pop.mat$employ_ids==new.mat$new_infected[i]] <- "UCB" #transmission within berkeley
#change state - only if exposure time is already achieved
pop.mat$state[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$exposure_time<=timestep] <- 1
#otherwise, they still stay suceptible - but you mark them
pop.mat$state[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$exposure_time>timestep] <- 3
} #else, just return pop mat
}
#now, make those that already transmitted recovered/isolated
pop.mat$state[(pop.mat$state==1 & !is.na(pop.mat$actual_cases_caused))] <- 5
  #and, if any of the old "pre-exposed" (state 3) have reached their exposure time, they get promoted to active infections in epidemic.step
#and return pop.mat
return(pop.mat)
}
assign.last.infections <- function(pop.mat, gen_list, remaining.susceptibles, timestep){
# assign new exposures (and times) based on 'actual cases caused' above
# and move those that have transmitted to isolated/recovered state
#(asymptomatics will be missed in iso time unless tested)
timestep.prev = unique(pop.mat$timestep)
if(remaining.susceptibles>0){
#first, pair each case with its generation times
new.mat <- dplyr::select(pop.mat, employ_ids, state, exposure_time, actual_cases_caused, time_isolation)#, time_of_tracing_iso)
new.mat <- new.mat[ new.mat$state==1 & !is.na(new.mat$actual_cases_caused) ,]
#get rid of those that cause no cases
new.mat <- new.mat[new.mat$actual_cases_caused>0,]
#sum(new.mat$actual_cases_caused)>remaining susceptibles
#so need to pick these at random to generate new infections instead
all.possible = c(rep(new.mat$employ_ids, times=new.mat$actual_cases_caused))
last.infector.ids = sample(all.possible, size=remaining.susceptibles, replace=FALSE)
last.infector.ids = data.frame(last.infector.ids)
names( last.infector.ids) ="employ_ids"
new.dat = ddply(last.infector.ids,.(employ_ids), summarise, actual_cases_caused=length(employ_ids))
#and new.mat becomes just these
new.dat$state <- new.dat$time_isolation <- new.dat$exposure_time <- NA
for (i in 1:nrow(new.mat)){
new.dat$state[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$state[i]
new.dat$time_isolation[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$time_isolation[i]
new.dat$exposure_time[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$exposure_time[i]
}
#then, new dat takes the place of new mat
new.dat.list <- dlply(new.dat, .(employ_ids))
new.dat.list <- lapply(new.dat.list, make.rows)
new.dat <- data.table::rbindlist(new.dat.list)
#new.dat <- do.call("rbind", new.dat.list)
#now attach a generation time with each of these cases and a random sample from the susceptibles
new.dat$generation_time <- NA
index.ids = unique(new.dat$employ_ids)
for(i in 1:length(index.ids )){
#print(index.ids[[i]])
      new.dat$generation_time[new.dat$employ_ids == index.ids[i]] <- gen_list[[index.ids[i]]]$generation_time[1:length(new.dat$generation_time[new.dat$employ_ids == index.ids[i]])]
}
#now, attach a place to infect (susceptible) -- should be enough
all.sus <- pop.mat$employ_ids[pop.mat$state==0]
new.dat$new_infected <- sample(all.sus, size=nrow(new.dat), replace=FALSE)
new.dat$new_exposure_time = new.dat$exposure_time + new.dat$generation_time
#now put them into pop.mat
for(i in 1:nrow(new.dat)){
#identify infector and iso time
pop.mat$infector[pop.mat$employ_ids==new.dat$new_infected[i] ] <- new.dat$employ_ids[i]
pop.mat$infector_iso_time[pop.mat$employ_ids==new.dat$new_infected[i]] <- new.dat$infector_iso_time[i]
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==TRUE] <- pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==TRUE] + new.dat$infector_iso_time[i]
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==FALSE] <- Inf #if traced==FALSE, this is never tracked
#and exposure time
pop.mat$exposure_time[pop.mat$employ_ids==new.dat$new_infected[i]] <- new.dat$new_exposure_time[i]
#pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]]
#pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]]
pop.mat$case_source[pop.mat$employ_ids==new.dat$new_infected[i]] <- "UCB" #transmission within berkeley
#change state - only if exposure time is already achieved
pop.mat$state[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$exposure_time<=timestep] <- 1
#otherwise, they still stay suceptible - but you mark them
pop.mat$state[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$exposure_time>timestep] <- 3
} #else, just return pop mat
}
#other
#now, make those that already transmitted recovered/isolated
pop.mat$state[(pop.mat$state==1 & !is.na(pop.mat$actual_cases_caused))] <- 5
  #and, if any of the old "pre-exposed" (state 3) have reached their exposure time, they get promoted to active infections in epidemic.step
#then, those active infections will cause no more new cases
pop.mat$actual_cases_caused[pop.mat$state==1] <- 0
#and return pop.mat
return(pop.mat)
}
get.actual.cases = function(pop.dat, dat.gen, timestep){
sub.gen =subset(dat.gen, employ_ids==unique(pop.dat$employ_ids))
#count the number of exposure time + generation time combos that take place before the iso time
sub.gen$new_exposures = sub.gen$generation_time + pop.dat$exposure_time
n.out = length(sub.gen$new_exposures[sub.gen$new_exposures<=pop.dat$time_isolation])
return(n.out)
}
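#quick illustrative check of get.actual.cases() on toy inputs (not part of the simulation; all names below are hypothetical demo objects)
#a person exposed at day 2 with generation-time draws of 1, 3, and 9 days who isolates at day 8
#would generate new exposures at days 3, 5, and 11; only the first two fall before isolation, so 2 actual cases result
demo.pop.dat <- data.frame(employ_ids = 1, exposure_time = 2, time_isolation = 8)
demo.dat.gen <- data.frame(employ_ids = 1, generation_time = c(1, 3, 9))
get.actual.cases(pop.dat = demo.pop.dat, dat.gen = demo.dat.gen, timestep = 1) #returns 2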
get.symptom.onset <- function(dat, dat.vir, LOD){
#get titer limit
symptom.lim <- as.numeric(unique(dat$titer_lim_for_symptoms))
#get the timing in the trajectory that first crosses this limit
dat$time_of_symptom_onset <- min(dat.vir$time[dat.vir$V>symptom.lim])
dat$time_test_sensitive_start <- min(dat.vir$time[dat.vir$V>LOD])
dat$time_test_sensitive_end <- max(dat.vir$time[dat.vir$V>LOD])
#returns Inf (or -Inf for the end time) with a warning if the titer never crosses the corresponding threshold
return(dat)
}
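#toy example of get.symptom.onset() (illustrative only; demo objects, not model inputs):
#with a symptom threshold of 1e5 and an LOD of 10, this trajectory becomes test-sensitive on day 2,
#crosses the symptom threshold on day 4, and is last test-sensitive on day 8
demo.person <- data.frame(employ_ids = 1, titer_lim_for_symptoms = 1e5)
demo.titer <- data.frame(time = 0:10,
                         V = c(0, 1e0, 1e2, 1e4, 1e6, 1e7, 1e5, 1e3, 1e2, 1e0, 0))
get.symptom.onset(dat = demo.person, dat.vir = demo.titer, LOD = 10^1)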
make.rows <- function(dat){
n = dat$actual_cases_caused
new.dat <- data.frame(matrix(NA, nrow=n, ncol=5) )
names(new.dat) <- c("employ_ids", "exposure_time", "actual_cases_caused", "infector_iso_time", "infector_cat")#, "time_of_test_sensitivity")#, "time_of_tracing_iso")
new.dat$employ_ids <- rep(dat$employ_ids, nrow(new.dat))
new.dat$infector_iso_time <- rep(dat$time_isolation, nrow(new.dat))
new.dat$infector_cat <- rep(dat$employ_cat, nrow(new.dat))
new.dat$exposure_time <- rep(dat$exposure_time, nrow(new.dat))
#new.dat$time_of_tracing_iso <- rep(dat$time_of_tracing_iso, nrow(new.dat))
if(nrow(new.dat)>0){
new.dat$actual_cases_caused <- 1
return(new.dat)
} #else, return nothing
}
delayfn_surv <- function(delay_mean, delay_sd){
out <- purrr::partial(rnorm,
mean = delay_mean,
sd = delay_sd)
return(out)
}#symptomatic surveillance/TAT delay
generationTime_fn <- function(serial_dist=NULL, serial_shape = NULL, serial_scale = NULL) {
if(serial_dist=="weibull"){
out <- purrr::partial(rweibull,
shape = serial_shape,
scale = serial_scale)
}else if(serial_dist=="gamma"){
out <- purrr::partial(rgamma,
shape = serial_shape,
scale = serial_scale)
}
return(out)
} #weibull or gamma serial interval as the case may be
inc_fn <- function(n_inc_samp = NULL, meanInc=NULL, sdInc=NULL) {
out= purrr::partial(rlnorm,
meanlog = log(meanInc),
sdlog = log(sdInc))
#out[out < 1] <- 1
return(out)
} #lognormal incubation time draw
R0_fn <- function(meanR0=NULL, sdR0=NULL){
out <- purrr::partial(rlnorm,
meanlog = log(meanR0),
sdlog = log(sdR0))
return(out)
} #lognormal R0
R0_fn_nb <- function(muR0=NULL, sizeR0=NULL){
out <- purrr::partial(rnbinom,
mu = muR0,
size = sizeR0)
return(out)
} #nb R0
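#the constructors above all use purrr::partial to pre-fill the distribution parameters and return
#a sampler that only needs n; a quick illustration with arbitrary parameter values (demo objects only):
demo.delayfn <- delayfn_surv(delay_mean = 1, delay_sd = 0.5) #normal(1, 0.5) delay sampler
demo.delayfn(3)                                              #three delay draws
demo.genTime <- generationTime_fn(serial_dist = "weibull", serial_shape = 2, serial_scale = 5)
demo.genTime(3)                                              #three generation-time draws
demo.R0 <- R0_fn_nb(muR0 = 2.5, sizeR0 = 0.16)
demo.R0(3)                                                   #three individual R0 draws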
initiate.pop <- function(start.ID.employ, pop.UCB, n.init.exposed, pop.ID, within.host.theta, input.par, R0fn, eventFn, titer.dat, LOD, virus.par){
#sample serial interval
genTime = generationTime_fn(serial_dist = virus.par$distribution[virus.par$parameter=="generation_time"],
serial_shape= virus.par$par1[virus.par$parameter=="generation_time"],
serial_scale= virus.par$par2[virus.par$parameter=="generation_time"])
pop.par = subset(input.par, population==pop.ID)
#make table one
pop.mat = cbind.data.frame(matrix(NA, nrow=pop.UCB, ncol =27))
names(pop.mat) <- c( "employ_ids","employ_cat", "state", "traced", "testing", "obs_dist_limits", "exposure_time", "total_potential_cases_caused", "original_potential_cases_caused_UCB", "num_infection_events", "post_titer_potential_cases_caused_UCB", "potential_cases_caused", "actual_cases_caused", "case_source", "infector", "time_test_sensitive_start", "time_test_sensitive_end", "infector_iso_time", "time_of_tracing_iso", "time_of_next_test", "time_of_testing_iso", "titer_lim_for_symptoms", "time_of_symptom_onset", "time_of_symptom_iso", "time_isolation", "reason_isolated", "timestep")
#and fill in all you can
pop.mat$testing = pop.par$par1[pop.par$parameter=="test-on"]
pop.mat$timestep = 0
pop.mat$employ_cat = pop.ID
#assign them all an employer ID
pop.mat$employ_ids = start.ID.employ:(pop.UCB+start.ID.employ-1)
#assign a "first test" depending on how many days of testing per week...
test_rotation = as.character(pop.par$par1[pop.par$parameter=="test-rotation"] )
n.test.day.per.wk = as.numeric(pop.par$par1[pop.par$parameter=="n-test-days-per-week"])
#then, if this is bigger than a weekly regime, half of them must happen one week and half the other
if ((test_rotation=="biweekly" & n.test.day.per.wk==2) | (test_rotation=="weekly" & n.test.day.per.wk==2)){
pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB)
}else if((test_rotation=="biweekly" & n.test.day.per.wk==5) | (test_rotation=="weekly" & n.test.day.per.wk==5)){
pop.mat$time_of_next_test = rep(c(3,4,5,6,7), length=pop.UCB)
}else if((test_rotation=="biweekly" & n.test.day.per.wk==7) | (test_rotation=="weekly" & n.test.day.per.wk==7)){
pop.mat$time_of_next_test = rep(c(1,2,3,4,5,6,7), length=pop.UCB)
}else if(test_rotation=="two-week" & n.test.day.per.wk==7){
pop.mat$time_of_next_test = rep(c(1,2,3,4,5,6,7,8,9,10,11,12,13,14), length=pop.UCB)
}else if(test_rotation=="two-week" & n.test.day.per.wk==5){
pop.mat$time_of_next_test = rep(c(3,4,5,6,7,10,11,12,13,14), length=pop.UCB)
}else if(test_rotation=="two-week" & n.test.day.per.wk==2){
pop.mat$time_of_next_test = rep(c(3,7, 10,14), length=pop.UCB)
}else if (test_rotation=="two-week-ThFri"){
pop.mat$time_of_next_test = rep(c(seq((7-n.test.day.per.wk+1),7,1),seq((14-n.test.day.per.wk+1),14,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-daily"){
pop.mat$time_of_next_test = rep(c(seq(1,14,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-MonTues"){
pop.mat$time_of_next_test = rep(c(seq(3,4,1),seq(10,11,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-TuesWed"){
pop.mat$time_of_next_test = rep(c(seq(4,5,1),seq(11,12,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-MonFri"){
pop.mat$time_of_next_test = rep(c(3,7,10,14), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="two-week-MonWed"){
pop.mat$time_of_next_test = rep(c(3,5,10,12), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday
}else if (test_rotation=="one-week-ThFri"){
pop.mat$time_of_next_test = rep(seq((7-n.test.day.per.wk+1),7,1), length=pop.UCB)
}else if (test_rotation=="one-week-MonTues"){
pop.mat$time_of_next_test = rep(seq(3,4,1), length=pop.UCB)
}else if (test_rotation=="one-week-MonFri"){
pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB)
}else if(test_rotation=="one-week-daily"){
pop.mat$time_of_next_test = rep(c(seq(1,7,1)), length=pop.UCB)
}else if(test_rotation=="none"){
pop.mat$time_of_next_test=Inf
}else if(test_rotation=="thrice-weekly-MonTues"){
pop.mat$time_of_next_test = rep(c(3,4,10,11,17,18), length=pop.UCB)
}else if (test_rotation=="two_day"){
pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB)
}else if (test_rotation=="four_week"){
pop.mat$time_of_next_test = rep(c(seq((7-n.test.day.per.wk+1),7,1), seq((14-n.test.day.per.wk+1),14,1), seq((21-n.test.day.per.wk+1),21,1), seq((28-n.test.day.per.wk+1),28,1)), length=pop.UCB)
}
pop.mat$time_of_next_test = sample(pop.mat$time_of_next_test, size=length(pop.mat$time_of_next_test), replace = FALSE) #scramble
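#worked example (illustrative): with test_rotation = "weekly" and 2 test days per week, half the population
#draws day 3 and half draws day 7 as a first test; the scramble above just randomizes who gets which slot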
prop.traced = as.numeric(pop.par$par1[pop.par$parameter=="prop.trace"])
#for all, based on proportions, give whether traced or not
pop.mat$traced = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$traced), replace=TRUE, prob=c(prop.traced, 1-prop.traced))
#and the same for the proportion observing distancing limits
prop.obs = as.numeric(pop.par$par1[pop.par$parameter=="percent-obs-dist-lim"])
pop.mat$obs_dist_limits = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$obs_dist_limits), replace=TRUE, prob=c(prop.obs, 1-prop.obs))
# and whether asymp or not
#pop.mat$stat_asymp = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$stat_asymp), replace=TRUE, prob=c(prop.asym, 1-prop.asym))
#make initial state variable
pop.mat$state <- rep(as.integer(0),pop.UCB)
#based on the proportion vaccinated, some get moved to recovered (state 5) right away
#for all of our model runs, this is 0, so this gets skipped
if(as.numeric(pop.par$par1[pop.par$parameter=="prop-vaccinated"])>0){
tot.vacc <- round(as.numeric(pop.par$par1[pop.par$parameter=="prop-vaccinated"])*pop.UCB,0)
index.vacc = sample(1:pop.UCB, size=tot.vacc, replace=FALSE)
pop.mat$state[index.vacc] <- 5
}
#then, regardless of vaccination, overwrite susceptibles with those initially exposed
#initially exposed get distributed at random
index.init = sample(as.numeric(rownames(pop.mat[pop.mat$state==0,])), size=n.init.exposed, replace=FALSE)
pop.mat$state[index.init] <- 1
#here, build distributions
#symptomatic isolation delay
delayfn_symp = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="iso-lag"]),
delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="iso-lag"]))
#turnaround testing delay
delayfn_TAT = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="TAT-lag"]),
delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="TAT-lag"]))
#contact tracing lag
delayfn_trace = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="trace-lag"]),
delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="trace-lag"]))
#titer limit for symptoms
titer_lim = lognormal_fn(meanlogpar=log(as.numeric(pop.par$par1[pop.par$parameter=="symptom-lim"])),
sdlogpar = log(as.numeric(pop.par$par2[pop.par$parameter=="symptom-lim"])))
prop.cases.UCB = as.numeric(pop.par$par1[pop.par$parameter=="prop.cases.UCB"])
#now generate potential new infections based on your status
#(the commented-out lines below are an older weekend/weekday split of the R0 draw)
#pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE] = floor(R0fn.asym.wk(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE]))*prop.cases.UCB)
#pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==FALSE] = floor(R0fn.wk(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==FALSE]))*prop.cases.UCB)
#and during the week, fewer cases
#pop.mat$wk_tot_potential_cases_caused[pop.mat$stat_asymp==TRUE] = floor(R0fn.asym(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE]))*prop.cases.UCB)
#here are all possible cases
pop.mat$total_potential_cases_caused = R0fn(length(pop.mat$employ_ids))
#here are all possible at UC Berkeley - before the titer cull
pop.mat$original_potential_cases_caused_UCB = floor(pop.mat$total_potential_cases_caused*prop.cases.UCB)
#you should have already brought in a titer trajectory for everyone in your population
#choose a threshold titer for symptom onset
pop.mat$titer_lim_for_symptoms = titer_lim(pop.UCB)
pop.split <- dlply(pop.mat, .(employ_ids))
titer.split <- dlply(titer.dat, .(employ_ids))
#now, based on this, go into each person's virus trajectory and calculate the timing of symptom onset
#while you are at it, you can also look at their titer and the LOD and calculate the start/end times for which they are test sensitive
pop.split.new <- mapply(get.symptom.onset, dat = pop.split, dat.vir=titer.split, MoreArgs = list(LOD=LOD), SIMPLIFY = FALSE)
#when there is nothing that is under the limit, these infections become "asymptomatic" --
#we can later play with the proportion that classify as this by modulating the mean value for the symptom onset limit
pop.mat <- data.table::rbindlist(pop.split.new)
#pop.mat <- do.call("rbind", pop.split.new)
#and the delay to isolation
pop.mat$time_of_symptom_onset[pop.mat$time_of_symptom_onset<0]<-0
pop.mat$time_of_symptom_iso = delayfn_symp(pop.UCB)
pop.mat$time_of_symptom_iso[pop.mat$time_of_symptom_iso<0]<- 0
pop.mat$time_of_symptom_iso <- pop.mat$time_of_symptom_iso + pop.mat$time_of_symptom_onset
pop.mat$time_of_testing_iso = delayfn_TAT(pop.UCB)
pop.mat$time_of_testing_iso[pop.mat$time_of_testing_iso<0] <- 0
pop.mat$time_of_testing_iso <- pop.mat$time_of_testing_iso + pop.mat$time_of_next_test
pop.mat$time_of_tracing_iso = delayfn_trace(pop.UCB)
pop.mat$time_of_tracing_iso[pop.mat$time_of_tracing_iso<0] <- 0
#now, if not traced, never:
pop.mat$time_of_tracing_iso[pop.mat$traced==FALSE] <- Inf
pop.mat$time_of_tracing_iso[pop.mat$state>0] <- Inf # new introductions cannot be traced
pop.mat$infector[pop.mat$state>0] <- 0 # new introductions cannot be traced
pop.mat$infector_iso_time[pop.mat$state>0] <- Inf # new introductions cannot be traced
pop.mat$case_source[pop.mat$state>0] <- "alameda"
#NOW, we generate new cases:
#we break down each infectious individual based on that individual's:
#(a) within-host titer trajectory, (b) the selected value for within-host theta (how viral load translates to infection probability),
#(c) the number of discrete transmission events that we draw for each person, and
#(d) the generation time of those contact events
#(for d, we currently use the Ferretti Weibull, though we expect that a constant hazard of events
# plus the titer trajectory of the pathogen would roughly reproduce the expected generation time)
#(1) First, for each person, we draw the number of possible cases from R0 - this equates to individual heterogeneity in infectiousness
# (one type of superspreading) and is already captured in the "total_potential_cases_caused" column, which then gets reduced down to the
# proportion in the UCB community in the "original_potential_cases_caused_UCB" column
#(2) Then, we draw a number of contact events, among which the above cases get distributed. (this equates to event-based superspreading
# - fewer event draws and a high number of transmissions from #1 generate the biggest superspreading events). Currently, we draw this
# from a Poisson with lambda=3
#(3) Then, for each "event", we draw a time that this event took place (here, represented from the generation time Weibull, though this could change)
#(4) Then, for each event + time, we go into individual's titer trajectory to determine if each transmission actually
# takes place, based on the person's titer load at the point of infection. Since our initial R0 is 2.5, we fix theta at .7, such that the max
# probability of infection taking place is ~50% at peak viral load. If one 'event' generates multiple cases, each case is treated independently
# with this titer-based transmission probability.
#(5) If there is a group size limit, it gets imposed here. Say that group limit is 6 and one event is supposed to generate 10 cases.
# If this person abides by group limits (there is a parameter for this), we truncate the 10 person event to a 6 person event, and assume
# as a worst-case scenario that all 6 of those people get infected
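#(an illustrative, self-contained toy sketch of steps 2-4 appears immediately after the end of this function definition)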
#first, draw number of transmission events per person
pop.mat$num_infection_events <- eventFn(pop.UCB)
#then get a list of event times per person for each of these events
pop.list <- dlply(pop.mat, .(employ_ids))
event.times.list <- lapply(pop.list, get.event.time, genTime=genTime)
# now, each person has a number of cases, a number of events, a time for each event,
# and a virus titer trajectory.
# take this information and determine which events actually take place and when they occur
# also, if applicable, here impose the group size limit and record cases both before
# and after that limit occurs
# return the data as well as the edited event times list that replaces each
# failed case generation with NA
double.list <- mapply(FUN=get.real.cases, pop.dat=pop.list, event.dat=event.times.list, titer.dat1 = titer.split, MoreArgs = list(within.host.theta=within.host.theta, group.limit=as.numeric(pop.par$par1[pop.par$parameter=="group-size-limit"])), SIMPLIFY = FALSE)
pop.mat.list <- sapply(double.list, "[",1)
pop.mat <- data.table::rbindlist(pop.mat.list)
#pop.mat <- do.call("rbind", pop.mat.list)
gen_time_list <- sapply(double.list, "[",2)
dat.gen <- data.table::rbindlist(gen_time_list)
#dat.gen = do.call("rbind", gen_time_list)
#now cases from potential get distributed among events
#then we determine how many take place based on titers
#then we remove those that don't take place based on group size limitation
#then, we set an exposure time for those cases that actually occur
pop.mat$exposure_time[pop.mat$state>0] <- 0
#first, assume that isolation time is symptomatic
pop.mat$time_isolation[pop.mat$state==1 ] <- as.numeric(pop.mat$time_of_symptom_iso[pop.mat$state==1 ])
pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)
pop.mat$reason_isolated[pop.mat$state==1 ] <- "symptom_iso"
#now, if testing (and, for other cases, tracing) comes first, we replace it
#test needs to be AFTER start time of test sensitive and before end time of test sensitive
pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- "testing_iso"
pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- pop.mat$time_of_testing_iso[pop.mat$state==1 & pop.mat$time_isolation > pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end]
#and then, if any of these are Inf, change the reason to NA
pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA
#now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
#then export, regroup with other half of population and assign those new infections in the next time step
new.cases = dlply(pop.mat[pop.mat$state==1& pop.mat$potential_cases_caused>0,], .(employ_ids))
#if potential cases were 0, then actual cases are too:
pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused ==0] <- 0
pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused > 0] <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen, timestep)))
#now pop it back out, join with other sub-mats and assign those infections in time and space using your generation time vector.
return(list(pop.mat, dat.gen))
}
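#illustrative toy sketch of steps (2)-(4) described in the long comment inside initiate.pop() above.
#the real logic lives in get.event.time() and get.real.cases() (defined elsewhere in this script);
#the acceptance probability used here is a made-up placeholder and NOT the model's actual titer-to-probability function.
demo.sketch.transmission <- function(n.potential.cases, n.events, titer.traj, theta = 0.7){
  #(2) distribute the potential cases at random among the discrete contact events
  event.of.case <- sample(1:n.events, size = n.potential.cases, replace = TRUE)
  #(3) draw a time for each contact event (arbitrary toy generation-time distribution here)
  event.times <- rweibull(n.events, shape = 2, scale = 5)
  case.times <- event.times[event.of.case]
  #(4) look up the titer at each case's event time and accept/reject the transmission
  V.at.case <- approx(x = titer.traj$time, y = titer.traj$V, xout = case.times, rule = 2)$y
  p.accept <- theta * (1 - exp(-log10(pmax(V.at.case, 1)))) #placeholder: probability rises with titer, capped below theta
  accepted <- rbinom(n.potential.cases, size = 1, prob = pmin(pmax(p.accept, 0), 1)) == 1
  data.frame(case_time = case.times, titer = V.at.case, transmitted = accepted)
}
demo.titer.traj <- data.frame(time = 0:10, V = c(0, 10, 1e3, 1e5, 1e7, 1e6, 1e4, 1e2, 10, 1, 0))
demo.sketch.transmission(n.potential.cases = 5, n.events = 2, titer.traj = demo.titer.traj)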
epidemic.step = function(pop.mat, timestep, length_timestep, prob.out, gen_list, input.par){
#pop.mat <- as.data.frame(pop.mat)
pop.par = subset(input.par, population ==unique(pop.mat$employ_cat))
#advance timestep
pop.mat$timestep = timestep
#introduce outside infections into susceptible spaces (these cannot displace those already "exposed" by UCB above, since we guarantee those transmissions take place)
#could easily modulate this for risk cohorts in future
#check if weekend
#(the commented-out variant below treats days 1 and 2 of each week as the weekend
# and applies a separate outside-exposure probability on those days)
# if(timestep ==1 | timestep ==2 | (timestep%%7==1)| (timestep%%7==2)){
# n.outside.exposures = sum(sample(x=c(0,1), size=length(pop.mat$state[pop.mat$state==0]), replace=TRUE, prob = c(1-prob.out.wk, prob.out.wk)))
#}else{
n.outside.exposures = sum(sample(x=c(0,1), size=length(pop.mat$state[pop.mat$state==0]), replace=TRUE, prob = c(1-prob.out, prob.out)))
#}
if(n.outside.exposures>0){
#if you find some, fill them in with an exposure time of now, distributed at random
#could add in higher introduction rate for certain sub-groups in this case
new.case.ids = sample(pop.mat$employ_ids[pop.mat$state==0], size = n.outside.exposures, replace=FALSE)
# print(new.case.ids)
#and assign
for (i in 1:length(new.case.ids)){
#print(i)
#print(new.case.ids[i])
#expose the new cases immediately - but only those that have reached the current timestep already
#those "predestined" for exposure get passed over for now.
pop.mat$state[pop.mat$employ_ids==new.case.ids[i]] <- 1
#pop.mat$state[pop.mat$employ_ids==new.case.ids[i] & pop.mat$stat_asymp==TRUE] <- 2
#exposure time is this timestep
pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] <- timestep #infection kicks off so you can now calculate symptom onset time
#pop.mat$time_of_symptom_onset[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$time_of_symptom_onset[pop.mat$employ_ids==new.case.ids[i]] + timestep
#tmp <- pop.mat$time_of_test_positivity[pop.mat$employ_ids==new.case.ids[i]]
#pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.case.ids[i]]
#pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.case.ids[i]]
#infector is outside source that cannot be tracked
pop.mat$infector[pop.mat$employ_ids==new.case.ids[i]] <- 0
pop.mat$infector_iso_time[pop.mat$employ_ids==new.case.ids[i]] <- Inf
pop.mat$case_source[pop.mat$employ_ids==new.case.ids[i]] <- "alameda"
#introduced cases cannot be traced
pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.case.ids[i]] <- Inf
}
}
#pop.mat <- subset(pop.mat, !is.na(employ_ids))
#now, for those that are currently exposed (both from outside and UCB),
# compute distributions of iso time
# and new actual cases caused.
#then, we can assign those times and move them to recovered status
pop.mat.old <- pop.mat
pop.mat <- dplyr::select(pop.mat, -(actual_cases_caused))
#first, go ahead and move test positivity to the appropriate degree
#pop.mat$time_of_test_positivity[pop.mat$state==1 | pop.mat$state==2] <- pop.mat$time_of_test_positivity[pop.mat$state==1 | pop.mat$state==2] + pop.mat$exposure_time[pop.mat$state==1 | pop.mat$state==2]
#print("7")
#first, assume that isolation time is symptomatic
pop.mat$time_isolation[pop.mat$state==1 ] <- as.numeric(pop.mat$time_of_symptom_iso[pop.mat$state==1])
pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)
pop.mat$reason_isolated[pop.mat$state==1 ] <- "symptom_iso"
#now, if tracing comes first, we replace it
#tracing only applicable within our community
#print("8")
pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat) ] <- "tracing_iso"
pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat) ] <- pop.mat$time_of_tracing_iso[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat)]
#or, finally, if testing comes first, we replace it here - IF the infection is test sensitive at the time of testing
#print("9")
pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- "testing_iso"
pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- pop.mat$time_of_testing_iso[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end]
#and then, if any of these are Inf, change the reason to NA
pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA
#first, double-check that nothing was exposed after the isolation time (would be based on tracing only)
#if that ever happens, that person becomes susceptible again because that infection was never generated
#first flag
#then, go in and find that person's infector and reduce their actual cases by one
#based on this case that did not occur
pop.mat$state[pop.mat$exposure_time>pop.mat$time_isolation & pop.mat$state==1 & pop.mat$reason_isolated=="tracing_iso" ] <- 7
pop.mat$reason_isolated[pop.mat$state==7] <- NA
pop.mat$time_isolation[pop.mat$state==7] <- NA
pop.mat$case_source[pop.mat$state==7 ] <- NA
#now remove a case from the infectors that "caused" these events
infector.sub1 = pop.mat[pop.mat$state==7,]
infector.sum1 = ddply(infector.sub1, .(infector), summarize, cases_removed = length(employ_ids)) #save this for the end
pop.mat$infector[pop.mat$state==7] <- NA
pop.mat$infector_iso_time[pop.mat$state==7] <- NA
pop.mat$exposure_time[pop.mat$state==7]<- NA
pop.mat$state[pop.mat$state==7] <- 0
#now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
#then export, and assign those new infections in the next time step
#now, advance forward all of the "time of etc." for susceptibles
#and time of next testing for all
new.cases = dlply(pop.mat[pop.mat$state==1& pop.mat$potential_cases_caused>0,], .(employ_ids))
#if potential cases were 0, then actual cases are too:
#pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused ==0| pop.mat$state==2 & pop.mat$potential_cases_caused ==0] <- 0
#but, if potential cases were greater than 0, then actual might be as well, depending on the isolation times
#dat.gen.new = do.call("rbind", gen_list)
dat.gen.new = data.table::rbindlist(gen_list)
#pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused > 0| pop.mat$state==2 & pop.mat$potential_cases_caused > 0] <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep, weekend.amp=weekend.amp)))
new.actual.cases <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep)))
#these have not kicked off, so let them kick forward
pop.mat$time_of_symptom_onset[pop.mat$state==0 | pop.mat$state==3] <- pop.mat$time_of_symptom_onset[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_of_symptom_iso[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_of_symptom_iso[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_test_sensitive_start[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_test_sensitive_start[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_test_sensitive_end[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_test_sensitive_end[pop.mat$state==0 | pop.mat$state==3] + length_timestep
#tracing only gets started when infector iso time is assigned, so we don't touch it here
#if you are at your current testing date, then next test is bumped into the future.
#Otherwise, you just advance in time until you reach it
#but the lag time is maintained after the new test date, so deal with that first
pop.mat$time_of_testing_iso = pop.mat$time_of_testing_iso - pop.mat$time_of_next_test #now this is just the lag time
#now, compute actual next test day if today is the test day of the runs in question - add different frequencies depending on the type
pop.mat$time_of_next_test[pop.mat$time_of_next_test==timestep] <- timestep + as.numeric(pop.par$par1[pop.par$parameter=="test-freq"])
#now put the lag back on to the new test day for isolation
pop.mat$time_of_testing_iso <- pop.mat$time_of_testing_iso + pop.mat$time_of_next_test
pop.mat$time_of_testing_iso[pop.mat$time_of_next_test==Inf] <- Inf
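#worked example (illustrative): if someone's next test was day 10 with a 1-day TAT lag (testing iso = day 11) and
#today is day 10 with test-freq = 7, the lag (1) is peeled off, the next test becomes day 17, and testing iso becomes day 18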
#and, finally, check in on those that were "pre-exposed" up above.
#move them up to their appropriate status if they should be exposed now
#if they reach it, go ahead and assign their actual cases
#first, eliminate if they should not occur
#first flag
#then, go in and find that person's infector and reduce their actual cases by one
#based on this case that did not occur
pop.mat$state[pop.mat$state==3 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- 8
pop.mat$time_of_tracing_iso[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- pop.mat$time_of_tracing_iso[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] - pop.mat$infector_iso_time[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time]
pop.mat$case_source[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time] <- NA
#now remove a case from the infectors that "caused" these events
infector.sub2 = pop.mat[pop.mat$state==8,]
infector.sum2 = ddply(infector.sub2, .(infector), summarize, cases_removed = length(employ_ids)) #save this for the end
pop.mat$infector[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- NA
pop.mat$state[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- 0
pop.mat$infector_iso_time[pop.mat$state==0] <- NA
pop.mat$exposure_time[pop.mat$state==0] <- NA
if (exists('infector.sum1') & exists('infector.sum2')){
infector.sum <- rbind(infector.sum1, infector.sum2)
}else if(exists('infector.sum1')){
infector.sum <- infector.sum1
}else if(exists('infector.sum2')){
infector.sum <- infector.sum2
}
#then, if they pass that test and still remain 'pre-exposed', check to see if they should be elevated in status to 1 or 2
#(meaning they have reached the exposure time)
#if so, assign them isolation time and actual cases which get allocated in the next round.
#otherwise, they just keep current status as "pre-exposed"
#first make them complete cases, so that R is not angry with new columns being filled in
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep] <- Inf
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep] <- "in progress"
#first, assume that isolation time is symptomatic
#print("1")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] <- "symptom_iso"
# print("2")
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] <- pop.mat$time_of_symptom_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)]
pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)
#now, if tracing comes first, we replace it
#print("3")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)] <- "tracing_iso"
# print("4")
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)] <- pop.mat$time_of_tracing_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)]
#finally, if testing comes first, we replace it
#print("5")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- "testing_iso"
#print("6")
#print(pop.mat[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & complete.cases(pop.mat) ,])
#print(pop.mat)
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- pop.mat$time_of_testing_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)]
#and then, if any of these are Inf, change the reason to NA
pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA
#now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
#then export, regroup with other half of population and assign those new infections in the next time step
new.cases = dlply(pop.mat[pop.mat$state== 3& pop.mat$potential_cases_caused>0 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat) ,], .(employ_ids))
#if potential cases were 0, then actual cases are too:
#pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep | pop.mat$state==4 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep] <- 0
tmp.dat = pop.mat[pop.mat$state==3 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep ,]
new.cases.2 <- dlply(tmp.dat, .(employ_ids))
new.actual.cases.3 <- c(unlist(lapply(new.cases.2, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep)))
#now add the actual cases back in
pop.mat <- cbind.data.frame(pop.mat, pop.mat.old$actual_cases_caused)
names(pop.mat)[length(names(pop.mat))] <- "actual_cases_caused"
#reorder
pop.mat <- dplyr::select(pop.mat, names(pop.mat.old))
#and add in the new actual cases
pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep | pop.mat$state==1 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep ] <- 0
pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep] <- new.actual.cases
pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep ] <- new.actual.cases.3
#and, finally, change state so these cases can get allocated in the next round.
pop.mat$state[pop.mat$state==3 & pop.mat$exposure_time<=timestep ] <- 1
#and remove any avoided cases if there were some
if (exists('infector.sum')){
if(nrow(infector.sum)>0){
for(i in 1:length(infector.sum$infector)){
pop.mat$actual_cases_caused[pop.mat$employ_ids==infector.sum$infector[i]] <- pop.mat$actual_cases_caused[pop.mat$employ_ids==infector.sum$infector[i]] - infector.sum$cases_removed[i]
}
}
}
#and return
return(pop.mat)
}
get.mean.sd <- function(vector, name){
#first, trim to same length
min.length <- min(unlist(lapply(vector, length)))
for (i in 1:length(vector)){
vector[[i]] <- vector[[i]][1:min.length]
}
vec <- unlist(vector, use.names = FALSE)
DIM <- c(length(vector[[1]]),1)
n <- length(vector)
list.mean <- tapply(vec, rep(1:prod(DIM),times = n), mean)
attr(list.mean, "dim") <- DIM
list.mean <- as.data.frame(list.mean)
list.sd <- tapply(vec, rep(1:prod(DIM),times = n), sd)
attr(list.sd, "dim") <- DIM
list.sd <- as.data.frame(list.sd)
list.uci = list.mean + 1.96*list.sd
list.lci = list.mean - 1.96*list.sd
list.lci[list.lci<0] <- 0
list.uci[list.uci<0] <- 0
dat= cbind.data.frame(list.mean, list.lci, list.uci)
names(dat) = paste(c("mean", "lci", "uci"), name, sep="_")
return(dat)
}
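#quick illustration of get.mean.sd() (demo objects only): three replicate time series of unequal length
#are trimmed to the shortest one and summarised as a mean with a normal-approximation 95% CI at each time point
demo.reps <- list(c(1, 2, 4, 8), c(2, 3, 5, 9, 12), c(1, 3, 6, 7))
get.mean.sd(vector = demo.reps, name = "toy_exposures")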
get.mean.matrix <- function(mat){
#first, trim to same length
min.length <- min(unlist(lapply(mat, nrow)))
n.cat = ncol(mat[[1]])/3
for (i in 1:length(mat)){
mat[[i]] <- mat[[i]][1:min.length,]
}
list.mean <- Reduce("+",mat) / length(mat)
mat.2 <- do.call("cbind", mat)
#mat.2 <- data.table::rbindlist(mat)
list.sd <- apply(mat.2, 1, sd)
list.uci = list.mean + 1.96*list.sd
list.lci = list.mean - 1.96*list.sd
list.lci[list.lci<0] <- 0
list.uci[list.uci<0] <- 0
dat= cbind.data.frame(list.mean, list.lci, list.uci)
names(dat) = c(paste0("mean_iso_cat_",seq(1,n.cat,1)), paste0("lci_iso_cat_",seq(1,n.cat,1)), paste0("uci_iso_cat_",seq(1,n.cat,1)),
paste0("mean_exp_cat_",seq(1,n.cat,1)), paste0("lci_exp_cat_",seq(1,n.cat,1)), paste0("uci_exp_cat_",seq(1,n.cat,1)),
paste0("mean_deaths_cat_",seq(1,n.cat,1)), paste0("lci_deaths_cat_",seq(1,n.cat,1)), paste0("uci_deaths_cat_",seq(1,n.cat,1)))
return(dat)
}
convert.cat = function(dat){
n.cat = ncol(dat)/9
max.times = nrow(dat)
iso.dat = dat[,1:(n.cat*3)]
exp.dat = dat[,(n.cat*3+1):(n.cat*3*2)]
death.dat = dat[,(n.cat*3*2+1):ncol(dat)]
#then, sep by cat
list.iso <- list.exp <- list.deaths <- list()
for(i in 1:n.cat){
list.iso[[i]] <- cbind.data.frame(iso.dat[,i], iso.dat[,i+n.cat],iso.dat[,i+(n.cat*2)])
list.exp[[i]] <- cbind.data.frame(exp.dat[,i],exp.dat[,i+n.cat],exp.dat[,i+(n.cat*2)])
list.deaths[[i]] <- cbind.data.frame(death.dat[,i], death.dat[,i+n.cat],death.dat[,i+(n.cat*2)])
}
#iso.db <- do.call("rbind", list.iso)
iso.db <- data.table::rbindlist(list.iso)
iso.db$type = rep(1:n.cat, each = max.times)
iso.db$type <- paste0("iso-pop-", iso.db$type)
names(iso.db) <- c("mean", "lci", "uci", "type")
#exp.db <- do.call("rbind", list.exp)
exp.db <- data.table::rbindlist(list.exp)
exp.db$type = rep(1:n.cat, each = max.times)
exp.db$type <- paste0("exp-pop-", exp.db$type)
names(exp.db) <- c("mean", "lci", "uci", "type")
#death.db <- do.call("rbind", list.deaths)
death.db <- data.table::rbindlist(list.deaths)
death.db$type = rep(1:n.cat, each = max.times)
death.db$type <- paste0("death-pop-", death.db$type)
names(death.db) <- c("mean", "lci", "uci", "type")
return(list(iso.db, exp.db, death.db))
}
R.fit.sum <- function(mat.df){
#apply across all columns
mean.all <- apply(mat.df, 2,mean)
sd.all <- apply(mat.df, 2,sd)
lci.all <- mean.all-1.96*sd.all
lci.all[ lci.all < 0] <- 0
uci.all <- mean.all+1.96*sd.all
#and nbinom fit
all.fit <- apply(mat.df, 2, fitdist, distr="nbinom")
#and return
out.dat <- cbind.data.frame(mean.all, lci.all, uci.all)
out.dat$class <- names(mat.df)
#names(out.dat) <- names(mat.df)
#out.dat$estimate <- c("mean", "lci", "uci")
#out.dat[out.dat<0] <- 0
#and add fit
size.out <- list()
mu.out <- list()
for(i in 1:length(all.fit)){
size.out[[i]] <- all.fit[[i]]$estimate[1]
mu.out[[i]] <- all.fit[[i]]$estimate[2]
}
size.out <- c(unlist(size.out))
mu.out <- c(unlist(mu.out))
out.dat$nb_mu <- mu.out
out.dat$nb_size <- size.out
# names(size.out) <- names(mu.out) <- names(out.dat)
# out.dat <- rbind(out.dat, size.out, mu.out)
#
# out.dat$total_potential_cases <- as.numeric(out.dat$total_potential_cases)
# out.dat$UCB_potential_cases <- as.numeric(out.dat$UCB_potential_cases)
# out.dat$UCB_post_group_potential_cases <- as.numeric(out.dat$UCB_post_group_potential_cases)
# out.dat$UCB_post_titer_potential_cases <- as.numeric(out.dat$UCB_post_titer_potential_cases)
# out.dat$UCB_post_isolations_actual_cases <- as.numeric(out.dat$UCB_post_isolations_actual_cases)
#
return(out.dat)
}
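#toy check of R.fit.sum() (demo objects only): two columns of simulated offspring counts each get a mean,
#a 95% CI, and a negative-binomial fit (nb_mu/nb_size). fitdist() comes from fitdistrplus, which this
#script assumes is available; it is attached here just to keep the example self-contained.
library(fitdistrplus)
demo.R.df <- data.frame(total_potential_cases = rnbinom(200, mu = 2.5, size = 0.5),
                        UCB_potential_cases   = rnbinom(200, mu = 1.5, size = 0.5))
R.fit.sum(demo.R.df)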
R.fit.sum.lognorm <- function(mat.df){
#apply across all columns
mean.all <- apply(mat.df, 2,mean)
sd.all <- apply(mat.df, 2,sd)
lci.all <- mean.all-1.96*sd.all
lci.all[ lci.all < 0] <- 0
uci.all <- mean.all+1.96*sd.all
#and return
out.dat <- cbind.data.frame(mean.all, lci.all, uci.all)
out.dat$class <- names(mat.df)
#names(out.dat) <- names(mat.df)
#out.dat$estimate <- c("mean", "lci", "uci")
#out.dat[out.dat<0] <- 0
#
return(out.dat)
}
simulate.epidemic <- function(input.pop, n.init.exposed.vector, employ.id.vector, times, virus.par, input.par, burnin,
test.freq, length_timestep, bay.area.prev, initial.R, within.host.theta, titer.dat, LOD, test_rotation_name){
if (virus.par$distribution[virus.par$parameter=="R0"]=="log-normal"){
#sample R0 normal
R0fn = R0_fn(meanR0=virus.par$par1[virus.par$parameter=="R0"],
sdR0=virus.par$par2[virus.par$parameter=="R0"])
}else if(virus.par$distribution[virus.par$parameter=="R0"]=="negbinom"){
#sample R0 normal
R0fn = R0_fn_nb(muR0=virus.par$par1[virus.par$parameter=="R0"],
sizeR0=virus.par$par2[virus.par$parameter=="R0"])
}
#and the number of transmission events, from a negbinom
#remember that fewer events = higher likelihood of a big superspreading event.
#but the vast majority of people have both few events and few cases
eventFn = poisson_fn(lambda =as.numeric(input.par$par1[input.par$parameter=="transmission-events"]))
#and normal distribution of the detection limit
#then, form your new populations
#now split the population based on risk
tot.pop = length(input.pop)
pop.num = 1:tot.pop
titer.dat$cat <- NA
for (i in 1:(length(pop.num)-1)){
titer.dat$cat[titer.dat$employ_ids < employ.id.vector [i+1] & titer.dat$employ_ids >= employ.id.vector [i]] <- pop.num[i]
}
titer.dat$cat[is.na(titer.dat$cat)] <- pop.num[length(pop.num)]
#and split
titer.dat.split <- dlply(titer.dat, .(cat))
#make the proper number of pop.mat depending on the total number of subpopulations
#populate each using the appropriate parameters
out.list = mapply(FUN=initiate.pop, start.ID.employ = as.list(employ.id.vector), pop.UCB=as.list(input.pop), n.init.exposed= as.list(n.init.exposed.vector), pop.ID = as.list(pop.num), titer.dat=titer.dat.split,
MoreArgs= list(input.par=input.par, virus.par=virus.par, R0fn=R0fn, eventFn=eventFn, within.host.theta=within.host.theta, LOD=LOD))
pop.list = out.list[1,]
gen_list_long <- out.list[2,]
#original.r0 <- out.list[3,][[1]]
#gen_list_long_wkend <- out.list[3,]
#pop.mat <- do.call("rbind", pop.list)
pop.mat <- data.table::rbindlist(pop.list)
#gen.dat.all <- do.call("rbind", gen_list_long)
gen.dat.all <- data.table::rbindlist(gen_list_long)
#now, double-check that the generation time dataframe is the same length as the number of unique employ ids
if(sum(setdiff(pop.mat$employ_ids, gen.dat.all$employ_ids))>0){
missing.ids <- setdiff(pop.mat$employ_ids, gen.dat.all$employ_ids)
missing.cases <- list()
for(i in 1:length(missing.ids)){
missing.cases[[i]] <- pop.mat$potential_cases_caused[pop.mat$employ_ids==missing.ids[i]]
}
missing.cases <- c(unlist(missing.cases))
if(sum(missing.cases)>0){
missing.gen <- genTime(missing.cases)
add.dat <- cbind.data.frame(rep(missing.ids, missing.cases), missing.gen)
}else{
missing.gen <- rep(NA, length(missing.cases))
add.dat <- cbind.data.frame(missing.ids, missing.gen)
}
names(add.dat) <- names(gen.dat.all)
gen.dat.all <- rbind(gen.dat.all, add.dat)
gen.dat.all <- arrange(gen.dat.all, employ_ids)
}
gen_list = dlply(gen.dat.all, .(employ_ids))
#gen_list_wk = dlply(gen.dat.all.wk, .(employ_ids))
foi.bay.area = initial.R*bay.area.prev*length_timestep #rate per day at which susceptibles become infected
#foi.wkend = bay.area.R*bay.area.prev*length_timestep*weekend.amp
prob.outside.exposure =1-(exp(-1*foi.bay.area)) #for each person in berkeley, this is the probability of getting exposed each day
prob.outside.exposure[prob.outside.exposure<0] <- 0
#prob.outside.exposure.wk =1-(exp(-1*foi.wkend))
#could also be a vector
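#worked example of the two lines above (plugging in the run parameters used at the bottom of this script,
#purely for illustration): initial.R = 2.5, bay.area.prev = 0.001, length_timestep = 1
#  foi.bay.area = 2.5 * 0.001 * 1 = 0.0025 per susceptible per day
#  prob.outside.exposure = 1 - exp(-0.0025) ~ 0.0025
#so a population of 20000 susceptibles expects roughly 50 outside introductions per day at the start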
times_vect = seq(length_timestep,times, by = length_timestep)
for(i in 1: length(times_vect)){
#print(i)
timestep = times_vect[i]
#could make other functions here if people mostly infect their own subgroups
#here, we distribute the infections amongst new people and retire the old
pop.mat = assign.infections(pop.mat = pop.mat, gen_list=gen_list, timestep = timestep, input.par = input.par)
#now split it by population to introduce outside exposures
pop.split = dlply(pop.mat, .(employ_cat))
pop.mat.list = lapply(pop.split, FUN=epidemic.step, timestep= timestep, prob.out = prob.outside.exposure, gen_list=gen_list, input.par=input.par, length_timestep = length_timestep)
#then, rejoin
#pop.mat = do.call("rbind", pop.mat.list)#print(i)
pop.mat = data.table::rbindlist(pop.mat.list)
#then, just keep tabs that there are enough susceptibles to fill the new cases in the next step
remaining.susceptibles = length(pop.mat$state[pop.mat$state==0])
future.cases = sum(pop.mat$actual_cases_caused[pop.mat$state==1])
if(future.cases>remaining.susceptibles){ #if there are not enough susceptibles left for all of the assigned cases before you reach the end of the time series, then you go into the next step
#print(i)
pop.mat = assign.last.infections(pop.mat = pop.mat, gen_list = gen_list, remaining.susceptibles = remaining.susceptibles, timestep = timestep)
#print(i)
}
}
#collect all the "R" reduction info:
R.mat <- dplyr::select(pop.mat, total_potential_cases_caused, original_potential_cases_caused_UCB, post_titer_potential_cases_caused_UCB, potential_cases_caused, actual_cases_caused)
names(R.mat) <- c( "total_potential_cases", "UCB_potential_cases", "UCB_post_titer_potential_cases", "UCB_post_group_potential_cases", "UCB_post_isolations_actual_cases")
R.mat <- arrange(R.mat, desc(total_potential_cases))
R.mat$UCB_post_isolations_actual_cases[is.na(R.mat$UCB_post_isolations_actual_cases)] <- 0
#R.mat <- as.matrix(R.mat)
# #new R0
# new.R0 = subset(pop.mat, !is.na(infector))
# new.R0 = ddply(new.R0, .(infector), summarize, cases_caused=length(employ_ids))
# tot.introductions = new.R0$cases_caused[new.R0$infector=="0"]
# new.R0 = subset(new.R0, infector!="0")
#
# maxID = max(pop.mat$employ_ids)
# missing_ids <- (1:maxID)[!(1:maxID %in% new.R0$infector)]
#
# # add in missing days if any are missing
# if (length(missing_ids > 0)) {
# R0comp <- data.table::rbindlist(list(new.R0,
# data.table(infector = missing_ids,
# cases_caused = 0)))
# }
#
# R0comp <- arrange(R0comp, infector)
#
# #now add back in those cases not at UCB...
# #original.r0$actual_cases_caused_UCB <- R0comp$cases_caused
#get prop.asymptomatic at this cutoff
prop.asym <- length(pop.mat$time_of_symptom_onset[pop.mat$time_of_symptom_onset==Inf])/length(pop.mat$time_of_symptom_iso)
#from here, compute Reffective
R.dat = dplyr::select(pop.mat, employ_ids, infector, time_isolation, case_source)
R.dat = arrange(R.dat, time_isolation) #incidence will just be cases by time isolated
#if not isolated, you don't count for incidence...
R.dat = R.dat[!is.na(R.dat$time_isolation),]
R.dat$time_isolation = ceiling(R.dat$time_isolation)
#could add source. don't for now
R.sum = ddply(R.dat, .(time_isolation), summarise, length(employ_ids))
#R.sum = ddply(R.dat, .(time_isolated, source), summarise, length(employ_ids))
names(R.sum) = c( "day", "incidence")
#plot as incidence
#plot(as.incidence(R.sum$incidence, dates = R.sum$day))
#this will go in as your incidence data
#now add in pairs to estimate the serial interval
#T <- nrow(R.sum)
#t_start <- seq(2, T-13) # starting at 2 as conditional on the past observations
#t_end <- t_start + 13
#
# R.est = estimate_R(R.sum$incidence,
# method="parametric_si",
# config = make_config(list(#t_start = t_start,
# #t_end = t_end,
# mean_si = serial_mean, std_si = serial_sd)))
#
# #plot(R.est, "R")
# #get midpoint and R values and extract
# R.out = cbind.data.frame(get.midpoint(par.low = R.est$R$t_start, par.hi = R.est$R$t_end), R.est$R$`Mean(R)`)
# names(R.out) = c("day", "Reffective")
# #and try it based on pairs
pop.mat = data.table(pop.mat)
#now, get broad incidence data to report
UCB.mat = subset(pop.mat, case_source=="UCB")
alameda.mat = subset(pop.mat, case_source=="alameda")
symp.mat = subset(pop.mat, reason_isolated=="symptom_iso")
trace.mat = subset(pop.mat, reason_isolated=="tracing_iso")
test.mat = subset(pop.mat, reason_isolated=="testing_iso")
daily_exposures <- pop.mat[, day := ceiling(exposure_time) #time_isolated
][, .(daily_exposures = .N), by = day
]
# #daily isolations
daily_isolations <- pop.mat[, day := ceiling(time_isolation) #
][, .(daily_isolations = .N), by = day
]
daily_cal <- UCB.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
daily_alameda <- alameda.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
daily_symp <- symp.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
daily_trace <- trace.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
daily_test <- test.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day
]
# maximum outbreak day
max_day <- ceiling(times)
# days with 0 cases in 0:max_day
#missing_days <- (0:max_day)[!(0:max_day %in% daily_isolations$day)]
missing_days <- (0:max_day)[!(0:max_day %in% daily_exposures$day)]
# add in missing days if any are missing
if (length(missing_days) > 0) {
daily_cases <- data.table::rbindlist(list(daily_exposures,
data.table(day = missing_days,
daily_exposures = 0)))
}
#reorder as appropriate
#daily_cases <- arrange(daily_cases, day)
# order and sum up
daily_cases <- daily_exposures[order(day)
][, cumulative := cumsum(daily_exposures)]
# cut at max_day
daily_cases <- daily_cases[day<=max_day]
# and isolations
daily_cases$daily_isolations <- 0
for (i in 1:length(daily_isolations$day)){
daily_cases$daily_isolations[daily_cases$day==daily_isolations$day[i]] <- daily_isolations$daily_isolations[i]
}
#and cumulative isolations
daily_cases$cumulative_iso = cumsum(daily_cases$daily_isolations)
# #and cases in UCB vs out
daily_cases$daily_UCB_isolations <- 0
for (i in 1:length(daily_cal$day)){
daily_cases$daily_UCB_isolations[daily_cases$day==daily_cal$day[i]] <- daily_cal$daily_isolations[i]
}
#
# #and cases in UCB vs out
daily_cases$daily_alameda_isolations <- 0
for (i in 1:length(daily_alameda$day)){
daily_cases$daily_alameda_isolations[daily_cases$day==daily_alameda$day[i]] <- daily_alameda$daily_isolations[i]
}
daily_cases$daily_symptomatic_isolations <- 0
for (i in 1:length(daily_symp$day)){
daily_cases$daily_symptomatic_isolations[daily_cases$day==daily_symp$day[i]] <- daily_symp$daily_isolations[i]
}
daily_cases$daily_tracing_isolations <- 0
for (i in 1:length(daily_trace$day)){
daily_cases$daily_tracing_isolations[daily_cases$day==daily_trace$day[i]] <- daily_trace$daily_isolations[i]
}
daily_cases$daily_testing_isolations <- 0
for (i in 1:length(daily_test$day)){
daily_cases$daily_testing_isolations[daily_cases$day==daily_test$day[i]] <- daily_test$daily_isolations[i]
}
#
# #now attach R-effective
# daily_cases$Reffective = NA
#
# for(i in 1:nrow(R.out)){
# daily_cases$Reffective[daily_cases$day==R.out$day[i]] <- R.out$Reffective[i]
# }
#
#add category
pop.mat.cat= dlply(pop.mat, .(employ_cat))
new_col <- lapply(pop.mat.cat, FUN=add.risk.cat, pop_dat=daily_cases)
#and also the daily exposures
new_col2 <- lapply(pop.mat.cat, FUN=add.risk.cat.exp, pop_dat=daily_cases, input_par=input.par)
new_col_exp <- sapply(new_col2, "[", 1)
new_col_deaths <- sapply(new_col2, "[", 2)
#tmp = data.table::rbindlist(new_col)
tmp = as.data.frame(do.call("cbind", new_col))
names(tmp) <- paste0("isolations-employ-cat-", unique(input.par$population))
tmp2 = as.data.frame(do.call("cbind", new_col_exp))
#tmp2 = data.table::rbindlist(new_col_exp)
names(tmp2) <- paste0("exposures-employ-cat-", unique(input.par$population))
tmp3 = as.data.frame(do.call("cbind", new_col_deaths))
#tmp3 = data.table::rbindlist(new_col_deaths)
names(tmp3) <- paste0("deaths-employ-cat-", unique(input.par$population))
#and attach to daily cases
daily_cases <- cbind.data.frame(daily_cases, tmp, tmp2, tmp3)
# #finally, calculate some summary statistics from the epidemic
# tot.exposures = sum(daily_cases$daily_exposures, na.rm=T)
# tot.isolations = sum(daily_cases$daily_isolations, na.rm=T)
# #time.to.control = max(daily_cases$day[!is.na(daily_cases$Reffective)])
# max.exposures.per.day = max(daily_cases$daily_exposures, na.rm=T)
# mean.exposures.per.day = mean(daily_cases$daily_exposures, na.rm=T)
# max.iso.per.day = max(daily_cases$daily_isolations, na.rm=T)
# mean.iso.per.day = mean(daily_cases$daily_isolations, na.rm=T)
# time.of.peak.iso = min(daily_cases$day[daily_cases$daily_isolations==max(daily_cases$daily_isolations, na.rm=T)])
# time.of.peak.exposure = min(daily_cases$day[daily_cases$daily_exposures==max(daily_cases$daily_exposures, na.rm=T)])
#
#and report out the max day before your cases are too few to calculate Reffective
#out.stat <- c(tot.exposures, tot.isolations, max.exposures.per.day, mean.exposures.per.day, max.iso.per.day, mean.iso.per.day, time.of.peak.exposure, time.of.peak.iso)
#names(out.stat) <- c("total_exposures", "total_isolations", "max_exp_per_day", "mean_exp_per_day", "max_iso_per_day", "mean_iso_per_day", "time_peak_exposure", "time_peak_isolation")
pop.mat$LOD <- LOD
#add TAT if this is a single-population model; if TATs are mixed across populations, record it as "multiple"
if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){
pop.mat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
}else{
pop.mat$TAT <- "multiple"
}
pop.mat$test_rotation <- test_rotation_name
return(list(daily_cases,pop.mat, prop.asym, R.mat))
}
replicate.epidemic = function(n.reps, input.pop, n.init.exposed.vector, employ.id.vector, times, virus.par, input.par, burnin, test.freq, length_timestep,
bay.area.prev, initial.R, within.host.theta, test_rotation_name, LOD, titer.dat){
out = replicate(n.reps, simulate.epidemic(virus.par = virus.par,
input.par = input.par,
input.pop=input.pop,
n.init.exposed.vector=n.init.exposed.vector,
times=times,
bay.area.prev = bay.area.prev,
initial.R = initial.R,
within.host.theta = within.host.theta,
burnin = burnin,
length_timestep=length_timestep,
employ.id.vector =employ.id.vector,
LOD = LOD,
titer.dat = titer.dat,
test_rotation_name = test_rotation_name), simplify = "array")
#make list
out.time<- out.daily <- out.cal <- out.iso <- out.cumulative <- out.ala <- out.symp <- out.trace <- out.test <- out.iso <-out.cum.iso <- pop.mat.chain <- out.prop.asym <- R.mat.out <- list()
#and make list of all the categories of sub-pop
out.cat <- list()
for (i in 1:ncol(out)){
#tmp <- data.table::cbindlist(out[,i][[1]])
tmp <- do.call("cbind", out[,i][[1]])
out.time[[i]] <- c(unlist(tmp[,1]))
out.daily[[i]] <- c(unlist(tmp[,2]))
out.cumulative[[i]] <- c(unlist(tmp[,3]))
out.iso[[i]] <- c(unlist(tmp[,4]))
out.cum.iso[[i]] <- c(unlist(tmp[,5]))
out.cal[[i]] <- c(unlist(tmp[,6]))
out.ala[[i]] <- c(unlist(tmp[,7]))
out.symp[[i]] <- c(unlist(tmp[,8]))
out.trace[[i]] <- c(unlist(tmp[,9]))
out.test[[i]] <- c(unlist(tmp[,10]))
#out.R[[i]] <- c(unlist(tmp[,11]))
out.cat[[i]] <- cbind(unlist(tmp[,11:(10+(length(unique(input.par$population)))*3)]))
#and save a chain of pop.mat
tmp2 <- out[,i][[2]]
pop.mat.chain[[i]] <- tmp2
#and the prop.asym
tmp3 <- out[,i][[3]]
out.prop.asym[[i]] <- tmp3
tmp4 <- out[,i][[4]]
rownames(tmp4) <- c()
R.mat.out[[i]] <- tmp4
#unique(input.par$population)
}
#now shorten them all to the same length and get mean + sd
#print(out.time)
mean.time = get.mean.sd(vector= out.time, name = "day")[,1]
#print(out.daily)
mean.daily = get.mean.sd(vector=out.daily, name = "exposures")
#print(out.cumulative)
mean.cumulative= get.mean.sd(vector=out.cumulative, name = "cumulative")
#print(out.cal)
mean.cal = get.mean.sd(vector=out.cal, name="UCB")
#print(out.ala)
mean.ala = get.mean.sd(vector=out.ala, name = "AlamedaCo")
#print(out.low)
mean.symp = get.mean.sd(vector=out.symp, name="symptomatic_iso")
mean.trace = get.mean.sd(vector=out.trace, name="tracing_iso")
mean.test = get.mean.sd(vector=out.test, name="testing_iso")
#print(out.iso)
mean.iso = get.mean.sd(vector=out.iso, name = "isolations")
#print(out.cum.iso)
mean.cum.iso = get.mean.sd(vector=out.cum.iso, name = "cumulative_isolations")
#print(out.sum)
#mean.sum = get.mean.sd.summary(out.sum)
#and the employ-cat
mean.cat = get.mean.matrix(mat=out.cat)
#print(out.hi)
mean.dat = cbind.data.frame(mean.time, mean.daily, mean.cumulative, mean.cal, mean.ala, mean.symp, mean.trace,mean.test, mean.iso, mean.cum.iso, mean.cat)#, mean.R)
names(mean.dat)[1] = "day"
#all of the descriptors can now change within the pop
mean.dat$LOD <- LOD
if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){
mean.dat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
}else{
mean.dat$TAT <- "multiple"
}
mean.dat$test_rotation <- test_rotation_name
#mean.dat$prop_asym = prop.asym
mean.dat$virus_par = unique(virus.par$version)
mean.dat$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
avg.prop.asym <- mean(c(unlist(out.prop.asym)))
mean.dat$prop_asym= avg.prop.asym
#and the long version
mean.daily$type = "all_exposures"
mean.cumulative$type = "cumulative"
mean.cal$type = "UCB"
mean.ala$type = "AlamedaCo"
mean.symp$type = "symptomatic_iso"
mean.trace$type = "tracing_iso"
mean.test$type = "testing_iso"
#mean.R$type = "Reffective"
mean.iso$type= "isolations"
#don't bother with employ-cat; can add later if needed
mean.cat.long.list = convert.cat(mean.cat)
mean.cat.long = data.table::rbindlist(mean.cat.long.list)
#mean.cat.long = do.call("rbind", mean.cat.long.list)
names(mean.daily) <- names(mean.cumulative) <- names(mean.cal) <- names(mean.ala) <- names(mean.symp) <- names(mean.trace) <- names(mean.test) <- names(mean.iso) <- c("mean", "lci", "uci", "type") #<- names(mean.R)
mean.long <- rbind(mean.daily, mean.cumulative, mean.cal, mean.ala, mean.symp, mean.trace, mean.test, mean.iso, mean.cat.long)#, mean.R)
n.cat = length(input.pop)
mean.long$day = c(rep(mean.time, (8+(3*n.cat))))#, mean.time[-1])
mean.long$LOD <- LOD
if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){
mean.long$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
}else{
mean.long$TAT <- "multiple"
}
mean.long$test_rotation <- test_rotation_name
#mean.long$prop_asym = prop.asym
mean.long$virus_par = unique(virus.par$version)
mean.long$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
mean.long$prop_asym = avg.prop.asym
# mean.sum$sim_cat = sim_cat
# #mean.sum$prop_asym = prop.asym
# mean.sum$virus_par = unique(virus.par$version)
#
# mean.sum$superspread = superspread
# mean.sum$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
#
#
# #and summarize R
# mean.R = summarise.R(out.list.R=out.R, day.vec = mean.dat$day, n.reps=n.reps)
# mean.R$LOD <- LOD
# mean.R$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
# mean.R$test_rotation <- test_rotation_name
# #mean.R$sim_cat = sim_cat
# #mean.R$prop_asym = prop.asym
# mean.R$virus_par = unique(virus.par$version)
#
# mean.R$superspread = superspread
# mean.R$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
#
# mean.R$prop_asym <- avg.prop.asym
#
#
# mean.R.mat = manage.R.matrix(mat.list=R.mat.out)
#and do the best you can with the R-output
#put it all together
#R.mat.use <- do.call("rbind", R.mat.out)
R.mat.use <- data.table::rbindlist(R.mat.out)
R.mat.use <- arrange(R.mat.use, total_potential_cases)
if(virus.par$distribution[virus.par$parameter=="R0"]=="negbinom"){
mean.R.mat = R.fit.sum(R.mat.use)
}else{
mean.R.mat = R.fit.sum.lognorm(R.mat.use)
}
rownames(mean.R.mat) <- c()
mean.R.mat$LOD <- LOD
if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){
mean.R.mat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"])
}else{
mean.R.mat$TAT <- "multiple"
}
mean.R.mat$test_rotation <- test_rotation_name
mean.R.mat$virus_par = unique(virus.par$version)
mean.R.mat$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"])
mean.R.mat$prop_asym <- avg.prop.asym
#return these summaries and the list of pop.mats
return(list(mean.dat, mean.long, pop.mat.chain, mean.R.mat))
}
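#Illustration only: the summary helpers used above (get.mean.sd, get.mean.matrix,
#convert.cat, R.fit.sum, ...) are defined elsewhere in this repository. A helper
#in the spirit of get.mean.sd -- this is an assumption about its behaviour, not
#the project's actual code -- might truncate the replicate vectors to a common
#length and return a mean with normal-approximation bounds:
get_mean_ci_sketch <- function(vector, name = "value"){
  len <- min(sapply(vector, length))           #shorten all replicates to the same length
  mat <- sapply(vector, function(v) v[1:len])  #one column per replicate
  est <- rowMeans(mat)
  sdv <- apply(mat, 1, sd)
  out <- data.frame(est, est - 1.96*sdv, est + 1.96*sdv)
  names(out) <- paste(name, c("mean", "lci", "uci"), sep="_")
  return(out)
}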
pop.par.base$par1[pop.par.base$parameter=="group-size-limit"] <- 16
pop.par.base$par1[pop.par.base$parameter=="percent-obs-dist-lim"] <- .9
out = replicate.epidemic(n.reps = 100,
virus.par = virus.par,
input.par = pop.par.base,
input.pop=c(20000),#2000
n.init.exposed.vector=c(100),#10
times=365*2,
bay.area.prev = .1/100,
initial.R = 2.5,
within.host.theta = .72,
burnin = 0,
length_timestep=1,
employ.id.vector = c(1),
LOD=(10^1),
titer.dat = titer.dat,
test_rotation_name = "none")
save(out, file = "group-lim-16.Rdata")
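#Optional post-hoc check: the function closed above returns
#list(mean.dat, mean.long, pop.mat.chain, mean.R.mat); assuming that function is
#the replicate.epidemic() called here, out[[2]] holds the long-format summary.
str(out, max.level = 1)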
|
library(dbarts)
library(CBPS)
load("data/HRS_causal_df.RData")
n <- nrow(data_df)
data_df[,"tmpTreat"] <- rep(NA, times = n)
data_df[which(data_df[,"PHYS_INACT"] == 0 & data_df[,"DEPRESS"] == 0), "tmpTreat"] <- 0 # control -- not inactive, not depressed
data_df[which(data_df[,"PHYS_INACT"] == 1 & data_df[,"DEPRESS"] == 0), "tmpTreat"] <- 1 # inactive & not depressed
data_df[which(data_df[,"PHYS_INACT"] == 0 & data_df[,"DEPRESS"] == 1), "tmpTreat"] <- 2 # active & depressed
data_df[which(data_df[,"PHYS_INACT"] == 1 & data_df[,"DEPRESS"] == 1), "tmpTreat"] <- 3 # inactive & depressed
ps_df <- data_df[,!colnames(data_df) %in% c("Y", "PHYS_INACT", "DEPRESS")]
ps_df[,"tmpTreat"] <- factor(data_df[,"tmpTreat"], levels = 0:3, labels = 0:3)
cbps_fit <- CBPS(tmpTreat ~ ., data = ps_df, ATT = 0, standardize = FALSE, iterations = 5000)
save(cbps_fit, file = "results/HRS_causal_cbps_fit.RData")
prop_score <- cbps_fit$fitted.values
colnames(prop_score) <- paste0("prop", 0:3)
weights <- cbps_fit$weights
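# Quick sanity check (optional): with a four-level treatment the fitted
# propensity scores should behave like probabilities (each row of prop_score
# summing to ~1) and the CBPS balancing weights should be strictly positive.
summary(rowSums(prop_score))
summary(weights)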
####################
# Set up what we need to look at the balance table
tmp_df <- data_df[,!colnames(data_df) %in% c("Y", "tmpTreat", "PHYS_INACT", "DEPRESS")]
tmp_df[,"CHLD_HLTH"] <- as.numeric(tmp_df[,"CHLD_HLTH"])
Z_all <- makeModelMatrixFromDataFrame(tmp_df)
Z_all <- cbind(Z_all, prop_score[,paste0("prop", 1:3)])
ix0 <- which(data_df[,"tmpTreat"] == 0)
ix1 <- which(data_df[,"tmpTreat"] == 1)
ix2 <- which(data_df[,"tmpTreat"] == 2)
ix3 <- which(data_df[,"tmpTreat"] == 3)
Y_all <- data_df[,"Y"]
X_all <- matrix(0, nrow = n, ncol = 3)
X_all[ix1, 1] <- 1
X_all[ix2, 2] <- 1
X_all[ix3, 3] <- 1
R <- ncol(Z_all)
cutpoints <- list()
for(r in 1:R){
if(colnames(Z_all)[r] == "AGE") cutpoints[[r]] <- 720:1020
else if (!colnames(Z_all)[r] %in% c("cSEP", "prop1", "prop2", "prop3")) cutpoints[[r]] <- sort(unique(Z_all[,r]))
else cutpoints[[r]] <- seq(floor(min(Z_all[,r])), ceiling(max(Z_all[,r])), length = 1000)
}
n_all <- length(Y_all)
save(X_all, Y_all, Z_all, R, n_all, cutpoints, file = "data/HRS_causal_vcbart_data.RData")
############################
# Compute pairwise differences in unweighted and weighted means
# now we want chld health treated as a factor!
tmp_df <- data_df[,!colnames(data_df) %in% c("Y", "PHYS_INACT", "DEPRESS", "tmpTreat")]
tmp_df <- data_df[,c("AGE", "GENDER", "RACE", "cSEP", "EDUC", "CHLD_HLTH", "SOUTHERN", "FOREIGN", "SMOKE")]
tmp_Z <- makeModelMatrixFromDataFrame(tmp_df)
colnames(tmp_Z) <- c("Age", "Female","NH White", "NH Black", "Hispanic", "NH Other", "cSEP", "Educ",
"Poor chld health", "Fair chld health", "Good chld health", "Very good chld health", "Excellent chld health",
"Born in Southern US", "Born outside US", "Smoked")
p <- ncol(tmp_Z)
std_diff_before <- matrix(nrow = p, ncol = 6, dimnames = list(colnames(tmp_Z), c("1-0", "2-0", "3-0", "2-1", "3-1", "3-2")))
std_diff_after <- matrix(nrow = p, ncol = 6, dimnames = list(colnames(tmp_Z), c("1-0", "2-0", "3-0", "2-1", "3-1", "3-2")))
n0 <- length(ix0)
n1 <- length(ix1)
n2 <- length(ix2)
n3 <- length(ix3)
for(j in 1:p){
z_name <- colnames(tmp_Z)[j]
z <- tmp_Z[,j]
var0 <- var(z[ix0])
var1 <- var(z[ix1])
var2 <- var(z[ix2])
var3 <- var(z[ix3])
pooled_sd <- sqrt( ((n0 - 1)*var0 + (n1 - 1) * var1 + (n2 - 1) * var2 + (n3 - 1) * var3) / (n0 + n1 + n2 + n3 - 4))
before_mean0 <- mean(z[ix0])
before_mean1 <- mean(z[ix1])
before_mean2 <- mean(z[ix2])
before_mean3 <- mean(z[ix3])
after_mean0 <- weighted.mean(z[ix0], w = weights[ix0])
after_mean1 <- weighted.mean(z[ix1], w = weights[ix1])
after_mean2 <- weighted.mean(z[ix2], w = weights[ix2])
after_mean3 <- weighted.mean(z[ix3], w = weights[ix3])
std_diff_before[z_name, "1-0"] <- (before_mean1 - before_mean0)/pooled_sd
std_diff_before[z_name, "2-0"] <- (before_mean2 - before_mean0)/pooled_sd
std_diff_before[z_name, "3-0"] <- (before_mean3 - before_mean0)/pooled_sd
std_diff_before[z_name, "2-1"] <- (before_mean2 - before_mean1)/pooled_sd
std_diff_before[z_name, "3-1"] <- (before_mean3 - before_mean1)/pooled_sd
std_diff_before[z_name, "3-2"] <- (before_mean3 - before_mean2)/pooled_sd
std_diff_after[z_name, "1-0"] <- (after_mean1 - after_mean0)/pooled_sd
std_diff_after[z_name, "2-0"] <- (after_mean2 - after_mean0)/pooled_sd
std_diff_after[z_name, "3-0"] <- (after_mean3 - after_mean0)/pooled_sd
std_diff_after[z_name, "2-1"] <- (after_mean2 - after_mean1)/pooled_sd
std_diff_after[z_name, "3-1"] <- (after_mean3 - after_mean1)/pooled_sd
std_diff_after[z_name, "3-2"] <- (after_mean3 - after_mean2)/pooled_sd
}
save(tmp_Z, std_diff_before, std_diff_after, file = "results/HRS_causal_balance.RData")
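# A compact read of the balance table: the largest absolute standardized
# difference per covariate, before vs. after CBPS weighting (values nearer
# zero after weighting indicate better balance).
balance_summary <- cbind(before = apply(abs(std_diff_before), 1, max),
                         after = apply(abs(std_diff_after), 1, max))
round(balance_summary, 3)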
|
/scripts/hrs_causal_cbps.R
|
no_license
|
skdeshpande91/VCBART
|
R
| false | false | 4,714 |
r
|
# HW1: tibble4
#
# 1. Consider the data frame `airquality`. Convert it into a tibble `t1`.
# 2. Calculate the total number of 'NA' for each variable and summarize it into a vector `v1`.
# Make sure the vector has the same names as the column names of `t1`.
# 3. Omit all rows which have 'NA' values and assign it to `t2`.
# 4. Sort the new data frame `t2` by column Temp(ascending) and then by column Ozone(ascending). Assign it to `t3`.(hint: check the order function)
## Do not modify this line! ## Write your code for 1. after this line! ##
library(tibble)
t1 <- as_tibble(airquality)
## Do not modify this line! ## Write your code for 2. after this line! ##
v1 <- vector(length = ncol(t1))
for (i in 1: ncol(t1)){
v1[i] <- sum(is.na(t1[,i]))
}
names(v1) <- names(t1)
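# Note: the loop above is equivalent to a single vectorized call; shown here
# only for comparison, it is not required by the assignment.
v1_alt <- colSums(is.na(t1))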
## Do not modify this line! ## Write your code for 3. after this line!
t2 <- na.omit(t1)
## Do not modify this line! ## Write your code for 4. after this line! ##
t3 <- t2[order(t2$Temp, t2$Ozone),]
|
/HW1/hw1_7_tibble.R
|
no_license
|
QihangYang/Computational-Statistics
|
R
| false | false | 976 |
r
|
#' percopackage: A package for percolation analysis as a 2D spatial clustering algorithm
#'
#'Three functions are implemented in this package:
#'
#'@section percolate():
#'This function processes the input data set to generate the
#'clusters for a range of percolation radius values.
#'It also generates and stores a set of data tables for
#'use by the other functions, as well as optional external
#'processing. For details see \code{\link{percolate}}
#'
#'@section mapClusters():
#'which allows for the mapping of the
#'clusters together with a shape file as a background. For details see
#' \code{\link{mapClusters}}
#'
#'@section plotClustFreq():
#'which plots three different analyses as png files:
#'
#'a) radius to maximum cluster size,
#'
#'b) radius to mean cluster size and
#'
#'c) radius to normalized max. cluster size.
#'
#'For details see \code{\link{plotClustFreq}}
#'
#'
#' @docType package
#' @name percopackage
NULL
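# Sketch of a typical workflow with the three functions documented above.
# This is only an illustration: the calls and any arguments are assumptions --
# consult ?percolate, ?mapClusters and ?plotClustFreq for the real signatures.
if (FALSE) {
  percolate(my_points)  # `my_points` is a hypothetical input data set; percolate()
                        # also stores the data tables used by the functions below
  mapClusters()         # map the clusters over a background shape file
  plotClustFreq()       # radius vs. cluster-size plots written as png files
}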
|
/R/percopackage.R
|
permissive
|
petrpajdla/percopackage
|
R
| false | false | 943 |
r
|
rm(list=ls())
source(paste0(here::here(), "/0-config.R"))
#Load data
d <- readRDS(paste0(res_dir,"stunting/sex_strat_stunting_desc_data.RDS"))
quantiles <- readRDS(paste0(res_dir,"stunting/quantile_data_stunting_sex_strat.RDS"))
quantiles.BW <- readRDS(paste0(res_dir,"stunting/quantile_data_stunting_sex_BW_strat.RDS"))
d$est[(is.na(d$pooling) | d$pooling=="no pooling") & d$measure %in% c("Prevalence","Cumulative incidence","Persistent stunting", "Recovery" )] <-
d$est[(is.na(d$pooling) | d$pooling=="no pooling") & d$measure %in% c("Prevalence","Cumulative incidence","Persistent stunting", "Recovery" )] * 100
d$lb[(is.na(d$pooling) | d$pooling=="no pooling") & d$measure %in% c("Prevalence","Cumulative incidence","Persistent stunting", "Recovery" )] <-
d$lb[(is.na(d$pooling) | d$pooling=="no pooling") & d$measure %in% c("Prevalence","Cumulative incidence","Persistent stunting", "Recovery" )] * 100
d$ub[(is.na(d$pooling) | d$pooling=="no pooling") & d$measure %in% c("Prevalence","Cumulative incidence","Persistent stunting", "Recovery" )] <-
d$ub[(is.na(d$pooling) | d$pooling=="no pooling") & d$measure %in% c("Prevalence","Cumulative incidence","Persistent stunting", "Recovery" )] * 100
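#Note: the three blocks above apply the same row filter to est, lb and ub. An
#equivalent, more compact form (not run here, to avoid rescaling twice; assumes
#dplyr >= 1.0.0 for across()) would be:
if(FALSE){
  pct_rows <- (is.na(d$pooling) | d$pooling=="no pooling") &
    d$measure %in% c("Prevalence","Cumulative incidence","Persistent stunting", "Recovery")
  d <- d %>% mutate(across(c(est, lb, ub), ~ifelse(pct_rows, .x*100, .x)))
}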
#d %>% filter(measure=="Prevalence", disease=="co-occurrence", cohort!="pooled")
d$nmeas.f <- clean_nmeans(d$nmeas)
d$nstudy.f <- gsub("N=","",d$nstudy.f)
d$nmeas.f <- gsub("N=","",d$nmeas.f)
d$nstudy.f <- gsub(" studies","",d$nstudy.f)
d$nmeas.f <- gsub(" children","",d$nmeas.f)
#-------------------------------------------------------------------------------------------
# Mean LAZ by month
#-------------------------------------------------------------------------------------------
df <- d %>% filter(
disease == "stunting" &
measure == "Mean LAZ" &
birth == "yes" &
severe == "no" &
age_range == "1 month" &
cohort == "pooled"
)
df <- droplevels(df)
df <- df %>%
arrange(agecat) %>%
filter(!is.na(agecat)) %>%
filter(!is.na(region)) %>%
mutate(agecat = as.character(agecat)) %>%
mutate(agecat = ifelse(agecat == "Two weeks", ".5", agecat)) %>%
mutate(agecat = gsub(" month", "", agecat)) %>%
mutate(agecat = gsub(" months", "", agecat)) %>%
mutate(agecat = gsub("s", "", agecat)) %>%
mutate(agecat = ifelse(agecat == "One", "1", agecat)) %>%
mutate(agecat = as.numeric(agecat))
p <- ggplot(df,aes(y=est,x=agecat, group=region)) +
stat_smooth(aes(fill=region, color=region), se=F, span = 1) +
facet_wrap(~sex)+
geom_hline(yintercept = 0, colour = "black") +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10),
limits = c(-3, 0.5)) +
scale_x_continuous(limits = c(0,24), breaks = seq(0,24,2), labels = seq(0,24,2)) +
scale_fill_manual(values=tableau11, drop=TRUE,
name = 'Region') +
scale_color_manual(values=tableau11, drop=TRUE,
name = 'Region') +
xlab("Child age, months")+
ylab("Length-for-age Z-score") +
ggtitle("") +
theme(legend.position="right")
ggsave(p, file=paste0(fig_dir, "stunting/laz_by_sex.png"), width=10, height=4)
df2 <- df %>% mutate(agecat=ifelse(agecat==0.5,0,agecat))
p <- ggplot(df2,aes(y=est,x=agecat, group=sex, color=sex)) +
geom_point(aes(fill=sex), position = position_dodge(0.5)) +
geom_errorbar(aes(ymin=lb, ymax=ub), width = 0, position = position_dodge(0.5)) +
facet_wrap(~region) +
geom_hline(yintercept = 0, colour = "black") +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10),
limits = c(-3, 1.5)) +
scale_x_continuous(limits = c(-0.5,24.5), breaks = seq(0,24,2), labels = seq(0,24,2)) +
scale_fill_manual(values=tableau10, drop=TRUE, name = 'Sex') +
scale_color_manual(values=tableau10, drop=TRUE, name = 'Sex') +
xlab("Child age, months") +
ylab("Length-for-age Z-score") +
ggtitle("") +
coord_cartesian(xlim = c(0,24), ylim = c(-2.6, 0.5)) +
theme(legend.position="bottom")
p
ggsave(p, file=paste0(fig_dir, "stunting/laz_by_sex_alt.png"), width=10, height=4)
#-------------------------------------------------------------------------------------------
# Mean LAZ by month - birthweight strat
#-------------------------------------------------------------------------------------------
df <- d %>% filter(
disease == "stunting" &
measure == "Mean LAZ - BW" &
birth == "yes" &
severe == "no" &
age_range == "1 month" &
cohort == "pooled"
)
df <- droplevels(df)
df <- df %>%
arrange(agecat) %>%
filter(!is.na(agecat)) %>%
filter(!is.na(region)) %>%
filter(!is.na(birthwt)) %>%
mutate(agecat = as.character(agecat)) %>%
mutate(agecat = ifelse(agecat == "Two weeks", ".5", agecat)) %>%
mutate(agecat = gsub(" month", "", agecat)) %>%
mutate(agecat = gsub(" months", "", agecat)) %>%
mutate(agecat = gsub("s", "", agecat)) %>%
mutate(agecat = ifelse(agecat == "One", "1", agecat)) %>%
mutate(agecat = as.numeric(agecat))
p <- ggplot(df,aes(y=est,x=agecat, group=region)) +
stat_smooth(aes(fill=region, color=region), se=F, span = 1) +
facet_wrap(birthwt~sex)+
geom_hline(yintercept = 0, colour = "black") +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10),
limits = c(-3, 0.5)) +
scale_x_continuous(limits = c(0,24), breaks = seq(0,24,2), labels = seq(0,24,2)) +
scale_fill_manual(values=tableau11, drop=TRUE,
name = 'Region') +
scale_color_manual(values=tableau11, drop=TRUE,
name = 'Region') +
xlab("Child age, months")+
ylab("Length-for-age Z-score") +
ggtitle("") +
theme(legend.position="right")
ggsave(p, file=paste0(fig_dir, "stunting/laz_by_sex_and_BW.png"), width=10, height=4)
df2 <- df %>% mutate(agecat=ifelse(agecat==0.5,0,agecat))
p <- ggplot(df2,aes(y=est,x=agecat, group=sex, color=sex)) +
geom_point(aes(fill=sex), position = position_dodge(0.5)) +
geom_errorbar(aes(ymin=lb, ymax=ub), width = 0, position = position_dodge(0.5)) +
facet_wrap(region~birthwt, ncol=2) +
geom_hline(yintercept = 0, colour = "black") +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10),
limits = c(-4, 0.25)) +
scale_x_continuous(limits = c(-0.5,24.5), breaks = seq(0,24,2), labels = seq(0,24,2)) +
scale_fill_manual(values=tableau10, drop=TRUE, name = 'Sex') +
scale_color_manual(values=tableau10, drop=TRUE, name = 'Sex') +
xlab("Child age, months") +
ylab("Length-for-age Z-score") +
ggtitle("") +
coord_cartesian(xlim = c(0,24), ylim = c(-4, 0.25)) +
theme(legend.position="bottom")
p
ggsave(p, file=paste0(fig_dir, "stunting/laz_by_sex_and_BW_alt.png"), width=10, height=8)
#-------------------------------------------------------------------------------------------
# Mean LAZ by month with quantiles
#-------------------------------------------------------------------------------------------
quantile_d_overall <- quantiles$quantile_d_overall %>% mutate(region="Overall")
df <- rbind(quantile_d_overall, quantiles$quantile_d)
df$agecat <- factor(df$agecat,
levels=c("Two weeks", "One month",
paste0(2:24," months")))
df <- df %>%
arrange(agecat) %>%
filter(region!="Europe")
df <-droplevels(df)
df <- df %>%
ungroup(agecat) %>%
arrange(agecat) %>%
filter(!is.na(agecat)) %>%
filter(!is.na(region)) %>%
mutate(agecat = as.character(agecat)) %>%
mutate(agecat = ifelse(agecat == "Two weeks", ".5", agecat)) %>%
mutate(agecat = gsub(" month", "", agecat)) %>%
mutate(agecat = gsub(" months", "", agecat)) %>%
mutate(agecat = gsub("s", "", agecat)) %>%
mutate(agecat = ifelse(agecat == "One", "1", agecat)) %>%
mutate(agecat = as.numeric(agecat)) %>%
mutate(region = ifelse(region=="Asia", "South Asia", region)) %>%
gather(`ninetyfifth_perc`, `fiftieth_perc`, `fifth_perc`, key = "interval", value = "laz") %>%
mutate(region = factor(region, levels = c("Overall", "Africa", "Latin America", "South Asia")))
mean_laz_plot <- ggplot(df,aes(x = agecat, group = region)) +
geom_smooth(aes(y = laz, color = region, group = interval, linetype = interval), se = F, span = 1) +
facet_wrap(region~sex, nrow=2) +
geom_hline(yintercept = 0, colour = "black") +
scale_x_continuous(limits = c(0,24), breaks = seq(0,24,2), labels = seq(0,24,2)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10)) +
scale_color_manual(values=c("Black", "#1F77B4", "#FF7F0E", "#2CA02C"), drop=TRUE, limits = levels(df$measure),
name = 'Region') +
scale_linetype_manual(name = "interval", values = c("fiftieth_perc" = "solid",
"ninetyfifth_perc" = "dashed",
"fifth_perc" = "dotted"),
breaks = c("fiftieth_perc",
"ninetyfifth_perc",
"fifth_perc"),
labels = c("Mean", "95th percentile", "5th percentile")) +
xlab("Child age, months") +
ylab("Length-for-age Z-score") +
ggtitle("") +
theme(strip.text = element_text(margin=margin(t=5))) +
guides(linetype = guide_legend(override.aes = list(col = 'black'),
keywidth = 3, keyheight = 1),
colour = FALSE) +
theme(legend.position = "bottom",
legend.title = element_blank(),
legend.background = element_blank(),
legend.box.background = element_rect(colour = "black"))
# save plot and underlying data
ggsave(mean_laz_plot, file=paste0(fig_dir,"stunting/fig-laz-2-mean-overall_region--allage-sex-stratified.png"), width=14, height=6)
df2 <- df %>% filter(region %in% c("Africa","South Asia"))
df2 <- droplevels(df2)
head(df2)
mean_laz_plot2 <- ggplot(df2, aes(x = agecat, group = region)) +
geom_smooth(aes(y = laz, color = region, group = interval, linetype = interval), se = F, span = 1) +
facet_wrap(region~sex, nrow=2) +
geom_hline(yintercept = 0, colour = "black") +
scale_x_continuous(limits = c(0,24), breaks = seq(0,24,2), labels = seq(0,24,2)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10)) +
scale_color_manual(values=c("Black", "#1F77B4", "#FF7F0E", "#2CA02C"), drop=TRUE, limits = levels(df$measure),
name = 'Region') +
scale_linetype_manual(name = "interval", values = c("fiftieth_perc" = "solid",
"ninetyfifth_perc" = "dashed",
"fifth_perc" = "dotted"),
breaks = c("fiftieth_perc",
"ninetyfifth_perc",
"fifth_perc"),
labels = c("Mean", "95th percentile", "5th percentile")) +
xlab("Child age, months") +
ylab("Length-for-age Z-score") +
ggtitle("") +
theme(strip.text = element_text(margin=margin(t=5))) +
guides(linetype = guide_legend(override.aes = list(col = 'black'),
keywidth = 3, keyheight = 1),
colour = FALSE) +
theme(legend.position = "bottom",
legend.title = element_blank(),
legend.background = element_blank(),
legend.box.background = element_rect(colour = "black"))
# save plot and underlying data
ggsave(mean_laz_plot2, file=paste0(fig_dir,
"stunting/fig-laz-2-mean-overall_region--allage-sex-stratified2.png"),
width=8, height=6)
#-------------------------------------------------------------------------------------------
# Mean LAZ by month with quantiles - Birthweight strat
#-------------------------------------------------------------------------------------------
quantile_d_overall <- quantiles.BW$quantile_d_overall %>% mutate(region="Overall")
df <- rbind(quantile_d_overall, quantiles.BW$quantile_d)
df$birthwt <- gsub("or high ","", df$birthwt)
df$agecat <- factor(df$agecat,
levels=c("Two weeks", "One month",
paste0(2:24," months")))
df <- df %>%
arrange(agecat) %>%
filter(region!="Europe")
df <-droplevels(df)
df <- df %>%
ungroup(agecat) %>%
arrange(agecat) %>%
filter(!is.na(agecat)) %>%
filter(!is.na(region)) %>%
filter(!is.na(birthwt )) %>%
mutate(agecat = as.character(agecat)) %>%
mutate(agecat = ifelse(agecat == "Two weeks", ".5", agecat)) %>%
mutate(agecat = gsub(" month", "", agecat)) %>%
mutate(agecat = gsub(" months", "", agecat)) %>%
mutate(agecat = gsub("s", "", agecat)) %>%
mutate(agecat = ifelse(agecat == "One", "1", agecat)) %>%
mutate(agecat = as.numeric(agecat)) %>%
mutate(region = ifelse(region=="Asia", "South Asia", region)) %>%
gather(`ninetyfifth_perc`, `fiftieth_perc`, `fifth_perc`, key = "interval", value = "laz") %>%
mutate(region = factor(region, levels = c("Overall", "Africa", "Latin America", "South Asia")))
mean_laz_plot <- ggplot(df,aes(x = agecat, group = region)) +
geom_smooth(aes(y = laz, color = region, group = interval, linetype = interval), se = F, span = 1) +
facet_wrap(region~birthwt+sex, nrow=2) +
geom_hline(yintercept = 0, colour = "black") +
scale_x_continuous(limits = c(0,24), breaks = seq(0,24,2), labels = seq(0,24,2)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10)) +
scale_color_manual(values=c("Black", "#1F77B4", "#FF7F0E", "#2CA02C"), drop=TRUE,
name = 'Region') +
scale_linetype_manual(name = "interval", values = c("fiftieth_perc" = "solid",
"ninetyfifth_perc" = "dashed",
"fifth_perc" = "dotted"),
breaks = c("fiftieth_perc",
"ninetyfifth_perc",
"fifth_perc"),
labels = c("Mean", "95th percentile", "5th percentile")) +
xlab("Child age, months") +
ylab("Length-for-age Z-score") +
ggtitle("") +
theme(strip.text = element_text(margin=margin(t=5))) +
guides(linetype = guide_legend(override.aes = list(col = 'black'),
keywidth = 3, keyheight = 1),
colour = FALSE) +
theme(legend.position = "bottom",
legend.title = element_blank(),
legend.background = element_blank(),
legend.box.background = element_rect(colour = "black"))
# save plot and underlying data
ggsave(mean_laz_plot, file=paste0(fig_dir,"stunting/fig-laz-2-mean-overall_region--allage-sex-BW-stratified.png"), width=14, height=6)
df2 <- df %>% filter(region %in% c("Africa","South Asia"))
df2 <- droplevels(df2)
head(df2)
mean_laz_plot2 <- ggplot(df2, aes(x = agecat, group = region)) +
geom_smooth(aes(y = laz, color = region, group = interval, linetype = interval), se = F, span = 1) +
facet_wrap(region~birthwt+sex, nrow=2) +
geom_hline(yintercept = 0, colour = "black") +
scale_x_continuous(limits = c(0,24), breaks = seq(0,24,2), labels = seq(0,24,2)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10)) +
scale_color_manual(values=c("Black", "#1F77B4", "#FF7F0E", "#2CA02C"), drop=TRUE, limits = levels(df$measure),
name = 'Region') +
scale_linetype_manual(name = "interval", values = c("fiftieth_perc" = "solid",
"ninetyfifth_perc" = "dashed",
"fifth_perc" = "dotted"),
breaks = c("fiftieth_perc",
"ninetyfifth_perc",
"fifth_perc"),
labels = c("Mean", "95th percentile", "5th percentile")) +
xlab("Child age, months") +
ylab("Length-for-age Z-score") +
ggtitle("") +
theme(strip.text = element_text(margin=margin(t=5))) +
guides(linetype = guide_legend(override.aes = list(col = 'black'),
keywidth = 3, keyheight = 1),
colour = FALSE) +
theme(legend.position = "bottom",
legend.title = element_blank(),
legend.background = element_blank(),
legend.box.background = element_rect(colour = "black"))
# save plot and underlying data
ggsave(mean_laz_plot2, file=paste0(fig_dir,
"stunting/fig-laz-2-mean-overall_region--allage-sex-BW-stratified2.png"),
width=8, height=6)
|
/5-visualizations/stunting/0-extra/fig-haz-sex-strat.R
|
no_license
|
child-growth/ki-longitudinal-growth
|
R
| false | false | 16,405 |
r
|
load("../data/dataFin.rda")
# Load relevant packages
library(randomForest)
# Shuffle the rows (with a fixed seed) before splitting into sequential train/test folds
set.seed(271828)
data = dataFin[sample(1:nrow(dataFin), nrow(dataFin)),]
mTry = c(4, 7, 9, 10, 12, 15, 20, 30, 60, 93)
# Random forests
forestErr = matrix(0, nrow = 10, ncol = 10)
for (i in 1:10) {
train = 1:5000 + nrow(data) * (i - 1) / 10
test = 5001:6000 + nrow(data) * (i - 1) / 10
for (j in 1:10) {
    treeRating = randomForest(rating ~ ., data[train,], importance = TRUE,
                              mtry = mTry[j])  # randomForest's argument is lower-case `mtry`
    predTree = predict(treeRating, newdata = data[test,])
    forestErr[i, j] = 1 - mean(predTree == data[test, "rating"])
}
}
colnames(forestErr) = mTry
forestErr = apply(forestErr, 2, mean)
save(forestErr, file = "../data/forestErr.rda")
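# Each column mean in forestErr is a cross-validated error estimate for one
# candidate mtry value; the best-performing setting is the minimiser.
best_mtry = as.numeric(names(forestErr)[which.min(forestErr)])
best_mtry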
|
/analysis/randomForest.R
|
no_license
|
xkuang/YelpNLP
|
R
| false | false | 795 |
r
|
load("../data/dataFin.rda")
# Load relevant packages
library(randomForest)
# Prepare data for bootstrapping (sanely)
set.seed(271828)
data = dataFin[sample(1:nrow(dataFin), nrow(dataFin)),]
mTry = c(4, 7, 9, 10, 12, 15, 20, 30, 60, 93)
# Random forests
forestErr = matrix(0, nrow = 10, ncol = 10)
for (i in 1:10) {
train = 1:5000 + nrow(data) * (i - 1) / 10
test = 5001:6000 + nrow(data) * (i - 1) / 10
for (j in 1:10) {
treeRating = randomForest(rating ~ ., data[train,], importance = TRUE,
mTry = mTry[j])
predTree = predict(treeRating, newdata = data[test,])
forestErr[i, j] = 1 - mean((predTree == data[test, "rating"])^2)
}
}
colnames(forestErr) = mTry
forestErr = apply(forestErr, 2, mean)
save(forestErr, file = "../data/forestErr.rda")
|
# packages assumed by these helpers: GenomicRanges (flank, trim, width) and
# GenometriCorr (GenometriCorrelation)
suppressPackageStartupMessages({
  library(GenomicRanges)
  library(GenometriCorr)
})

is_true = function(x) {
if (is.character(x)) {
return(grepl("<", x))
}
else {
return(x<=0.05)
}
}
judge_pval = function(a,b,c){
out = 0
for (aa in list(a,b,c)){
out = out + is_true(aa)
}
return(out)
}
set_ranges = function( gr, nm='' ){
upgr=flank(gr, width=100000, start=T)
dngr=flank(gr, width=100000, start=F)
newgr=flank(gr, width=100000, both=T)
end(newgr) <- end(newgr)+width(gr)
grset = list(gr, trim(upgr), trim(dngr), trim(newgr))
names(grset) = paste(c("x", "up", "dn", "all"), rep(nm, 4), sep="")
return(grset)
}
prepare_analysis_x = function( xgr ){
xset1 = set_ranges(xgr)
xset2 = set_ranges(xgr[grepl("Nrps", xgr$values),], "nrps")
xset3 = set_ranges(xgr[grepl("T1pks", xgr$values),], "t1pks")
return(c(xset1, xset2, xset3))
}
prepare_analysis_y = function( ygr ){
liney=ygr[ygr$values=="LINE",]
ltry=ygr[ygr$values=="LTR",]
dnay=ygr[ygr$values=="DNA",]
lly=ygr[ygr$values=="LINE" | ygr$values=="LTR", ]
ally = list(ygr, liney, ltry, dnay, lly)
names(ally) = c("y", "line", "ltr", "dna", "ll")
return(ally)
}
get_row_value = function( xobj ) {
routp = c(0,0,0,0)
routp[1]= xobj$query.population
routp[2]= xobj$reference.population
routp[3]= sum(xobj$projection.test.lower.tail, xobj$scaled.absolute.min.distance.sum.lower.tail, xobj$jaccard.measure.lower.tail)
routp[4]= judge_pval(xobj$projection.test.p.value, xobj$jaccard.measure.p.value, xobj$scaled.absolute.min.distance.sum.p.value)
return( routp )
}
single_analysis = function( xx, yy ){
y_to_x = GenometriCorrelation(query=yy, reference=xx)
ss = length(y_to_x)
output = matrix(0L, nrow=ss, ncol=4)
rownames(output)=names(y_to_x)
colnames(output)=c("Repeats", "GC", "AveLowTail", "AvePval")
for (mm in 1:ss ) {
output[mm,] = get_row_value( y_to_x[[mm]] )
}
return(output)
}
geometricorr_analysis = function(allx, ally) {
namesx = names(allx)
namesy = names(ally)
res = c()
idx = 1
for (nmx in namesx) {
for (nmy in namesy) {
xx = allx[[nmx]]
yy = ally[[nmy]]
output <- single_analysis(xx,yy)
nm = paste(nmx, nmy, sep="-")
res[[idx]] = output
names(res)[idx] = nm
idx = idx+1
}
}
return( res )
}
analysis_gc = function( xgr, ygr ){
allx = prepare_analysis_x( xgr )
#names(allx) = c("x", "up", "dn", "uptodn","nrps", "nrpsup", "nrpsdn", "nrpsall", "t1pks", "t1pksup","t1pksdn","t1pksall")
ally = prepare_analysis_y( ygr )
res = geometricorr_analysis( allx, ally )
return(res)
}
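# Usage sketch (kept inside if(FALSE) so nothing runs when this file is sourced):
# xgr holds gene-cluster ranges with a `values` column (e.g. "Nrps"/"T1pks" names),
# ygr holds repeat ranges with `values` in LINE/LTR/DNA. Coordinates below are
# placeholders, not real annotations.
if (FALSE) {
  xgr = GRanges("chr1", IRanges(start=c(1e5, 5e5), width=5e4),
                values=c("Nrps1", "T1pks3"))
  ygr = GRanges("chr1", IRanges(start=seq(1e4, 9e5, by=1e5), width=500),
                values=sample(c("LINE","LTR","DNA"), 9, replace=TRUE))
  res = analysis_gc(xgr, ygr)
  res[["x-y"]]   # overall gene clusters vs. all repeats
}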
|
/tools.r
|
no_license
|
yingzhang121/Tolypocladium_inflatum_paper
|
R
| false | false | 2,721 |
r
|
###############################################################################
## MAPPING
###############################################################################
## 00. PRELIMINARIES
###############################################################################
require(rgdal)
require(readxl)
require(tidyr)
require(dplyr)
require(RColorBrewer)
require(classInt)
## 01. DOWNLOAD, UNZIP
###############################################################################
## shape file source: DIVA GIS www.diva-gis.org
## Country: Myanmar
## Subject: Administrative areas (GADM)
## http://biogeo.ucdavis.edu/data/diva/adm/MMR_adm.zip - downloaded 11.8.2016
## this one seems like it doesn't fit census admin divisions...
## looks like themimu.info has both census and maps that match..
## 01.1 map
###############################################################################
map.path <- "sources/data/original/maps"
# data.url <-"http://geonode.themimu.info/geoserver/wfs?format_options=charset%3AUTF-8&typename=geonode%3Amyanmar_township_boundaries&outputFormat=SHAPE-ZIP&version=1.0.0&service=WFS&request=GetFeature"
# temp <- tempfile()
# download.file(data.url, temp)
# unzip(temp, exdir = map.path)
ShpMMRTownship <- readOGR(dsn=map.path, "myanmar_township_boundaries")
## 01.2 villages
map.path <- "sources/data/original/maps"
#http://geonode.themimu.info/layers/geonode%3Amagway_region_village_points
#http://geonode.themimu.info/layers/geonode%3Aayeyarwady_region_village_points
#http://geonode.themimu.info/layers/geonode%3AMandalay_region_village_points
PntMagway <- readOGR(dsn=map.path, "magway_region_village_points")
PntAyeyarwady <- readOGR(dsn=map.path, "ayeyarwady_region_village_points")
PntMandalay <- readOGR(dsn=map.path, "mandalay_region_village_points")
## 01.1 census - population based dataset
###############################################################################
# data.path <- "sources/data/original"
# # data.url <-paste0("http://www.themimu.info/sites/themimu.info/files/documents/",
# # "BaselineData_Census_Dataset_Township_MIMU_16Jun2016_ENG.xlsx")
# # data.location <- paste(data.path, "BaselineData_Census_Dataset_Township_MIMU_16Jun2016_ENG.xlsx", sep="/")
# # download.file(data.url, data.location, mode = "wb")
# estimates.data <- read_excel(data.location, sheet = 1, skip=1 )
# ## right, so this table has a 2 row-header, which makes importing directly
# ## pretty useless, as the column names become useless.
# ## so need to first fill in the NAs, but..
# ## the last columns - 259 - end don't have merged cells in first row
# ## and have to be treated diff...
# # first 258 cols:
# colnames(estimates.data)[1:258] <- zoo::na.locf(colnames(estimates.data)[1:258], from.last=TRUE)
#
# # manually last 6 sets of 4 cols
# colnames(estimates.data)[259:262] <- paste(colnames(estimates.data)[260], colnames(estimates.data)[261])
# colnames(estimates.data)[263:266] <- paste(colnames(estimates.data)[264], colnames(estimates.data)[265])
# colnames(estimates.data)[267:270] <- paste(colnames(estimates.data)[268], colnames(estimates.data)[269])
# colnames(estimates.data)[271:274] <- paste(colnames(estimates.data)[272], colnames(estimates.data)[273])
# colnames(estimates.data)[275:278] <- paste(colnames(estimates.data)[276], colnames(estimates.data)[277])
# colnames(estimates.data)[279:282] <- paste(colnames(estimates.data)[280], colnames(estimates.data)[281])
# # merge with 2 row of header and remove it from data
# colnames(estimates.data) <- paste(colnames(estimates.data), estimates.data[1,], sep=" - ")
# estimates.data<- estimates.data[-1,]
# # now we have to change the col types back to numeric
# estimates.data[, c(7:282)] <- sapply(estimates.data[, c(7:282)], as.numeric)
# # save clean table
# write.csv(estimates.data, file = "sources/data/clean/census.population.csv")
# # als save clean column names
# varz <- colnames(estimates.data)
# save(varz, file= "sources/data/clean/census.pop.varz.RData")
## 01.2 census - household based dataset
###############################################################################
# data.path <- "sources/data/original"
# # data.url <-paste0("http://www.themimu.info/sites/themimu.info/files/documents/",
# # "BaselineData_Census_Dataset_Township_MIMU_16Jun2016_ENG.xlsx")
# data.location <- paste(data.path, "BaselineData_Census_Dataset_Township_MIMU_16Jun2016_ENG.xlsx", sep="/")
# # download.file(data.url, data.location, mode = "wb")
# household.data <- read_excel(data.location, sheet = 2, skip=1 )
# ## right, so this table has a 2 row-header, which makes importing directly
# ## pretty useless, as the column names become useless.
# ## so need to first fill in the NAs, but..
# ## some manual cleaning
# colnames(household.data)[7:10] <- paste(colnames(household.data)[8], colnames(household.data)[9])
# colnames(household.data)[11:12] <- paste(colnames(household.data)[11], colnames(household.data)[12])
#
# colnames(household.data)[14:123] <- zoo::na.locf(colnames(household.data)[14:123], from.last=TRUE)
# # merge with 2 row of header and remove it from data
# colnames(household.data) <- paste(colnames(household.data), household.data[1,], sep=" - ")
# household.data<- household.data[-1,]
# # just fix name in col 13:
# colnames(household.data)[13] <- "Mean household size"
#
# # now we have to change the col types back to numeric
# household.data[, c(7:123)] <- sapply(household.data[, c(7:123)], as.numeric)
# # save clean table
# write.csv(household.data, file = "sources/data/clean/census.household.csv")
# # als save clean column names
# varz <- colnames(household.data)
# save(varz, file= "sources/data/clean/census.hh.varz.RData")
## 01.3 IRRI world rice stats
###############################################################################
data.path <- "sources/data/original"
# # data.url <-paste0("")
data.location <- paste(data.path, "IRRI-ALL-MYANMAR-DATA.xls", sep="/")
# # download.file(data.url, data.location, mode = "wb")
irri.data <- read_excel(data.location, sheet = 1, skip=3 )
irri.data$Value <- as.numeric(irri.data$Value)
write.csv(irri.data, file = "sources/data/clean/irri.data.csv")
## 02. merge map and pop.data, save
###############################################################################
# ShpMMRTownship@data <- left_join(ShpMMRTownship@data, estimates.data, by = c("TS_PCODE"="MIMU...Township.Pcode"))
# ShpMMRTownship@data <- left_join(ShpMMRTownship@data, household.data, by = c("TS_PCODE"="MIMU - Township Pcode"))
save(ShpMMRTownship, PntMagway, PntAyeyarwady,PntMandalay , file = "sources/data/clean/pop.map.RData")
range(unlist(ShpMMRTownship@data[306]), na.rm = TRUE)
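## Illustrative sketch (added, not part of the original script): once a numeric
## census column has been joined onto the shapefile, a quick choropleth can be
## drawn with the classInt and RColorBrewer helpers required at the top of this
## script. "pop_total" is a placeholder column name -- substitute any numeric
## column actually present after the join.
if ("pop_total" %in% names(ShpMMRTownship@data)) {
  brks <- classIntervals(ShpMMRTownship@data[["pop_total"]], n = 5, style = "quantile")$brks
  cols <- brewer.pal(5, "YlOrRd")
  plot(ShpMMRTownship,
       col = cols[findInterval(ShpMMRTownship@data[["pop_total"]], brks, all.inside = TRUE)],
       border = NA)
}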
|
/analysis/scripts/01-Mapping-Import-CleanUp.R
|
no_license
|
majazaloznik/MyanmarSecondaryData
|
R
| false | false | 6,815 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_to_readme_section.R
\name{write_to_readme_section}
\alias{write_to_readme_section}
\title{Writes to a README by section}
\usage{
write_to_readme_section(section_name, entry)
}
\description{
A vector of length 1 or greater is added, with a timestamp, under the header determined by the provided section_name to the README.md of the working directory.
}
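% Illustrative example added for clarity; the section name and entry are made
% up, and the call is wrapped in \dontrun{} because it writes to README.md in
% the working directory.
\examples{
\dontrun{
write_to_readme_section(
  section_name = "Changelog",
  entry = "Added draft data dictionary."
)
}
}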
|
/man/write_to_readme_section.Rd
|
no_license
|
meerapatelmd/readi
|
R
| false | true | 436 |
rd
|
library(data.table)
library(dplyr)
library(tidyr)
library(ggplot2)
library(gridExtra)
library(ggdendro)
library(cluster)
library(purrr)
library(tibble)
library(ggradar)
library(naniar)
library(lubridate)
library(fossil)
library(matrixStats)
library(kableExtra)
library(pastecs)
library(flexclust)
###############################################################################
################################ TABLES LOADING ###############################
###############################################################################
#load tables - XXX to be filled as required
setwd("~/XXX")
df_main <- fread("~/XXX/df_main_msm_sel.csv")
df_bio <- fread("~/XXX/df_bio_msm_sel.csv")
df_genes <- fread("~/XXX/df_genes_msm_sel.csv")
df_hes <- fread("~/XXX/df_hes_msm_sel.csv")
df_main <- as.data.frame(df_main)
df_bio <- as.data.frame(df_bio)
df_genes <- as.data.frame(df_genes)
df_hes <- as.data.frame(df_hes)
#load model
load("~/XXX/kmeans_2.RData")
kcca_model <- kcca_kmeans_2
cluster_names_ordered <- cluster_names_ordered_kmeans_2
cluster_ordered <- cluster_ordered_kmeans_2
###############################################################################
################################# GET FUNCTIONS ###############################
###############################################################################
source("~/functions.R")
###############################################################################
############################### PATIENTS SELECTION ############################
###############################################################################
##################### 1. SELECT DIABETIC PATIENTS AT INITIAL
#####################################################
#eid of diabetes at initial
eid_diabetes_initial <- df_main %>%
filter(status_init == "diabetes") %>%
filter(diabetes_class_source_init != "hba1c") %>%
filter(status_fu == "diabetes") %>%
dplyr::select(eid) %>%
unlist()
#dataframe of diabetes at initial
df_main_sel <- df_main %>%
filter(eid %in% eid_diabetes_initial)
df_bio_sel <- df_bio %>%
filter(eid %in% eid_diabetes_initial)
df_genes_sel <- df_genes %>%
filter(eid %in% eid_diabetes_initial)
df_hes_sel <- df_hes %>%
filter(eid %in% eid_diabetes_initial)
###############################################################################
################################# PREPARE DATA ################################
###############################################################################
##################### 1. PREPARE TABLES
#####################################################
#variables
var_main_init <- c("21001-0.0", "4080-0.0", "diabetes_age_diag_init", "status_init",
"diabetes_class_source_init")
var_bio_init <- c("30750-0.0", "30760-0.0", "30870-0.0")
var_main_fu <- c("21001-1.0", "4080-1.0", "diabetes_age_diag_fu", "status_fu",
"diabetes_class_source_fu")
var_bio_fu <- c("30750-1.0", "30760-1.0", "30870-1.0")
var_genes <- c("score_secr_weighted", "score_res_weighted")
#create table joined
df_all_sel_init <- prepare_tables_3df(
df_main_sel, c("eid", var_main_init),
df_bio_sel, c("eid", var_bio_init),
df_genes_sel,c("eid", var_genes),
"eid")
df_all_sel_fu <- prepare_tables_3df(
df_main_sel, c("eid", var_main_fu),
df_bio_sel, c("eid", var_bio_fu),
df_genes_sel,c("eid", var_genes),
"eid")
#rename columns
colnames(df_all_sel_init) <-
c("eid", "bmi", "sbp", "diabetes_age_diag", "status", "diabetes_class_source",
"hba1c", "hdl_chol","triglycerides",
"pgs_secr_w", "pgs_res_w")
colnames(df_all_sel_fu) <-
c("eid", "bmi", "sbp", "diabetes_age_diag", "status", "diabetes_class_source",
"hba1c", "hdl_chol", "triglycerides",
"pgs_secr_w", "pgs_res_w")
#complete_cases
var_compl_cases <-
c("bmi", "sbp", "diabetes_age_diag", "hba1c", "hdl_chol", "triglycerides",
"pgs_secr_w", "pgs_res_w")
compl_cases_init <- df_all_sel_init[complete.cases(df_all_sel_init[ ,var_compl_cases]),
"eid"]
compl_cases_fu <- df_all_sel_fu[complete.cases(df_all_sel_fu[ ,var_compl_cases]),
"eid"]
compl_cases <- intersect(compl_cases_init, compl_cases_fu)
#table with complete cases
df_all_sel_init <- df_all_sel_init %>%
filter(eid %in% compl_cases)
df_all_sel_fu <- df_all_sel_fu %>%
filter(eid %in% compl_cases)
##################### 2. ASSESSMENTS DATES
#####################################################
#Year and Date of Initial assessment
DOA1 <- df_main_sel %>%
filter(eid %in% compl_cases) %>%
dplyr::select(`53-0.0`)
YOA1 <- year(ymd(DOA1$`53-0.0`))
#Year and Date of FU assessment
DOA2 <- df_main_sel %>%
filter(eid %in% compl_cases) %>%
dplyr::select(`53-1.0`)
YOA2 <- year(ymd(DOA2$`53-1.0`))
#First and last year
first_year_a <- min(YOA1)
last_year_a <- max(YOA2)
#Year of follow-up
n_year_fu_a <- last_year_a - first_year_a + 1
##################### 3. DATE OF BIRTH
#####################################################
#Year of birth
YOB <- df_main_sel %>%
filter(eid %in% compl_cases) %>%
dplyr::select(`34-0.0`) %>%
unlist()
#Month of birth
MOB <- df_main_sel %>%
filter(eid %in% compl_cases) %>%
dplyr::select(`52-0.0`) %>%
unlist()
##################### 4. DATE OF DIABETES DIAGNOSIS
#####################################################
#initialize
YOD <- rep(NA, nrow(df_all_sel_init))
#Age of diagnosis from main matrix
AOD <- df_all_sel_init$diabetes_age_diag
df_all_sel_init %>%
filter(eid %in% unlist(df_all_sel_init$eid[which(is.na(AOD))]))
YOD <- as.integer(unlist(YOB + AOD))
#for patients born in H2 add +1 to YOD
for (ind in 1:length(MOB)){
if (MOB[ind] > 6){
YOD[ind] <- YOD[ind] + 1
}
}
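#note (added): the loop above is equivalent to the vectorised form
#YOD <- YOD + as.integer(MOB > 6)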
#check no patients with YOD>YOA1 - ok
sum(YOD>YOA1, na.rm = TRUE)
for (ind in 1:length(YOD)){
if (!is.na(YOD[ind])){
if (YOD[ind]<YOA1[ind]){
YOD[ind] <- YOD[ind] + 1
}
}
}
#check no patients with YOD>YOA2 - ok
sum(YOD>YOA2, na.rm = TRUE)
for (ind in 1:length(YOD)){
if (!is.na(YOD[ind])){
if (YOD[ind]>YOA2[ind]){
YOD[ind] <- YOD[ind] - 1
}
}
}
##################### 5. DATE OF CVD DIAGNOSIS
#####################################################
#cvd diagnostics code
icd10_cardiovascular_list <- c(paste0("I0",0:2), # acute rheumatic fever
paste0("I0",5:9), # chronic rheumatic
paste0("I",20:25), # ischemic heart
paste0("I",26:28), # pulmonary
paste0("I",30:52), # other heart disease
paste0("I",60:69), # cerebrovascular
paste0("I",70:76), # arteries -- excluding I77.6 (in autoimmune)
paste0("I77", 0:5),
paste0("I77", 7:9),
paste0("I",78:79),
paste0("I",81:82), # vein thrombosis
paste0("I",95:99)) # other circulatory
icd10_cardiovascular <- c()
for(i in 1:length(icd10_cardiovascular_list)){
tmp <- grep(paste0("^", icd10_cardiovascular_list[i]),
df_hes$diag_icd10,
value=TRUE)
icd10_cardiovascular <- c(icd10_cardiovascular, tmp)
}
icd10_cardiovascular <- unique(icd10_cardiovascular)
#select patients with cvd diagnosis at anytime
case_cvd <- df_hes_sel %>%
filter(eid %in% unlist(df_all_sel_init$eid)) %>%
filter(diag_icd10 %in% icd10_cardiovascular) %>%
dplyr::select(eid, epistart) %>%
arrange(eid, epistart)
#patients with cvd episode after diag.
case_cvd$year_epistart <- year(dmy(case_cvd$epistart))
case_cvd <- inner_join(data.frame(eid = df_all_sel_init$eid,
yod = YOD),
case_cvd, by="eid") %>%
filter(year_epistart >= yod) %>%
dplyr::select(eid, year_epistart, yod)
#date of first episode of cvd
eid_cvd <- unique(unlist(case_cvd$eid))
YOC <- rep(NA, nrow(df_all_sel_init))
for(i in 1:nrow(df_all_sel_init)){
if(df_all_sel_init$eid[i] %in% eid_cvd){
pos_sel <- min(which(case_cvd$eid == df_all_sel_init$eid[i]))
YOC[i] <- case_cvd$year_epistart[pos_sel]
}else{
YOC[i] <- NA
}
}
sum(!is.na(YOC))
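#note (added): a loop-free sketch of the same lookup with dplyr, taking the
#minimum episode year per patient rather than the first stored row:
#first_cvd <- case_cvd %>% group_by(eid) %>% summarise(yoc = min(year_epistart))
#YOC <- first_cvd$yoc[match(df_all_sel_init$eid, first_cvd$eid)]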
###############################################################################
################################# CREATE MATRIX ###############################
###############################################################################
##################### 1. GET BOUND DATES
#####################################################
#First year
##check min is diagnostic - ok
min(YOC, na.rm =TRUE)
min(YOD)
min(YOA1)
##assign first year
first_year <- min(YOD, na.rm = TRUE)
#Last year
##check max is compl.or YOA2
max(YOC, na.rm =TRUE)
max(YOD)
max(YOA2)
##assign last year
last_year <- max(YOC, na.rm = TRUE)
#Years of follow-up
n_year_fu <- last_year - first_year + 1
##################### 2. INITIALIZE MATRICES - ALL BUT AGE AT DIAGNOSIS
#####################################################
#Variables selected
var_selected <- c("bmi", "sbp", "hba1c", "hdl_chol","triglycerides",
"pgs_secr_w", "pgs_res_w")
#create matrices
matrices <- lapply(1:length(var_selected), matrix, data= NA,
nrow = nrow(df_all_sel_init), ncol = n_year_fu)
names(matrices) <- var_selected
for (ind in 1:nrow(df_all_sel_init)){
#obtain YOA
YOA1_ind <- YOA1[ind]
YOA2_ind <- YOA2[ind]
#obtain position of YOA
YOA1_pos <- YOA1_ind - first_year + 1
YOA2_pos <- YOA2_ind - first_year + 1
#fill matrices
for (col in 1:length(var_selected)){
#value at A1
matrices[[col]][ind, YOA1_pos] <- df_all_sel_init[ind, var_selected[col]]
#value at A2
matrices[[col]][ind, YOA2_pos] <- df_all_sel_fu[ind, var_selected[col]]
}
}
##################### 5. OBTAIN AGE AT DIAGNOSIS
#####################################################
mat_age_diag <- matrix(NA, nrow = nrow(df_all_sel_init), ncol = n_year_fu)
for (ind in 1:nrow(df_all_sel_init)){
#obtain first year
first_year_ind <- YOD[ind]
#obtain last year
last_year_ind <- YOC[ind]
#obtain position of first year
first_pos <- first_year_ind - first_year + 1
#obtain position of last year
if(is.na(last_year_ind)){
last_pos <- n_year_fu
}else{
last_pos <- last_year_ind - first_year + 1
}
#fill matrix
for (year in first_pos:last_pos){
mat_age_diag[ind, year] <- df_all_sel_init[ind, "diabetes_age_diag"]
}
}
###############################################################################
################################# INTERPOLATIONS ##############################
###############################################################################
##################### 1. INTERPOLATE ALL BUT AGES
#####################################################
mat_interpolated <- lapply(matrices, interpolate_linear)
##################### 2. EXTRAPOLATE ALL BUT AGES
#####################################################
mat_extrapolated <- lapply(mat_interpolated, function(mat){
extrapolate(mat, YOD, YOC, first_year)})
###############################################################################
############################### CLUSTER DATA ##################################
###############################################################################
##################### 1. SCALE
#####################################################
#age
mat_age_diag_scaled <- matrix(scale(as.vector(mat_age_diag)),
ncol = ncol(mat_age_diag))
#all others
mat_extrapolated_scaled <- lapply(mat_extrapolated, scale_matrix)
##################### 2. PREDICT
#####################################################
#prepare matrix
mat_clustered <- mat_age_diag_scaled
#loop over individuals
for(ind in 1:nrow(mat_clustered)){
#find pos with first non NA value
pos_start <- 1
while(is.na(mat_clustered[ind, pos_start])){
pos_start <- pos_start + 1
}
#find pos with last non NA value
if(!is.na(mat_clustered[ind, ncol(mat_clustered)])){
pos_end <- ncol(mat_clustered)
} else {
pos_end <- pos_start + 1
while(!is.na(mat_clustered[ind, pos_end])){
pos_end <- pos_end + 1
}
pos_end <- pos_end - 1
}
#loop over positions
for(pos in pos_start:pos_end){
mat_clustered[ind, pos] <-
predict(kcca_model,
data.frame(bmi = mat_extrapolated_scaled$bmi[ind, pos],
sbp = mat_extrapolated_scaled$sbp[ind, pos],
diabetes_age_diag = mat_age_diag_scaled[ind, pos],
hba1c = mat_extrapolated_scaled$hba1c[ind, pos],
hdl_chol = mat_extrapolated_scaled$hdl_chol[ind, pos],
triglycerides = mat_extrapolated_scaled$triglycerides[ind, pos],
pgs_secr_w = mat_extrapolated_scaled$pgs_secr_w[ind, pos],
pgs_res_w = mat_extrapolated_scaled$pgs_res_w[ind, pos]))
}
}
##################### 3. REORDER CLUSTERING
#####################################################
#clean matrix
mat_clustered_ordered <- matrix(NA, nrow = nrow(mat_clustered),
ncol = ncol(mat_clustered))
#reorder
for(i in 1:length(cluster_ordered)){
mat_clustered_ordered[
which(mat_clustered == cluster_ordered[i])] <- cluster_names_ordered[i]
}
apply(mat_clustered_ordered, 1, function(x)length(unique(x)))
mat_clustered_ordered[51,]
##################### 2. CLEAN CLUSTERING AND CODE FACTOR
#####################################################
#create matrices of variables
mat_variables <- lapply(1:(length(cluster_ordered)-1), matrix, data= 0,
nrow = nrow(mat_clustered_ordered),
ncol = ncol(mat_clustered_ordered))
names(mat_variables) <- cluster_names_ordered[-1]
#fill matrices
for(mat in 1:length(mat_variables)){
for(col in 1:ncol(mat_clustered_ordered)){
#replace NA with -1
mat_variables[[mat]][ ,col][
which(is.na(mat_clustered_ordered[ ,col]))] <- -1
#replace value of cluster
mat_variables[[mat]][ ,col][
which(mat_clustered_ordered[ ,col] ==
cluster_names_ordered[mat+1])] <- 1
}
}
##################### 3. CLEAN DATES
#####################################################
#deal with missing values
YOC[is.na(YOC)] <- 1900
#create matrix
dates <- data.frame(YOA1, YOA2, YOD, YOC)
##################### 4. GET INITIAL AND LAST CLUSTER CODED
#####################################################
cluster_init <- rep(NA, nrow(mat_clustered_ordered))
cluster_fu <- rep(NA, nrow(mat_clustered_ordered))
for(ind in 1:nrow(mat_clustered_ordered)){
#find pos with first non NA value
pos_start <- 1
while(is.na(mat_clustered_ordered[ind, pos_start])){
pos_start <- pos_start + 1
}
#find pos with last non NA value
if(!is.na(mat_clustered_ordered[ind, ncol(mat_clustered_ordered)])){
pos_end <- ncol(mat_clustered_ordered)
} else {
pos_end <- pos_start + 1
while(!is.na(mat_clustered_ordered[ind, pos_end])){
pos_end <- pos_end + 1
}
pos_end <- pos_end - 1
}
#get values
cluster_init[ind] <- mat_clustered_ordered[ind, pos_start]
cluster_fu[ind] <- mat_clustered_ordered[ind, pos_end]
#code
for(i in 1:length(cluster_names_ordered)){
cluster_init[which(cluster_init == cluster_names_ordered[i])] <- i
cluster_fu[which(cluster_fu == cluster_names_ordered[i])] <- i
}
}
clusters <- data.frame(cluster_init, cluster_fu)
##################### 5. STORE
#####################################################
out_dir <- "~/XXX/"
simul_name <- "_simul_V5"
save_for_mcmc(dates, out_dir, "dates", simul_name)
save_for_mcmc(mat_variables$MARD, out_dir, "mard", simul_name)
save_for_mcmc(mat_variables$MOD, out_dir, "mod", simul_name)
save_for_mcmc(mat_variables$SIRD, out_dir, "sird", simul_name)
save_for_mcmc(clusters, out_dir, "clusters", simul_name)
|
/mcmc/MCMC_v5.R
|
no_license
|
shalabysar/T2D_study
|
R
| false | false | 16,062 |
r
|
best <- function(state, outcome, datafile="outcome-of-care-measures.csv") {
## read outcome data
outcomes <- read.csv(datafile, colClasses = "character")
## Check that state and outcome are OK
if(!(state %in% outcomes$State)) { stop("invalid state")}
if(!(outcome %in% c("heart attack","heart failure","pneumonia"))){ stop("invalid outcome")}
## variable "cause" is a column number in a table we construct later...
## ...there must be a better way...
if(outcome == "heart attack") {cause <- 2}
if(outcome == "heart failure") {cause <- 3}
if(outcome == "pneumonia") {cause <- 4}
## Return hospital name in that state with lowest 30-day death
## rate
statedata <- subset(outcomes,State == state)
stateoutcomes <- cbind(statedata[,2],statedata[,11],statedata[,17],statedata[,23])
colnames(stateoutcomes) <- c("Hospital","heart attack", "heart failure", "pneumonia")
# Now we have a nice matrix with the data we care about
# We need to find the minimum value, discarding NAs ...
suppressWarnings(minharm <- range(as.numeric(stateoutcomes[,cause]), na.rm=TRUE)[1])
# ... and put all rows with that value into bests
suppressWarnings(bests <- as.numeric(stateoutcomes[,cause]) == minharm)
bests <- which(as.logical(bests))
# ... and return stateoutcomes[bests,1]
  stateoutcomes[bests, 1]
}
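## Example calls (assume "outcome-of-care-measures.csv" is in the working
## directory, as in the course assignment):
## best("TX", "heart attack")
## best("MD", "pneumonia")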
|
/best.R
|
no_license
|
derwinmcgeary/ProgrammingAssignment3
|
R
| false | false | 1,342 |
r
|
library(Luminescence)
### Name: read_XSYG2R
### Title: Import XSYG files to R
### Aliases: read_XSYG2R
### Keywords: IO
### ** Examples
##(1) import XSYG file to R (uncomment for usage)
#FILE <- file.choose()
#temp <- read_XSYG2R(FILE)
##(2) additional examples for pure XML import using the package XML
## (uncomment for usage)
##import entire XML file
#FILE <- file.choose()
#temp <- XML::xmlRoot(XML::xmlTreeParse(FILE))
##search for specific subnodes with curves containing 'OSL'
#getNodeSet(temp, "//Sample/Sequence/Record[@recordType = 'OSL']/Curve")
##(2) How to extract single curves ... after import
data(ExampleData.XSYG, envir = environment())
##grep the OSL curves and plot the first curve
OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType="OSL")[[1]]
##(3) How to see the structure of an object?
structure_RLum(OSL.SARMeasurement$Sequence.Object)
|
/data/genthat_extracted_code/Luminescence/examples/read_XSYG2R.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 907 |
r
|
setwd("~/Desktop/Training/Coursera/Data_Science/Course_4/Project_1")
plotData <- read.table("household_power_consumption.txt", header=T, sep=";", na.strings="?")
## set time variable
finalData <- plotData[plotData$Date %in% c("1/2/2007","2/2/2007"),]
SetTime <-strptime(paste(finalData$Date, finalData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
finalData <- cbind(SetTime, finalData)
png(filename="plot4.png")
## Generating Plot 4
labels <- c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
columnlines <- c("black","red","blue")
par(mfrow=c(2,2))
plot(finalData$SetTime, finalData$Global_active_power, type="l", col="green", xlab="", ylab="Global Active Power")
plot(finalData$SetTime, finalData$Voltage, type="l", col="orange", xlab="datetime", ylab="Voltage")
plot(finalData$SetTime, finalData$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(finalData$SetTime, finalData$Sub_metering_2, type="l", col="red")
lines(finalData$SetTime, finalData$Sub_metering_3, type="l", col="blue")
legend("topright", bty="n", legend=labels, lty=1, col=columnlines)
plot(finalData$SetTime, finalData$Global_reactive_power, type="l", col="blue", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
singharu/Code-Plots
|
R
| false | false | 1,208 |
r
|
setwd("~/Desktop/Training/Coursera/Data_Science/Course_4/Project_1")
plotData <- read.table("household_power_consumption.txt", header=T, sep=";", na.strings="?")
## set time variable
finalData <- plotData[plotData$Date %in% c("1/2/2007","2/2/2007"),]
SetTime <-strptime(paste(finalData$Date, finalData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
finalData <- cbind(SetTime, finalData)
png(filename="plot4.png")
## Generating Plot 4
labels <- c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
columnlines <- c("black","red","blue")
par(mfrow=c(2,2))
plot(finalData$SetTime, finalData$Global_active_power, type="l", col="green", xlab="", ylab="Global Active Power")
plot(finalData$SetTime, finalData$Voltage, type="l", col="orange", xlab="datetime", ylab="Voltage")
plot(finalData$SetTime, finalData$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(finalData$SetTime, finalData$Sub_metering_2, type="l", col="red")
lines(finalData$SetTime, finalData$Sub_metering_3, type="l", col="blue")
legend("topright", bty="n", legend=labels, lty=1, col=columnlines)
plot(finalData$SetTime, finalData$Global_reactive_power, type="l", col="blue", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
## Creates a function with a matrix object that caches and stores its inverse.
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setInverse <- function(inversefun) inverse <<- inversefun
getInverse <- function() inverse
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
##Creates a function to compute the inverse of the matrix created in the previous
## function. If the inverse has been calculated, it simply retrieves the inverse from the
## cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
if (!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
mat <- x$get()
inverse <- solve(mat, ...)
x$setInverse(inverse)
inverse
}
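## Quick illustration (added, not part of the original assignment code):
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cm) # computes the inverse and caches it
cacheSolve(cm) # second call returns the cached inverse ("getting cached data")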
|
/cachematrix.R
|
no_license
|
tatinthehat/ProgrammingAssignment2
|
R
| false | false | 907 |
r
|
# This file shows the approximator package used on a simple 1-d test
# case. It generates some data randomly, then attempts to infer what
# the parameters used to generate that data are. One can then compare
# the estimates with the true values.
# load the libraries:
library(approximator)
library(emulator)
# set seed:
set.seed(0)
# First a design matrix:
D1.1d <- matrix(1:6)
# And a subsets object
source("subsets_1d.R")
# and some basis functions:
"basis.1d" <-
function (x)
{
out <- cbind(1,x)
colnames(out) <- c("const","x")
return(out)
}
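# quick illustrative check (added): the basis matrix for a few design points
basis.1d(1:3)   # 3 x 2 matrix with columns "const" and "x"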
# create a hyperparameter function:
source("hpafun_1d.R")
#...and call it, to create hyperparameter object hpa.1d:
hpa.1d <- hpa.fun.1d(1:9)
# Now a function that creates data:
source("datamaker_1d.R")
z.1d <- generate.1d.observations(D1=D1.1d, subsets=subsets.1d, basis.fun=basis.1d, hpa=hpa.1d, betas = NULL, export.truth=FALSE)
# Now some checks. First, look at H:
jj.H <- H.fun.app(D1=D1.1d, subsets=subsets.1d , basis=basis.1d , hpa=hpa.1d)
# Look at jj.H and verify that it is right.
# Now optimize the hyperparameters:
a1 <- opt.1(D=D1.1d , z=z.1d , basis=basis.1d , subsets=subsets.1d , hpa=hpa.1d)
a2 <- opt.gt.1(level=2 , D=D1.1d , z=z.1d , basis=basis.1d , subsets=subsets.1d , hpa=hpa.1d)
# And use the second-level optimized hyperparameters (i.e. a2) to give the emulator mean:
jj.ans <- mdash.fun(3,D1=D1.1d,subsets=subsets.1d,hpa=a2,z=z.1d,basis=basis.1d)
# (preceding line gives a weird error, under MacOSX 10.5.6, with R-GUI
# 1.28 but no error when running R from the command line).
# And its variance:
jj.var <- c_fun(x=as.matrix(4),xdash=as.matrix(5),subsets=subsets.1d,hpa=hpa.1d)
|
/inst/doc/one.dim/apprex_1d.R
|
no_license
|
cran/approximator
|
R
| false | false | 1,684 |
r
|
###removing NA
> y=c("a",NA,"c",NA,NA,"f")
> x=c(1,2,NA,4,NA,6)
> good=complete.cases(x,y)
> x(good)
Error: could not find function "x"
> x[good]
[1] 1 6
>
#list of files from a directory
l=list.files(path = "./specdata")
setwd("C:/Sony-Working folder Jan-2015/2015 Data science -Harvard/2015.01. CSCI E063 Big data analytics/R work directory images")
mydata = read.xls("mydata.xls")
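#note (added): read.xls() is not in base R; it most likely comes from the gdata
#package (or a similar Excel-import package), so load that first:
#library(gdata)
#mydata = read.xls("mydata.xls")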
|
/R tips.R
|
no_license
|
Ambooken/Github-details
|
R
| false | false | 401 |
r
|
testlist <- list(x = structure(c(2.31584307392677e+77, 9.50026155582223e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance::fastdist,testlist)
str(result)
|
/multivariance/inst/testfiles/fastdist/AFL_fastdist/fastdist_valgrind_files/1613098203-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 303 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.genewise.R
\name{model.genewise}
\alias{model.genewise}
\title{Modeling NB Genewise Dispersion Using NBPSeq}
\usage{
model.genewise(counts, x)
}
\arguments{
\item{counts}{an m-by-n count matrix of non-negative integers. For a typical
RNA-Seq experiment, this is the read counts with m genes and n samples.}
\item{x}{an n-by-p design matrix.}
}
\value{
A list of quantities to be used in the main \code{\link{nb.gof.m}} function.
}
\description{
This function fits an NB regression model with
genewise dispersions using the adjusted profile likelihood estimator. See details below. The output of this function
will be passed to the main GOF function \code{\link{nb.gof.m}}.
}
\details{
details here (HOA)
}
\references{
See \url{https://github.com/gu-mi/NBGOF/wiki/} for more details.
}
\author{
Gu Mi <neo.migu@gmail.com>, Yanming Di, Daniel Schafer
}
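% Illustrative (hypothetical) example added for clarity: simulated inputs that
% only exercise the documented signature; wrapped in \dontrun{}.
\examples{
\dontrun{
counts <- matrix(rnbinom(200, mu = 10, size = 2), nrow = 50, ncol = 4)
x <- model.matrix(~ factor(c(1, 1, 2, 2)))
fit <- model.genewise(counts, x)
}
}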
|
/man/model.genewise.Rd
|
no_license
|
jjlcathy/NBGOF
|
R
| false | true | 938 |
rd
|
skip_if_not_installed("gam")
s <- gam::s
data(kyphosis, package = "rpart")
void <- capture.output(m1 <- gam::gam(
Kyphosis ~ s(Age, 4) + Number,
family = binomial,
data = kyphosis,
trace = TRUE
))
test_that("model_info", {
expect_true(model_info(m1)$is_binomial)
expect_false(model_info(m1)$is_linear)
})
test_that("find_predictors", {
expect_identical(find_predictors(m1), list(conditional = c("Age", "Number")))
expect_identical(find_predictors(m1, flatten = TRUE), c("Age", "Number"))
expect_null(find_predictors(m1, effects = "random"))
})
test_that("find_response", {
expect_identical(find_response(m1), "Kyphosis")
})
test_that("link_inverse", {
expect_equal(link_inverse(m1)(0.2), plogis(0.2), tolerance = 1e-5)
})
test_that("get_data", {
expect_equal(nrow(get_data(m1)), 81)
expect_equal(colnames(get_data(m1)), c("Kyphosis", "Age", "Number"))
})
test_that("find_formula", {
expect_length(find_formula(m1), 1)
expect_equal(
find_formula(m1),
list(conditional = as.formula("Kyphosis ~ s(Age, 4) + Number")),
ignore_attr = TRUE
)
})
test_that("find_terms", {
expect_equal(
find_terms(m1),
list(
response = "Kyphosis",
conditional = c("s(Age, 4)", "Number")
)
)
expect_equal(
find_terms(m1, flatten = TRUE),
c("Kyphosis", "s(Age, 4)", "Number")
)
})
test_that("find_variables", {
expect_equal(
find_variables(m1),
list(
response = "Kyphosis",
conditional = c("Age", "Number")
)
)
expect_equal(
find_variables(m1, flatten = TRUE),
c("Kyphosis", "Age", "Number")
)
})
test_that("n_obs", {
expect_equal(n_obs(m1), 81)
})
test_that("linkfun", {
expect_false(is.null(link_function(m1)))
})
test_that("find_parameters", {
expect_equal(
find_parameters(m1),
list(
conditional = c("(Intercept)", "Number"),
smooth_terms = "s(Age, 4)"
)
)
expect_equal(nrow(get_parameters(m1)), 3)
expect_equal(
get_parameters(m1)$Parameter,
c("(Intercept)", "Number", "s(Age, 4)")
)
})
test_that("is_multivariate", {
expect_false(is_multivariate(m1))
})
test_that("find_algorithm", {
expect_equal(find_algorithm(m1), list(algorithm = "IWLS"))
})
test_that("find_statistic", {
expect_identical(find_statistic(m1), "F-statistic")
})
|
/tests/testthat/test-Gam2.R
|
no_license
|
cran/insight
|
R
| false | false | 2,410 |
r
|
\name{getCounts}
\docType{methods}
\alias{getCounts-methods}
\alias{getCounts,Counts-method}
\alias{getCounts}
\alias{getFractions-methods}
\alias{getFractions,Counts-method}
\alias{getFractions}
\title{Accessors for the 'counts' and 'fractions' slots of a Counts object.}
\description{
Each measurement consists of an integer count and a corresponding sampling fraction. These values are required to define an object of class \code{Counts} and are subsequently stored in the counts and fractions slots. The counts slot is an integer vector of counts. The fractions slot is a numeric vector of matched sampling fractions.
}
\section{Methods}{
\describe{
\item{\code{signature(object = "Counts")}}{
an object of class Counts.
%% ~~describe this method here~~
}
}}
\seealso{
\code{\link{Counts}}
}
\author{
Federico Comoglio, federico.comoglio@bsse.ethz.ch
}
\examples{
K <- newCounts( counts = c(20,30), fractions = c(0.075, 0.1))
getCounts(K)
getFractions(K)
}
\keyword{methods}
|
/man/accessors.Rd
|
no_license
|
cran/dupiR
|
R
| false | false | 1,001 |
rd
|
\name{getCounts}
\docType{methods}
\alias{getCounts-methods}
\alias{getCounts,Counts-method}
\alias{getCounts}
\alias{getFractions-methods}
\alias{getFractions,Counts-method}
\alias{getFractions}
\title{Accessors for the 'counts' and 'fractions' slots of a Counts object.}
\description{
Each measurement consists of an integer count and a corresponding sampling fraction. These values are required to define an object of class \code{Counts} and are subsequently stored in the counts and fractions slots. The counts slot is an integer vector of counts. The fractions slot is a numeric vector of matched sampling fractions.
}
\section{Methods}{
\describe{
\item{\code{signature(object = "Counts")}}{
an object of class Counts.
%% ~~describe this method here~~
}
}}
\seealso{
\code{\link{Counts}}
}
\author{
Federico Comoglio, federico.comoglio@bsse.ethz.ch
}
\examples{
K <- newCounts( counts = c(20,30), fractions = c(0.075, 0.1))
getCounts(K)
getFractions(K)
}
\keyword{methods}
|
### read data into R
data<- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", nrows = 69516, stringsAsFactors = FALSE)
### subset data to include only the dates we are interested in
powerdata<- data[66637:69516, ]
### transform date and time columns
date_time <- paste(as.Date(powerdata$Date, format = "%d/%m/%Y"), powerdata$Time)
powerdata$Date_Time<- as.POSIXct(date_time)
### script to plot data in a histogram and save as a png file
with(powerdata, hist(Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red"))
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
/plot1.R
|
no_license
|
lmauter/ExData_Plotting1
|
R
| false | false | 656 |
r
|
### read data into R
data<- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", nrows = 69516, stringsAsFactors = FALSE)
### subset data to include only the dates we are interested in
powerdata<- data[66637:69516, ]
### transform date and time columns
date_time <- paste(as.Date(powerdata$Date, format = "%d/%m/%Y"), powerdata$Time)
powerdata$Date_Time<- as.POSIXct(date_time)
### script to plot data in a histogram and save as a png file
with(powerdata, hist(Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red"))
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
testlist <- list(a = 0L, b = 0L, x = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610056108-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 157 |
r
|
testlist <- list(a = 0L, b = 0L, x = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
#' Format numbers in constant width with leading zeros
#'
#' Formats numbers for alphabetic sorting by padding with zeros
#' to constant string width.
#' If width is not supplied, a default value is calculated
#' based on the values in \code{x}.
#'
#' @param x Vector of numbers.
#' @param width Width of character values returned.
#' @param digits Number of decimal places (defaults to 0).
#' @examples
#' paste0('ID', zeropad(1:20))
#' @export
zeropad = function(x, digits=0, width=NULL) {
if (is.null(width))
width = floor(max(log10(x))) + ifelse(digits>0, 2+digits, 1)
formatC(x, format="f", width=width, digits=digits, flag='0')
}
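# Worked example of the default width (kept as a comment so that no top-level code
# is added to this package source file): for x = 1:20, floor(max(log10(x))) + 1 = 2, so
#   zeropad(1:20)          # "01" "02" ... "20"
# and with decimals the width expands by 2 + digits:
#   zeropad(7, digits = 2) # "7.00"  (width = 0 + 2 + 2 = 4)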
|
/R/zeropad.R
|
no_license
|
stevetnz/stevesRfunctions
|
R
| false | false | 644 |
r
|
#' Format numbers in constant width with leading zeros
#'
#' Formats numbers for alphabetic sorting by padding with zeros
#' to constant string width.
#' If width is not supplied, a default value is calculated
#' based on the values in \code{x}.
#'
#' @param x Vector of numbers.
#' @param width Width of character values returned.
#' @param digits Number of decimal places (defaults to 0).
#' @examples
#' paste0('ID', zeropad(1:20))
#' @export
zeropad = function(x, digits=0, width=NULL) {
if (is.null(width))
width = floor(max(log10(x))) + ifelse(digits>0, 2+digits, 1)
formatC(x, format="f", width=width, digits=digits, flag='0')
}
|
#' Counting the Number of Motifs in RNA or Protein Sequences
#' @description Counts the number of motifs occurring in RNA/protein sequences. Motifs employed by tool "rpiCOOL"
#' can be selected. New motifs can also be defined.
#'
#' @param seqs sequences loaded by function \code{\link[seqinr]{read.fasta}} of package
#' "seqinr" (\code{\link[seqinr]{seqinr-package}}). Or a list of RNA/protein sequences.
#' RNA sequences will be converted into lower case letters, but
#' protein sequences will be converted into upper case letters.
#' Each sequence should be a vector of single characters.
#' @param seqType a string that specifies the nature of the sequence: \code{"RNA"} or \code{"Pro"} (protein).
#' If the input is DNA sequence and \code{seqType = "RNA"}, the DNA sequence will be converted to RNA sequence automatically.
#' Default: \code{"RNA"}.
#' @param motifRNA strings specifying the motifs that are counted in RNA sequences. Ignored if \code{seqType = "Pro"}.
#' Options: \code{"rpiCOOL"}, \code{"selected5"},
#' \code{"Fox1"}, \code{"Nova"}, \code{"Slm2"}, \code{"Fusip1"}, \code{"PTB"}, \code{"ARE"}, \code{"hnRNPA1"},
#' \code{"PUM"}, \code{"U1A"}, \code{"HuD"}, \code{"QKI"}, \code{"U2B"}, \code{"SF1"}, \code{"HuR"}, \code{"YB1"},
#' \code{"AU"}, and \code{"UG"}. Multiple elements can be selected at the same time.
#' If \code{"rpiCOOL"}, all default motifs will be counted.
#' \code{"selected5"} indicates the the total number of the occurrences of: PUM, Fox-1, U1A, Nova, and ARE which
#' are regarded as the five most over-presented binding motifs. See details below.
#' @param motifPro strings specifying the motifs that are counted in protein sequences. Ignored if \code{seqType = "RNA"}.
#' Options: \code{"rpiCOOL"}, \code{"E"}, \code{"H"}, \code{"K"}, \code{"R"}, \code{"H_R"},
#' \code{"EE"}, \code{"KK"}, \code{"HR_RH"}, \code{"RS_SR"},
#' \code{"RGG"}, and \code{"YGG"}. Multiple elements can be selected at the same time.
#' \code{"H_R"} indicates the total number of the occurrences of: H and R.
#' \code{"HR_RH"} indicates the total number of the occurrences of: HR and RH.
#' \code{"RS_SR"} indicates the total number of the occurrences of: RS and SR.
#' If \code{"rpiCOOL"}, default motifs of rpiCOOL (\code{"E"}, \code{"K"}, \code{"H_R"},
#' \code{"EE"}, \code{"KK"}, \code{"RS_SR"}, \code{"RGG"}, and \code{"YGG"}) will be counted.
#' See details below.
#' @param newMotif list defining new motifs not listed above. New motifs are counted in RNA or protein sequences.
#' For example, \code{newMotif = list(hnRNPA1 = c("UAGGGU", "UAGGGA"), SF1 = "UACUAAC")}.
#' This parameter can be used together with parameter \code{motifRNA} or \code{motifPro} to count various motifs. Default: \code{NULL}.
#' @param newMotifOnly logical. If \code{TRUE}, only the new motifs defined in \code{newMotif} will be counted.
#' Default: \code{FALSE}.
#' @param parallel.cores an integer specifying the number of cores for parallel computation. Default: \code{2}.
#' Set \code{parallel.cores = -1} to run with all the cores.
#'
#' @return This function returns a data frame. Row names are the sequences names, and column names are the motif names.
#'
#' @details This function can count the motifs in RNA or protein sequences.
#'
#' The default motifs are selected or derived from tool "rpiCOOL" (Ref: [2]).
#'
#' \itemize{
#' \item Motifs of RNA
#'
#' \enumerate{
#' \item Fox1: UGCAUGU;
#' \item Nova: UCAUUUCAC, UCAUUUCAU, CCAUUUCAC, CCAUUUCAU;
#' \item Slm2: UAAAC, UAAAA, UAAUC, UAAUA;
#' \item Fusip1: AAAGA, AAAGG, AGAGA, AGAGG, CAAGA, CAAGG, CGAGA, CGAGG;
#' \item PTB: UUUUU, UUUCU, UCUUU, UCUCU;
#' \item ARE: UAUUUAUU;
#' \item hnRNPA1: UAGGGU, UAGGGA;
#' \item PUM: UGUAAAUA, UGUAGAUA, UGUAUAUA, UGUACAUA;
#' \item U1A: AUUGCAC;
#' \item HuD: UUAUUU;
#' \item QKI: AUUAAU, AUUAAC, ACUAAU, ACUAAC;
#' \item U2B: AUUGCAG;
#' \item SF1: UACUAAC;
#' \item HuR: UUUAUUU, UUUGUUU, UUUCUUU, UUUUUUU;
#' \item YB1: CCUGCG, UCUGCG;
#' \item AU: AU;
#' \item UG: UG.
#'
#' If \code{"rpiCOOL"}, all default motifs will be counted, and there is no need to input other default motifs.
#' \code{"selected5"} indicates the total number of the occurrences of: PUM, Fox-1, U1A, Nova, and ARE which
#' are regarded as the five most over-represented binding motifs.}
#'
#' \item Motifs of protein
#'
#' \enumerate{
#' \item E: E;
#' \item H: H;
#' \item K: K;
#' \item R: R;
#' \item EE: EE;
#' \item KK: KK;
#' \item HR (\code{"H_R"}): H, R;
#' \item HR (\code{"HR_RH"}): HR, RH;
#' \item RS (\code{"RS_SR"}): RS, SR;
#' \item RGG: RGG;
#' \item YGG: YGG.
#'
#' If \code{"rpiCOOL"}, default motifs of rpiCOOL (\code{"E"}, \code{"K"}, \code{"H_R"},
#' \code{"EE"}, \code{"KK"}, \code{"RS_SR"}, \code{"RGG"}, and \code{"YGG"}) will be counted.
#' } }
#'
#' There are some minor differences between this function and the extraction scheme of rpiCOOL.
#' In this function, motifs will be scanned directly.
#' As to the extraction scheme of rpiCOOL, some motifs (\code{"UG"}, \code{"AU"}, and \code{"H_R"})
#' are scanned in a 10 nt/aa sliding-window.
#'
#' @section References:
#' [1] Han S, Liang Y, Li Y, \emph{et al}.
#' ncProR: an integrated R package for effective ncRNA-protein interaction prediction.
#' (\emph{Submitted})
#'
#' [2] Akbaripour-Elahabad M, Zahiri J, Rafeh R, \emph{et al}.
#' rpiCOOL: A tool for In Silico RNA-protein interaction detection using random forest.
#' J. Theor. Biol. 2016; 402:1-8
#'
#' [3] Pancaldi V, Bahler J.
#' In silico characterization and prediction of global protein-mRNA interactions in yeast.
#' Nucleic Acids Res. 2011; 39:5826-36
#'
#' [4] Castello A, Fischer B, Eichelbaum K, \emph{et al}.
#' Insights into RNA Biology from an Atlas of Mammalian mRNA-Binding Proteins.
#' Cell 2012; 149:1393-1406
#'
#' [5] Ray D, Kazan H, Cook KB, \emph{et al}.
#' A compendium of RNA-binding motifs for decoding gene regulation.
#' Nature 2013; 499:172-177
#'
#' [6] Jiang P, Singh M, Coller HA.
#' Computational assessment of the cooperativity between RNA binding proteins and MicroRNAs in Transcript Decay.
#' PLoS Comput. Biol. 2013; 9:e1003075
#'
#' @importFrom parallel makeCluster
#' @importFrom parallel parLapply
#' @importFrom parallel stopCluster
#' @importFrom parallel detectCores
#' @importFrom seqinr getSequence
#' @seealso \code{\link{featureMotifs}}
#' @examples
#' data(demoPositiveSeq)
#' seqsRNA <- demoPositiveSeq$RNA.positive
#' seqsPro <- demoPositiveSeq$Pro.positive
#'
#' motifRNA1 <- computeMotifs(seqsRNA, seqType = "RNA", motifRNA = "rpiCOOL",
#' parallel.cores = 2)
#' motifRNA2 <- computeMotifs(seqsRNA, seqType = "RNA",
#' motifRNA = c("Fox1", "HuR", "ARE"), parallel.cores = 2)
#'
#' motifPro1 <- computeMotifs(seqsPro, seqType = "Pro",
#' motifPro = c("rpiCOOL", "HR_RH"), parallel.cores = 2)
#' motifPro2 <- computeMotifs(seqsPro, seqType = "Pro", motifPro = c("E", "K", "KK"),
#' newMotif = list(HR_RH = c("HR", "RH"), RGG = "RGG"),
#' parallel.cores = 2)
#' motifPro3 <- computeMotifs(seqsPro, seqType = "Pro",
#' newMotif = list(HR_RH = c("HR", "RH"), RGG = "RGG"),
#' newMotifOnly = TRUE, parallel.cores = 2)
#' @export
computeMotifs <- function(seqs, seqType = c("RNA", "Pro"),
motifRNA = c("rpiCOOL", "Fox1", "Nova", "Slm2", "Fusip1", "PTB", "ARE", "hnRNPA1", "PUM",
"U1A", "HuD", "QKI", "U2B", "SF1", "HuR", "YB1", "AU", "UG", "selected5"),
motifPro = c("rpiCOOL", "E", "H", "K", "R", "H_R", "EE", "KK", "HR_RH", "RS_SR",
"RGG", "YGG"),
newMotif = NULL, newMotifOnly = FALSE, parallel.cores = 2) {
message("+ Initializing... ", Sys.time())
seqType <- match.arg(seqType)
parallel.cores <- ifelse(parallel.cores == -1, parallel::detectCores(), parallel.cores)
cl <- parallel::makeCluster(parallel.cores)
if (seqType == "RNA") seqs <- parallel::parLapply(cl, seqs, Internal.checkRNA)
seqs <- sapply(seqs, seqinr::getSequence, as.string = TRUE)
motifPatterns <- NULL
if (!is.null(newMotif)) {
message("- Formatting the new defined motifs... ")
motifPatterns <- lapply(newMotif, function(x) {
motifExpr <- paste(x, collapse = ")|(?=")
motifExpr <- paste0("(?=", motifExpr, ")")
motifExpr
})
}
if (seqType == "RNA" & !newMotifOnly) {
candidateMotif <- unique(match.arg(motifRNA, several.ok = TRUE))
if ("rpiCOOL" %in% candidateMotif) {
candidateMotif <- c("Fox1", "Nova", "Slm2", "Fusip1", "PTB", "ARE", "hnRNPA1", "PUM", "U1A",
"HuD", "QKI", "U2B", "SF1", "HuR", "YB1", "AU", "UG", "selected5")
}
message("\n", "+ Processing the default motifs. ", Sys.time(), "\n", " ", paste(candidateMotif, collapse = ", "), "\n")
Fox1 <- "(?=UGCAUGU)"
Nova <- c("(?=UCAUUUCAC)|(?=UCAUUUCAU)|(?=CCAUUUCAC)|(?=CCAUUUCAU)")
Slm2 <- c("(?=UAAAC)|(?=UAAAA)|(?=UAAUC)|(?=UAAUA)")
Fusip1 <- c("(?=AAAGA)|(?=AAAGG)|(?=AGAGA)|(?=AGAGG)|(?=CAAGA)|(?=CAAGG)|(?=CGAGA)|(?=CGAGG)")
PTB <- c("(?=UUUUU)|(?=UUUCU)|(?=UCUUU)|(?=UCUCU)")
ARE <- "(?=UAUUUAUU)"
hnRNPA1 <- c("(?=UAGGGU)|(?=UAGGGA)")
PUM <- c("(?=UGUAAAUA)|(?=UGUAGAUA)|(?=UGUAUAUA)|(?=UGUACAUA)")
U1A <- "(?=AUUGCAC)"
HuD <- "(?=UUAUUU)"
QKI <- c("(?=AUUAAU)|(?=AUUAAC)|(?=ACUAAU)|(?=ACUAAC)")
U2B <- "(?=AUUGCAG)"
SF1 <- "(?=UACUAAC)"
HuR <- c("(?=UUUAUUU)|(?=UUUGUUU)|(?=UUUCUUU)|(?=UUUUUUU)")
YB1 <- c("(?=CCUGCG)|(?=UCUGCG)")
AU <- "(?=AU)"
UG <- "(?=UG)"
selected5 <- paste(Fox1, Nova, ARE, PUM, U1A, sep = "|")
Patterns <- mget(candidateMotif)
motifPatterns <- c(motifPatterns, Patterns)
}
if (seqType == "Pro" & !newMotifOnly) {
candidateMotif <- match.arg(motifPro, several.ok = TRUE)
if ("rpiCOOL" %in% candidateMotif) {
candidateMotif <- c(candidateMotif, "E", "K", "EE", "KK", "RS_SR", "RGG", "YGG", "H_R")
candidateMotif <- unique(candidateMotif)
candidateMotif <- candidateMotif[!candidateMotif %in% "rpiCOOL"]
}
message("\n", "+ Processing the default motifs. ", Sys.time(), "\n", " ", paste(candidateMotif, collapse = ", "), "\n")
E <- "E"
H <- "H"
K <- "K"
R <- "R"
H_R <- c("(?=H)|(?=R)")
EE <- "(?=EE)"
KK <- "(?=KK)"
HR_RH <- c("(?=HR)|(?=RH)")
RS_SR <- c("(?=RS)|(?=SR)")
RGG <- "(?=RGG)"
YGG <- "(?=YGG)"
Patterns <- mget(candidateMotif)
motifPatterns <- c(motifPatterns, Patterns)
}
motifCounts <- parallel::parLapply(cl, seqs, Internal.computeMotifs, motifs = motifPatterns)
parallel::stopCluster(cl)
motifCounts <- as.data.frame(t(data.frame(motifCounts, check.names = F)))
formatNames <- paste("motif_", names(motifCounts), sep = "")
names(motifCounts) <- formatNames
message("+ Completed. ", Sys.time(), "\n")
motifCounts
}
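# Illustrative sketch, not part of the package: the patterns built above are Perl
# look-aheads (e.g. "(?=UG)"), so overlapping occurrences can be counted with
# gregexpr(..., perl = TRUE). The actual counting is done by Internal.computeMotifs(),
# which is defined elsewhere and not shown in this file; the commented snippet below
# only demonstrates the idea behind the pattern format.
#
#   countMotif <- function(seq, pattern) {
#     hits <- gregexpr(pattern, seq, perl = TRUE)[[1]]
#     if (hits[1] == -1) 0L else length(hits)
#   }
#   countMotif("AUGUGUG", "(?=UG)")  # 3 overlapping matches at positions 2, 4, 6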
#' Extraction of the Motif Features of RNA and Protein Sequences
#' @description Basically a wrapper for \code{\link{computeMotifs}} function.
#' This function can count the motifs of RNA and protein sequences at the same time
#' and format the results as the dataset that can be used to build classifier.
#' @param seqRNA RNA sequences loaded by function \code{\link[seqinr]{read.fasta}} of package
#' "seqinr" (\code{\link[seqinr]{seqinr-package}}). Or a list of RNA sequences.
#' RNA sequences will be converted into lower case letters.
#' @param seqPro protein sequences loaded by function \code{\link[seqinr]{read.fasta}} of package
#' "seqinr" (\code{\link[seqinr]{seqinr-package}}). Or a list of protein sequences.
#' Protein sequences will be converted into upper case letters.
#' @param label optional. A string that indicates the class of the samples such as
#' "Interact", "Non.Interact". Default: \code{NULL}
#' @param featureMode a string that can be \code{"concatenate"} or \code{"combine"}.
#' If \code{"concatenate"}, the motif features of RNA and proteins will be simply concatenated.
#' If \code{"combine"}, the returned dataset will be formed by combining the motif features of RNA and proteins.
#' See details below. Default: \code{"concatenate"}.
#' @param newMotif.RNA list specifying the motifs that are counted in RNA sequences. Default: \code{NULL}.
#' For example, \code{newMotif = list(hnRNPA1 = c("UAGGGU", "UAGGGA"), SF1 = "UACUAAC")}.
#' Can be used with parameter \code{motifRNA} (see parameter \code{...}) to count various motifs.
#' @param newMotif.Pro list specifying the motifs that are counted in protein sequences. Default: \code{NULL}.
#' For example, \code{newMotif = list(YGG = "YGG", E = "E")}.
#' Can be used with parameter \code{motifPro} (see parameter \code{...}) to count various motifs.
#' @param newMotifOnly.RNA logical. If \code{TRUE}, only the new motifs defined in \code{newMotif.RNA} will be counted.
#' Default: \code{FALSE}.
#' @param newMotifOnly.Pro logical. If \code{TRUE}, only the new motifs defined in \code{newMotif.Pro} will be counted.
#' Default: \code{FALSE}.
#' @param parallel.cores an integer that indicates the number of cores for parallel computation. Default: \code{2}.
#' Set \code{parallel.cores = -1} to run with all the cores.
#' @param ... arguments (\code{motifRNA} and \code{motifPro}) passed to \code{\link{computeMotifs}}. See example below.
#'
#' @return This function returns a data frame. Row names are the sequences names, and column names are the motif names.
#' The names of RNA and protein sequences are seperated with ".",
#' i.e. the row names format: ""\emph{RNASequenceName}.\emph{proteinSequenceName}" (e.g. "YDL227C.YOR198C").
#' If \code{featureMode = "combine"}, the motif names of RNA and protein sequences are also seperated with ".",
#' i.e. the column format: "motif_\emph{RNAMotifName}.motif_\emph{proteinMotifName}" (e.g. "motif_PUM.motif_EE").
#'
#' @details
#' If \code{featureMode = "concatenate"}, \emph{m} RNA motif features will be simply
#' concatenated with \emph{n} protein motif features, and the final result has \emph{m} + \emph{n} features.
#' If \code{featureMode = "combine"}, \emph{m} RNA motif features will be
#' combined with \emph{n} protein motif features, resulting in \emph{m} * \emph{n} possible combinations.
#'
#' \code{...} can be used to pass the default motif patterns of RNA and protein sequences.
#' See details in \code{\link{computeMotifs}}.
#'
#' @section References:
#' [1] Han S, Liang Y, Li Y, \emph{et al}.
#' ncProR: an integrated R package for effective ncRNA-protein interaction prediction.
#' (\emph{Submitted})
#'
#' [2] Akbaripour-Elahabad M, Zahiri J, Rafeh R, \emph{et al}.
#' rpiCOOL: A tool for In Silico RNA-protein interaction detection using random forest.
#' J. Theor. Biol. 2016; 402:1-8
#'
#' [3] Pancaldi V, Bahler J.
#' In silico characterization and prediction of global protein-mRNA interactions in yeast.
#' Nucleic Acids Res. 2011; 39:5826-36
#'
#' [4] Castello A, Fischer B, Eichelbaum K, \emph{et al}.
#' Insights into RNA Biology from an Atlas of Mammalian mRNA-Binding Proteins.
#' Cell 2012; 149:1393-1406
#'
#' [5] Ray D, Kazan H, Cook KB, \emph{et al}.
#' A compendium of RNA-binding motifs for decoding gene regulation.
#' Nature 2013; 499:172-177
#'
#' [6] Jiang P, Singh M, Coller HA.
#' Computational assessment of the cooperativity between RNA binding proteins and MicroRNAs in Transcript Decay.
#' PLoS Comput. Biol. 2013; 9:e1003075
#'
#' @importFrom parallel makeCluster
#' @importFrom parallel parLapply
#' @importFrom parallel stopCluster
#' @importFrom parallel detectCores
#' @importFrom seqinr getSequence
#' @seealso \code{\link{computeMotifs}}
#' @examples
#' data(demoPositiveSeq)
#' seqsRNA <- demoPositiveSeq$RNA.positive
#' seqsPro <- demoPositiveSeq$Pro.positive
#'
#' dataset1 <- featureMotifs(seqRNA = seqsRNA, seqPro = seqsPro, featureMode = "conc",
#' newMotif.RNA = list(motif1 = c("cc", "cu")),
#' newMotif.Pro = list(motif2 = "KK"),
#' motifRNA = c("Fusip1", "AU", "UG"),
#' motifPro = c("E", "K", "HR_RH"))
#'
#' dataset2 <- featureMotifs(seqRNA = seqsRNA, seqPro = seqsPro, featureMode = "comb",
#' newMotif.RNA = list(motif1 = c("cc", "cu")),
#' newMotif.Pro = list(motif2 = c("R", "H")),
#' newMotifOnly.RNA = TRUE, newMotifOnly.Pro = FALSE)
#'
#' @export
featureMotifs <- function(seqRNA, seqPro, label = NULL, featureMode = c("concatenate", "combine"),
newMotif.RNA = NULL, newMotif.Pro = NULL, newMotifOnly.RNA = FALSE,
newMotifOnly.Pro = FALSE, parallel.cores = 2, ...) {
if (length(seqRNA) != length(seqPro)) stop("The number of RNA sequences should match the number of protein sequences!")
featureMode <- match.arg(featureMode)
featureRNA <- computeMotifs(seqs = seqRNA, seqType = "RNA", newMotif = newMotif.RNA, newMotifOnly = newMotifOnly.RNA, parallel.cores = parallel.cores, ...)
featurePro <- computeMotifs(seqs = seqPro, seqType = "Pro", newMotif = newMotif.Pro, newMotifOnly = newMotifOnly.Pro, parallel.cores = parallel.cores, ...)
sequenceName <- paste(row.names(featureRNA), row.names(featurePro), sep = ".")
if (featureMode == "combine") {
featureName <- sapply(names(featureRNA), function(nameRNA) {
names <- paste(nameRNA, names(featurePro), sep = ".")
})
featureName <- as.character(featureName)
featureValue <- mapply(Internal.combineMotifs, oneRNA = as.data.frame(t(featureRNA)),
onePro = as.data.frame(t(featurePro)))
features <- as.data.frame(t(featureValue), row.names = sequenceName)
names(features) <- featureName
} else {
features <- cbind(featureRNA, featurePro, row.names = sequenceName)
}
if (!is.null(label)) features <- data.frame(label = label, features)
features
}
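# Illustrative sketch, not part of the package: the dimensionality described in the
# @details section. With m = 3 RNA motif features and n = 4 protein motif features,
# "concatenate" keeps m + n = 7 columns while "combine" produces one column per
# RNA/protein pair, i.e. m * n = 12. How each pair's value is computed is up to
# Internal.combineMotifs(), which is not shown in this file.
#
#   rna <- c(PUM = 2, AU = 10, UG = 7)
#   pro <- c(E = 5, K = 3, EE = 1, KK = 0)
#   length(c(rna, pro))                 # concatenate: 7 features
#   length(as.vector(outer(rna, pro)))  # combine: 12 feature pairs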
|
/R/Motifs.R
|
no_license
|
Yan-Liao/ncProR
|
R
| false | false | 19,001 |
r
|
#' Counting the Number of Motifs in RNA or Protein Sequences
#' @description Counts the number of motifs occurring in RNA/protein sequences. Motifs employed by tool "rpiCOOL"
#' can be selected. New motifs can also be defined.
#'
#' @param seqs sequences loaded by function \code{\link[seqinr]{read.fasta}} of package
#' "seqinr" (\code{\link[seqinr]{seqinr-package}}). Or a list of RNA/protein sequences.
#' RNA sequences will be converted into lower case letters, but
#' protein sequences will be converted into upper case letters.
#' Each sequence should be a vector of single characters.
#' @param seqType a string that specifies the nature of the sequence: \code{"RNA"} or \code{"Pro"} (protein).
#' If the input is DNA sequence and \code{seqType = "RNA"}, the DNA sequence will be converted to RNA sequence automatically.
#' Default: \code{"RNA"}.
#' @param motifRNA strings specifying the motifs that are counted in RNA sequences. Ignored if \code{seqType = "Pro"}.
#' Options: \code{"rpiCOOL"}, \code{"selected5"},
#' \code{"Fox1"}, \code{"Nova"}, \code{"Slm2"}, \code{"Fusip1"}, \code{"PTB"}, \code{"ARE"}, \code{"hnRNPA1"},
#' \code{"PUM"}, \code{"U1A"}, \code{"HuD"}, \code{"QKI"}, \code{"U2B"}, \code{"SF1"}, \code{"HuR"}, \code{"YB1"},
#' \code{"AU"}, and \code{"UG"}. Multiple elements can be selected at the same time.
#' If \code{"rpiCOOL"}, all default motifs will be counted.
#' \code{"selected5"} indicates the the total number of the occurrences of: PUM, Fox-1, U1A, Nova, and ARE which
#' are regarded as the five most over-presented binding motifs. See details below.
#' @param motifPro strings specifying the motifs that are counted in protein sequences. Ignored if \code{seqType = "RNA"}.
#' Options: \code{"rpiCOOL"}, \code{"E"}, \code{"H"}, \code{"K"}, \code{"R"}, \code{"H_R"},
#' \code{"EE"}, \code{"KK"}, \code{"HR_RH"}, \code{"RS_SR"},
#' \code{"RGG"}, and \code{"YGG"}. Multiple elements can be selected at the same time.
#' \code{"H_R"} indicates the total number of the occurrences of: H and R.
#' \code{"HR_RH"} indicates the total number of the occurrences of: HR and RH.
#' \code{"RS_SR"} indicates the total number of the occurrences of: RS and SR.
#' If \code{"rpiCOOL"}, default motifs of rpiCOOL (\code{"E"}, \code{"K"}, \code{"H_R"},
#' \code{"EE"}, \code{"KK"}, \code{"RS_SR"}, \code{"RGG"}, and \code{"YGG"}) will be counted.
#' See details below.
#' @param newMotif list defining new motifs not listed above. New motifs are counted in RNA or protein sequences.
#' For example, \code{newMotif = list(hnRNPA1 = c("UAGGGU", "UAGGGA"), SF1 = "UACUAAC")}.
#' This parameter can be used together with parameter \code{motifRNA} or \code{motifPro} to count various motifs. Default: \code{NULL}.
#' @param newMotifOnly logical. If \code{TRUE}, only the new motifs defined in \code{newMotif} will be counted.
#' Default: \code{FALSE}.
#' @param parallel.cores an integer specifying the number of cores for parallel computation. Default: \code{2}.
#' Set \code{parallel.cores = -1} to run with all the cores.
#'
#' @return This function returns a data frame. Row names are the sequences names, and column names are the motif names.
#'
#' @details This function can count the motifs in RNA or protein sequences.
#'
#' The default motifs are selected or derived from tool "rpiCOOL" (Ref: [2]).
#'
#' \itemize{
#' \item Motifs of RNA
#'
#' \enumerate{
#' \item Fox1: UGCAUGU;
#' \item Nova: UCAUUUCAC, UCAUUUCAU, CCAUUUCAC, CCAUUUCAU;
#' \item Slm2: UAAAC, UAAAA, UAAUC, UAAUA;
#' \item Fusip1: AAAGA, AAAGG, AGAGA, AGAGG, CAAGA, CAAGG, CGAGA, CGAGG;
#' \item PTB: UUUUU, UUUCU, UCUUU, UCUCU;
#' \item ARE: UAUUUAUU;
#' \item hnRNPA1: UAGGGU, UAGGGA;
#' \item PUM: UGUAAAUA, UGUAGAUA, UGUAUAUA, UGUACAUA;
#' \item U1A: AUUGCAC;
#' \item HuD: UUAUUU;
#' \item QKI: AUUAAU, AUUAAC, ACUAAU, ACUAAC;
#' \item U2B: AUUGCAG;
#' \item SF1: UACUAAC;
#' \item HuR: UUUAUUU, UUUGUUU, UUUCUUU, UUUUUUU;
#' \item YB1: CCUGCG, UCUGCG;
#' \item AU: AU;
#' \item UG: UG.
#'
#' If \code{"rpiCOOL"}, all default motifs will be counted, and there is no need to input other default motifs.
#' \code{"selected5"} indicates the total number of the occurrences of: PUM, Fox-1, U1A, Nova, and ARE which
#' are regarded as the five most over-represented binding motifs.}
#'
#' \item Motifs of protein
#'
#' \enumerate{
#' \item E: E;
#' \item H: H;
#' \item K: K;
#' \item R: R;
#' \item EE: EE;
#' \item KK: KK;
#' \item HR (\code{"H_R"}): H, R;
#' \item HR (\code{"HR_RH"}): HR, RH;
#' \item RS (\code{"RS_SR"}): RS, SR;
#' \item RGG: RGG;
#' \item YGG: YGG.
#'
#' If \code{"rpiCOOL"}, default motifs of rpiCOOL (\code{"E"}, \code{"K"}, \code{"H_R"},
#' \code{"EE"}, \code{"KK"}, \code{"RS_SR"}, \code{"RGG"}, and \code{"YGG"}) will be counted.
#' } }
#'
#' There are some minor differences between this function and the extraction scheme of rpiCOOL.
#' In this function, motifs will be scanned directly.
#' As to the extraction scheme of rpiCOOL, some motifs (\code{"UG"}, \code{"AU"}, and \code{"H_R"})
#' are scanned in a 10 nt/aa sliding-window.
#'
#' @section References:
#' [1] Han S, Liang Y, Li Y, \emph{et al}.
#' ncProR: an integrated R package for effective ncRNA-protein interaction prediction.
#' (\emph{Submitted})
#'
#' [2] Akbaripour-Elahabad M, Zahiri J, Rafeh R, \emph{et al}.
#' rpiCOOL: A tool for In Silico RNA-protein interaction detection using random forest.
#' J. Theor. Biol. 2016; 402:1-8
#'
#' [3] Pancaldi V, Bahler J.
#' In silico characterization and prediction of global protein-mRNA interactions in yeast.
#' Nucleic Acids Res. 2011; 39:5826-36
#'
#' [4] Castello A, Fischer B, Eichelbaum K, \emph{et al}.
#' Insights into RNA Biology from an Atlas of Mammalian mRNA-Binding Proteins.
#' Cell 2012; 149:1393-1406
#'
#' [5] Ray D, Kazan H, Cook KB, \emph{et al}.
#' A compendium of RNA-binding motifs for decoding gene regulation.
#' Nature 2013; 499:172-177
#'
#' [6] Jiang P, Singh M, Coller HA.
#' Computational assessment of the cooperativity between RNA binding proteins and MicroRNAs in Transcript Decay.
#' PLoS Comput. Biol. 2013; 9:e1003075
#'
#' @importFrom parallel makeCluster
#' @importFrom parallel parLapply
#' @importFrom parallel stopCluster
#' @importFrom parallel detectCores
#' @importFrom seqinr getSequence
#' @seealso \code{\link{featureMotifs}}
#' @examples
#' data(demoPositiveSeq)
#' seqsRNA <- demoPositiveSeq$RNA.positive
#' seqsPro <- demoPositiveSeq$Pro.positive
#'
#' motifRNA1 <- computeMotifs(seqsRNA, seqType = "RNA", motifRNA = "rpiCOOL",
#' parallel.cores = 2)
#' motifRNA2 <- computeMotifs(seqsRNA, seqType = "RNA",
#' motifRNA = c("Fox1", "HuR", "ARE"), parallel.cores = 2)
#'
#' motifPro1 <- computeMotifs(seqsPro, seqType = "Pro",
#' motifPro = c("rpiCOOL", "HR_RH"), parallel.cores = 2)
#' motifPro2 <- computeMotifs(seqsPro, seqType = "Pro", motifPro = c("E", "K", "KK"),
#' newMotif = list(HR_RH = c("HR", "RH"), RGG = "RGG"),
#' parallel.cores = 2)
#' motifPro3 <- computeMotifs(seqsPro, seqType = "Pro",
#' newMotif = list(HR_RH = c("HR", "RH"), RGG = "RGG"),
#' newMotifOnly = TRUE, parallel.cores = 2)
#' @export
computeMotifs <- function(seqs, seqType = c("RNA", "Pro"),
motifRNA = c("rpiCOOL", "Fox1", "Nova", "Slm2", "Fusip1", "PTB", "ARE", "hnRNPA1", "PUM",
"U1A", "HuD", "QKI", "U2B", "SF1", "HuR", "YB1", "AU", "UG", "selected5"),
motifPro = c("rpiCOOL", "E", "H", "K", "R", "H_R", "EE", "KK", "HR_RH", "RS_SR",
"RGG", "YGG"),
newMotif = NULL, newMotifOnly = FALSE, parallel.cores = 2) {
message("+ Initializing... ", Sys.time())
seqType <- match.arg(seqType)
parallel.cores <- ifelse(parallel.cores == -1, parallel::detectCores(), parallel.cores)
cl <- parallel::makeCluster(parallel.cores)
if (seqType == "RNA") seqs <- parallel::parLapply(cl, seqs, Internal.checkRNA)
seqs <- sapply(seqs, seqinr::getSequence, as.string = TRUE)
motifPatterns <- NULL
if (!is.null(newMotif)) {
message("- Formatting the new defined motifs... ")
motifPatterns <- lapply(newMotif, function(x) {
motifExpr <- paste(x, collapse = ")|(?=")
motifExpr <- paste0("(?=", motifExpr, ")")
motifExpr
})
}
if (seqType == "RNA" & !newMotifOnly) {
candidateMotif <- unique(match.arg(motifRNA, several.ok = TRUE))
if ("rpiCOOL" %in% candidateMotif) {
candidateMotif <- c("Fox1", "Nova", "Slm2", "Fusip1", "PTB", "ARE", "hnRNPA1", "PUM", "U1A",
"HuD", "QKI", "U2B", "SF1", "HuR", "YB1", "AU", "UG", "selected5")
}
message("\n", "+ Processing the default motifs. ", Sys.time(), "\n", " ", paste(candidateMotif, collapse = ", "), "\n")
Fox1 <- "(?=UGCAUGU)"
Nova <- c("(?=UCAUUUCAC)|(?=UCAUUUCAU)|(?=CCAUUUCAC)|(?=CCAUUUCAU)")
Slm2 <- c("(?=UAAAC)|(?=UAAAA)|(?=UAAUC)|(?=UAAUA)")
Fusip1 <- c("(?=AAAGA)|(?=AAAGG)|(?=AGAGA)|(?=AGAGG)|(?=CAAGA)|(?=CAAGG)|(?=CGAGA)|(?=CGAGG)")
PTB <- c("(?=UUUUU)|(?=UUUCU)|(?=UCUUU)|(?=UCUCU)")
ARE <- "(?=UAUUUAUU)"
hnRNPA1 <- c("(?=UAGGGU)|(?=UAGGGA)")
PUM <- c("(?=UGUAAAUA)|(?=UGUAGAUA)|(?=UGUAUAUA)|(?=UGUACAUA)")
U1A <- "(?=AUUGCAC)"
HuD <- "(?=UUAUUU)"
QKI <- c("(?=AUUAAU)|(?=AUUAAC)|(?=ACUAAU)|(?=ACUAAC)")
U2B <- "(?=AUUGCAG)"
SF1 <- "(?=UACUAAC)"
HuR <- c("(?=UUUAUUU)|(?=UUUGUUU)|(?=UUUCUUU)|(?=UUUUUUU)")
YB1 <- c("(?=CCUGCG)|(?=UCUGCG)")
AU <- "(?=AU)"
UG <- "(?=UG)"
selected5 <- paste(Fox1, Nova, ARE, PUM, U1A, sep = "|")
Patterns <- mget(candidateMotif)
motifPatterns <- c(motifPatterns, Patterns)
}
if (seqType == "Pro" & !newMotifOnly) {
candidateMotif <- match.arg(motifPro, several.ok = TRUE)
if ("rpiCOOL" %in% candidateMotif) {
candidateMotif <- c(candidateMotif, "E", "K", "EE", "KK", "RS_SR", "RGG", "YGG", "H_R")
candidateMotif <- unique(candidateMotif)
candidateMotif <- candidateMotif[!candidateMotif %in% "rpiCOOL"]
}
message("\n", "+ Processing the default motifs. ", Sys.time(), "\n", " ", paste(candidateMotif, collapse = ", "), "\n")
E <- "E"
H <- "H"
K <- "K"
R <- "R"
H_R <- c("(?=H)|(?=R)")
EE <- "(?=EE)"
KK <- "(?=KK)"
HR_RH <- c("(?=HR)|(?=RH)")
RS_SR <- c("(?=RS)|(?=SR)")
RGG <- "(?=RGG)"
YGG <- "(?=YGG)"
Patterns <- mget(candidateMotif)
motifPatterns <- c(motifPatterns, Patterns)
}
motifCounts <- parallel::parLapply(cl, seqs, Internal.computeMotifs, motifs = motifPatterns)
parallel::stopCluster(cl)
motifCounts <- as.data.frame(t(data.frame(motifCounts, check.names = F)))
formatNames <- paste("motif_", names(motifCounts), sep = "")
names(motifCounts) <- formatNames
message("+ Completed. ", Sys.time(), "\n")
motifCounts
}
#' Extraction of the Motif Features of RNA and Protein Sequences
#' @description Basically a wrapper for \code{\link{computeMotifs}} function.
#' This function can count the motifs of RNA and protein sequences at the same time
#' and format the results as the dataset that can be used to build classifier.
#' @param seqRNA RNA sequences loaded by function \code{\link[seqinr]{read.fasta}} of package
#' "seqinr" (\code{\link[seqinr]{seqinr-package}}). Or a list of RNA sequences.
#' RNA sequences will be converted into lower case letters.
#' @param seqPro protein sequences loaded by function \code{\link[seqinr]{read.fasta}} of package
#' "seqinr" (\code{\link[seqinr]{seqinr-package}}). Or a list of protein sequences.
#' Protein sequences will be converted into upper case letters.
#' @param label optional. A string that indicates the class of the samples such as
#' "Interact", "Non.Interact". Default: \code{NULL}
#' @param featureMode a string that can be \code{"concatenate"} or \code{"combine"}.
#' If \code{"concatenate"}, the motif features of RNA and proteins will be simply concatenated.
#' If \code{"combine"}, the returned dataset will be formed by combining the motif features of RNA and proteins.
#' See details below. Default: \code{"concatenate"}.
#' @param newMotif.RNA list specifying the motifs that are counted in RNA sequences. Default: \code{NULL}.
#' For example, \code{newMotif = list(hnRNPA1 = c("UAGGGU", "UAGGGA"), SF1 = "UACUAAC")}.
#' Can be used with parameter \code{motifRNA} (see parameter \code{...}) to count various motifs.
#' @param newMotif.Pro list specifying the motifs that are counted in protein sequences. Default: \code{NULL}.
#' For example, \code{newMotif = list(YGG = "YGG", E = "E")}.
#' Can be used with parameter \code{motifPro} (see parameter \code{...}) to count various motifs.
#' @param newMotifOnly.RNA logical. If \code{TRUE}, only the new motifs defined in \code{newMotif.RNA} will be counted.
#' Default: \code{FALSE}.
#' @param newMotifOnly.Pro logical. If \code{TRUE}, only the new motifs defined in \code{newMotif.Pro} will be counted.
#' Default: \code{FALSE}.
#' @param parallel.cores an integer that indicates the number of cores for parallel computation. Default: \code{2}.
#' Set \code{parallel.cores = -1} to run with all the cores.
#' @param ... arguments (\code{motifRNA} and \code{motifPro}) passed to \code{\link{computeMotifs}}. See example below.
#'
#' @return This function returns a data frame. Row names are the sequences names, and column names are the motif names.
#' The names of RNA and protein sequences are seperated with ".",
#' i.e. the row names format: ""\emph{RNASequenceName}.\emph{proteinSequenceName}" (e.g. "YDL227C.YOR198C").
#' If \code{featureMode = "combine"}, the motif names of RNA and protein sequences are also seperated with ".",
#' i.e. the column format: "motif_\emph{RNAMotifName}.motif_\emph{proteinMotifName}" (e.g. "motif_PUM.motif_EE").
#'
#' @details
#' If \code{featureMode = "concatenate"}, \emph{m} RNA motif features will be simply
#' concatenated with \emph{n} protein motif features, and the final result has \emph{m} + \emph{n} features.
#' If \code{featureMode = "combine"}, \emph{m} RNA motif features will be
#' combined with \emph{n} protein motif features, resulting in \emph{m} * \emph{n} possible combinations.
#'
#' \code{...} can be used to pass the default motif patterns of RNA and protein sequences.
#' See details in \code{\link{computeMotifs}}.
#'
#' @section References:
#' [1] Han S, Liang Y, Li Y, \emph{et al}.
#' ncProR: an integrated R package for effective ncRNA-protein interaction prediction.
#' (\emph{Submitted})
#'
#' [2] Akbaripour-Elahabad M, Zahiri J, Rafeh R, \emph{et al}.
#' rpiCOOL: A tool for In Silico RNA-protein interaction detection using random forest.
#' J. Theor. Biol. 2016; 402:1-8
#'
#' [3] Pancaldi V, Bahler J.
#' In silico characterization and prediction of global protein-mRNA interactions in yeast.
#' Nucleic Acids Res. 2011; 39:5826-36
#'
#' [4] Castello A, Fischer B, Eichelbaum K, \emph{et al}.
#' Insights into RNA Biology from an Atlas of Mammalian mRNA-Binding Proteins.
#' Cell 2012; 149:1393-1406
#'
#' [5] Ray D, Kazan H, Cook KB, \emph{et al}.
#' A compendium of RNA-binding motifs for decoding gene regulation.
#' Nature 2013; 499:172-177
#'
#' [6] Jiang P, Singh M, Coller HA.
#' Computational assessment of the cooperativity between RNA binding proteins and MicroRNAs in Transcript Decay.
#' PLoS Comput. Biol. 2013; 9:e1003075
#'
#' @importFrom parallel makeCluster
#' @importFrom parallel parLapply
#' @importFrom parallel stopCluster
#' @importFrom parallel detectCores
#' @importFrom seqinr getSequence
#' @seealso \code{\link{computeMotifs}}
#' @examples
#' data(demoPositiveSeq)
#' seqsRNA <- demoPositiveSeq$RNA.positive
#' seqsPro <- demoPositiveSeq$Pro.positive
#'
#' dataset1 <- featureMotifs(seqRNA = seqsRNA, seqPro = seqsPro, featureMode = "conc",
#' newMotif.RNA = list(motif1 = c("cc", "cu")),
#' newMotif.Pro = list(motif2 = "KK"),
#' motifRNA = c("Fusip1", "AU", "UG"),
#' motifPro = c("E", "K", "HR_RH"))
#'
#' dataset2 <- featureMotifs(seqRNA = seqsRNA, seqPro = seqsPro, featureMode = "comb",
#' newMotif.RNA = list(motif1 = c("cc", "cu")),
#' newMotif.Pro = list(motif2 = c("R", "H")),
#' newMotifOnly.RNA = TRUE, newMotifOnly.Pro = FALSE)
#'
#' @export
featureMotifs <- function(seqRNA, seqPro, label = NULL, featureMode = c("concatenate", "combine"),
newMotif.RNA = NULL, newMotif.Pro = NULL, newMotifOnly.RNA = FALSE,
newMotifOnly.Pro = FALSE, parallel.cores = 2, ...) {
if (length(seqRNA) != length(seqPro)) stop("The number of RNA sequences should match the number of protein sequences!")
featureMode <- match.arg(featureMode)
featureRNA <- computeMotifs(seqs = seqRNA, seqType = "RNA", newMotif = newMotif.RNA, newMotifOnly = newMotifOnly.RNA, parallel.cores = parallel.cores, ...)
featurePro <- computeMotifs(seqs = seqPro, seqType = "Pro", newMotif = newMotif.Pro, newMotifOnly = newMotifOnly.Pro, parallel.cores = parallel.cores, ...)
sequenceName <- paste(row.names(featureRNA), row.names(featurePro), sep = ".")
if (featureMode == "combine") {
featureName <- sapply(names(featureRNA), function(nameRNA) {
names <- paste(nameRNA, names(featurePro), sep = ".")
})
featureName <- as.character(featureName)
featureValue <- mapply(Internal.combineMotifs, oneRNA = as.data.frame(t(featureRNA)),
onePro = as.data.frame(t(featurePro)))
features <- as.data.frame(t(featureValue), row.names = sequenceName)
names(features) <- featureName
} else {
features <- cbind(featureRNA, featurePro, row.names = sequenceName)
}
if (!is.null(label)) features <- data.frame(label = label, features)
features
}
|
library(dash)
library(dashHtmlComponents)
library(dashCoreComponents)
library(dashBio)
library(jsonlite)
model_data <- read_json("https://raw.githubusercontent.com/plotly/dash-bio-docs-files/master/mol2d_buckminsterfullerene.json")
app <- Dash$new()
app$layout(
htmlDiv(
list(
dashbioMolecule2dViewer(
id = 'my-dashbio-molecule2d',
modelData = model_data
),
htmlHr(),
htmlDiv(id = 'molecule2d-default-output')
)
)
)
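# Callback: whenever atoms are selected in the 2D viewer, echo their IDs into the
# output div below the horizontal rule.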
app$callback(
output(id = "molecule2d-default-output", property = "children"),
params = list(
input(id = "my-dashbio-molecule2d", property = "selectedAtomIds")
),
update_selected_atoms <- function(ids){
    if (length(ids) < 1 || is.null(ids[[1]])) {
return("No atom has been selected. Select atoms by clicking on them.")
}
else {
return(sprintf(" Selected atom ID: %s", as.character(paste(unlist(ids), collapse=' - '))))
}
}
)
app$run_server()
|
/dash_docs/chapters/dash_bio/molecule2dviewer/examples/defaultMolecule2dViewer.R
|
permissive
|
plotly/dash-docs
|
R
| false | false | 962 |
r
|
library(dash)
library(dashHtmlComponents)
library(dashCoreComponents)
library(dashBio)
library(jsonlite)
model_data <- read_json("https://raw.githubusercontent.com/plotly/dash-bio-docs-files/master/mol2d_buckminsterfullerene.json")
app <- Dash$new()
app$layout(
htmlDiv(
list(
dashbioMolecule2dViewer(
id = 'my-dashbio-molecule2d',
modelData = model_data
),
htmlHr(),
htmlDiv(id = 'molecule2d-default-output')
)
)
)
app$callback(
output(id = "molecule2d-default-output", property = "children"),
params = list(
input(id = "my-dashbio-molecule2d", property = "selectedAtomIds")
),
update_selected_atoms <- function(ids){
    if (length(ids) < 1 || is.null(ids[[1]])) {
return("No atom has been selected. Select atoms by clicking on them.")
}
else {
return(sprintf(" Selected atom ID: %s", as.character(paste(unlist(ids), collapse=' - '))))
}
}
)
app$run_server()
|
##Packages
if (!require(plyr, quietly=TRUE)) {
install.packages("plyr")
library(plyr)
}
if (!require(ggplot2, quietly=TRUE)) {
install.packages("ggplot2")
library(ggplot2)
}
##########
#AutoSave
##########
.AutoSave <- function(){
save(foodDB, foodLog, macroGoals, currentWeight, dailySummary, file = "MacroCount.RData")
}
shinyServer(function(input, output) {
########################################
#HELPER FUNCTIONS
########################################
prediction_db <- dailySummary
#######################
#Initialization Script
#######################
.initializeModule <- function(){
if (file.exists("MacroCount.RData")){
load("MacroCount.RData", envir = .GlobalEnv)
} else {
##
#foodLog
##
foodLog <<- data.frame(matrix(ncol = 6, nrow = 0), stringsAsFactors = FALSE)
names(foodLog) <<- c("Date", "Proteins", "Carbs", "Fats", "Calories", "Current Weight")
##
#macroGoals
##
macroGoals <<- data.frame(matrix(ncol = 2, nrow = 4), stringsAsFactors = FALSE)
names(macroGoals) <<- c("Macros", "Percentages")
rownames(macroGoals)[1] <- "Protein (g)"
rownames(macroGoals)[2] <- "Carbs (g)"
rownames(macroGoals)[3] <- "Fats (g)"
rownames(macroGoals)[4] <- "Calories"
macroGoals <<- macroGoals
##
#foodDB
##
foodDB <<- data.frame(matrix(ncol = 6, nrow = 0), stringsAsFactors = FALSE)
names(foodDB) <<- c("Name", "Quantity", "Proteins", "Carbs", "Fats", "Calories")
##
#dailySummary
##
dailySummary <<- data.frame(matrix(ncol = 2, nrow = 4), stringsAsFactors = FALSE)
names(dailySummary) <- c("Current", "Goal")
rownames(dailySummary)[1] <- "Protein (g)"
rownames(dailySummary)[2] <- "Carbs (g)"
rownames(dailySummary)[3] <- "Fats (g)"
rownames(dailySummary)[4] <- "Calories"
dailySummary <<- dailySummary
##
#Weight
##
currentWeight <<- NA
}
}
.initializeModule()
#######################
#Summarize Every Change
#######################
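  # .Summarize(): keep only today's entries of foodLog, total their protein/carb/
  # fat/calorie columns into dailySummary$Current, and copy the targets from
  # macroGoals into dailySummary$Goal.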
.Summarize <- function(){
todays_indices <- which(foodLog$Date==as.character(Sys.Date()))
foodLog <- foodLog[todays_indices,]
dailySummary[1,1] <<- sum(as.numeric(foodLog$Proteins))
dailySummary[2,1] <<- sum(as.numeric(foodLog$Carbs))
dailySummary[3,1] <<- sum(as.numeric(foodLog$Fats))
dailySummary[4,1] <<- sum(as.numeric(foodLog$Calories))
dailySummary[1,2] <<- macroGoals[1,1]
dailySummary[2,2] <<- macroGoals[2,1]
dailySummary[3,2] <<- macroGoals[3,1]
dailySummary[4,2] <<- macroGoals[4,1]
}
.Summarize()
#The 'Add Food' button
addData <- observeEvent(input$add_food_button, {
#ACTION 1 - Update the Food Log
todays_date <- as.character(Sys.Date())
package_portion <- input$package_portion
package_protein <- input$protein
package_carbs <- input$carbs
package_fats <- input$fats
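    # Calories use the standard 4 kcal/g for protein and carbohydrate and 9 kcal/g
    # for fat; the 1.2 here (and the 1.1 used for logged portions below) looks like
    # a deliberate safety margin on top of that formula,
    # e.g. 1.2 * (10*4 + 20*4 + 5*9) = 198.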
package_calories <- round(1.2*(package_protein*4 + package_carbs*4 + package_fats*9), digits =2)
portion_size <- input$actual_portion
portion_multiplier <- portion_size / package_portion
portion_protein <- round(portion_multiplier*package_protein, digits = 2)
portion_carbs <- round(portion_multiplier*package_carbs,digits = 2)
portion_fats <- round(portion_multiplier*package_fats,digits = 2)
total_cals <- round(1.1*(4*portion_protein + 4*portion_carbs + 9*portion_fats),digits =2)
entry_summary <- c(todays_date, portion_protein, portion_carbs, portion_fats, total_cals,currentWeight)
foodLog[nrow(foodLog)+1,] <<- entry_summary
.AutoSave()
#Action 2 - Update the Daily Summary
.Summarize()
output$dailysummary <- renderTable(dailySummary)
output$dailysummary_2 <- renderTable(dailySummary)
})
#The 'Add to Database' button
addData <- observeEvent(input$add_to_database_button, {
package_name <- input$name_db
package_portion <- input$package_portion_db
package_protein <- input$protein_db
package_carbs <- input$carbs_db
package_fats <- input$fats_db
package_calories <- round(1.2*(package_protein*4 + package_carbs*4 + package_fats*9), digits =2)
entry_summary <- c(package_name, package_portion, package_protein, package_carbs, package_fats,package_calories)
foodDB[nrow(foodDB)+1,] <<- entry_summary
.AutoSave()
output$foodDB<- renderTable(foodDB)
output$foodDB_2<- renderTable(foodDB)
})
#The "Predict" button
addData <- observeEvent(input$predict_button, {
portion_size <- input$portion_size
food_id <- input$row_id
db_portion <-as.numeric(foodDB[food_id,2])
pm <- portion_size / db_portion
db_protein <-round(as.numeric(foodDB[food_id,3]) * pm, digits = 2)
db_carbs <-round(as.numeric(foodDB[food_id,4]) * pm, digits = 2)
db_fats <-round(as.numeric(foodDB[food_id,5]) * pm,digits = 2)
db_cals <- round(1.1*(db_protein*4 + db_carbs*4 +db_fats*9) ,digits =2)
prediction_db[1,1]<- as.numeric(dailySummary[1,1]+db_protein)
prediction_db[2,1]<- as.numeric(dailySummary[2,1]+db_carbs)
prediction_db[3,1]<- as.numeric(dailySummary[3,1]+db_fats)
prediction_db[4,1]<- as.numeric(dailySummary[4,1]+db_cals)
output$prediction <- renderTable(prediction_db)
})
observeEvent(input$submit_button, {
portion_size <- input$portion_size
food_id <- input$row_id
db_portion <-as.numeric(foodDB[food_id,2])
pm <- portion_size / db_portion
db_protein <-round(as.numeric(foodDB[food_id,3]) * pm, digits = 2)
db_carbs <-round(as.numeric(foodDB[food_id,4]) * pm, digits = 2)
db_fats <-round(as.numeric(foodDB[food_id,5]) * pm,digits = 2)
    db_cals <- round(1.1*(db_protein*4 + db_carbs*4 + db_fats*9), digits = 2)
todays_date <- as.character(Sys.Date())
entry_summary <- c(todays_date,db_protein,db_carbs,db_fats,db_cals,currentWeight)
foodLog[nrow(foodLog)+1,] <<- entry_summary
.AutoSave()
output$foodLog <- renderTable(foodLog, include.rownames=F)
})
observeEvent(input$reset_button_2, {
.Summarize()
prediction_db[1,1] <<- dailySummary[1,1]
prediction_db[2,1] <<- dailySummary[2,1]
prediction_db[3,1] <<- dailySummary[3,1]
prediction_db[4,1] <<- dailySummary[4,1]
output$prediction <- renderTable(prediction_db)
})
observeEvent(input$reset_button, {
.Summarize()
output$dailysummary <- renderTable(dailySummary)
})
observeEvent(input$add_to_macros_button, {
protein_goal <- input$protein_goal
carb_goal <- input$carb_goal
fat_goal <- input$fat_goal
currentWeight <<- input$current_weight
macro_sum <- protein_goal + carb_goal + fat_goal
calories_goal <- round((protein_goal*4+carb_goal*4+fat_goal*9), digits = 2)
protein_percentage <- round((protein_goal / macro_sum)*100, digits =0)
carb_percentage <- round((carb_goal / macro_sum)*100, digits =0)
fat_percentage <- round((fat_goal / macro_sum)*100, digits =0)
macroGoals$Macros <<- c(protein_goal, carb_goal, fat_goal,calories_goal)
macroGoals$Percentages <<-c(protein_percentage,carb_percentage,fat_percentage, NA)
.AutoSave()
output$macrogoals <- renderTable(macroGoals)
})
output$dailysummary <- renderTable(dailySummary)
output$foodLog <- renderTable(foodLog, include.rownames=F)
output$foodDB<- renderTable(foodDB)
output$foodDB_2<- renderTable(foodDB)
output$prediction <- renderTable(prediction_db)
output$macrogoals <- renderTable(macroGoals)
#########################
#PLOTS
#################
output$macro_graph <- renderPlot({
#Get Current Month, Year
date <- Sys.Date()
date_formatted <- format(date, "%b, %y")
#Subset the Log
log_subset <- apply(foodLog[1],2, function(x) format(as.Date(x), "%b, %y"))
    filtered_indices <- which(log_subset %in% date_formatted)
    monthly_log <- foodLog[filtered_indices, ]
#Get the Weights early on
start_weight <- monthly_log[1,6]
end_weight <- monthly_log[nrow(monthly_log),6]
#Group by Days
monthly_log[1] <-apply(monthly_log[1],2, function(x) (as.numeric(as.Date(x))))
monthly_log[2] <- apply(monthly_log[2],2, function(x) (as.numeric((x))))
monthly_log[3] <- apply(monthly_log[3],2, function(x) (as.numeric((x))))
monthly_log[4] <- apply(monthly_log[4],2, function(x) (as.numeric((x))))
monthly_log[5] <- apply(monthly_log[5],2, function(x) (as.numeric((x))))
monthly_log[6] <- apply(monthly_log[6],2, function(x) (as.numeric((x))))
monthly_log <-ddply(monthly_log,"Date",numcolwise(sum))
#Get the Goals
goal_protein <- macroGoals[1,1]
goal_carbs <- macroGoals[2,1]
goal_fats <- macroGoals[3,1]
goal_cals <- macroGoals[4,1]
#Get Current averages
avg_protein <- round(mean(as.numeric(monthly_log[,2])),digits =2)
avg_carbs <- round(mean(as.numeric(monthly_log[,3])),digits =2)
avg_fats <-round(mean(as.numeric(monthly_log[,4])),digits =2)
avg_cals <-round(mean(as.numeric(monthly_log[,5])),digits =2)
gram_df <- data.frame(matrix(nrow = 3))
gram_df$macros <- c("protein", "carbs","fats")
gram_df[1]<-NULL
gram_df$grams <- c(avg_protein, avg_carbs,avg_fats)
gram_df$hline <- c(goal_protein,goal_carbs,goal_fats)
bp <- ggplot(gram_df, aes(x=macros, y=grams)) +
geom_bar(position=position_dodge(), stat="identity")
bp <- bp + geom_errorbar(width=0.7, aes(y=hline, ymax=hline, ymin=hline), colour="#AA0000")
bp <- bp+ geom_text(aes(x=macros, y=grams, label=grams,vjust=-0.5))
print(bp)
})
output$calorie_graph <- renderPlot({
#Get Current Month, Year
date <- Sys.Date()
date_formatted <- format(date, "%b, %y")
#Subset the Log
log_subset <- apply(foodLog[1],2, function(x) format(as.Date(x), "%b, %y"))
    filtered_indices <- which(log_subset %in% date_formatted)
    monthly_log <- foodLog[filtered_indices, ]
#Get the Weights early on
start_weight <- monthly_log[1,6]
end_weight <- monthly_log[nrow(monthly_log),6]
#Group by Days
monthly_log[1] <-apply(monthly_log[1],2, function(x) (as.numeric(as.Date(x))))
monthly_log[2] <- apply(monthly_log[2],2, function(x) (as.numeric((x))))
monthly_log[3] <- apply(monthly_log[3],2, function(x) (as.numeric((x))))
monthly_log[4] <- apply(monthly_log[4],2, function(x) (as.numeric((x))))
monthly_log[5] <- apply(monthly_log[5],2, function(x) (as.numeric((x))))
monthly_log[6] <- apply(monthly_log[6],2, function(x) (as.numeric((x))))
monthly_log <-ddply(monthly_log,"Date",numcolwise(sum))
#Get the Goals
goal_protein <- macroGoals[1,1]
goal_carbs <- macroGoals[2,1]
goal_fats <- macroGoals[3,1]
goal_cals <- macroGoals[4,1]
#Get Current averages
avg_protein <- round(mean(as.numeric(monthly_log[,2])),digits =2)
avg_carbs <- round(mean(as.numeric(monthly_log[,3])),digits =2)
avg_fats <-round(mean(as.numeric(monthly_log[,4])),digits =2)
avg_cals <-round(mean(as.numeric(monthly_log[,5])),digits =2)
cal_df <-data.frame(matrix(nrow = 1))
cal_df$calories <- c("average")
cal_df[1]<-NULL
cal_df$kcal <- c(avg_cals)
cal_df$hline <- c(goal_cals)
bp2 <- ggplot(cal_df, aes(x=calories, y=kcal)) +
geom_bar(position=position_dodge(), stat="identity", width =.5)
bp2 <- bp2 + geom_errorbar(width=0.3, aes(y=hline, ymax=hline, ymin=hline), colour="#AA0000")
bp2 <- bp2 + geom_text(aes(x=calories, y=kcal, label=kcal,vjust=-0.5))
print(bp2)
})
})
|
/macroCount/server.R
|
no_license
|
biodim/macroCount_shiny
|
R
| false | false | 12,071 |
r
|
##Packages
if (!require(plyr, quietly=TRUE)) {
install.packages("plyr")
library(plyr)
}
if (!require(ggplot2, quietly=TRUE)) {
install.packages("ggplot2")
library(ggplot2)
}
##########
#AutoSave
##########
.AutoSave <- function(){
save(foodDB, foodLog, macroGoals, currentWeight, dailySummary, file = "MacroCount.RData")
}
shinyServer(function(input, output) {
########################################
#HELPER FUNCTIONS
########################################
prediction_db <- dailySummary
#######################
#Initialization Script
#######################
.initializeModule <- function(){
if (file.exists("MacroCount.RData")){
load("MacroCount.RData", envir = .GlobalEnv)
} else {
##
#foodLog
##
foodLog <<- data.frame(matrix(ncol = 6, nrow = 0), stringsAsFactors = FALSE)
names(foodLog) <<- c("Date", "Proteins", "Carbs", "Fats", "Calories", "Current Weight")
##
#macroGoals
##
macroGoals <<- data.frame(matrix(ncol = 2, nrow = 4), stringsAsFactors = FALSE)
names(macroGoals) <<- c("Macros", "Percentages")
rownames(macroGoals)[1] <- "Protein (g)"
rownames(macroGoals)[2] <- "Carbs (g)"
rownames(macroGoals)[3] <- "Fats (g)"
rownames(macroGoals)[4] <- "Calories"
macroGoals <<- macroGoals
##
#foodDB
##
foodDB <<- data.frame(matrix(ncol = 6, nrow = 0), stringsAsFactors = FALSE)
names(foodDB) <<- c("Name", "Quantity", "Proteins", "Carbs", "Fats", "Calories")
##
#dailySummary
##
dailySummary <<- data.frame(matrix(ncol = 2, nrow = 4), stringsAsFactors = FALSE)
names(dailySummary) <- c("Current", "Goal")
rownames(dailySummary)[1] <- "Protein (g)"
rownames(dailySummary)[2] <- "Carbs (g)"
rownames(dailySummary)[3] <- "Fats (g)"
rownames(dailySummary)[4] <- "Calories"
dailySummary <<- dailySummary
##
#Weight
##
currentWeight <<- NA
}
}
.initializeModule()
#######################
#Summarize Every Change
#######################
.Summarize <- function(){
todays_indices <- which(foodLog$Date==as.character(Sys.Date()))
foodLog <- foodLog[todays_indices,]
dailySummary[1,1] <<- sum(as.numeric(foodLog$Proteins))
dailySummary[2,1] <<- sum(as.numeric(foodLog$Carbs))
dailySummary[3,1] <<- sum(as.numeric(foodLog$Fats))
dailySummary[4,1] <<- sum(as.numeric(foodLog$Calories))
dailySummary[1,2] <<- macroGoals[1,1]
dailySummary[2,2] <<- macroGoals[2,1]
dailySummary[3,2] <<- macroGoals[3,1]
dailySummary[4,2] <<- macroGoals[4,1]
}
.Summarize()
#The 'Add Food' button
addData <- observeEvent(input$add_food_button, {
#ACTION 1 - Update the Food Log
todays_date <- as.character(Sys.Date())
package_portion <- input$package_portion
package_protein <- input$protein
package_carbs <- input$carbs
package_fats <- input$fats
package_calories <- round(1.2*(package_protein*4 + package_carbs*4 + package_fats*9), digits =2)
portion_size <- input$actual_portion
portion_multiplier <- portion_size / package_portion
portion_protein <- round(portion_multiplier*package_protein, digits = 2)
portion_carbs <- round(portion_multiplier*package_carbs,digits = 2)
portion_fats <- round(portion_multiplier*package_fats,digits = 2)
total_cals <- round(1.1*(4*portion_protein + 4*portion_carbs + 9*portion_fats),digits =2)
entry_summary <- c(todays_date, portion_protein, portion_carbs, portion_fats, total_cals,currentWeight)
foodLog[nrow(foodLog)+1,] <<- entry_summary
.AutoSave()
#Action 2 - Update the Daily Summary
.Summarize()
output$dailysummary <- renderTable(dailySummary)
output$dailysummary_2 <- renderTable(dailySummary)
})
#The 'Add to Database' button
addData <- observeEvent(input$add_to_database_button, {
package_name <- input$name_db
package_portion <- input$package_portion_db
package_protein <- input$protein_db
package_carbs <- input$carbs_db
package_fats <- input$fats_db
package_calories <- round(1.2*(package_protein*4 + package_carbs*4 + package_fats*9), digits =2)
entry_summary <- c(package_name, package_portion, package_protein, package_carbs, package_fats,package_calories)
foodDB[nrow(foodDB)+1,] <<- entry_summary
.AutoSave()
output$foodDB<- renderTable(foodDB)
output$foodDB_2<- renderTable(foodDB)
})
#The "Predict" button
addData <- observeEvent(input$predict_button, {
portion_size <- input$portion_size
food_id <- input$row_id
db_portion <-as.numeric(foodDB[food_id,2])
pm <- portion_size / db_portion
db_protein <-round(as.numeric(foodDB[food_id,3]) * pm, digits = 2)
db_carbs <-round(as.numeric(foodDB[food_id,4]) * pm, digits = 2)
db_fats <-round(as.numeric(foodDB[food_id,5]) * pm,digits = 2)
db_cals <- round(1.1*(db_protein*4 + db_carbs*4 +db_fats*9) ,digits =2)
prediction_db[1,1]<- as.numeric(dailySummary[1,1]+db_protein)
prediction_db[2,1]<- as.numeric(dailySummary[2,1]+db_carbs)
prediction_db[3,1]<- as.numeric(dailySummary[3,1]+db_fats)
prediction_db[4,1]<- as.numeric(dailySummary[4,1]+db_cals)
output$prediction <- renderTable(prediction_db)
})
observeEvent(input$submit_button, {
portion_size <- input$portion_size
food_id <- input$row_id
db_portion <-as.numeric(foodDB[food_id,2])
pm <- portion_size / db_portion
db_protein <-round(as.numeric(foodDB[food_id,3]) * pm, digits = 2)
db_carbs <-round(as.numeric(foodDB[food_id,4]) * pm, digits = 2)
db_fats <-round(as.numeric(foodDB[food_id,5]) * pm,digits = 2)
    db_cals <- round(1.1*(db_protein*4 + db_carbs*4 + db_fats*9), digits = 2)
todays_date <- as.character(Sys.Date())
entry_summary <- c(todays_date,db_protein,db_carbs,db_fats,db_cals,currentWeight)
foodLog[nrow(foodLog)+1,] <<- entry_summary
.AutoSave()
output$foodLog <- renderTable(foodLog, include.rownames=F)
})
observeEvent(input$reset_button_2, {
.Summarize()
prediction_db[1,1] <<- dailySummary[1,1]
prediction_db[2,1] <<- dailySummary[2,1]
prediction_db[3,1] <<- dailySummary[3,1]
prediction_db[4,1] <<- dailySummary[4,1]
output$prediction <- renderTable(prediction_db)
})
observeEvent(input$reset_button, {
.Summarize()
output$dailysummary <- renderTable(dailySummary)
})
observeEvent(input$add_to_macros_button, {
protein_goal <- input$protein_goal
carb_goal <- input$carb_goal
fat_goal <- input$fat_goal
currentWeight <<- input$current_weight
macro_sum <- protein_goal + carb_goal + fat_goal
calories_goal <- round((protein_goal*4+carb_goal*4+fat_goal*9), digits = 2)
protein_percentage <- round((protein_goal / macro_sum)*100, digits =0)
carb_percentage <- round((carb_goal / macro_sum)*100, digits =0)
fat_percentage <- round((fat_goal / macro_sum)*100, digits =0)
macroGoals$Macros <<- c(protein_goal, carb_goal, fat_goal,calories_goal)
macroGoals$Percentages <<-c(protein_percentage,carb_percentage,fat_percentage, NA)
.AutoSave()
output$macrogoals <- renderTable(macroGoals)
})
output$dailysummary <- renderTable(dailySummary)
output$foodLog <- renderTable(foodLog, include.rownames=F)
output$foodDB<- renderTable(foodDB)
output$foodDB_2<- renderTable(foodDB)
output$prediction <- renderTable(prediction_db)
output$macrogoals <- renderTable(macroGoals)
#########################
#PLOTS
#################
output$macro_graph <- renderPlot({
#Get Current Month, Year
date <- Sys.Date()
date_formatted <- format(date, "%b, %y")
#Subset the Log
log_subset <- apply(foodLog[1],2, function(x) format(as.Date(x), "%b, %y"))
filtered_indixes <- which(log_subset %in% date_formatted)
monthly_log <- (foodLog[filtered_indixes,])
#Get the Weights early on
start_weight <- monthly_log[1,6]
end_weight <- monthly_log[nrow(monthly_log),6]
#Group by Days
monthly_log[1] <-apply(monthly_log[1],2, function(x) (as.numeric(as.Date(x))))
monthly_log[2] <- apply(monthly_log[2],2, function(x) (as.numeric((x))))
monthly_log[3] <- apply(monthly_log[3],2, function(x) (as.numeric((x))))
monthly_log[4] <- apply(monthly_log[4],2, function(x) (as.numeric((x))))
monthly_log[5] <- apply(monthly_log[5],2, function(x) (as.numeric((x))))
monthly_log[6] <- apply(monthly_log[6],2, function(x) (as.numeric((x))))
monthly_log <-ddply(monthly_log,"Date",numcolwise(sum))
#Get the Goals
goal_protein <- macroGoals[1,1]
goal_carbs <- macroGoals[2,1]
goal_fats <- macroGoals[3,1]
goal_cals <- macroGoals[4,1]
#Get Current averages
avg_protein <- round(mean(as.numeric(monthly_log[,2])),digits =2)
avg_carbs <- round(mean(as.numeric(monthly_log[,3])),digits =2)
avg_fats <-round(mean(as.numeric(monthly_log[,4])),digits =2)
avg_cals <-round(mean(as.numeric(monthly_log[,5])),digits =2)
gram_df <- data.frame(matrix(nrow = 3))
gram_df$macros <- c("protein", "carbs","fats")
gram_df[1]<-NULL
gram_df$grams <- c(avg_protein, avg_carbs,avg_fats)
gram_df$hline <- c(goal_protein,goal_carbs,goal_fats)
bp <- ggplot(gram_df, aes(x=macros, y=grams)) +
geom_bar(position=position_dodge(), stat="identity")
bp <- bp + geom_errorbar(width=0.7, aes(y=hline, ymax=hline, ymin=hline), colour="#AA0000")
bp <- bp+ geom_text(aes(x=macros, y=grams, label=grams,vjust=-0.5))
print(bp)
})
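  # The monthly plot above collapses several log entries per day into daily
  # totals with plyr's ddply() + numcolwise(sum). A self-contained toy
  # illustration (toy_log is made up and not used by the app):
  # toy_log <- data.frame(Date = c(1, 1, 2),
  #                       Proteins = c(20, 30, 25),
  #                       Carbs = c(40, 10, 50))
  # ddply(toy_log, "Date", numcolwise(sum))
  # #   Date Proteins Carbs
  # # 1    1       50    50
  # # 2    2       25    50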
output$calorie_graph <- renderPlot({
#Get Current Month, Year
date <- Sys.Date()
date_formatted <- format(date, "%b, %y")
#Subset the Log
log_subset <- apply(foodLog[1],2, function(x) format(as.Date(x), "%b, %y"))
filtered_indixes <- which(log_subset %in% date_formatted)
monthly_log <- (foodLog[filtered_indixes,])
#Get the Weights early on
start_weight <- monthly_log[1,6]
end_weight <- monthly_log[nrow(monthly_log),6]
#Group by Days
monthly_log[1] <-apply(monthly_log[1],2, function(x) (as.numeric(as.Date(x))))
monthly_log[2] <- apply(monthly_log[2],2, function(x) (as.numeric((x))))
monthly_log[3] <- apply(monthly_log[3],2, function(x) (as.numeric((x))))
monthly_log[4] <- apply(monthly_log[4],2, function(x) (as.numeric((x))))
monthly_log[5] <- apply(monthly_log[5],2, function(x) (as.numeric((x))))
monthly_log[6] <- apply(monthly_log[6],2, function(x) (as.numeric((x))))
monthly_log <-ddply(monthly_log,"Date",numcolwise(sum))
#Get the Goals
goal_protein <- macroGoals[1,1]
goal_carbs <- macroGoals[2,1]
goal_fats <- macroGoals[3,1]
goal_cals <- macroGoals[4,1]
#Get Current averages
avg_protein <- round(mean(as.numeric(monthly_log[,2])),digits =2)
avg_carbs <- round(mean(as.numeric(monthly_log[,3])),digits =2)
avg_fats <-round(mean(as.numeric(monthly_log[,4])),digits =2)
avg_cals <-round(mean(as.numeric(monthly_log[,5])),digits =2)
cal_df <-data.frame(matrix(nrow = 1))
cal_df$calories <- c("average")
cal_df[1]<-NULL
cal_df$kcal <- c(avg_cals)
cal_df$hline <- c(goal_cals)
bp2 <- ggplot(cal_df, aes(x=calories, y=kcal)) +
geom_bar(position=position_dodge(), stat="identity", width =.5)
bp2 <- bp2 + geom_errorbar(width=0.3, aes(y=hline, ymax=hline, ymin=hline), colour="#AA0000")
bp2 <- bp2 + geom_text(aes(x=calories, y=kcal, label=kcal,vjust=-0.5))
print(bp2)
})
})
|
/rzão_do_mozão.R
|
no_license
|
Adams123/fire-model
|
R
| false | false | 1,189 |
r
| ||
#VAR model: https://youtu.be/AhSENS5Ka1U
#the VAR model starts from an atheoretical approach:
#it separates the past effects that explain the vector of endogenous
#variables through their own past, i.e. through autoregressive terms
#the simplest case is the bivariate VAR, but it can have up to 12 orders (lags)
library(tidyverse)
library(lubridate)
library(car)
library(urca)
library(tseries)
library(astsa)
library(forecast)
library(foreign)
library(timsac)
library(vars)
library(lmtest)
library(mFilter)
library(dynlm)
library(nlme)
library(quantmod)
library(xts)
#read_csv() comes from readr (loaded with tidyverse), so the data is read after the libraries
MEXUSA <- read_csv("MEXUSA.csv")
attach(MEXUSA)
names(MEXUSA)
#convert to time series
mex<-ts(MEXUSA[,1],start = c(1994,1),frequency = 4)
usa<-ts(MEXUSA[,2],start = c(1994,1),frequency = 4)
mex
usa
ndiffs(mex)
ndiffs(usa)#suggests we need to difference once
ts.plot(mex,usa,col=c("blue","red"))
#difference both series once
mex1<-diff(mex)
usa1<-diff(usa)
ts.plot(mex1,usa1,col=c("blue","red"))
#Granger causality tests
#H0: US GDP does not Granger-cause Mexican GDP (p-value > 0.05)
#Ha: US GDP does Granger-cause Mexican GDP (p-value < 0.05)
grangertest(mex1~usa1,order=2)
#Ha is accepted with two lags
#H0: Mexican GDP does not Granger-cause US GDP (p-value > 0.05)
#Ha: Mexican GDP does Granger-cause US GDP (p-value < 0.05)
grangertest(usa1~mex1,order=1)
#Ha is accepted at every lag from 1 to 12
evar<-cbind(mex1,usa1)
print(evar)
#check the order of our VAR model
VARselect(evar,lag.max = 12)
var1<-VAR(evar,p=4)
var1#reports the coefficients of both the first and the second equation
#specification tests
summary(var1)#if all the roots reported by the model are
#less than 1, the stability condition is satisfied
#and 4 is indeed the correct number of lags
#we can plot it as well
plot(var1)
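#A quick sanity check of grangertest() on simulated data (illustrative only,
#not part of the MEXUSA exercise): y_sim depends on the first lag of x_sim,
#so x_sim should Granger-cause y_sim but not the other way around.
set.seed(1)
x_full <- rnorm(201)
y_sim <- 0.8*head(x_full,-1) + rnorm(200) #y_t depends on x_{t-1}
x_sim <- tail(x_full,200)                 #align the two series
grangertest(y_sim~x_sim,order=1) #expect a small p-value
grangertest(x_sim~y_sim,order=1) #expect a large p-value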
# example with the exchange rate (TC) and the Mexican stock exchange index (BMV)
TCBMV <- read_csv("~/Library/Mobile Documents/com~apple~CloudDocs/R/TCBMV.csv")
attach(TCBMV)
names(TCBMV)
TCBMV
#convert this data frame to time series
#start with the exchange rate, which we call tms2
tms2<-ts(TCBMV[,1],start=c(1993,1),freq=12)
#then the BMV index, which we call tp
tp<-ts(TCBMV[,2],start=c(1993,1),freq=12)
#take logarithms
ltm2<-log(tms2)
ltp<-log(tp)
#compute how many times a variable must be differenced to become stationary
ndiffs(ltm2)#the result is 1, so we difference once
ndiffs(ltp)
#both series must use the same number of differences, otherwise the data set becomes unbalanced
#plot them
ts.plot(ltp,ltm2,col=c("blue","red"))
#difference both the exchange rate and the BMV index once
dltp<-diff(ltp)
dltm2<-diff(ltm2)
#plot again
ts.plot(dltp,dltm2,col=c("blue","red"))
#Granger causality tests to see which variable affects which;
#this is the first stage of a VAR analysis, causality
grangertest(dltp~dltm2,order=1)
#H0: the exchange rate (dltm2) does not Granger-cause the BMV index (dltp)
#Ha: the exchange rate (dltm2) does Granger-cause the BMV index (dltp)
#the p-value is 0.001364, so we reject H0
#now test the other direction
grangertest(dltm2~dltp,order=1)
#H0: the BMV index (dltp) does not Granger-cause the exchange rate (dltm2)
#Ha: the BMV index (dltp) does Granger-cause the exchange rate (dltm2)
#the p-value is 6.595e-05 ***, very small, so
#we reject the null hypothesis
#the test should be repeated for up to twelve lags
#create objects with the first differences
vardltm2<-ts(dltm2,start = 1993,freq=12)
vardltp<-ts(dltp,start = 1993,freq=12)
ejvar<-cbind(vardltm2,vardltp)
#ejvar binds the first differences of ltm2 and ltp
ejvar
#run VARselect to find the order of our model
VARselect(ejvar,lag.max = 12)
#the result says the order is 1, so we use it
var1<-VAR(ejvar,p=1)
var1#this gives the coefficients of the first and second equation plus the constant
#specification tests for the VAR model
summary(var1)
#look at the roots of the polynomial: if they are all less than one
#the stability condition is satisfied.
#In this exercise they are, 0.2184 and 0.2061
#now plot to inspect the ACF and PACF
plot(var1)
#run the serial autocorrelation test
seriala<-serial.test(var1,lags.pt = 1, type = "PT.asymptotic")
#print it and check the p-value
seriala$serial
#H0: the residuals are not correlated (p-value > 0.05, do not reject H0)
#Ha: the residuals are correlated (p-value < 0.05, reject H0)
#the p-value is 2.2e-16, below 0.05, so we reject H0:
#the residuals show serial correlation; this completes the serial correlation test
#now apply the normality test to the residuals
normalidad<-normality.test(var1)
normalidad$jb.mul
#look at the kurtosis and the skewness
#H0: the residuals are normally distributed (p-value > 0.05, do not reject H0)
#Ha: the residuals are not normally distributed (p-value < 0.05, reject H0)
# since the result gives a very small p-value, we conclude
# that the errors are not normally distributed
#so far 3 tests: stability, serial autocorrelation and normality of the residuals
#next comes the homoskedasticity test on the residual variance
#we check whether the variance of the residuals is constant or not
arch1<-arch.test(var1, lags.multi = 1)
arch1$arch.mul
#H0: the variance of the residuals is constant
#Ha: the variance of the residuals is not constant
#the result gives a p-value of 1.423e-09, from which
#we conclude that the variance of the residuals is not constant
#now run the next analysis:
#impulse response of the BMV (dltp) to an innovation in the exchange rate (dltm2)
var1_irflp=irf(var1,response = "vardltp",n.ahead = 8,boot = TRUE)
var1_irflp
#this tells us how the BMV responds to an impulse in the exchange rate,
#with confidence bands at the 95% level. Plot it
plot(var1_irflp)
#two plots; analyse them
#now decompose the variance for an innovation in the BMV (dltp)
VAR1_DESVAR_bmv<-fevd(var1,n.ahead = 50)$vardltp
VAR1_DESVAR_bmv
#now decompose the variance for an innovation in the exchange rate (dltm2)
VAR1_DESVAR_tc<-fevd(var1,n.ahead = 50)$vardltm2
VAR1_DESVAR_tc
#source: https://youtu.be/bVyIrow238o
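#A possible complement (sketch only, not executed here): the vars package also
#provides causality(), which runs a joint Granger and instantaneous causality
#test directly on the fitted VAR object estimated above.
#causality(var1, cause = "vardltm2") #does the exchange rate cause the BMV index?
#causality(var1, cause = "vardltp")  #does the BMV index cause the exchange rate?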
|
/VAR_1.R
|
no_license
|
xenofonte35/Graphs
|
R
| false | false | 6,179 |
r
|
### This is a script for cleaning data for the course project
### Of the Coursera's "Getting and Cleaning Data" course
###
### It assumes the source data is downloaded and unzipped as the
### "UCI HAR Dataset" directory next to the script file
###
### Results are saved into "tidy" directory (created if does not exist)
library(reshape2)
# helper function to add path prefix and extension to a raw file name (or path portion)
rawFile <- function(name) {
paste("UCI HAR Dataset/", name, ".txt", sep = "")
}
# load (and glue together) train or test data, the setName parameter specifies
# the subdirectory (either 'train' or 'test')
loadRawDataset <- function(setName) {
# helper to resolve raw file path depending on set
rawSetFile <- function(name) {
# Construct file name like test/X_test for set 'test' and name 'X'
rawFile(paste(setName, "/", name, "_", setName, sep = ""))
}
x <- read.table(rawSetFile("X"), comment.char = "", colClasses = "numeric")
y <- read.table(rawSetFile("y"), col.names = "ActivityID")
s <- read.table(rawSetFile("subject"), col.names = "Subject")
cbind(s, y, x)
}
# This function integrates all the tidying steps into a single workflow
extractTidyData <- function() {
# load the common data (lookup tables)
activities <- read.table(rawFile("activity_labels"),
col.names = c("ActivityID", "Activity"))
features <- read.table(rawFile("features"),
col.names = c("FeatureID", "Feature"),
stringsAsFactors = FALSE)
# Step 1 - load and merge training and test data sets
d <- rbind(loadRawDataset("train"), loadRawDataset("test"))
# Step 2 - extract means and std. dev. on each measurement
matchingFeatures <- grepl("-mean()", features[,2], fixed = TRUE) |
grepl("-std()", features[,2], fixed = TRUE)
featureIndices <- features[matchingFeatures, "FeatureID"]
# sanitize feature names to avoid unsafe characters
features$Feature <- gsub("BodyBody", "Body",
gsub("\\.+$", "",
gsub("[^A-Za-z0-9]+", ".", features$Feature)))
# save feature names for step 4
featureNames = features[matchingFeatures, "Feature"]
# shift by 2 since we glued columns for subject/activity ids
featureIndices <- featureIndices + 2
# subset required features and subject/activity ids
ds <- d[,c(1, 2, featureIndices)]
# Step 3 - use descriptive activity names to name the activities
ds <- merge(activities, ds, by = "ActivityID",
all.x = FALSE, all.y = TRUE)
# filter out ActivityId and rearrange columns
ds <- ds[2:ncol(ds)]
# Step 4 - appropriately labels the data set with descriptive variable names
colnames(ds) <- c("Activity", "Subject", featureNames)
# write out step 4 result to a file
if (!file.exists("tidy")) {
dir.create("tidy")
}
write.table(ds, "tidy/step4.txt", row.name = FALSE)
# Step 5 - create independent data set with averages of each variable for
# each activity and subject
# use melt + cast to reshape ds intp aggregated form
melted <- melt(ds, id = c ("Activity", "Subject"))
ds2 <- dcast(melted, Activity+Subject~variable, mean, na.rm = TRUE)
# write out step 5 result to a file
write.table(ds2, "tidy/step5.txt", row.name = FALSE)
ds2
}
# extractTidyData()
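# A toy illustration (data made up, not from the UCI HAR data set) of the
# melt + dcast reshaping used in step 5 above: long format first, then one
# mean per Activity/Subject/variable cell.
# toy <- data.frame(Activity = c("WALKING", "WALKING", "SITTING"),
#                   Subject = c(1, 1, 1),
#                   tBodyAcc.mean.X = c(0.2, 0.4, 0.1))
# toy_long <- melt(toy, id = c("Activity", "Subject"))
# dcast(toy_long, Activity + Subject ~ variable, mean)
# #   Activity Subject tBodyAcc.mean.X
# # 1  SITTING       1             0.1
# # 2  WALKING       1             0.3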
|
/run_analysis.R
|
no_license
|
vokhotnikov/getclean-course-project
|
R
| false | false | 3,521 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/AvgRank/stomach.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.75,family="gaussian",standardize=FALSE)
sink('./Model/EN/AvgRank/stomach/stomach_078.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
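# Typical follow-up inspection of a cv.glmnet fit such as the one above
# (sketch only; nothing here is written to the sink file):
# glm$lambda.min              # lambda with the minimum cross-validated MSE
# glm$lambda.1se              # largest lambda within 1 SE of that minimum
# coef(glm, s = "lambda.min") # sparse coefficient vector at lambda.min
# plot(glm)                   # cross-validation curve over the lambda path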
|
/Model/EN/AvgRank/stomach/stomach_078.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 356 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculateNumberIndividualsperStage.R
\name{calculateNumberIndivperStage}
\alias{calculateNumberIndivperStage}
\title{Create Vector of the Number of Individuals per Stage}
\usage{
calculateNumberIndivperStage(TMdata, stages)
}
\arguments{
\item{TMdata}{Data used to create the transition matrix.}
\item{stages}{vector of stage classes.}
}
\description{
Create Vector of the Number of Individuals per Stage
}
|
/man/calculateNumberIndivperStage.Rd
|
no_license
|
ksauby/GTMNERRproc
|
R
| false | true | 487 |
rd
|
library(data.table)
library(ggplot2)
library(reshape2)
data=data.frame(read.table('chisquared.csv',header=T,sep='\t'))
mdf<- melt(data, id="Condition") # convert to long format
p=ggplot(data=mdf,
aes(x=Condition, y=value, col=variable,group=variable)) +
geom_point(size=5)+
geom_line()+
theme_bw(20)+
xlab("Medical Condition")+
ylab("Obs.-Exp. Std. Res.")+
theme(axis.text.x = element_text(angle = 90, hjust = 1))
#scale_color_manual(values=c("#FF0000", "#66FF33", "#0000FF","#CC9900","#CC9900","#BDA0CB"))
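#When this script is source()d rather than run interactively, the ggplot object
#is not auto-printed; a hedged sketch of how it would typically be displayed and
#saved (output file name is illustrative):
#print(p)
#ggsave("chisquared_residuals.png", p, width = 8, height = 6)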
|
/manuscript/supfig8/plot_chi.R
|
no_license
|
AshleyLab/myheartcounts
|
R
| false | false | 533 |
r
|
\name{gSource}
\alias{gSource}
\title{
Dialog for function GmaneSource() from package (tm)
}
\description{
This function provides an interface to the function \code{\link[tm]{GmaneSource}} from the \pkg{tm} package
}
\usage{
gSource()
}
\details{
Create corpus from GmaneSource.\cr
You can also access this function using the \code{Rcmdr} menu.\cr
From \code{Rcmdr} menu choose \emph{TextMining -> Create Corpus -> Create GmaneSource...}
}
\author{
Dzemil Lushija \email{dzemill@gmail.com}
}
\seealso{
See also \code{\link[tm]{GmaneSource}}
}
\keyword{ Text mining }
|
/man/gSource.Rd
|
no_license
|
cran/RcmdrPlugin.TextMining
|
R
| false | false | 586 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SL_functions.R
\name{survSuperLearner.CV.control}
\alias{survSuperLearner.CV.control}
\title{Control parameters for the cross validation steps in survival Super Learner}
\usage{
survSuperLearner.CV.control(
V = 10L,
stratifyCV = TRUE,
shuffle = TRUE,
validRows = NULL
)
}
\arguments{
\item{V}{Number of cross-validation folds. Defaults to 10.}
\item{stratifyCV}{Logical indicating whether to balance number of observed events across folds. Defaults to \code{TRUE}.}
\item{shuffle}{Logical indicating whether to shuffle the indices, or to simply assign sequentially. Defaults to \code{TRUE}. Should almost always be set to \code{TRUE} unless it is explicitly desired to assign sequentially.}
\item{validRows}{Optional custom list of indices for validation folds.}
}
\value{
Returns a list of length \code{V} with validation indices for each of the folds.
}
\description{
This function initiates control parameters for the cross-validation in \code{\link{survSuperLearner}} function.
}
|
/man/survSuperLearner.CV.control.Rd
|
no_license
|
Causal-Inference-class/survSuperLearner
|
R
| false | true | 1,073 |
rd
|
corr <- function(directory, threshold = 0) {
source("complete.R")
completeCases <- complete(directory)
observedCases <- completeCases[completeCases$nobs > threshold,1]
files <- list.files(path = directory, full.names = TRUE)
  # pre-allocate one slot per monitor file so correlations[i] can be indexed by monitor ID
  correlations <- rep(NA_real_, length(files))
  for (i in observedCases) {
    readData <- read.csv(files[i])
    completeRows <- complete.cases(readData)
    sulfate <- readData[completeRows, 2]
    nitrate <- readData[completeRows, 3]
    correlations[i] <- cor(x = sulfate, y = nitrate)
  }
  # drop monitors that were never filled in and return the numeric vector
  correlations[!is.na(correlations)]
}
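## Example usage (sketch; assumes the assignment's "specdata" directory of
## monitor CSV files is present in the working directory):
## cr <- corr("specdata", threshold = 150)
## head(cr); summary(cr)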
|
/corr.R
|
no_license
|
0xdug/R-Programming
|
R
| false | false | 599 |
r
|
conn <- DBI::dbConnect(RPostgres::Postgres(),host="localhost",dbname="stf_jose",user=Sys.getenv("DBUSER"),password=Sys.getenv("DBPASSWORD"))
library(tidyverse)
library(DBI)
library(dbx)
dd %>%
slice(1:2)
dbGetQuery(conn,"select * from detalhes limit 2")
dbGetQuery(conn,"
select * from detalhes
order by incidente
limit 2
")
dbExecute(conn,"
alter table detalhes add column detalhes_id serial
")
dbGetQuery(conn,"
select * from detalhes
order by detalhes_id
limit 2
")
dbExecute(conn,"alter table detalhes drop column detalhes_id")
dd <- dbGetQuery(conn,"
select * from detalhes
where classe != 'AI'
")
dd <- dbGetQuery(conn,"
select * from detalhes
where classe <> 'AI'
")
query <- dbSendQuery(conn,"
select * from
detalhes
where classe = 'AI'
or relator_atual = 'MINISTRO PRESIDENTE'
")
dd <- dbFetch(query, n = 20)
dk <- dbFetch(query, n = 20)
df <- bind_rows(dd,dk)
dbWriteTable(conn,"dd", dd)
dbWriteTable(conn,"dk", dk)
dbExecute(conn,"
create table df as
(
select * from dd
union
select * from dk
)
")
dbExecute(conn,"drop table dd")
dfs <- list(dd =dd,df =df,dk =dk)
iwalk(dfs,~{
dbWriteTable(conn,.y, .x)
})
df1 <- data.frame(a = 1:5,
b = letters[1:5])
df2 <- data.frame(c = 6:10,
d = letters[6:10])
df3 <- bind_cols(df1,df2)
dbCreateTable(conn,"df1", df1)
dbCreateTable(conn,"df2", df2)
dbExecute(conn,"alter table df1 add column id serial")
dbExecute(conn,"alter table df2 add column id serial")
dbWriteTable(conn, "df1", df1, append = TRUE)
dbWriteTable(conn, "df2", df2, append = TRUE)
dbExecute(conn,"truncate df1")
dbExecute(conn,"truncate df2")
dbxInsert(conn,"df1", df1)
dbxInsert(conn,"df2", df2)
dbExecute(conn,"
create table df3 as
(
select df1.id, df1.a,df1.b,df2.c, df2.d
from df1, df2
where df1.id = df2.id
)
")
df <- expand.grid(list(v1=1:2,
v2=c("a","b","c")))
df <- purrr::cross_df(list(v1=1:2,
v2=c("a","b","c")))
df <- dbGetQuery(conn,"select a, d
from df1, df2
")
|
/R/dplyr_sql.R
|
no_license
|
jjesusfilho/rpg3
|
R
| false | false | 2,511 |
r
|
testlist <- list(Beta = 0, CVLinf = -3.40133070085832e-229, FM = -2.93779159239554e-306, L50 = 0, L95 = 0, LenBins = c(0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = -2.94652133491295e-306, SL95 = -3.29337414099116e-306, nage = -2139062144L, nlen = -2139062144L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615829639-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 426 |
r
|
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# This function assumes that the matrix is always invertible.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("Returning cached data.")
return(inv)
}
else
{
message(" No cached data found")
data <- x$get() # obtains matrix from object x
inv <- solve(data) # finds inverse matrix
x$setinverse(inv)# assigns resulting inverse matrix to object x
message( "returning inverse of the matrix")
return (inv)
}
}
## CHECKING THIS PROGRAM:
## k <- matrix(rnorm(25),5,5)
## k1 <- makeCacheMatrix(k)
## cacheSolve(k1)
## x and inv are stored within the environment in which they were defined
## get and getinverse only fetch these from the enclosing environment
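## A further (hypothetical) check that the cached inverse matches a fresh solve():
## m <- matrix(c(2, 0, 0, 2), 2, 2)
## cm <- makeCacheMatrix(m)
## identical(cacheSolve(cm), solve(m)) ## first call computes the inverse
## identical(cacheSolve(cm), solve(m)) ## second call returns the cached value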
|
/cachematrix.R
|
no_license
|
arkumar5420/ProgrammingAssignment2
|
R
| false | false | 1,502 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qcc_lims_shewhart_rules.R
\name{qcc_rule06}
\alias{qcc_rule06}
\title{Rule6: 4 out of 5 consecutive points are more than 1 sigma from the center line in the same direction}
\usage{
qcc_rule06(x, lcl_1s, ucl_1s, run = 5)
}
\arguments{
\item{x}{numeric vector of lab results}
\item{lcl_1s}{1-sigma lower limit}
\item{ucl_1s}{1-sigma upper limit}
\item{run}{violation if (run - 1) of run results meet the criterion}
}
\value{
TRUE if rule violation
}
\description{
Rule6: 4 out of 5 consecutive points are more than 1 sigma from the center line in the same direction
}
|
/inbolimsintern/man/qcc_rule06.Rd
|
permissive
|
inbo/inbolimsintern
|
R
| false | true | 647 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/revdep-summarise.R, R/revdep.R
\name{revdep_check_save_logs}
\alias{revdep_check}
\alias{revdep_check_save_logs}
\alias{revdep_check_save_summary}
\alias{revdep_check_summary}
\title{Run R CMD check on all downstream dependencies.}
\usage{
revdep_check_save_logs(res, log_dir = "revdep")
revdep_check_save_summary(res, log_dir = "revdep")
revdep_check_summary(res)
revdep_check(pkg = ".", recursive = FALSE, ignore = NULL,
dependencies = c("Depends", "Imports", "Suggests", "LinkingTo"),
libpath = getOption("devtools.revdep.libpath"), srcpath = libpath,
bioconductor = FALSE, type = getOption("pkgType"),
threads = getOption("Ncpus", 1), check_dir = tempfile("check_cran"))
}
\arguments{
\item{res}{Result of \code{revdep_check}}
\item{log_dir}{Directory in which to save logs}
\item{pkg}{Path to package. Defaults to current directory.}
\item{recursive}{If \code{TRUE} look for full set of recursive dependencies.}
\item{ignore}{A character vector of package names to ignore. These packages
will not appear in returned vector. This is used in
\code{\link{revdep_check}} to avoid packages with installation problems
or extremely long check times.}
\item{dependencies}{A character vector listing the types of dependencies
to follow.}
\item{libpath}{Path to library to store dependencies packages - if you
you're doing this a lot it's a good idea to pick a directory and stick
with it so you don't have to download all the packages every time.}
\item{srcpath}{Path to directory to store source versions of dependent
packages - again, this saves a lot of time because you don't need to
redownload the packages every time you run the package.}
\item{bioconductor}{If \code{TRUE} also look for dependencies amongst
bioconductor packages.}
\item{type}{binary Package type to test (source, mac.binary etc). Defaults
to the same type as \code{\link{install.packages}()}.}
\item{threads}{Number of concurrent threads to use for checking.
It defaults to the option \code{"Ncpus"} or \code{1} if unset.}
\item{check_dir}{Directory to store results.}
}
\value{
An invisible list of results. But you'll probably want to look
at the check results on disk, which are saved in \code{check_dir}.
Summaries of all ERRORs and WARNINGs will be stored in
\code{check_dir/00check-summary.txt}.
}
\description{
Use \code{revdep_check()} to run \code{\link{check_cran}()} on all downstream
dependencies. Summarises the results with \code{revdep_check_summary} and
save logs with \code{revdep_check_save_logs}.
}
\section{Check process}{
\enumerate{
\item Install \code{pkg} (in special library, see below).
\item Find all CRAN packages that depend on \code{pkg}.
\item Install those packages, along with their dependencies.
\item Run \code{R CMD check} on each package.
\item Uninstall \code{pkg} (so other reverse dependency checks don't
use the development version instead of the CRAN version)
}
}
\section{Package library}{
By default \code{revdep_check} uses a temporary library to store any packages
that are required by the packages being tested. This ensures that they don't
interfere with your default library, but means that if you restart R
between checks, you'll need to reinstall all the packages. If you're
doing reverse dependency checks frequently, I recommend that you create
a directory for these packages and set \code{options(devtools.revdep.libpath)}.
}
\examples{
\dontrun{
# Run R CMD check on all downstream dependencies of ggplot2
res <- revdep_check("ggplot2")
revdep_check_summary(res)
revdep_check_save_logs(res)
}
}
\seealso{
\code{\link{revdep_maintainers}()} to run R CMD check on all reverse
dependencies.
}
|
/man/revdep_check.Rd
|
no_license
|
franapoli/devtools
|
R
| false | false | 3,738 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hist.antsImage.R
\name{mask_values}
\alias{mask_values}
\title{Simple Value extractor for antsImage Values}
\usage{
mask_values(x, mask)
}
\arguments{
\item{x}{Object of class \code{antsImage}}
\item{mask}{object to subset the image. If missing, then all
values of the image are returned.}
}
\value{
Vector of values
}
\description{
Takes in a mask and an image and then returns the values
}
\examples{
img = makeImage(c(10,10),rnorm(100))
mask = img > 0
mask_values(img, mask)
mask_values(img)
}
|
/man/mask_values.Rd
|
permissive
|
ANTsX/ANTsRCore
|
R
| false | true | 576 |
rd
|
#fit_save_stan_mod
#' Fit and save a Stan model
#'
#' @param stan_model_filename path to the .stan model file
#' @param standat named list of data passed to the Stan model
#' @param chains number of MCMC chains
#' @param iterations number of iterations per chain
#' @param seed random seed passed to rstan::stan
#' @param output_filename path of the .rds file the fitted model is saved to
#' @return called for its side effect: the fitted stanfit object is saved to output_filename
#
fit_save_stan_mod <- function(stan_model_filename,
standat,
chains,
iterations,
seed,
output_filename) {
model <- rstan::stan(stan_model_filename,
data = standat,
chains = chains,
iter = iterations,
seed = seed)
saveRDS(model, file=output_filename)
}
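# Hedged usage sketch (file names and data below are illustrative, not taken
# from the project):
# standat <- list(N = 10, y = rnorm(10))
# fit_save_stan_mod("model.stan", standat,
#                   chains = 4, iterations = 2000, seed = 123,
#                   output_filename = "fit.rds")
# fit <- readRDS("fit.rds")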
|
/model_scripts/fit_save_stan_mod_function.R
|
no_license
|
nbrasset/geomphon-perception-ABX
|
R
| false | false | 748 |
r
|
context("Test filtering of controller data")
pmxClassHelpers <- test_pmxClass_helpers()
test_that("individual plot: get all pages", {
ctr <- pmxClassHelpers$ctr
expect_is(ctr, "pmxClass")
p <- ctr %>% get_plot("individual")
expect_equal(length(p), 5)
})
test_that("individual plot: get single page", {
ctr <- pmxClassHelpers$ctr
expect_is(ctr, "pmxClass")
p <- ctr %>% get_plot("individual", 2)
expect_true(inherits(p, "ggplot"))
})
test_that("individual plot: get some pages", {
ctr <- pmxClassHelpers$ctr
expect_is(ctr, "pmxClass")
p <- ctr %>% get_plot("individual", c(2, 4))
expect_equal(length(p), 2)
})
test_that("individual plot : don't exceed the effective number of pages", {
ctr <- pmxClassHelpers$ctr
expect_is(ctr, "pmxClass")
p <- ctr %>% get_plot("individual", 1:100)
expect_equal(length(p), 5)
})
test_that("can create a plot using setting dname", {
ctr <- pmxClassHelpers$ctr
expect_is(ctr, "pmxClass")
ctr %>% set_plot("DIS", pname = "distr1", type = "box", dname = "eta")
p <- ctr %>% get_plot("distr1")
pconf <- ggplot2::ggplot_build(p)
expect_equal(length(pconf$plot$layers), 4)
})
test_that("Create a plot with not valid dname throw message", {
ctr <- pmxClassHelpers$ctr
expect_is(ctr, "pmxClass")
expect_output(
ctr %>% set_plot("DIS", pname = "distr1", type = "box", dname = "xxx"),
"No data xxx provided for plot distr1"
)
})
|
/tests/testthat/test-plot.R
|
no_license
|
sehamsick/ggPMX
|
R
| false | false | 1,422 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.string.R
\name{parcels.street.second.last}
\alias{parcels.street.second.last}
\title{parcels.street.second.last}
\usage{
parcels.street.second.last(update.log, street.explode)
}
\arguments{
\item{update.log}{logical vector}
\item{street.explode}{data.table}
}
\description{
Helper function for explode.street.
}
\keyword{explode,}
\keyword{helper}
\keyword{street.explode,}
|
/man/parcels.street.second.last.Rd
|
no_license
|
erikbjohn/methods.string
|
R
| false | true | 461 |
rd
|
get.wpp.e0.data <- function(sex='M', start.year=1950, present.year=2015, wpp.year=2015, my.e0.file=NULL,
my.locations.file=NULL, verbose=FALSE) {
sex <- toupper(sex)
if(sex != 'M' && sex != 'F')
stop('Allowed values for argument "sex" are "M" and "F".')
########################################
# set data and match with areas
########################################
un.object <- read.UNe0(sex=sex, wpp.year=wpp.year, my.e0.file=my.e0.file,
present.year=present.year, verbose=verbose)
data <- un.object$data.object$data
# get region and area data
locations <- bayesTFR:::read.UNlocations(data, wpp.year=wpp.year, my.locations.file=my.locations.file,
package='bayesLife', verbose=verbose)
loc_data <- locations$loc_data
include <- locations$include
prediction.only <- locations$prediction.only
data_incl <- data[include,]
nr_countries_estimation <- nrow(data_incl)
if(any(!is.na(prediction.only))) { # move prediction countries at the end of data
data_prediction <- data[prediction.only,]
data_incl <- rbind(data_incl, data_prediction)
}
LEXmatrix.regions <- bayesTFR:::get.observed.time.matrix.and.regions(
data_incl, loc_data,
start.year=start.year,
present.year=present.year)
if (verbose)
cat('Dimension of the e0 matrix:', dim(LEXmatrix.regions$obs_matrix), '\n')
LEXmatrixsuppl.regions <- bayesTFR:::.get.suppl.matrix.and.regions(un.object, LEXmatrix.regions, loc_data,
start.year, present.year)
if(!is.null(un.object$suppl.data.object) && verbose)
cat('Dimension of the supplemental e0 matrix:', dim(LEXmatrixsuppl.regions$obs_matrix), '\n')
return(list(e0.matrix=LEXmatrix.regions$obs_matrix,
e0.matrix.all=LEXmatrix.regions$obs_matrix_all,
regions=LEXmatrix.regions$regions,
nr.countries.estimation=nr_countries_estimation,
suppl.data=bayesTFR:::.get.suppl.data.list(LEXmatrixsuppl.regions, matrix.name='e0.matrix')
))
}
read.UNe0 <- function(sex, wpp.year, my.e0.file=NULL, ...) {
un.dataset <- paste('e0', sex, sep='')
un.suppl.dataset <- paste('e0', sex, '_supplemental', sep='')
data <- bayesTFR:::do.read.un.file(un.dataset, wpp.year, my.file=my.e0.file, ...)
suppl.data <- bayesTFR:::do.read.un.file(un.suppl.dataset, wpp.year, my.file=my.e0.file, ...)
if(is.null(suppl.data$data)) suppl.data <- NULL
return(list(data.object=data, suppl.data.object=suppl.data))
}
set.e0.wpp.extra <- function(meta, countries=NULL, my.e0.file=NULL, my.locations.file=NULL, verbose=FALSE) {
#'countries' is a vector of country or region codes
un.object <- read.UNe0(sex=meta$sex, wpp.year=meta$wpp.year, my.e0.file=my.e0.file,
present.year=meta$present.year, verbose=verbose)
data <- un.object$data.object
extra.wpp <- bayesTFR:::.extra.matrix.regions(data=data, countries=countries, meta=meta,
package="bayesLife", verbose=verbose)
if(!is.null(extra.wpp)) {
extra.wpp <- list(e0.matrix=extra.wpp$tfr_matrix,
e0.matrix.all=extra.wpp$tfr_matrix_all,
regions=extra.wpp$regions,
nr.countries.estimation=extra.wpp$nr_countries_estimation,
is_processed = extra.wpp$is_processed)
locations <- bayesTFR:::read.UNlocations(data$data, wpp.year=meta$wpp.year,
my.locations.file=my.locations.file, package='bayesLife', verbose=verbose)
suppl.wpp <- bayesTFR:::.get.suppl.matrix.and.regions(un.object, extra.wpp, locations$loc_data,
meta$start.year, meta$present.year)
extra.wpp$suppl.data <- bayesTFR:::.get.suppl.data.list(suppl.wpp, matrix.name='e0.matrix')
}
return(extra.wpp)
}
get.wpp.e0.data.for.countries <- function(meta, sex='M', my.e0.file=NULL, my.locations.file=NULL, verbose=FALSE) {
sex <- toupper(sex)
if(sex != 'M' && sex != 'F')
stop('Allowed values for argument "sex" are "M" and "F".')
########################################
# set data and match with areas
########################################
un.object <- read.UNe0(sex=sex, wpp.year=meta$wpp.year, present.year=meta$present.year,
my.e0.file=my.e0.file, verbose=verbose)
data <- un.object$data.object$data
# get region and area data
locations <- bayesTFR:::read.UNlocations(data, wpp.year=meta$wpp.year,
my.locations.file=my.locations.file, package='bayesLife', verbose=verbose)
loc_data <- locations$loc_data
include <- c()
for (i in 1:length(meta$regions$country_code)) { # put countries into the same order as in meta
loc_index <- which(data$country_code == meta$regions$country_code[i])
if(length(loc_index) <= 0)
stop('Country ', meta$regions$country_code[i], ' not found.')
include <- c(include, loc_index)
}
data_incl <- data[include,]
LEXmatrix.regions <- bayesTFR:::get.observed.time.matrix.and.regions(
data_incl, loc_data,
start.year=meta$start.year,
present.year=meta$present.year)
if (verbose)
cat('Dimension of the e0 matrix:', dim(LEXmatrix.regions$obs_matrix), '\n')
LEXmatrixsuppl.regions <- bayesTFR:::.get.suppl.matrix.and.regions(un.object, LEXmatrix.regions, loc_data,
meta$start.year, meta$present.year)
if(!is.null(un.object$suppl.data.object) && verbose)
cat('Dimension of the supplemental e0 matrix:', dim(LEXmatrixsuppl.regions$obs_matrix), '\n')
return(list(e0.matrix=LEXmatrix.regions$obs_matrix,
e0.matrix.all=LEXmatrix.regions$obs_matrix_all,
regions=LEXmatrix.regions$regions,
nr.countries.estimation=nrow(data_incl),
suppl.data=bayesTFR:::.get.suppl.data.list(LEXmatrixsuppl.regions, matrix.name='e0.matrix')
))
}
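## ---------------------------------------------------------------------------
## Hypothetical usage sketch (not part of the original source): how the loader
## defined above might be called directly. Argument values are illustrative,
## and the call needs the WPP data shipped with bayesLife/bayesTFR to run.
## ---------------------------------------------------------------------------
if (FALSE) {
	e0.data <- get.wpp.e0.data(sex = 'F', start.year = 1950, present.year = 2015,
	                           wpp.year = 2015, verbose = TRUE)
	dim(e0.data$e0.matrix)          # observed e0 values, one column per country
	e0.data$nr.countries.estimation # countries used for estimation
}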
|
/bayesLife/R/wpp_data.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 5,555 |
r
|
#!/usr/bin/env Rscript
balanced_token = data.frame(dataset = 'Balanced', type='Tokens', accuracy = scan('balanced_token.txt'))
balanced_pos = data.frame(dataset = 'Balanced', type='Part-of-speech tags', accuracy = scan('balanced_pos.txt'))
unbalanced_token = data.frame(dataset = 'Unbalanced', type='Tokens', accuracy = scan('unbalanced_token.txt'))
unbalanced_pos = data.frame(dataset = 'Unbalanced', type='Part-of-speech tags', accuracy = scan('unbalanced_pos.txt'))
data = rbind(balanced_token, balanced_pos, unbalanced_token, unbalanced_pos)
library(ggplot2)
ggplot(data, aes(x = dataset, y = accuracy, colour = type, fill = type)) +
geom_boxplot() +
labs(title="System F1-score", x = "Dataset", y = "F1-score") +
scale_fill_manual(name="Features", values=c("darkorange", "darkolivegreen2")) +
scale_colour_manual(name="Features", values=c("darkorange3", "darkolivegreen4")) +
geom_hline(yintercept = 0.18) +
geom_label(x = 2, y = 0.18, label = "Random (unbalanced)", vjust = -.2, colour="black", fill="gray90", show.legend=FALSE) +
geom_hline(yintercept = 0.16666666666666666) +
geom_label(x = 1, y = 0.16666666666666666, label = "Random (balanced)", vjust = 1.2, colour="black", fill="gray90", show.legend=FALSE) +
theme(plot.background = element_rect(fill = "transparent"), legend.background = element_rect(fill = "transparent")) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 20))
|
/R/create_graph.R
|
no_license
|
imatr/Source-language-prediction
|
R
| false | false | 1,442 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract.R
\name{extract_inits}
\alias{extract_inits}
\title{Generate initial conditions from a Stan fit}
\usage{
extract_inits(fit, current_inits, exclude_list = NULL, samples = 50)
}
\arguments{
\item{fit}{A stanfit object}
\item{current_inits}{A function that returns a list of initial conditions (such as
\code{create_initial_conditions()}). Only used if \code{exclude_list} is specified.}
\item{exclude_list}{A character vector of parameters to not initialise from the fit
object, defaulting to \code{NULL}.}
\item{samples}{Numeric, defaults to 50. Number of posterior samples.}
}
\value{
A function that when called returns a set of initial conditions as a named list.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
Extracts posterior samples to use to initialise a full model fit. This may be useful
for certain data sets where the sampler gets stuck or cannot easily be initialised.
In \code{estimate_infections()}, \code{epinow()} and \code{regional_epinow()} this option can be
engaged by setting \verb{stan_opts(init_fit = <stanfit>)}.
This implementation is based on the approach taken in \href{https://github.com/ImperialCollegeLondon/epidemia/}{epidemia}
authored by James Scott.
}
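% Hypothetical usage sketch added for illustration (not from the package's own
% examples); `fit` is assumed to be a stanfit object from an earlier model run.
\examples{
\dontrun{
init_fun <- extract_inits(fit, current_inits = NULL, samples = 50)
# init_fun can then be supplied wherever a set of initial conditions is needed,
# or the earlier fit passed directly via stan_opts(init_fit = fit)
}
}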
|
/man/extract_inits.Rd
|
permissive
|
medewitt/EpiNow2
|
R
| false | true | 1,423 |
rd
|
##Load Data File
data_load <- read.csv("./household_power_consumption.txt", header=TRUE, sep=";",
stringsAsFactors=FALSE, na.strings = "?", dec=".")
##Format Date + Time
data_load$Date <- as.Date(data_load$Date, format = "%d/%m/%Y")
##data_load$Time <- as.POSIXct(data_load$Time, format = "%H:%M:%s")
##Subset Data to date ranges provided
data_load_subset <-subset(data_load, data_load$Date >= "2007-02-01" & data_load$Date <= "2007-02-02")
##Combine Date + Time into new column
data_load_subset$TS <- as.POSIXct(paste(data_load_subset$Date, data_load_subset$Time, sep = " "))
##Output Plot to 480 x 480 png
png("plot3.png", height = 480, width = 480)
plot(data_load_subset$Sub_metering_1~data_load_subset$TS, type = "l", ylab = "Energy sub metering", xlab = "")
##lines(data_load_subset$TS, data_load_subset$Sub_metering_1)
lines(data_load_subset$TS, data_load_subset$Sub_metering_2, col = "red")
lines(data_load_subset$TS, data_load_subset$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2, col=c("black", "red", "blue"))
dev.off()
|
/Plot3.R
|
no_license
|
RITIK-12/ExData_Plotting1
|
R
| false | false | 1,143 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LANDSCAPE-Site.R
\name{has_mate_Site}
\alias{has_mate_Site}
\title{Site: Check Mating Swarm Resources}
\usage{
has_mate_Site()
}
\description{
Check if any \code{\link{Mating_Resource}} are present at this site. This is queried by mosquitoes during \code{\link{mbites_checkForResources}}.
\itemize{
\item binding: \code{Site$has_mate}
}
}
|
/MASH-dev/SeanWu/MBITES/man/has_mate_Site.Rd
|
no_license
|
aucarter/MASH-Main
|
R
| false | true | 417 |
rd
|
# ui.R
shinyUI(fluidPage(
titlePanel("N Gram Predictor Engine"),
fluidRow(
column(3,
h3("Buttons"),
actionButton("action", label = "Action"),
br(),
br(),
submitButton("Submit")),
column(3,
textInput("date",
label = h3("Phrase input"),
value = "of the"))
)
))
|
/ui.R
|
no_license
|
sco-lo-digital/jhu_capstone
|
R
| false | false | 436 |
r
|
list.of.packages <- c("kebabs","RColorBrewer")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
suppressPackageStartupMessages(require("kebabs"))
suppressPackageStartupMessages(require("RColorBrewer"))
options(echo=F) # set TRUE if you want commands echoed in the output file
args <- commandArgs(trailingOnly = TRUE)
path = args[1]
taxon = args[2]
out = args[3]
pos="RNA-binding protein"
tool = "RNAPred"
RBP_RNAPred = read.table(paste(path,"/",tool,"/RBP_",taxon,".",tool,".pred.txt",sep=""), header = F, sep = "\t", stringsAsFactors = F)
NRBP_RNAPred = read.table(paste(path,"/",tool,"/NRBP_",taxon,".",tool,".pred.txt",sep=""), header = F, sep = "\t", stringsAsFactors = F)
tool = "RBPPred"
RBP_RBPPred = read.table(paste(path,"/",tool,"/RBP_",taxon,".",tool,".pred.txt",sep=""), header = F, sep = "\t", stringsAsFactors = F)
NRBP_RBPPred = read.table(paste(path,"/",tool,"/NRBP_",taxon,".",tool,".pred.txt",sep=""), header = F, sep = "\t", stringsAsFactors = F)
tool = "SpotSeqRna"
RBP_SpotSeqRna = read.table(paste(path,"/",tool,"/RBP_",taxon,".",tool,".pred.txt",sep=""), header = F, sep = "\t", stringsAsFactors = F)
NRBP_SpotSeqRna = read.table(paste(path,"/",tool,"/NRBP_",taxon,".",tool,".pred.txt",sep=""), header = F, sep = "\t", stringsAsFactors = F)
tool = "TriPepSVM"
RBP_TriPepSVM = read.table(paste(path,"/",tool,"/RBP_",taxon,".",tool,".pred.txt",sep=""), header = F, sep = "\t", stringsAsFactors = F)
NRBP_TriPepSVM = read.table(paste(path,"/",tool,"/NRBP_",taxon,".",tool,".pred.txt",sep=""), header = F, sep = "\t", stringsAsFactors = F)
RESULT = data.frame()
### RNAPred
pred = ifelse(c(RBP_RNAPred$V3,NRBP_RNAPred$V3) == pos, 1, -1)
label = c(rep(1,nrow(RBP_RNAPred)),rep(-1,nrow(NRBP_RNAPred)))
performance = evaluatePrediction(pred, label, allLabels = c(1,-1), print = F)
print(c(performance$SENS, performance$SPEC, performance$BAL_ACC, performance$MAT_CC))
RESULT = rbind(RESULT,c(performance$SENS, performance$SPEC, performance$BAL_ACC, performance$MAT_CC * 100 ))
### SpotSeqRna
pred = ifelse(c(RBP_SpotSeqRna$V3,NRBP_SpotSeqRna$V3) == pos, 1, -1)
label = c(rep(1,nrow(RBP_SpotSeqRna)),rep(-1,nrow(NRBP_SpotSeqRna)))
performance = evaluatePrediction(pred, label, allLabels = c(1,-1), print = F)
print(c(performance$SENS, performance$SPEC, performance$BAL_ACC, performance$MAT_CC))
RESULT = rbind(RESULT,c(performance$SENS, performance$SPEC, performance$BAL_ACC, performance$MAT_CC * 100 ))
### RBPPred
pred = ifelse(c(RBP_RBPPred$V3,NRBP_RBPPred$V3) == pos, 1, -1)
label = c(rep(1,nrow(RBP_RBPPred)),rep(-1,nrow(NRBP_RBPPred)))
performance = evaluatePrediction(pred, label, allLabels = c(1,-1), print = F)
print(c(performance$SENS, performance$SPEC, performance$BAL_ACC, performance$MAT_CC))
RESULT = rbind(RESULT,c(performance$SENS, performance$SPEC, performance$BAL_ACC, performance$MAT_CC * 100 ))
### TriPepSVM
pred = ifelse(c(RBP_TriPepSVM$V3,NRBP_TriPepSVM$V3) == pos, 1, -1)
label = c(rep(1,nrow(RBP_TriPepSVM)),rep(-1,nrow(NRBP_TriPepSVM)))
performance = evaluatePrediction(pred, label, allLabels = c(1,-1), print = F)
print(c(performance$SENS, performance$SPEC, performance$BAL_ACC, performance$MAT_CC))
RESULT = rbind(RESULT,c(performance$SENS, performance$SPEC, performance$BAL_ACC, performance$MAT_CC * 100 ))
colnames(RESULT)=c("Sensitivity","Specificity","BACC","MCC")
pdf(out)
barplot(as.matrix(RESULT),beside=T,font=3, ylim=c(0,100), main = taxon)
dev.off()
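## ---------------------------------------------------------------------------
## Optional refactor sketch (illustrative only, not used above): the four
## per-tool blocks repeat the same read/score/rbind logic and could be
## collapsed into a helper plus a single sapply() over the tool names.
## ---------------------------------------------------------------------------
score_tool <- function(tool, path, taxon, pos = "RNA-binding protein") {
  rbp  <- read.table(paste(path, "/", tool, "/RBP_",  taxon, ".", tool, ".pred.txt", sep=""), header = F, sep = "\t", stringsAsFactors = F)
  nrbp <- read.table(paste(path, "/", tool, "/NRBP_", taxon, ".", tool, ".pred.txt", sep=""), header = F, sep = "\t", stringsAsFactors = F)
  pred  <- ifelse(c(rbp$V3, nrbp$V3) == pos, 1, -1)
  label <- c(rep(1, nrow(rbp)), rep(-1, nrow(nrbp)))
  perf  <- evaluatePrediction(pred, label, allLabels = c(1, -1), print = F)
  c(Sensitivity = perf$SENS, Specificity = perf$SPEC, BACC = perf$BAL_ACC, MCC = perf$MAT_CC * 100)
}
# RESULT <- t(sapply(c("RNAPred", "SpotSeqRna", "RBPPred", "TriPepSVM"),
#                    score_tool, path = path, taxon = taxon))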
|
/paper/performance/source/plotPerformance.r
|
no_license
|
lx130357/TriPepSVM
|
R
| false | false | 3,514 |
r
|
# take in before and char strings, if either is empty return empty string, else change before to chars
contamination_str <- function(before, char) {
# check if either string is empty
if ((nchar(before) == 0) | (nchar(char) == 0)) {
return('')
}
else {
# make list of char and collapse
return(paste(rep(char, nchar(before)), collapse = ''))
}
}
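# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the expected results follow directly from
# the function defined above):
# ---------------------------------------------------------------------------
stopifnot(
  contamination_str("abc", "z") == "zzz",
  contamination_str("", "z") == "",
  contamination_str("abc", "") == ""
)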
|
/src/contamination_str.R
|
no_license
|
nickbuker/CodewaRs
|
R
| false | false | 388 |
r
|
downloadURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
downloadFile <- "./Data/household_power_consumption.zip"
householdFile <- "./Data/household_power_consumption.txt"
if (!file.exists(householdFile)) {
download.file(downloadURL, downloadFile)
unzip(downloadFile, overwrite = T, exdir = "./Data")
}
plotData <- read.table(householdFile, header=T, sep=";", na.strings="?")
## set time variable
finalData <- plotData[plotData$Date %in% c("1/2/2007","2/2/2007"),]
SetTime <-strptime(paste(finalData$Date, finalData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
finalData <- cbind(SetTime, finalData)
#Plot 4
## open a 480x480 PNG device so the figure is written to plot4.png
## (win.graph() is Windows-only and never saves the plot to a file)
png("plot4.png", width = 480, height = 480)
labels <- c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
columnlines <- c("black","red","blue")
par(mfrow=c(2,2))
plot(finalData$SetTime, finalData$Global_active_power, type="l", col="green", xlab="", ylab="Global Active Power")
plot(finalData$SetTime, finalData$Voltage, type="l", col="orange", xlab="datetime", ylab="Voltage")
plot(finalData$SetTime, finalData$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(finalData$SetTime, finalData$Sub_metering_2, type="l", col="red")
lines(finalData$SetTime, finalData$Sub_metering_3, type="l", col="blue")
legend("topright", bty="n", legend=labels, lty=1, col=columnlines)
plot(finalData$SetTime, finalData$Global_reactive_power, type="l", col="blue", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/Plot4.R
|
no_license
|
aeroUT/ExData_Plotting1
|
R
| false | false | 1,447 |
r
|
# sim list vax ----
sim_list_vax <- lst(
first_vax_type = bn_node(~rcat(n=..n, c("pfizer","az", ""), c(0.49,0.50, 0.01)), keep=FALSE),
covid_vax_pfizer_1_day = bn_node(
~as.integer(runif(n=..n, pfizerstart_day, pfizerstart_day+60)),
missing_rate = ~1-(first_vax_type=="pfizer")
),
covid_vax_pfizer_2_day = bn_node(
~as.integer(runif(n=..n, covid_vax_pfizer_1_day+30, covid_vax_pfizer_1_day+60)),
needs = c("covid_vax_pfizer_1_day"),
missing_rate = ~0.01
),
covid_vax_az_1_day = bn_node(
~as.integer(runif(n=..n, azstart_day, azstart_day+60)),
missing_rate = ~1-(first_vax_type=="az")
),
covid_vax_az_2_day = bn_node(
~as.integer(runif(n=..n, covid_vax_az_1_day+30, covid_vax_az_1_day+60)),
needs = c("covid_vax_az_1_day"),
missing_rate = ~0.01
),
)
# sim list jcvi ----
sim_list_jcvi <- lst(
bmi = bn_node(
~rfactor(n=..n, levels = c("Not obese", "Obese I (30-34.9)", "Obese II (35-39.9)", "Obese III (40+)"), p = c(0.5, 0.2, 0.2, 0.1)),
),
care_home_type = bn_node(
~rfactor(n=..n, levels=c("Carehome", "Nursinghome", "Mixed", ""), p = c(0.01, 0.01, 0.01, 0.97))
),
care_home_tpp = bn_node(
~care_home_type!=""
),
care_home_code = bn_node(
~rbernoulli(n=..n, p = 0.01)
),
asthma = bn_node( ~rbernoulli(n=..n, p = 0.02)),
chronic_neuro_disease = bn_node( ~rbernoulli(n=..n, p = 0.02)),
chronic_resp_disease = bn_node( ~rbernoulli(n=..n, p = 0.02)),
sev_obesity = bn_node( ~rbernoulli(n=..n, p = 0.02)),
diabetes = bn_node( ~rbernoulli(n=..n, p = 0.02)),
sev_mental = bn_node( ~rbernoulli(n=..n, p = 0.02)),
chronic_heart_disease = bn_node( ~rbernoulli(n=..n, p = 0.02)),
chronic_kidney_disease = bn_node( ~rbernoulli(n=..n, p = 0.02)),
chronic_liver_disease = bn_node( ~rbernoulli(n=..n, p = 0.02)),
immunosuppressed = bn_node( ~rbernoulli(n=..n, p = 0.02)),
learndis = bn_node( ~rbernoulli(n=..n, p = 0.02)),
cev_ever = bn_node( ~rbernoulli(n=..n, p = 0.02)),
cev = bn_node( ~rbernoulli(n=..n, p = 0.02)),
endoflife = bn_node( ~rbernoulli(n=..n, p = 0.001)),
housebound = bn_node( ~rbernoulli(n=..n, p = 0.001)),
)
sim_list_demographic <- lst(
has_follow_up_previous_year = bn_node(
~rbernoulli(n=..n, p=0.999)
),
hscworker = bn_node(
~rbernoulli(n=..n, p=0.01)
),
age = bn_node(
~as.integer(runif(n=..n, min=70, max=90))
),
age31aug2020 = bn_node(~age),
sex = bn_node(
~rfactor(n=..n, levels = c("F", "M"), p = c(0.51, 0.49)),
    missing_rate = ~0.001 # shorthand for ~(rbernoulli(n = ..n, p = 0.001))
),
ethnicity = bn_node(
~rfactor(n=..n, levels = c(1,2,3,4,5), p = c(0.8, 0.05, 0.05, 0.05, 0.05)),
missing_rate = ~ 0.25
),
ethnicity_6_sus = bn_node(
~rfactor(n=..n, levels = c(0,1,2,3,4,5), p = c(0.1, 0.7, 0.05, 0.05, 0.05, 0.05)),
missing_rate = ~ 0
),
region = bn_node(
variable_formula = ~rfactor(n=..n, levels=c(
"North East",
"North West",
"Yorkshire and The Humber",
"East Midlands",
"West Midlands",
"East",
"London",
"South East",
"South West"
), p = c(0.2, 0.2, 0.3, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05))
),
imd = bn_node(
~factor(plyr::round_any(runif(n=..n, 1, 32000), 100), levels=seq(0,32000,100)),
missing_rate = ~0.02,
keep = FALSE
),
imd_integer = bn_node(
~as.integer(as.character(imd)),
keep=FALSE
),
imd_Q5 = bn_node(
~factor(
case_when(
(imd_integer >= 0) & (imd_integer < 32844*1/5) ~ "1 (most deprived)",
(imd_integer >= 32844*1/5) & (imd_integer < 32844*2/5) ~ "2",
(imd_integer >= 32844*2/5) & (imd_integer < 32844*3/5) ~ "3",
(imd_integer >= 32844*3/5) & (imd_integer < 32844*4/5) ~ "4",
(imd_integer >= 32844*4/5) & (imd_integer <= 32844*5/5) ~ "5 (least deprived)",
TRUE ~ "Unknown"
),
levels= c("1 (most deprived)", "2", "3", "4", "5 (least deprived)", "Unknown")
),
missing_rate = ~0
),
flu_vaccine = bn_node(
~rbernoulli(n=..n, p=0.5)
),
)
# sim list pre ----
sim_list_pre = lst(
# covid_test_0_day = bn_node(
# ~as.integer(runif(n=..n, index_day-100, index_day-1)),
# missing_rate = ~0.7
# ),
primary_care_covid_case_0_day = bn_node(
~as.integer(runif(n=..n, index_day-100, index_day-1)),
missing_rate = ~0.99
),
prior_covid_test_frequency = bn_node(
~as.integer(rpois(n=..n, lambda=3)),
missing_rate = ~0
),
positive_test_0_day = bn_node(
~as.integer(runif(n=..n, index_day-100, index_day-1)),
missing_rate = ~0.9
),
admitted_unplanned_0_day = bn_node(
~as.integer(runif(n=..n, index_day-100, index_day-1)),
missing_rate = ~0.9
),
discharged_unplanned_0_day = bn_node(
~as.integer(runif(n=..n, admitted_unplanned_0_day+1, admitted_unplanned_0_day+20)),
needs="admitted_unplanned_0_day"
),
admitted_planned_0_day = bn_node(
~as.integer(runif(n=..n, index_day-100, index_day-1)),
missing_rate = ~0.9
),
discharged_planned_0_day = bn_node(
~as.integer(runif(n=..n, admitted_planned_0_day+1, admitted_planned_0_day+20)),
needs="admitted_planned_0_day"
),
covidemergency_0_day = bn_node(
~as.integer(runif(n=..n, index_day-100, index_day-1)),
missing_rate = ~0.99
),
admitted_covid_0_day = bn_node(
~as.integer(runif(n=..n, index_day-100, index_day-1)),
missing_rate = ~0.99
),
)
# sim list post ----
sim_list_outcome = lst(
# ## post-baseline events (outcomes)
dereg_day = bn_node(
~ as.integer(runif(n = ..n, index_day, index_day + 120)),
missing_rate = ~0.99
),
# primary_care_covid_case_day = bn_node(
# ~ as.integer(runif(n = ..n, index_day, index_day + 100)),
# missing_rate = ~0.7
# ),
postest_day = bn_node(
~ as.integer(runif(n = ..n, index_day, index_day + 100)),
missing_rate = ~0.7
),
covidadmitted_day = bn_node(
~ as.integer(runif(n = ..n, index_day, index_day + 100)),
missing_rate = ~0.7
),
coviddeath_day = bn_node(
~death_day,
missing_rate = ~0.7,
needs = "death_day"
),
death_day = bn_node(
~ as.integer(runif(n = ..n, index_day, index_day + 100)),
missing_rate = ~0.90
),
)
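# ----------------------------------------------------------------------------
# Illustrative sketch only (plain base R, not the bn_node machinery used
# above): roughly the draw that `covid_vax_pfizer_1_day` encodes -- a uniform
# day between pfizerstart_day and pfizerstart_day + 60 that is set to missing
# whenever the first vaccine type is not "pfizer". `pfizerstart_day` is
# assumed to be defined elsewhere in the project.
# ----------------------------------------------------------------------------
if (FALSE) {
  n <- 1000
  first_vax_type <- sample(c("pfizer", "az", ""), n, replace = TRUE, prob = c(0.49, 0.50, 0.01))
  covid_vax_pfizer_1_day <- as.integer(runif(n, pfizerstart_day, pfizerstart_day + 60))
  covid_vax_pfizer_1_day[first_vax_type != "pfizer"] <- NA_integer_
}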
|
/analysis/dummy/sim_lst.R
|
permissive
|
opensafely/covid-vaccine-effectiveness-sequential-vs-single
|
R
| false | false | 6,301 |
r
|
#!/usr/bin/Rscript
# Plot 3 of Assignment: Course Project 1 of Exploratory Data Analysis#
# set working folder
pth <- "D:/Dropbox/E-books/_Coursera/4_Exploratory_Data_Analysis/Week_1/"
# load all the data
data_raw <- read.csv(paste0(pth, "/household_power_consumption.txt"), sep=";", stringsAsFactors=FALSE)
# subset data between 2007-02-01 and 2007-02-02
data_sub <- subset(data_raw, Date %in% c("1/2/2007", "2/2/2007"), drop = TRUE)
# create a new variable combining date and time
data_sub$time2 <- as.POSIXct(paste(data_sub$Date, data_sub$Time),
format="%d/%m/%Y %H:%M:%S")
# open png device
png(paste0(pth, "plot3.png"), width=480, height=480, units = "px")
# draw sub metering lines 1, 2, and 3
plot(data_sub$time2, as.numeric(data_sub$Sub_metering_1),
type="n",
xlab = "",
ylab = "Energy sub metering")
lines(data_sub$time2, as.numeric(data_sub$Sub_metering_1), col=1)
lines(data_sub$time2, as.numeric(data_sub$Sub_metering_2), col=2)
lines(data_sub$time2, as.numeric(data_sub$Sub_metering_3), col=4)
# add a legend
legend("topright",
lty = 1,
col = c(1, 2, 4),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
/plot3.R
|
no_license
|
hongtao510/ExData_Plotting1
|
R
| false | false | 1,248 |
r
|
## stienen.R
##
## Stienen diagram with border correction
##
## $Revision: 1.5 $ $Date: 2014/09/22 11:15:42 $
stienen <- function(X, ..., bg="grey", border=list(bg=NULL)) {
Xname <- short.deparse(substitute(X))
stopifnot(is.ppp(X))
if(npoints(X) <= 1) {
do.call(plot,
resolve.defaults(list(x=Window(X)),
list(...),
list(main=Xname)))
return(invisible(NULL))
}
d <- nndist(X)
b <- bdist.points(X)
Y <- X %mark% d
gp <- graphicsPars("symbols")
do.call.plotfun(plot.ppp,
resolve.defaults(list(x=Y[b >= d],
markscale=1),
list(...),
list(bg=bg),
list(main=Xname)),
extrargs=gp)
do.call.plotfun(plot.ppp,
resolve.defaults(list(x=Y[b < d],
markscale=1,
add=TRUE),
border,
list(...),
list(bg=bg),
list(cols="grey", lwd=2)),
extrargs=gp)
return(invisible(NULL))
}
stienenSet <- function(X, edge=TRUE) {
stopifnot(is.ppp(X))
nnd <- nndist(X)
if(!edge) {
ok <- bdist.points(X) >= nnd
X <- X[ok]
nnd <- nnd[ok]
}
n <- npoints(X)
if(n == 0) return(emptywindow(Window(X)))
if(n == 1) return(Window(X))
d <- nnd/2
delta <- 2 * pi * max(d)/128
Z <- disc(d[1], X[1], delta=delta)
for(i in 2:n) Z <- union.owin(Z, disc(d[i], X[i], delta=delta))
return(Z)
}
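## Usage sketch (illustrative only; assumes spatstat is attached so that the
## bundled 'cells' point pattern is available):
if (FALSE) {
  stienen(cells)                        # Stienen diagram; border-affected circles outlined in grey
  plot(stienenSet(cells, edge = FALSE)) # union of nearest-neighbour discs, border circles dropped
}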
|
/R/stienen.R
|
no_license
|
antiphon/spatstat
|
R
| false | false | 1,716 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rmf-create-mlt.R
\name{rmf_create_mlt}
\alias{rmf_create_mlt}
\title{Create an \code{RMODFLOW} mlt object}
\usage{
rmf_create_mlt(nml = 1, mltnam = "MULT", functn = NULL, rmlt = array(1,
dim = c(10, 10, 1)), operators = NULL, iprn = NULL)
}
\arguments{
\item{nml}{number of multiplier arrays to be defined; defaults to 1}
\item{mltnam}{character vector of length \code{nml} specifying the names of the multiplier arrays; defaults to 'MULT'}
\item{functn}{optional logical vector of length \code{nml} indicating if the multiplier array will be constructed from other multiplier arrays previously defined; defaults to NULL}
\item{rmlt}{numeric 3D array of dimensions \code{dis$nrow x dis$ncol x nml} specifying the multiplier arrays; defaults to 1 for all cells}
\item{operators}{list with \code{nml} elements where each element is a character vector specifying the function that will be printed for that multiplier array. If no function is to be specified for an array, set to NULL; defaults to NULL}
\item{iprn}{numeric vector of length \code{nml} indicating the printing format and whether the multiplier array constructed in data set 4 will be printed to the listing file; defaults to NULL}
}
\value{
an \code{RMODFLOW} mlt object
}
\description{
\code{rmf_create_mlt} creates an \code{RMODFLOW} mlt object
}
\seealso{
\code{\link{rmf_read_mlt}}, \code{\link{rmf_write_mlt}}, \url{https://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?mult.htm}
}
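% Hypothetical examples added for illustration; the grid dimensions and array
% names below are assumptions, not package defaults beyond those in the usage.
\examples{
\dontrun{
# default object: a single multiplier array named "MULT" with all cells set to 1
mlt <- rmf_create_mlt()

# two named multiplier arrays on a 10 x 10 grid
mlt2 <- rmf_create_mlt(nml = 2, mltnam = c("RCH", "HK"),
                       rmlt = array(rep(c(1, 0.5), each = 100), dim = c(10, 10, 2)))
}
}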
|
/man/rmf_create_mlt.Rd
|
no_license
|
matejgedeon/RMODFLOW
|
R
| false | true | 1,549 |
rd
|
# Logistic Regression
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[, 3:5]
# Splitting the data into Training set and Test set
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[, 1:2] = scale(training_set[, 1:2])
test_set[, 1:2] = scale(test_set[, 1:2])
# Fitting Logistic Regression to the Training set
classifier = glm(formula = Purchased ~ .,
family = binomial,
data = training_set)
# Predicting a new Test set result
prob_pred = predict(classifier, type = 'response', newdata = test_set[-3])
y_pred = ifelse(prob_pred > 0.5, 1, 0)
# Making the confusion matrix
cm = table(test_set[, 3], y_pred)
# Visualizing the Training set results
if (!requireNamespace('ElemStatLearn', quietly = TRUE)) install.packages('ElemStatLearn')
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Training Set)',
xlab = 'Age', ylab = 'EstimatedSalary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualizing the Test set results
if (!requireNamespace('ElemStatLearn', quietly = TRUE)) install.packages('ElemStatLearn')
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Test Set)',
xlab = 'Age', ylab = 'EstimatedSalary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
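# A small follow-up sketch (not in the original script): overall accuracy
# computed from the confusion matrix `cm` built above.
accuracy = sum(diag(cm)) / sum(cm)
accuracy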
|
/part_3-classification/logistic-regression/logistic_regression.r
|
no_license
|
ssvnikhil/Machine-Learning
|
R
| false | false | 2,451 |
r
|
timestamp <- Sys.time()
library(caret)
library(plyr)
library(recipes)
library(dplyr)
model <- "sda"
#########################################################################
set.seed(2)
training <- twoClassSim(50, linearVars = 2)
testing <- twoClassSim(500, linearVars = 2)
trainX <- training[, -ncol(training)]
trainY <- training$Class
rec_cls <- recipe(Class ~ ., data = training) %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all",
classProbs = TRUE,
summaryFunction = twoClassSummary)
cctrl2 <- trainControl(method = "LOOCV",
classProbs = TRUE, summaryFunction = twoClassSummary)
cctrl3 <- trainControl(method = "none",
classProbs = TRUE, summaryFunction = twoClassSummary)
cctrlR <- trainControl(method = "cv", number = 3, returnResamp = "all", search = "random")
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method = "sda",
trControl = cctrl1,
metric = "ROC",
preProc = c("center", "scale"),
verbose = FALSE)
set.seed(849)
test_class_cv_form <- train(Class ~ ., data = training,
method = "sda",
trControl = cctrl1,
metric = "ROC",
preProc = c("center", "scale"),
verbose = FALSE)
test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)])
test_class_prob <- predict(test_class_cv_model, testing[, -ncol(testing)], type = "prob")
test_class_pred_form <- predict(test_class_cv_form, testing[, -ncol(testing)])
test_class_prob_form <- predict(test_class_cv_form, testing[, -ncol(testing)], type = "prob")
set.seed(849)
test_class_rand <- train(trainX, trainY,
method = "sda",
trControl = cctrlR,
tuneLength = 4,
verbose = FALSE)
set.seed(849)
test_class_loo_model <- train(trainX, trainY,
method = "sda",
trControl = cctrl2,
metric = "ROC",
preProc = c("center", "scale"),
verbose = FALSE)
set.seed(849)
test_class_none_model <- train(trainX, trainY,
method = "sda",
trControl = cctrl3,
tuneGrid = test_class_cv_model$bestTune,
metric = "ROC",
preProc = c("center", "scale"))
test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)])
test_class_none_prob <- predict(test_class_none_model, testing[, -ncol(testing)], type = "prob")
set.seed(849)
test_class_rec <- train(x = rec_cls,
data = training,
method = "sda",
trControl = cctrl1,
metric = "ROC",
verbose = FALSE)
if(
!isTRUE(
all.equal(test_class_cv_model$results,
test_class_rec$results))
)
stop("CV weights not giving the same results")
test_class_imp_rec <- varImp(test_class_rec)
test_class_pred_rec <- predict(test_class_rec, testing[, -ncol(testing)])
test_class_prob_rec <- predict(test_class_rec, testing[, -ncol(testing)],
type = "prob")
test_levels <- levels(test_class_cv_model)
if(!all(levels(trainY) %in% test_levels))
cat("wrong levels")
#########################################################################
test_class_predictors1 <- predictors(test_class_cv_model)
#########################################################################
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
timestamp_end <- Sys.time()
save(list = c(tests, "sInfo", "timestamp", "timestamp_end"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
if(!interactive())
q("no")
|
/RegressionTests/Code/sda.R
|
no_license
|
topepo/caret
|
R
| false | false | 4,231 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fromto.R
\name{from_to_stats}
\alias{from_to_stats}
\title{From To function}
\usage{
from_to_stats(x, y, orig)
}
\arguments{
\item{x}{igraph object to query}
\item{y}{origin airport IATA code}
\item{orig}{"from" or "to" options}
}
\description{
Calculate edge weights from an IATA code
}
\examples{
\dontrun{
netDir <- make.netDir(OD_Sample)
from_to_stats(netDir$gDir, "JFK", orig = "from")
from_to_stats(netDir$gDir, "JFK", orig = "to")
}
}
|
/man/from_to_stats.Rd
|
no_license
|
ropensci/skynet
|
R
| false | true | 521 |
rd
|
## note that lag_one/lead_one pad the new entry with the first/last value,
## which is different than lag_n/lead_n(,1)
## this gives flexibility with differences, but be careful!
lag_one <- function(vec) {
return(c(vec[1],vec[-length(vec)]))
}
lead_one <- function(vec) {
return(c(vec[-1],vec[length(vec)]))
}
lag_n <- function(vec,n) {
if (n < length(vec)) {
return(c(rep(NA,n),vec[1:(length(vec)-n)]))
}
else {
return(vec<-NA)
}
}
lead_n <- function(vec,n) {
if (n < length(vec)) {
return(c(vec[-n:-1],rep(NA,n)))
}
else {
return(vec<-NA)
}
}
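# A small illustration of the padding difference described above (values worked out
# from the definitions; shown as comments rather than executed code):
#   lag_one(c(1, 2, 3))    # 1 1 2   -- first value repeated
#   lag_n(c(1, 2, 3), 1)   # NA 1 2  -- padded with NA
#   lead_one(c(1, 2, 3))   # 2 3 3   -- last value repeated
#   lead_n(c(1, 2, 3), 1)  # 2 3 NA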
dateTimeStr <- function(intDate,intTime) {
return(paste0(stringr::str_pad(intDate,8,pad="0"),
stringr::str_pad(intTime,6,pad="0")))
}
# this was lifted from stack overflow - credit author
find_peaks <- function (x, m = 3){
shape <- diff(sign(diff(x, na.pad = FALSE)))
pks <- sapply(which(shape < 0), FUN = function(i){
z <- i - m + 1
z <- ifelse(z > 0, z, 1)
w <- i + m + 1
w <- ifelse(w < length(x), w, length(x))
if(all(x[c(z:i,(i+2):w)] <= x[i+1])) return(i+1) else return(numeric(0))
})
pks <- unlist(pks)
pks
}
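# Illustrative use of find_peaks on a toy series (not real GPS data); with m = 1 it
# should return the indices of the local maxima, here 2, 4 and 6:
#   x <- c(1, 3, 2, 5, 4, 6, 1)
#   find_peaks(x, m = 1)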
|
/R/gps_utilities.R
|
no_license
|
CraigMohn/bikeCadHr
|
R
| false | false | 1,157 |
r
|
fileName <- "household_power_consumption.txt"
powerData <- read.table(fileName, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
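# Note: the raw UCI file marks missing values with "?"; adding na.strings = "?" to the
# read.table() call above would turn them into proper NAs (the two-day subset below is
# generally complete, so this mainly matters if other dates are used).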
twoMonthData<-subset(powerData,Date %in% c("1/2/2007","2/2/2007"))
datetime <- strptime(paste(twoMonthData$Date, twoMonthData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("plot2.png",width=480,height=480)
plot(datetime, as.numeric(twoMonthData$Global_active_power), type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
SforSachin/ExData_Plotting1
|
R
| false | false | 473 |
r
|
#' Search for and collect taxonomic name data from the USGS Bison API
#' using solr
#'
#' @export
#' @param query Name to search for. Required.
#' @param method The field to query by. See description below for details.
#' @param exact Exact matching or not. See examples. Defaults to FALSE.
#' @param parsed If `TRUE` (default) creates data.frame of names data output.
#' Otherwise, a list.
#' @param callopts Further args passed on to [crul::HttpClient()] for HTTP
#' debugging/inspecting. In `bison`, `bison_providers`, and `bison_stats`,
#' `...` is used instead of callopts, but `...` is used here to pass additional
#' Solr params.
#' @param ... Further solr arguments passed in to the query. See examples below.
#' @return A list.
#' @description
#' See the SOLR documentation here <http://lucene.apache.org/solr/> for other
#' parameters you can use.
#'
#' The following two methods are possible, as far as I know you can only use
#' one at a time:
#' \itemize{
#' \item vernacularName The species specific common names that is searchable
#' in a case insensitive way.
#' \item scientificName The species scientific name that is associated
#' with a common name that is searchable in a case insensitive way.
#' }
#' @seealso [bison_solr()], [bison()]
#' @examples \dontrun{
#' # All taxa
#' bison_tax("*:*")
#'
#' # Some example calls
#' bison_tax(query="*bear")
#' bison_tax(query="Helianthus", method="scientificName")
#'
#' # Exact argument, here nothing found with latter call as '*bear'
#' # doesn't exist, which makes sense
#' bison_tax(query="*bear", exact=FALSE)
#' bison_tax(query="*bear", exact=TRUE)
#'
#' # Using solr arguments (not all Solr arguments work)
#' ## Return a certain number of rows
#' bison_tax(query="*bear", method="vernacularName", rows=3)
#' ## Return certain fields
#' bison_tax(query="*bear", method="vernacularName", fl='vernacularName')
#'
#' # Curl options
#' bison_tax(query='*dolphin', callopts=list(verbose = TRUE))
#' }
bison_tax <- function(query, method='vernacularName', exact=FALSE, parsed=TRUE,
callopts=list(), ...)
{
method <- match.arg(method, choices = c('vernacularName','scientificName'))
if (!length(method) == 1) {
stop("method can only be of length 1")
}
url <- sprintf('%s/solr/%s/select', bison_base(), method)
if (exact) {
qu_ <- paste0('"', query, '"')
} else {
qu_ <- query
}
args <- bs_compact(list(q = qu_, wt = "json", ...))
cli <- crul::HttpClient$new(url = url, opts = c(followlocation = 1, callopts))
out <- cli$get(query = args)
out$raise_for_status()
out <- jsonlite::fromJSON(out$parse("UTF-8"), FALSE)
# tt <- GET(url, query = args, c(config(followlocation = 1), callopts))
# stop_for_status(tt)
# out <- content(tt)
temp <- list(
numFound = out$response$numFound,
names = out$response$docs,
highlight = out$highlighting,
facets = out$facet_counts
)
if (parsed) {
data <- dplyr::bind_rows(lapply(out$response$docs, data.frame,
stringsAsFactors = FALSE))
data$X_version_ <- NULL
temp$names <- data
}
return( temp )
}
|
/R/bison_tax.r
|
permissive
|
esellers-usgs/rbison
|
R
| false | false | 3,122 |
r
|
#R version 3.4.4
print("Hello, world!")
library(MASS)
attach(Boston)
head(Boston)
fit = lm(nox ~ poly(dis, 3), data = Boston)
#summary(fit)
print('#8')
sum(resid(fit)^2)
#print(poly(dis, 3))
print('#9')
predict(fit,newdata=list(dis=6))
print('#10')
summary(fit)$coef
print('#11')
fit = lm(nox ~ poly(dis, 4), data = Boston)
sum(resid(fit)^2)
print('#12')
predict(fit,newdata=list(dis=6))
print('#13')
summary(fit)$coef
|
/quiz5.R
|
no_license
|
nathanfitz-coder/practicallearning
|
R
| false | false | 437 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper-functions.R
\name{naiveperiod}
\alias{naiveperiod}
\title{naiveperiod}
\usage{
naiveperiod(d)
}
\arguments{
\item{d}{The data to search period}
}
\value{
A list with the average period and amplitude
}
\description{
A naive approach for finding the period in a series
of data points
}
|
/man/naiveperiod.Rd
|
permissive
|
antonio-pgarcia/evoper
|
R
| false | true | 369 |
rd
|
# scaling1.r
d0=read.table("P1-4.txt")
names(d0)=c("sales","profit","assets")
# only sales and profit
d1=d0[,c(1,2)]
# no centering, no scaling
#===================================================================
pr1=prcomp(d1,center=F,scale=F)
names(pr1)
# [1] "sdev" "rotation" "center" "scale" "x"
rot = pr1$rotation
as.matrix(d1)%*%rot
# PC1 PC2
# [1,] -109.31806 -8.0512140
# [2,] -153.20956 -3.9531452
# [3,] -95.61630 -3.0253929
# [4,] -66.39404 -8.6875789
# [5,] -63.54104 -4.2881256
# [6,] -265.18004 -3.4461289
# [7,] -265.81530 3.4197684
# [8,] -285.38543 7.8608183
# [9,] -92.36463 -0.4751719
# [10,] -166.03325 2.5880293
d1x = pr1$x
d1x
# PC1 PC2
# [1,] -109.31806 -8.0512140
# [2,] -153.20956 -3.9531452
# [3,] -95.61630 -3.0253929
# [4,] -66.39404 -8.6875789
# [5,] -63.54104 -4.2881256
# [6,] -265.18004 -3.4461289
# [7,] -265.81530 3.4197684
# [8,] -285.38543 7.8608183
# [9,] -92.36463 -0.4751719
# [10,] -166.03325 2.5880293
# covariance matrix
var(d1)
#          sales    profit
# sales 7476.4532 303.61862
# profit 303.6186  26.19032
var(d1x)
#           PC1        PC2
# PC1 7475.6278 -313.58116
# PC2 -313.5812   27.01573
# correlation
cor(d1x)
# PC1 PC2
#PC1 1.0000000 -0.6977788
#PC2 -0.6977788 1.0000000
# still correlated
# centering (default), prcomp() agrees with eigen()
#===================================================================
pr2=prcomp(d1)
pr2$sdev^2 # [1] 7488.80605 13.83751
eigen(var(d1))
# $values
# [1] 7488.80605 13.83751
# $vectors
# [,1] [,2]
# [1,] -0.99917338 0.04065165
# [2,] -0.04065165 -0.99917338
d2x = pr2$x
var(d2x)
# PC1 PC2
# PC1 7.488806e+03 -3.446818e-14
# PC2 -3.446818e-14 1.383751e+01
apply(d2x,2,var)
#        PC1      PC2
# 7488.80605 13.83751
# PCs uncorrelated
# but sum of eigenvals do not add up to p=2
# centering and scaling
#=================================================================
pr3=prcomp(d1,scale=T)
d3x = pr3$x
var(d3x)
# PC1 PC2
# PC1 1.686136e+00 7.378598e-19
# PC2 7.378598e-19 3.138640e-01
sum(diag(var(d3x))) # [1] 2
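# extra check (not in the original output): with scale=T the PCs come from the
# correlation matrix, so pr3$sdev^2 should equal eigen(cor(d1))$values (and sum to p = 2)
pr3$sdev^2
eigen(cor(d1))$values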
|
/8Principal components/scaling1.r
|
no_license
|
JoeyZhao7/Statistical-Learning-with-R
|
R
| false | false | 2,217 |
r
|
testlist <- list(L = structure(c(1.02532483752325e+261, 1.46340296343369e+288, 5.67165037573218e-306, NA, 5.43908332228391e-105, 1.12756429789692e-57, Inf, 4.85713114047829e-272, Inf, 3.30698101927463e+94, NaN, 4.12846164752131e-51, Inf, 61.3402689356117, 6.30846847410014e-13), .Dim = c(5L, 3L )), v = c(-6.06249726877468e-109, 2.12439140436259e-244, 2.7087550859215e-266, 4.30626744607068e-136, -1.02422924764792e+174, 3.5871647055511e-115, -4.07202669362491e+259, 7.96034529606837e+55, -1.52029575642479e+179, 2.11422598973339e-279, -3.33059179031837e+212, -1.33409174187634e+176, 1.28591596418051e-74, -1.906033682933e+273, -1.10316221979205e-225, -1.89330920809966e+185, 3.35532147368812e-161, -6.48053033324502e-229, 2.78353556605942e-180, 1.5660502088191e-82, -9.35042269640403e-273, -8.20918866662634e-196, 8.08085397924225e-255, 1.11188572779741e+119, 1.12491136815598e+250, -3.45329097920505e+258, 3.96709457654472e+161, 1.27908697326124e-52, 4.78605232762292e+109, -1.44507438733095e+77, 2.29013520567251e-47, 5.04400974620143e+140, 1.3823414842232e+180, -2.46067123116145e+197, 2.10880877114811e-35, -2.65480281800843e+246, 4.46512942305604e+151, 1.46517148118916e+258, 2.14470086971152e+273, 4.41472864579235e-172, -3.18572261538908e-239, -1.36637676658912e+130, 5.15706875371265e-165, 8.70570634845722e+101, 1.494394240137e-138, 2.65976677403056e-25, -1.70861577507925e+300, -3.61831920580663e+111, -6.85014159786188e+251, -9.53484611393976e+225, 1.84033459146182e+34, 4.17795991348531e+181, -3.84155006789089e-213, 5.2719705720911e+211, 1.26526148601746e+268, 1.93700057442335e+164, -8.62442764343466e-06, -1.73523559813364e-184, 3.97771040745531e+93, -3.28380968731998e-71, 2.54335813162893e+296, 2.55988580003963e+114, 503309409708462, 2.80853367569739e+296, 9.22551418738672e+111, -8.36617069307534e-256, 1.74222401161979e-221, -4.54461270494257e+70, 1.68081155202381e+152, -2.44249173646253e+91, 1.4663202466888e+227, 7.02531124111404e-171, 6.23775212696001e+283, 8.49240168878597e-262, 1.00333904237875e-74, 3.05221015424709e-17, 3.80089249064488e+274, 2.58248307686191e+141, -8.31035147436276e-170, 2.6068234597061e-175, 9.97498045655483e+170, 3.12989848911561e+203, -2.10733316271681e+238, 1.35989412423915e-18, -8.96180042509102e-170, -6.56877006726177e-40, 3.51078425115816e+255, -2.1534200687437e+248, -2.66332285997838e+65))
result <- do.call(Benchmarking:::chol_downdate2,testlist)
str(result)
|
/issuestests/Benchmarking/inst/testfiles/chol_downdate2/chol_downdate2_output/log_e78ce7438df9a1440309ef3e26dc1d3f4bb32f6a/chol_downdate2-test.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false | false | 2,452 |
r
|
insee_xls_to_zoo <- function(file){
require(xlsx)
require(zoo)
raw <- read.xlsx2(file, sheetIndex = 1, header = TRUE, startRow = 2)
raw[raw == ''] <- NA
if (prod(raw[1,1:2] == c('Année', 'Mois'))){
index <- paste(raw[2:nrow(raw),1], '.', raw[2:nrow(raw),2], sep='')
freq = 'mois'
}
if (prod(raw[1,1:2] == c('Année', 'Bimestre'))){
index <- paste(raw[2:nrow(raw),1], '.', raw[2:nrow(raw),2], sep='')
freq = 'bimestre'
}
if (prod(raw[1,1:2] == c('Année', 'Semestre'))){
index <- paste(raw[2:nrow(raw),1], '.', raw[2:nrow(raw),2], sep='')
freq = 'semestre'
}
if (prod(raw[1,1:2] == c('Année', 'Trimestre'))){
index <- paste(raw[2:nrow(raw),1], '.', raw[2:nrow(raw),2], sep='')
freq = 'Trimestre'
}
#if (freq %in% c('annual')){
# index <- paste(raw[,1], '.1', sep='')
#}
if (ncol(raw)>3){
# convert to numeric zoo ts
ts <- lapply(raw[,3:ncol(raw)], FUN = zoo, order.by = index)
ts <- lapply(ts, FUN = function(x){coredata(x) <- as.numeric(as.character(coredata(x))); return(x)})
names <- names(read.xlsx2(file, sheetIndex = 1, header = TRUE))
names <- names[3:length(names)]
series <- mapply(FUN = function(x,y,z, u){list(zoo = na.omit(x), name = y, freq = z, id = u)},
ts, names, rep(freq, length(names)), colnames(raw)[3:ncol(raw)],
SIMPLIFY = FALSE, USE.NAMES = FALSE)
} else{
# convert to numeric zoo ts
ts <- zoo(raw[,3], order.by = index)
coredata(ts) <- as.numeric(as.character(coredata(ts)))
name <- names(read.xlsx2(file, sheetIndex = 1, header = TRUE))[3]
series <- list(zoo = na.omit(ts), name = name, freq = freq, id = colnames(raw)[3])
series <- list(series)
#names(series)[4] <- colnames(raw)[3]
}
return(series)
}
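# Hypothetical usage (the file name is an assumption; any INSEE export whose first two
# columns are 'Année' and one of the period columns handled above should work):
# series <- insee_xls_to_zoo("insee_series.xls")
# str(series[[1]]) # list with components zoo, name, freq and id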
|
/functions/extract_data/old/insee_xls_to_zoo_v3.R
|
no_license
|
Allisterh/gdp_forecasts-nsaleille
|
R
| false | false | 1,848 |
r
|
#############################
#######SPATIAL OBJECTS#######
########day04 reading########
#############################
library(sp)
#-----------------CLASS: Spatial Objects------------------#
getClass("Spatial") #slots: bbox=matrix; prof4string=CRS bbox = predefined extent
#example
bbox1 <- matrix(c(0,0,1,1), ncol = 2, dimnames=list(NULL, c("min", "max"))) #matrix with bbox attribute
p4s1 <- CRS(projargs = as.character(NA)) #CRS object with reference system; NA=missing reference
SpatialObject <- Spatial(bbox = bbox1, proj4string = p4s1) # creating the spatial object
SpatialObject
summary(SpatialObject) #printing matrix data with min and max xy coordinates
proj4string(SpatialObject) #printing predefined reference system
proj4string(SpatialObject) <- CRS(as.character(NA)) #changing the reference system
#-----------------Subclass: SpatialPoints-----------------#
getClass("SpatialPoints") #slots: coords=matrix; bbox=matrix; proj4string=CRS
#example
df <- read.csv("https://raw.githubusercontent.com/wegmann/R_data/master/Steigerwald_sample_points_all_data_subset_withNames.csv")
names(df) #printing column names
coords2 <- cbind(df$x, df$y) #defining columns with coordinates
p4s2 <- CRS("+proj=longlat +ellps=WGS84") #spatial reference
SpatialPoint <- SpatialPoints(coords = coords2, proj4string = p4s2) #creating the spatial point object
summary(SpatialPoint) #printing matrix data with min and max xy coordinates
coordinates(SpatialPoint) #printing all defined coordinates
proj4string(SpatialPoint) #printing predefined reference system
#example SpatialPointsDataFrame (matching points to a specific dataframe)
spdf1 <- SpatialPointsDataFrame(coords2, df, proj4string = p4s2, match.ID = TRUE)
summary(spdf1)
#---------------Subclass: SpatialLines-------------------#
getClass("SpatialLines") #slots: lines=list; bbox=matrix; Proj4string=CRS
#example
#install.packages("maps")
#install.packages("maptools")
library(maps)
library(maptools)
linesgermany <- map("world", "germany", plot = TRUE) #defining lines via maps package
p4s3 <- CRS("+proj=longlat +ellps=WGS84") #defining coordinate system
SpatialLines <- map2SpatialLines (linesgermany, proj4string = p4s3)
str(SpatialLines, max.level = 2)
summary(SpatialLines) #printing matrix data with min and max xy coordinates
coordinates(SpatialLines) #printing all defined coordinates
proj4string(SpatialLines) #printing predefined reference system
Lines_len <- sapply(slot(SpatialLines, "lines"), function (x) length(slot(x, "Lines")))
table(Lines_len) #printing the number of line objects
#--------------Subclass: SpatialPolygon------------------#
getClass("Polygon") #LinearRing Objects; extends the Line class --> adding slots (labelpoint)
#slots: labpt=numeric; area=numeric; hole=logical; ringDIR=integer; coords=matrix
getClass("Polygons") #MultiPolygon objects; list of valid Polygon objects
#slots: Polygons=list; PlorOrder=integer; labpt=numeric; ID=character; area=numeric
getClass("SpatialPolygons") #set of Polygon objects + slots of Spatial objects
#slots: polygons=list; plotOrder=integer; bbox=matrix; proj4string=CRS
#example SpatialPolygonsDataFrame
library(maps)
state.map <- map("state", plot = TRUE, fill = FALSE) #printing polygon outlines of US states
IDs <- sapply(strsplit(state.map$names, ":"), function(x) x[1])
library(maptools)
##### note: map() must be called with fill = TRUE for map2SpatialPolygons() to work (see above) #####
state.sp <- map2SpatialPolygons(state.map, IDs =IDs, proj4string = CRS("+proj=longlat +ellps=WGS84"))
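#quick sanity check of the polygon object (illustrative; relies on the fill = TRUE call above)
summary(state.sp)
plot(state.sp, col = "grey90", border = "grey40")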
#-------------------------------------------------------#
|
/lecture_04/Spatial_Objects.R
|
permissive
|
MagdaHa/MB2_R
|
R
| false | false | 3,918 |
r
|
\name{iClick.VisAssetPrice}
\alias{iClick.VisAssetPrice}
\title{
Visualize Daily Asset Price
}
\encoding{latin1}
\description{
This GUI produces plots of daily asset prices, including calendar heatmaps and several other plots that are not easy for new users to generate.
}
\usage{
iClick.VisAssetPrice(dat, color4 = "r2b", color5 = "jet")
}
\arguments{
\item{dat}{
Time series object,xts.
}
\item{color4}{
Color choice for annual calendar heatmap, the default is "r2b".
}
\item{color5}{
Color choice for 6-year calendar heatmap, the default is "jet".
}
}
\details{
This GUI is designed for financial time series, mainly daily stock prices. Other time series data also work, as long as they have a date column. We call the function calendarPlot() from the package "openair", and modified the function calendarHeat() to fit daily prices.
}
\value{
Output GUI
}
\author{
Ho Tsungwu <tsungwu@mail.shu.edu.tw>
}
\examples{
#data("IBM")
#assetPrice=IBM[,1]
#iClick.VisAssetPrice(assetPrice)
}
|
/man/iClick.VisAssetPrice.Rd
|
no_license
|
tsungwu/iClick
|
R
| false | false | 1,014 |
rd
|
library(readxl)
library(reshape2)
library(ggplot2)
library(rlist)
library(RCy3)
library(frbs)
library(philentropy)
library(dendextend)
library(cluster)
library(ggfortify)
library(glmnet)
library(network)
library(factoextra)
library(gplots)
library(RColorBrewer)
library(corrplot)
library(dbscan)
library(plotly)
library(viridis)
library(hrbrthemes)
library(igraph)
library(xlsx)
library(devtools)
library(ComplexHeatmap)
library(mlbench)
library(graphics)
library(caret)
library(pacman)
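# Since pacman is attached, the calls above could be collapsed into a single call, e.g.
# pacman::p_load(readxl, reshape2, ggplot2, rlist, RCy3, dplyr, caret)
# (p_load also installs any listed package that is missing)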
|
/Code/Required_Libraries.R
|
no_license
|
VartikaBisht6197/NFnetFU
|
R
| false | false | 505 |
r
|
library(mvtnorm)
library(MASS)
#Maximization Step
Mstep <- function(data, num_clusters, w) {
mu = matrix(0,num_clusters,ncol(data))
sigma = replicate(num_clusters, matrix(0,ncol(data),ncol(data)), simplify = FALSE)
pi = c()
wt = t(w)%*%data #(k x d) => w is (n x k) and data is (n x d)
cw = colSums(w) #(1xk)
#Updating mean
mu = sweep(wt, MARGIN = 1, cw, '/') #scalar multiplication of each data row vector with its corresponding weight
for(i in 1:num_clusters) {
#===================================================
# Vectorized implementation of covariance update
# mX = sweep(data, MARGIN = 2, mu[i,], '-') #(n x d) matrix
# mX = as.matrix(data[j,] - mu[i,]) #column vector
# wmX = sweep(t(mX), MARGIN = 2, w[, i], '*') #(d x n) matrix
# sigma[[i]] = wmX %*% mX #(d x d) matrix
# sigma[[i]] = sigma[[i]]/sum(w[, i])
#===================================================
#Updating covariance
sigma[[i]] = cov.wt(data, wt = w[, i])$cov
#Updating priors
pi[i] = sum(w[, i])/nrow(data)
    #Add a small ridge to the diagonal when the covariance matrix is (near) singular, to keep it invertible
    if(det(sigma[[i]]) < 1e-12) {
sigma[[i]] = sigma[[i]] + diag(0.0001, ncol(data))
}
}
return(list(n_mu = mu, n_sigma = sigma, n_prior = pi))
}
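# Minimal usage sketch (synthetic data and random responsibilities, only to show the
# expected shapes of inputs and outputs; not part of the original EM driver):
# set.seed(1)
# X <- matrix(rnorm(100 * 2), ncol = 2) # n = 100 observations, d = 2 dimensions
# W <- matrix(runif(100 * 3), ncol = 3) # k = 3 clusters
# W <- W / rowSums(W)                   # each row of weights sums to 1, as an E-step would produce
# params <- Mstep(X, num_clusters = 3, w = W)
# str(params) # n_mu: 3 x 2, n_sigma: list of three 2 x 2 matrices, n_prior: length 3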
|
/ExpectationMaximization_GMM/mstep.R
|
no_license
|
Karthikeya254/MachineLearningAlgorithms
|
R
| false | false | 1,319 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AnswerSheets.R
\name{GenerateShortAnswerSheet}
\alias{GenerateShortAnswerSheet}
\title{Generating a short answer sheet}
\usage{
GenerateShortAnswerSheet(
ExamSheet,
versionColName = "Version",
correctColName = "CorrectChoice"
)
}
\arguments{
\item{ExamSheet}{a exam sheet that contains all versions, similar to \code{\link{CreateRandomExams}}}
\item{versionColName}{The name of the column in the original exam that contains the version number}
\item{correctColName}{The name of the column that contains the last index for the correct tag, or NA if it is not a correct choice.}
}
\value{
A data frame \itemize{
\item Each row identifies one version of the answer sheet
\item the first column is the version number, the rest of the columns are the questions,
}
}
\description{
Given a number of answer sheets generated by \code{\link{ConstructAnswerSheet}} that have been bound together, and that have a column, \code{versionColName}, identifying each version, it collects the answers and places them together for each exam.
}
\details{
Note that if the version number is 0, it is ignored, since it understands that version 0 is the reference version.
If the document has more than two layers, keep in mind that it just shows the top most layer numbering and then the inner most number of the correct answers.
Note how this implies as well that an exam with more than one possible answer can not be simplified into a short answer sheet.
Importantly, if a certain exam has fewer answers than other exams, they are just cited sequentially, which may cause confusion. This may happen if a certain question has more than one solution marked as "correct", or if a certain question has no solutions marked as correct. In that case, the short answer sheet just sequentially names all the correct answers, disregarding which questions they refer to. (This is a very special case that will only come up in a real scenario if you are writing a short answer question in the middle of a multiple choice test, or if you are writing some questions to have multiple correct answers, but only a few of them, and those questions are not included in all exams... (So evil))
}
\examples{
csvfile <- system.file(
"extdata",
"ExampleTables",
"ExampleAnswerSheet.csv",
package = "TexExamRandomizer"
)
testASheet <- read.csv(
csvfile,
header = TRUE,
stringsAsFactors = FALSE,
na.strings = c("", "NA", "Na"),
strip.white = TRUE
)
GenerateShortAnswerSheet(testASheet)
}
\seealso{
Other Extracting information:
\code{\link{ConstructAnswerSheet}()},
\code{\link{CountNumberOfSections}()},
\code{\link{FindExamAnswers}()}
}
\concept{Extracting information}
|
/man/GenerateShortAnswerSheet.Rd
|
permissive
|
jsgro/TexExamRandomizer
|
R
| false | true | 2,806 |
rd
|
download_nwis_data <- function(site_nums = c("01427207", "01432160", "01435000", "01436690", "01466500")){
# create the file names that are needed for download_nwis_site_data
# tempdir() creates a temporary directory that is wiped out when you start a new R session;
# replace tempdir() with "1_fetch/out" or another desired folder if you want to retain the download
download_files <- file.path(tempdir(), paste0('nwis_', site_nums, '_data.csv'))
data_out <- data.frame(agency_cd = c(), site_no = c(), dateTime = c(),
X_00010_00000 = c(), X_00010_00000_cd = c(), tz_cd = c())
# loop through files to download
for (download_file in download_files){
download_nwis_site_data(download_file, parameterCd = '00010')
# read the downloaded data and append it to the existing data.frame
these_data <- read_csv(download_file, col_types = 'ccTdcc')
data_out <- rbind(data_out, these_data)
}
return(data_out)
}
nwis_site_info <- function(site_data_file){
site_data <- readRDS(site_data_file)
site_no <- unique(site_data$site_no)
site_info <- dataRetrieval::readNWISsite(site_no)
return(site_info)
}
download_nwis_site_data <- function(filepath, parameterCd = '00010', startDate="2014-05-01", endDate="2015-05-01"){
# filepaths look something like directory/nwis_01432160_data.csv,
# remove the directory with basename() and extract the 01432160 with the regular expression match
site_num <- basename(filepath) %>%
stringr::str_extract(pattern = "(?:[0-9]+)")
# readNWISdata is from the dataRetrieval package
data_out <- readNWISdata(sites=site_num, service="iv",
parameterCd = parameterCd, startDate = startDate, endDate = endDate)
  # -- simulating a failure-prone web-service here, do not edit --
if (sample(c(T,F,F,F), 1)){
stop(site_num, ' has failed due to connection timeout. Try scmake() again')
}
# -- end of do-not-edit block
write_csv(data_out, path = filepath)
return(filepath)
}
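# Example wiring of these functions (the output path is a placeholder; the site numbers
# default to the five NWIS gages listed above):
# site_data <- download_nwis_data()
# saveRDS(site_data, file.path(tempdir(), "site_data.rds"))
# site_info <- nwis_site_info(file.path(tempdir(), "site_data.rds"))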
|
/1_fetch/src/get_nwis_data.R
|
no_license
|
lindsayplatt/ds-pipelines-2
|
R
| false | false | 2,023 |
r
|
# Descriptive statistics function, returning median and IQR, mean and sd, and Anderson-Darling test for normality
descriptive_stats <- function(d, colnamestring) {
d %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
group_by(covid_preprint) %>%
summarise_at(.vars = c(colnamestring), .funs = funs(
median = median(., na.rm = TRUE),
IQR = IQR(., na.rm = TRUE),
mean = mean(., na.rm = TRUE),
sd = sd(., na.rm = TRUE),
ad_statistic = nortest::ad.test(.)$statistic,
ad_p = nortest::ad.test(.)$p.value
))
}
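# Example call (the same helper is reused further below, e.g. for version and author counts):
# descriptive_stats(preprints, "n_versions")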
# Supplementary Table: descriptive statistics by server*preprint type combination
t1 <- preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
group_by(covid_preprint, source) %>%
summarise(n = n()) %>%
ungroup()
t2 <- preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
mutate(
doi_date = gsub("\\.", "-", substr(doi, 9, 18)),
days = as.numeric(ymd(posted_date) - ymd(doi_date))
) %>%
group_by(covid_preprint, source) %>%
summarise_at(.vars = c("days", "n_authors", "n_versions", "n_words", "n_refs"), .funs = funs(
stat = paste0(median(., na.rm = TRUE), " (", IQR(., na.rm = TRUE), ")")
)) %>%
ungroup()
t3 <- preprint_usage %>%
inner_join(preprints, by = c("doi", "source")) %>%
group_by(doi, source, posted_date, covid_preprint) %>%
summarize(
full_text_views = sum(full_text_views),
abstract_views = sum(abstract_views),
pdf_downloads = sum(pdf_downloads)
) %>%
ungroup() %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
group_by(covid_preprint, source) %>%
summarise_at(.vars = c("abstract_views", "pdf_downloads"), .funs = funs(
stat = paste0(median(., na.rm = TRUE), " (", IQR(., na.rm = TRUE), ")")
)) %>%
ungroup()
t4 <- preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
inner_join(preprint_citations, by = "doi") %>%
inner_join(preprint_comments %>% rename(comments = comments_count), by = "doi") %>%
inner_join(preprint_altmetrics, by = "doi") %>%
group_by(covid_preprint, source) %>%
summarise_at(.vars = c("citations", "twitter", "news", "blogs", "wikipedia", "comments"), .funs = funs(
stat = paste0(median(., na.rm = TRUE), " (", IQR(., na.rm = TRUE), ")")
)) %>%
ungroup()
cbind(t1,
t2 %>% select(-covid_preprint, -source),
t3 %>% select(-covid_preprint, -source),
t4 %>% select(-covid_preprint, -source)) %>%
assign_covid_preprint() %>%
mutate(set = interaction(source, covid_preprint)) %>%
select(-covid_preprint, -source) %>%
gather(variable, value, -set) %>%
mutate(variable = factor(variable, levels=unique(variable))) %>%
spread(set, value) %>%
mutate(variable = gsub("_stat", "", variable)) %>% write.csv("supplementary_table_descriptive_stats.csv")
# Figure 2: Preprint attributes
# Panel B: Preprint screening time
# Descriptive statistics
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
mutate(
doi_date = gsub("\\.", "-", substr(doi, 9, 18)),
days = as.numeric(ymd(posted_date) - ymd(doi_date))
) %>%
group_by(covid_preprint, source) %>%
summarise_at(.vars = c("days"), .funs = funs(
median = median(., na.rm = TRUE),
IQR = IQR(., na.rm = TRUE),
mean = mean(., na.rm = TRUE),
sd = sd(., na.rm = TRUE),
ad_statistic = nortest::ad.test(.)$statistic,
ad_p = nortest::ad.test(.)$p.value
))
# Two-way ANOVA, screening time ~ preprint type, server
anova_screening <- preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
mutate(
doi_date = gsub("\\.", "-", substr(doi, 9, 18)),
days = as.numeric(ymd(posted_date) - ymd(doi_date))
) %>%
with(., aov(days ~ source * covid_preprint))
summary(anova_screening)
# Posthoc contrasts for specific preprint type/server combinations
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
assign_covid_preprint() %>%
mutate(
doi_date = gsub("\\.", "-", substr(doi, 9, 18)),
days = as.numeric(ymd(posted_date) - ymd(doi_date)),
int_source_covid_preprint = interaction(source, covid_preprint)
) %>%
with(., lm(days ~ int_source_covid_preprint - 1)) %>%
multcomp::glht(., linfct = multcomp::mcp(int_source_covid_preprint = "Tukey")) %>%
summary()
# Panel C: Revisions per preprint for COVID vs non-COVID
# Descriptive statistics
descriptive_stats(preprints, "n_versions")
# Mann-Whitney test
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
with(., wilcox.test(n_versions ~ covid_preprint))
# Panel D: License types
# Chi-squared test of association
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
mutate(license = case_when(
str_detect(license, "cc0") ~ "cc0",
T ~ license
)) %>%
with(., table(license, covid_preprint)) %>%
chisq.test()
# Panel E: Word counts
# Descriptive statistics
descriptive_stats(preprints %>% filter(source == "biorxiv"), "n_words")
# Mann-Whitney test
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end,
source == "biorxiv") %>%
with(., wilcox.test(n_words ~ covid_preprint))
# Panel F: Reference counts
# Descriptive statistics
descriptive_stats(preprints %>% filter(source == "biorxiv"), "n_refs")
# Mann-Whitney, number of references
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end,
source == "biorxiv") %>%
with(., wilcox.test(n_refs ~ covid_preprint))
# Figure 3: Preprint authorship
# Panel A: Author counts
# Descriptive statistics
descriptive_stats(preprints, "n_authors")
# Mann-Whitney test
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
with(., wilcox.test(n_authors ~ covid_preprint))
# Panel C: Author attributes: country, nationality (top 10 countries)
# Examine first-time preprinters pooling all countries
preprinter_history %>%
with(., table(author_group, preprinter_status)) %>%
prop.table(1) * 100
preprinter_history %>%
with(., table(author_group, preprinter_status)) %>%
chisq.test()
# Individual tests of first-time preprinters by country, only top 15 countries
options(scipen = 999)
rcompanion::groupwiseCMH(xtabs(n ~ preprinter_status + author_group + institution_match_country_code,
data = preprinter_history_tabular),
group = 3,
fisher = FALSE,
gtest = FALSE,
chisq = TRUE,
method = "bonferroni",
correct = "none",
digits = 3
) %>% arrange(adj.p)
# Full statistical test for United States, UK, Germany, India, France, Canada, Italy, China
xtabs(n ~ preprinter_status + author_group,
data = preprinter_history_tabular %>% filter(institution_match_country_code == "US")) %>%
prop.table(2) %>% round(3)*100
xtabs(n ~ preprinter_status + author_group,
data = preprinter_history_tabular %>% filter(institution_match_country_code == "GB")) %>%
prop.table(2) %>% round(3)*100
xtabs(n ~ preprinter_status + author_group,
data = preprinter_history_tabular %>% filter(institution_match_country_code == "DE")) %>%
prop.table(2) %>% round(3)*100
xtabs(n ~ preprinter_status + author_group,
data = preprinter_history_tabular %>% filter(institution_match_country_code == "IN")) %>%
prop.table(2) %>% round(3)*100
xtabs(n ~ preprinter_status + author_group,
data = preprinter_history_tabular %>% filter(institution_match_country_code == "FR")) %>%
prop.table(2) %>% round(3)*100
xtabs(n ~ preprinter_status + author_group,
data = preprinter_history_tabular %>% filter(institution_match_country_code == "CA")) %>%
prop.table(2) %>% round(3)*100
xtabs(n ~ preprinter_status + author_group,
data = preprinter_history_tabular %>% filter(institution_match_country_code == "IT")) %>%
prop.table(2) %>% round(3)*100
xtabs(n ~ preprinter_status + author_group,
data = preprinter_history_tabular %>% filter(institution_match_country_code == "CN")) %>%
prop.table(2) %>% round(3)*100
# Panel D: First case -> first preprint by location
# Calculate correlation between first case and first preprint (using calendar days)
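# (Both dates are expressed as days since 2020-01-01; Spearman's rank correlation avoids
# assuming a linear relationship between the two timings.)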
preprint_timing %>%
mutate(
serial_preprint = (first_preprint - as.Date("2020-01-01")) %>% as.numeric(units = "days"),
serial_case = (first_case - as.Date("2020-01-01")) %>% as.numeric(units = "days")
) %>%
filter(!is.na(serial_preprint) & !is.na(serial_case)) %>%
with(., cor.test(serial_preprint, serial_case, method = "spearman"))
# Figure 4: Publication outcomes
# Percentage of preprints published
# Percentage values
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
mutate(is_published = !is.na(published_doi)) %>%
with(., table(covid_preprint, is_published)) %>%
prop.table(1) * 100
# Chi-square test of association
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
mutate(is_published = !is.na(published_doi)) %>%
with(., table(covid_preprint, is_published)) %>%
chisq.test()
# Panel C: Publishing timeline
# Descriptive statistics (including control period Jan - Dec 2019)
preprints %>%
mutate(covid_preprint = case_when(
(covid_preprint == T & posted_date >= analysis_start & posted_date <= analysis_end) ~ "COVID-19 preprints",
(covid_preprint == F & posted_date >= analysis_start & posted_date <= analysis_end) ~ "non-COVID-19 preprints",
T ~ "preprints 2019")) %>%
group_by(covid_preprint) %>%
  filter(delay_in_days > 0) %>% # Filter out erroneous records whose journal publication date precedes the preprint posting date
summarise_at(.vars = c("delay_in_days"), .funs = funs(
median = median(., na.rm = TRUE),
IQR = IQR(., na.rm = TRUE),
mean = mean(., na.rm = TRUE),
sd = sd(., na.rm = TRUE),
ad_statistic = nortest::ad.test(.)$statistic,
ad_p = nortest::ad.test(.)$p.value
))
# Mann-Whitney, time to publishing (excluding control period Jan - Dec 2019)
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end,
delay_in_days > 0) %>%
with(., wilcox.test(delay_in_days~covid_preprint))
# Panel D: Time to publication for different publishers (top 10)
top_publishers <- preprints %>%
filter(covid_preprint == T,
posted_date >= analysis_start,
posted_date <= analysis_end) %>%
count(published_publisher) %>%
na.omit() %>%
top_n(10, n) %>%
pull(published_publisher)
# Two-way ANOVA, time to publishing ~ preprint type, publisher
anova_publishers <- preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end,
published_publisher %in% top_publishers,
delay_in_days > 0) %>%
assign_covid_preprint() %>%
with(., aov(delay_in_days ~ published_publisher*covid_preprint))
summary(anova_publishers)
# Posthoc contrasts for specific preprint type/publisher combinations
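# (Unlike the server model above, only within-publisher COVID vs. non-COVID contrasts are
# requested here, rather than all pairwise Tukey comparisons.)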
preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end,
published_publisher %in% top_publishers,
delay_in_days > 0) %>%
mutate(published_publisher = factor(published_publisher,
levels = c("American Association for the Advancement of Science (AAAS)",
"American Society for Microbiology",
"BMJ",
"Elsevier BV",
"Frontiers Media SA",
"MDPI AG",
"Oxford University Press (OUP)",
"Public Library of Science (PLoS)",
"Springer Science and Business Media LLC",
"Wiley"),
labels = c("AAAS", "ASM", "BMJ", "Elsevier", "Frontiers",
"MDPI", "OUP", "PLoS", "Springer", "Wiley")),
int_publisher_covid_preprint = interaction(published_publisher, covid_preprint)) %>%
with(., lm(delay_in_days ~ int_publisher_covid_preprint - 1)) %>%
multcomp::glht(., linfct = multcomp::mcp(int_publisher_covid_preprint = c(
"AAAS.FALSE - AAAS.TRUE = 0",
"ASM.FALSE - ASM.TRUE = 0",
"BMJ.FALSE - BMJ.TRUE = 0",
"Elsevier.FALSE - Elsevier.TRUE = 0",
"Frontiers.FALSE - Frontiers.TRUE = 0",
"MDPI.FALSE - MDPI.TRUE = 0",
"OUP.FALSE - OUP.TRUE = 0",
"PLoS.FALSE - PLoS.TRUE = 0",
"Springer.FALSE - Springer.TRUE = 0",
"Wiley.FALSE - Wiley.TRUE = 0"))) %>%
summary()
# Figure 5: Preprint access
# Panel A: Abstract views
# Prepare data
d <- preprint_usage %>%
inner_join(preprints, by = "doi") %>%
group_by(doi, posted_date, covid_preprint) %>%
summarize(
full_text_views = sum(full_text_views),
abstract_views = sum(abstract_views),
pdf_downloads = sum(pdf_downloads)
) %>%
ungroup() %>%
mutate(
posted_week = floor_date(posted_date, unit = "week", week_start = 1),
serial_date = (posted_date - as.Date("2020-01-01")) %>%
as.numeric(units = "days")
) %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end)
# Negative binomial regression, views ~ preprint type and posting date
views_nbmod <- d %>%
with(., MASS::glm.nb(abstract_views ~ covid_preprint * serial_date))
# Poisson regression, views ~ preprint type and posting date
views_poismod <- d %>%
with(., glm(abstract_views ~ covid_preprint * serial_date, family = "poisson"))
# Confirm negative binomial as better fitting model
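# (Lower AIC indicates the better fit; the negative binomial adds a dispersion parameter,
# relaxing the Poisson assumption that the variance equals the mean.)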
AIC(views_nbmod, views_poismod)
# Model summary
views_nbmod %>% summary()
views_nbmod %>%
coef() %>%
exp
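# exp(coef) is the multiplicative change in expected counts per additional day of posting date;
# raising the serial_date term to the 30th power compounds this over ~1 month, and 1 - (...)
# re-expresses it as a proportional change.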
1 - (views_nbmod %>% coef() %>% .[3] %>% exp() %>% .^30) # Calculate multiplicative rate of views for each subsequent month for non-COVID-19 preprints
1 - (views_nbmod %>% coef() %>% .[3:4] %>% sum %>% exp() %>% .^30) # Calculate multiplicative rate of views for each subsequent month for COVID-19 preprints
# Panel B: PDF downloads
# Negative binomial regression, downloads ~ preprint type and posting date
dloads_nbmod <- d %>%
with(., MASS::glm.nb(pdf_downloads ~ covid_preprint * serial_date))
# Poisson regression, downloads ~ preprint type and posting date
dloads_poismod <- d %>%
with(., glm(pdf_downloads ~ covid_preprint * serial_date, family = "poisson"))
# Confirm negative binomial as better fitting model
AIC(dloads_nbmod, dloads_poismod)
# Model summary
dloads_nbmod %>% summary()
dloads_nbmod %>%
coef() %>%
exp()
1 - (dloads_nbmod %>% coef() %>% .[3] %>% exp() %>% .^30) # Calculate multiplicative rate of downloads for each subsequent month for non-COVID-19 preprints
1 - (dloads_nbmod %>% coef() %>% .[3:4] %>% sum %>% exp() %>% .^30) # Calculate multiplicative rate of downloads for each subsequent month for COVID-19 preprints
# Figure 6: Preprint usage
# Prepare data
d <- preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end) %>%
inner_join(preprint_citations, by = "doi") %>%
inner_join(preprint_comments %>% rename(comments = comments_count), by = "doi") %>%
inner_join(preprint_altmetrics, by = "doi") %>%
mutate(
posted_week = floor_date(posted_date, unit = "week", week_start = 1),
serial_date = (posted_date - as.Date("2020-01-01")) %>%
as.numeric(units = "days")
)
# Panel A: Citations per preprint (COVID vs non-COVID)
# Calculate table of any vs none
d %>%
mutate(citationflag = ifelse(citations == 0, "0", "1")) %>%
with(., table(covid_preprint, citationflag)) %>%
prop.table(1) * 100
# Negative binomial regression, citations ~ preprint type and posting date
citations_nbmod <- d %>%
with(., MASS::glm.nb(citations ~ covid_preprint + serial_date))
# Poisson regression, citations ~ preprint type and posting date
citations_poismod <- d %>%
with(., glm(citations ~ covid_preprint + serial_date, family = "poisson"))
# Confirm negative binomial as better fitting model
AIC(citations_nbmod, citations_poismod)
# Model summary
citations_nbmod %>% summary()
citations_nbmod %>%
coef() %>%
exp()
1 - (citations_nbmod %>% coef() %>% .[3] %>% exp() %>% .^30) # Calculate multiplicative rate of citations for each subsequent month
# Panel B: Tweets per preprint (COVID vs non-COVID)
# Negative binomial regression, tweets ~ preprint type and posting date
tweets_nbmod <- d %>%
with(., MASS::glm.nb(twitter ~ covid_preprint + serial_date))
# Poisson regression, tweets ~ preprint type and posting date
tweets_poismod <- d %>%
with(., glm(twitter ~ covid_preprint + serial_date, family = "poisson"))
# Confirm negative binomial as better fitting model
AIC(tweets_nbmod, tweets_poismod)
# Model summary
tweets_nbmod %>% summary()
tweets_nbmod %>%
coef() %>%
exp()
1 - (tweets_nbmod %>% coef() %>% .[3] %>% exp() %>% .^30) # Calculate multiplicative rate of tweets for each subsequent month
# Panel C: News mentions per preprint (COVID vs non-COVID)
# Calculate table of any vs none
d %>%
mutate(newsflag = ifelse(news == 0, "0", "1")) %>%
with(., table(covid_preprint, newsflag)) %>%
prop.table(1) * 100
# Negative binomial regression, news ~ preprint type and posting date
news_nbmod <- d %>%
with(., MASS::glm.nb(news ~ covid_preprint + serial_date))
# Poisson regression, news ~ preprint type and posting date
news_poismod <- d %>%
with(., glm(news ~ covid_preprint + serial_date, family = "poisson"))
# Confirm negative binomial as better fitting model
AIC(news_nbmod, news_poismod)
# Model summary
news_nbmod %>% summary()
news_nbmod %>%
coef() %>%
exp()
1 - (news_nbmod %>% coef() %>% .[3] %>% exp() %>% .^7) # Calculate multiplicative rate of news mentions for each subsequent week (7-day exponent)
# Panel D: Blog mentions per preprint (COVID vs non-COVID)
# Negative binomial regression, blogs ~ preprint type and posting date
blogs_nbmod <- d %>%
with(., MASS::glm.nb(blogs ~ covid_preprint + serial_date))
# Poisson regression, blogs ~ preprint type and posting date
blogs_poismod <- d %>%
with(., glm(blogs ~ covid_preprint + serial_date, family = "poisson"))
# Confirm negative binomial as better fitting model
AIC(blogs_nbmod, blogs_poismod)
# Model summary
blogs_nbmod %>% summary()
blogs_nbmod %>%
coef() %>%
exp()
1 - (blogs_nbmod %>% coef() %>% .[3] %>% exp() %>% .^30) # Calculate multiplicative rate of blogs for each subsequent month
# Panel E: Wikipedia mentions per preprint (COVID vs non-COVID)
# Negative binomial regression, wikipedia ~ preprint type and posting date
wikipedia_nbmod <- d %>%
with(., MASS::glm.nb(wikipedia ~ covid_preprint + serial_date))
# Poisson regression, wikipedia ~ preprint type and posting date
wikipedia_poismod <- d %>%
with(., glm(wikipedia ~ covid_preprint + serial_date, family = "poisson"))
# Confirm negative binomial as better fitting model
AIC(wikipedia_nbmod, wikipedia_poismod)
# Model summary
wikipedia_nbmod %>% summary()
wikipedia_nbmod %>%
coef() %>%
exp()
1 - (wikipedia_nbmod %>% coef() %>% .[3] %>% exp() %>% .^30) # Calculate multiplicative rate of wikipedia mentions for each subsequent month
# Panel F: Count of comments per preprint (COVID vs non-COVID)
# Negative binomial regression, comments ~ preprint type and posting date
comments_nbmod <- d %>%
with(., MASS::glm.nb(comments ~ covid_preprint + serial_date))
# Poisson regression, comments ~ preprint type and posting date
comments_poismod <- d %>%
with(., glm(comments ~ covid_preprint + serial_date, family = "poisson"))
# Confirm negative binomial as better fitting model
AIC(comments_nbmod, comments_poismod)
# Model summary
comments_nbmod %>% summary()
comments_nbmod %>%
coef() %>%
exp()
1 - (comments_nbmod %>% coef() %>% .[3] %>% exp() %>% .^30) # Calculate multiplicative rate of comments for each subsequent month
# Supplementary Figure 1: Number of preprints in relation to previous epidemics (Zika, Ebola)
# Panel A
# Chi-square test of association
bind_rows(covid_counts, ebola_counts, zika_counts) %>%
spread(epidemic, value = n) %>%
column_to_rownames("epi_preprint") %>%
chisq.test()
# Supplementary Figure 3: Time prior to our study period
# Panel C: PDF downloads for additional preprint servers
# Two-way ANOVA, downloads ~ preprint type, server (including servers beyond bioRxiv, medRxiv)
all_server_dloads_aov <- bind_rows(preprints %>%
left_join(preprint_usage, by = c("doi", "source")) %>%
group_by(source, doi, posted_date, covid_preprint) %>%
summarize(pdf_downloads = sum(pdf_downloads)) %>%
ungroup() %>%
select(-doi) %>%
mutate(pdf_downloads = replace_na(pdf_downloads, 0)),
other_server_dloads %>%
mutate(posted_date = as.Date(posted_date, "%d/%m/%Y"))) %>%
filter(posted_date >= analysis_start & posted_date <= analysis_end)
anova_servers <- all_server_dloads_aov %>%
with(., aov(pdf_downloads ~ source * covid_preprint))
summary(anova_servers)
# Posthoc contrasts for specific preprint type/server combinations
all_server_dloads %>%
assign_covid_preprint() %>%
mutate(int_source_covid_preprint = interaction(source, covid_preprint)) %>%
with(., lm(pdf_downloads ~ int_source_covid_preprint - 1)) %>%
multcomp::glht(., linfct = multcomp::mcp(int_source_covid_preprint = "Tukey")) %>%
summary()
# Supplementary Model: Factors associated with word count
# Specify data (bioRxiv, complete word count)
d <- preprints %>%
filter(posted_date >= analysis_start,
posted_date <= analysis_end,
source == "biorxiv",
n_words != 0,
n_refs != 0,
!is.na(category),
!is.na(institution_match_country_name)) %>%
mutate(is_published = !is.na(published_doi),
serial_date = (posted_date - as.Date("2020-01-01")) %>%
as.numeric(units = "days"))
# # Use top 10 cats/top 15 countries only? - NOT USED
# top_cats <- d %>% count(category) %>% top_n(10, n) %>% pull(category)
# top_nats <- d %>% count(institution_match_country_name) %>% top_n(15, n) %>% pull(institution_match_country_name)
#
# #d <- d %>% filter(category %in% top_cats & institution_match_country_name %in% top_nats)
# Set baselines as most common categories
d <- d %>% mutate(institution_match_country_name = fct_relevel(institution_match_country_name, "United States", after = 0),
category = fct_relevel(category, "neuroscience", after = 0))
# Build mixed-effects regression: all bioRxiv preprints
library(lme4)
library(lmerTest)
words_mix <- lmer(n_words ~ (1|category) + (1|institution_match_country_name) + n_authors + is_published*serial_date + covid_preprint, data = d, REML = TRUE)
summary(words_mix)
confint(words_mix)
car::vif(words_mix)
# Calculate intraclass coefficients
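# (vcov holds the random-effect variances plus the residual variance, ordered as in the grp
# column of as.data.frame(VarCorr(.)); each ratio is that component's share of total variance.)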
vcov <- words_mix %>% VarCorr(comp=c("Variance", "Std.Dev")) %>% as.data.frame %>% pull(vcov)
vcov[1]/sum(vcov)
vcov[2]/sum(vcov)
# LRTs for random effects
anova(words_mix,
lmer(formula = update(formula(words_mix), ~ . - (1|category)), data = d))
anova(words_mix,
lmer(formula = update(formula(words_mix), ~ . - (1|institution_match_country_name)), data = d))
# Build mixed-effects regression: published bioRxiv preprints only
words_pub_mix <- lmer(n_words ~ (1|category) + (1|institution_match_country_name) + n_authors + serial_date + delay_in_days*covid_preprint, data = d %>% filter(delay_in_days > 0), REML = TRUE)
summary(words_pub_mix)
confint(words_pub_mix)
car::vif(words_pub_mix)
# Calculate intraclass coefficients
vcov <- words_pub_mix %>% VarCorr(comp=c("Variance", "Std.Dev")) %>% as.data.frame %>% pull(vcov)
vcov[1]/sum(vcov)
vcov[2]/sum(vcov)
# LRTs for random effects
anova(words_pub_mix,
lmer(formula = update(formula(words_pub_mix), ~ . - (1|category)), data = d %>% filter(delay_in_days > 0)))
anova(words_pub_mix,
lmer(formula = update(formula(words_pub_mix), ~ . - (1|institution_match_country_name)), data = d %>% filter(delay_in_days > 0)))
# ---- end of file: /statistical_analyses.R | repo: preprinting-a-pandemic/pandemic_preprints | license: permissive ----
#!/usr/bin/env Rscript
## transcript produced by Segtools 1.1.14
segtools.r.dirname <-
system2("python",
c("-c", "'import segtools; print segtools.get_r_dirname()'"),
stdout = TRUE)
source(file.path(segtools.r.dirname, 'common.R'))
source(file.path(segtools.r.dirname, 'overlap.R'))
save.overlap.performance('./K5seq23rand100bp1bp/overlap-ordered', 'overlap.performance', './K5seq23rand100bp1bp/overlap-ordered/overlap.tab', row.normalize = 'FALSE', mnemonic_file = './K5seq23rand100bp1bp/overlap-direct/mnemonics1_new.txt', clobber = FALSE, col_mnemonic_file = './K5seq23rand100bp1bp/overlap-direct/mnemonics2_new.txt')
save.overlap.heatmap('./K5seq23rand100bp1bp/overlap-ordered', 'overlap', './K5seq23rand100bp1bp/overlap-ordered/overlap.tab', clobber = FALSE, col_mnemonic_file = './K5seq23rand100bp1bp/overlap-direct/mnemonics2_new.txt', cluster = FALSE, max_contrast = FALSE, row.normalize = 'FALSE', mnemonic_file = './K5seq23rand100bp1bp/overlap-direct/mnemonics1_new.txt')
# ---- end of file: /notebook/stablemarriage/K5seq23rand100bp1bp/overlap-ordered/overlap.R | repo: xwinxu/sequencing-resolution-analysis | license: no_license ----
context("Test Structure")
test_that("correct structure", {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
expect_true(tibble::is.tibble(keyword_search(x = path, keyword = 'measurement error',
path = TRUE)))
})
test_that('surround_lines returns multiple lines', {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
expect_true(is.list(keyword_search(x = path, keyword = 'measurement',
path = TRUE,
ignore_case = TRUE, surround_lines = 1)$line_text))
})
test_that('directory search', {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
path <- gsub('/1610.00147.pdf', '', path)
expect_equal(length(table(keyword_directory(directory = path,
keyword = 'error', full_names = TRUE)$ID)), 2)
})
test_that('directory search max_search', {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
path <- gsub('/1610.00147.pdf', '', path)
expect_equal(length(table(keyword_directory(directory = path,
keyword = 'error', full_names = TRUE,
max_search = 1)$ID)), 1)
})
test_that("heading search", {
file <- system.file('pdf', '1501.00450.pdf', package = 'pdfsearch')
expect_true(tibble::is.tibble(heading_search(file,
headings = c('abstract', 'introduction'),
path = TRUE)))
})
test_that("heading search within keyword search", {
file <- system.file('pdf', '1501.00450.pdf', package = 'pdfsearch')
head_args <- list(x = file,
headings = c('INTRODUCTION', 'Motivation', 'RESULTS'),
path = TRUE)
key_res <- keyword_search(file,
keyword = c('repeated measures', 'mixed effects'),
path = TRUE, heading_search = TRUE,
heading_args = head_args)
expect_true('heading' %in% names(key_res))
})
test_that("heading search returns NA", {
file <- system.file('pdf', '1501.00450.pdf', package = 'pdfsearch')
head_args <- list(x = file,
headings = c('Motivation', 'RESULTS'),
path = TRUE)
key_res <- keyword_search(file,
keyword = c('repeated measures', 'mixed effects'),
path = TRUE, heading_search = TRUE,
heading_args = head_args)
expect_true(any(key_res$heading == 'NA'))
})
test_that('ignore_case functionality', {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
r_ignore_case <- keyword_search(x = path,
keyword = c('measurement error', 'R'),
ignore_case = c(TRUE, TRUE),
path = TRUE)
R_case <- keyword_search(x = path,
keyword = c('measurement error', 'R'),
ignore_case = c(FALSE, FALSE),
path = TRUE)
expect_false(isTRUE(all.equal(nrow(r_ignore_case),
nrow(R_case))))
})
test_that('Platform Specific Encoding', {
file <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
result <- keyword_search(file,
keyword = c('measurement', 'error'),
path = TRUE)
expect_false(any(grepl("\\n", result$line_text)))
})
# ---- end of file: /tests/testthat/test_structure.r | repo: JDixonCS/pdfsearch | license: no_license ----
context("Test Structure")
test_that("correct structure", {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
expect_true(tibble::is.tibble(keyword_search(x = path, keyword = 'measurement error',
path = TRUE)))
})
test_that('surround_lines returns multiple lines', {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
expect_true(is.list(keyword_search(x = path, keyword = 'measurement',
path = TRUE,
ignore_case = TRUE, surround_lines = 1)$line_text))
})
test_that('directory search', {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
path <- gsub('/1610.00147.pdf', '', path)
expect_equal(length(table(keyword_directory(directory = path,
keyword = 'error', full_names = TRUE)$ID)), 2)
})
test_that('directory search max_search', {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
path <- gsub('/1610.00147.pdf', '', path)
expect_equal(length(table(keyword_directory(directory = path,
keyword = 'error', full_names = TRUE,
max_search = 1)$ID)), 1)
})
test_that("heading search", {
file <- system.file('pdf', '1501.00450.pdf', package = 'pdfsearch')
expect_true(tibble::is.tibble(heading_search(file,
headings = c('abstract', 'introduction'),
path = TRUE)))
})
test_that("heading search within keyword search", {
file <- system.file('pdf', '1501.00450.pdf', package = 'pdfsearch')
head_args <- list(x = file,
headings = c('INTRODUCTION', 'Motivation', 'RESULTS'),
path = TRUE)
key_res <- keyword_search(file,
keyword = c('repeated measures', 'mixed effects'),
path = TRUE, heading_search = TRUE,
heading_args = head_args)
expect_true('heading' %in% names(key_res))
})
test_that("heading search returns NA", {
file <- system.file('pdf', '1501.00450.pdf', package = 'pdfsearch')
head_args <- list(x = file,
headings = c('Motivation', 'RESULTS'),
path = TRUE)
key_res <- keyword_search(file,
keyword = c('repeated measures', 'mixed effects'),
path = TRUE, heading_search = TRUE,
heading_args = head_args)
expect_true(any(key_res$heading == 'NA'))
})
test_that('ignore_case functionality', {
path <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
r_ignore_case <- keyword_search(x = path,
keyword = c('measurement error', 'R'),
ignore_case = c(TRUE, TRUE),
path = TRUE)
R_case <- keyword_search(x = path,
keyword = c('measurement error', 'R'),
ignore_case = c(FALSE, FALSE),
path = TRUE)
expect_false(isTRUE(all.equal(nrow(r_ignore_case),
nrow(R_case))))
})
test_that('Platform Specific Encoding', {
file <- system.file('pdf', '1610.00147.pdf', package = 'pdfsearch')
result <- keyword_search(file,
keyword = c('measurement', 'error'),
path = TRUE)
expect_false(any(grepl("\\n", result$line_text)))
})
|
#' @title
#' fastq_bam_bwa
#'
#' @param fqs1 list of fastq files, may be a file with just the fastqs, one in each line.
#' @param fqs2 list of fastq files, may be a file with just the fastqs, one in each line. mate 2
#'
#' @return returns a single merged bam file
#'
#' @details
#' If fqs2 is missing, automatically use single end
#'
#' @export
fastq_bam_bwa <- function(fqs1, fqs2,
outfile,
samplename = opts_flow$get("samplename")){
## --- all subsequent steps would use this samplename
check_args(ignore = c("outfile", "fqs2"))
opts_flow$set(samplename = samplename)
pipename = "fastq_bam_bwa"
message("Generating a ", pipename, " flowmat for sample: ", samplename)
## Calling modules, each returns
## - a vector of outfiles
## - a flowmat, which we need to rbind and are done !
out_bwa = bwa.backtrack(fqs1 = fqs1, fqs2 = fqs2)
out_rg = picard_rg(out_bwa$outfiles)
if(missing(outfile))
outfile = sprintf("%s.rg.sorted.bam", samplename) ## feel free to change this !
out_merge = picard_merge(out_rg$outfiles, mergedbam = outfile)
## merging three flowmats ---
flowmat = rbind(out_bwa$flowmat, out_rg$flowmat, out_merge$flowmat)
return(list(outfiles = outfile, flowmat = flowmat))
}
## ----------------------
if(FALSE){
## example
require(flowr)
load_opts(fetch_conf("fastq_bam_bwa.conf"))
## This fails, extension seems weird
flow_mat = fastq_bam_bwa(fqs1 = rep("hello.fq.gz", 10),
fqs2 = rep("hello.fq", 10),
samplename = "smp")
## This fails, length is not the same, for paired end
flow_mat = fastq_bam_bwa(fqs1 = rep("hello.fq", 10),
fqs2 = rep("hello.fq", 11),
samplename = "smp")
## this works
out = fastq_bam_bwa(fqs1 = rep("hello.fq", 10),
fqs2 = rep("hello.fq", 10),
samplename = "smp")
debug(bwa.backtrack)
out = fastq_bam_bwa(fqs1 = rep("hello.fq", 10),
samplename = "smp")
}
# ---- end of file: /pipelines/extra_pipes/fastq_bam_bwa.R | repo: flow-r/ultraseq | license: no_license ----
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cytoscape.R
\name{create_cytoscape_file}
\alias{create_cytoscape_file}
\title{Create an edge table file for Cytoscape}
\usage{
create_cytoscape_file(g)
}
\arguments{
\item{g}{A 'network_plot' object. See ?plot_network().}
}
\description{
The returned data frame can be saved as a .csv file. Then, in Cytoscape use
File -> Import -> Network -> File. Select the .csv file containing the data
frame generated by this function. There will be a popup window. The source,
interaction, and target columns should automatically be identified. Click OK.
}
\examples{
nw <- random_network(10)
g <- plot(nw)
nw_plot_cytoscape <- create_cytoscape_file(g)
\donttest{
# Save the edge table in a .csv file to be used in cytoscape.
write.table(nw_plot_cytoscape, file.path(tempdir(), "file_name.csv"),
sep = ",", row.names = FALSE, col.names = TRUE, quote = FALSE)
}
}
% ---- end of file: /man/create_cytoscape_file.Rd | repo: KFWins2022/SeqNet | license: no_license ----
## Analyse simulated response/reaction time data
library(brms)
library(ggplot2)
respRT <- read.csv("data/mixed_sim2.csv")
respRT$Cond <- factor(respRT$Cond)
respRT$Rsp <- factor(respRT$Rsp)
## Plot data
ggplot(respRT, aes(x=factor(Cond), y=RT, fill=factor(Rsp))) + geom_boxplot()
## Model 1: random intercept only
## Need save_all_pars = TRUE if we want to compute Bayes factors
m0 <- brm(Rsp ~ RT + Cond + (1|Sbj), data=respRT, family=bernoulli(),
save_all_pars = TRUE)
m0
plot(m0)
plot(marginal_effects(m0))
## Model 2: Uncorrelated random slopes
m1 <- brm(Rsp ~ RT + Cond + (1|Sbj) + (0+RT|Sbj) + (0+Cond|Sbj), data=respRT,
family=bernoulli(), save_all_pars = TRUE)
m1
plot(m1)
plot(marginal_effects(m1))
## Compute Bayes factor comparing the two models
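## (bayes_factor(m0, m1) estimates the evidence for the first model over the second via
## bridge sampling, hence save_all_pars = TRUE above; with log = TRUE, positive values favour m0.)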
bf01 <- bayes_factor(m0, m1, log=TRUE)
bf01
## Model 3: Correlated random slopes
m2 <- brm(Rsp ~ RT*Cond + (1|Sbj) + (0+RT+Cond|Sbj), data=respRT,
family=bernoulli(), save_all_pars = TRUE)
## diagnostics
plot(m2)
launch_shinystan(m2)
bf20 <- bayes_factor(m2, m0, log=TRUE)
# ---- end of file: /scripts/mixed_brms_2.R | repo: humburg/bayesian-data-analysis-in-r | license: permissive ----
figure.02.05 <- function() {
require(survey);
cat("\n\n#######################################");
cat("\n### figure.02.05() starts ...\n");
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
data(api);
cat("\n### str(apistrat)\n");
print( str(apistrat) );
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
DF.temp <- unique(apistrat[,c("stype","fpc","pw")]);
DF.temp[,"fpc_by_pw"] <- DF.temp[,"fpc"] / DF.temp[,"pw"];
cat("\n### unique(apistrat[,c('stype','fpc','pw')])\n");
print( DF.temp );
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
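    ### stratified design: strata = school type (stype); fpc supplies the population ###
    ### stratum sizes used by svydesign() for the finite population correction       ###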
designStrat <- svydesign(
data = apistrat,
strata = ~stype,
fpc = ~fpc,
id = ~1
);
cat("\n### designStrat\n");
print( designStrat );
cat("\n### str(designStrat)\n");
print( str(designStrat) );
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
results.temp <- svytotal(design = designStrat, x = ~enroll);
cat("\n### svytotal(design = designStrat, x = ~enroll)\n");
print( results.temp );
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
results.temp <- svymean(design = designStrat, x = ~enroll);
cat("\n### svymean(design = designStrat, x = ~enroll)\n");
print( results.temp );
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
results.temp <- svytotal(design = designStrat, x = ~stype);
cat("\n### svytotal(design = designStrat, x = ~stype)\n");
print( results.temp );
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
cat("\n### figure.02.05() quits ...");
cat("\n#######################################\n");
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
return(NULL);
}
# ---- end of file: /exercises/statistics/lumley-complex-surveys-analysis/02-simple-and-stratified-sampling/text/code/figure-02-05.R | repo: paradisepilot/statistics | license: no_license ----
library(ape)
for (i in 1:3000) {
write.tree(rcoal(10),paste0("/Users/pmckenz1/Desktop/projects/quartet_proj/random_trees/samp",i,".phy"))
}
for (i in 1:2000) {
system(paste0("seq-gen -mGTR -l484 -s .1 -n1 -z12345 < /Users/pmckenz1/Desktop/projects/quartet_proj/random_trees/samp",i,".tre > /Users/pmckenz1/Desktop/projects/quartet_proj/tree_seqs/test",i,".dat"))
}
for (i in 1:1000) {
system(paste0("seq-gen -mGTR -l10000 -n1 -z12345 < /Users/pmckenz1/Desktop/projects/quartet_proj/random_trees/samp",(i+2000),".phy > /Users/pmckenz1/Desktop/projects/quartet_proj/tree_seqs/test",(i+2000),".dat"))
}
getwd()
tree37 <- rtree(37)
plot(tree37)
write.tree(tree37, "/Users/pmckenz1/Desktop/projects/quartet_proj/random_trees/tree37.phy")
mytree <- read.tree("/Users/pmckenz1/Desktop/projects/quartet_proj/random_trees/tree37.phy")
cophenetic(mytree)["t2", "t4"]
|
/make_trees_seqs.R
|
no_license
|
pmckenz1/quartet_proj
|
R
| false | false | 860 |
r
|
#' Install Python packages
#'
#' Install Python dependencies into a virtual environment or Conda environment.
#'
#' @param gpu Boolean; install the GPU build of PyTorch? When `FALSE`, the
#'   CPU-only build ("pytorch-cpu") is requested instead.
#'
#' @param envname The name, or full path, of the environment in which Python
#'   packages are to be installed. When `NULL` (the default), the active
#'   environment as set by the `RETICULATE_PYTHON_ENV` variable will be used;
#'   if that is unset, then the `r-reticulate` environment will be used.
#'
#' @param method Installation method. By default, "auto" automatically finds a
#' method that will work in the local environment. Change the default to force
#' a specific installation method. Note that the "virtualenv" method is not
#' available on Windows.
#'
#' @param conda Path to conda executable (or "auto" to find conda using the
#' PATH and other conventional install locations).
#'
#' @param python_version The requested Python version. Ignored when attempting
#' to install with a Python virtual environment.
#'
#' @param pip Boolean; use `pip` for package installation? This is only relevant
#' when Conda environments are used, as otherwise packages will be installed
#' from the Conda repositories.
#'
#' @param ... Additional arguments passed to [conda_install()]
#' or [virtualenv_install()].
#'
#' @details On Linux and OS X the "virtualenv" method will be used by default
#' ("conda" will be used if virtualenv isn't available). On Windows, the
#' "conda" method is always used.
#'
#' @import reticulate
#' @export
install_mlr3tabnet <- function(gpu = TRUE,
                               envname = NULL,
                               method = "auto",
                               conda = "auto",
                               python_version = NULL,
                               pip = FALSE,
                               ...) {
  # Request the CPU-only PyTorch build when gpu = FALSE
  pytorch <- ifelse(gpu, "pytorch", "pytorch-cpu")
  reticulate::py_install(
    c("pytorch-tabnet", "fastai", pytorch),
    envname = envname,
    method = method,
    conda = conda,
    python_version = python_version,
    pip = pip,
    ...
  )
}
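# Illustrative usage (hypothetical values; assumes a working reticulate/conda setup):
# install_mlr3tabnet(gpu = FALSE, envname = "r-reticulate")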
|
/R/install.R
|
no_license
|
JackyP/mlr3tabnet
|
R
| false | false | 1,880 |
r
|
library(randomForest)
set.seed(1)
songs <- read.csv('training_data.csv', header = T)
training_indices <- sample(nrow(songs), size = 400, replace = FALSE)
qualitative_vars = list("label","key", "time_signature", "mode")
songs = setQualitative(songs, qualitative_vars)
songs.train = songs[training_indices,]
songs.test = songs[-training_indices,]
serendipityGrove <- function(train, test, f) {
B <- 100
rf.fit <- randomForest(f, data = train, ntree = B)
rf.pred <- predict(rf.fit, newdata = test)
return(rf.pred)
}
treeAnalysis = function(train, test, f) {
  error = numeric(500)
  for (i in 1:500) {
    B <- i
    rf.fit <- randomForest(f, data = train, ntree = B, importance = TRUE)
    rf.pred <- predict(rf.fit, newdata = test)
    error[i] = mean(rf.pred != test$label)  # test-set misclassification rate
  }
  print(importance(rf.fit))
  varImpPlot(rf.fit)
  return(which.min(error))  # number of trees giving the lowest test error
}
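# Illustrative calls (the formula is an assumption based on the columns above):
# serendipityGrove(songs.train, songs.test, label ~ .)
# treeAnalysis(songs.train, songs.test, label ~ .)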
|
/random_forest.R
|
no_license
|
kasanari/jamboppers
|
R
| false | false | 858 |
r
|
# Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# This script is to continue assessing censored data,
# determine which method to use for censoring, and make a table of summary stats
require(tidyverse)
require(magrittr)
require(NADA)
require(lubridate)
setwd("C:/R Projects/Similkameen-WQOs")
# Load RT data
RT_sites <- read.csv("data/report/RT_plots/tables/RT_sites.csv",
stringsAsFactors = FALSE, header=TRUE)
#remove all values <= 0 (zero or negative values throw an error in ROS)
RT_sites <- RT_sites %>% filter(Value>0)
#remove unnecessary columns
RT_sites <- dplyr::select(RT_sites, -c(Code, ResultLetter, Station))
# Summary Stats for table 1 - based upon uncensored data
table.1 <- RT_sites %>% group_by(EMS_ID, Variable) %>%
summarise(samples = length(DateTime),
start.date = min(DateTime),
end.date = max(DateTime),
mean=mean(Value),
median=median(Value),
sd=sd(Value),
min=min(Value),
max=max(Value),
quantile95=quantile(Value,probs=0.95, na.rm=TRUE),
quantile90=quantile(Value,probs=0.90, na.rm=TRUE),
quantile25=quantile(Value,probs=0.25, na.rm=TRUE),
quantile75=quantile(Value,probs=0.75, na.rm=TRUE),
IQR=IQR(Value, na.rm=TRUE),
unit=first(Units), n=n()) %>%
ungroup()
cens_data <- filter(RT_sites, CENSOR == TRUE)
table.2 <- cens_data %>% group_by(EMS_ID, Variable) %>%
summarise(N.ND = length(Value),
ND.Min = min(Value),
ND.Max = max(Value))
table.1 %<>% left_join(table.2, by = c("EMS_ID", "Variable"))
#write csv for table1
write.csv(table.1,'data/report/RT_plots/tables/Table1_censored.csv', row.names = FALSE)
# Determine non-detects by station to select method.
# Count samples that are D vs ND
method_selection <- RT_sites %>%
group_by(EMS_ID, Variable) %>%
summarise(N = length(Value),
N.ND = length(Value[CENSOR == TRUE])) %>%
ungroup() %>%
mutate(PROP.ND = N.ND / N,
METHOD = ifelse(PROP.ND == 1, "0.5 MDL",
ifelse(PROP.ND > 0 & N - N.ND < 3, "0.5 MDL - MEAN",
ifelse(PROP.ND > 0, "ROS", "MEAN"))))
# Join to data set
RT_sites %<>% left_join(select(method_selection, EMS_ID, Variable,
METHOD))
# Create Table (Breakdown by method)
method_summary <- method_selection %>%
group_by(METHOD, Variable) %>%
summarise(STN.COUNT = length(unique(EMS_ID)),
SMP.COUNT = sum(N))
#write csv for method summary
write.csv(method_summary,'data/report/RT_plots/tables/MethodSummary.csv', row.names = FALSE)
# Calculate Station Means
# GROUP 1 - STATIONS WHERE ALL DATA IS CENSORED, PROPORTION OF ND=1; mean is min MDL=0.00005
# I added the extra quantile steps even though it provides no information for g1_data
# added so that it can be combined with final data summary with g2,g3,g4 data
g1_data <- RT_sites %>% filter(METHOD == "0.5 MDL") %>%
group_by(EMS_ID, Variable) %>%
summarise(r_mean = signif(min(Value / 2), 3), #takes 1/2 of minimum MDL
r_med = signif(median(Value, na.rm = TRUE), 3),
r_95 = signif(quantile(Value, prob=0.95, na.rm = TRUE), 3),
r_90 = signif(quantile(Value, prob=0.90, na.rm = TRUE), 3),
r_10 = signif(quantile(Value, prob=0.10, na.rm = TRUE), 3),
r_25 = signif(quantile(Value, prob=0.25, na.rm = TRUE), 3),
r_75 = signif(quantile(Value, prob=0.75, na.rm = TRUE), 3),
r_min = signif(min(Value, na.rm = TRUE), 3), #indicates this value is half lowest MDL
r_max = signif(max(Value, na.rm = TRUE), 3),
r_n = length(Value)) %>%
ungroup() %>%
mutate(CENSOR.METHOD = "0.5 MDL")
# GROUP 2 - Stations with some non-detects but fewer than 3 detected values:
# substitute half the detection limit and calculate the arithmetic mean
g2_data <- RT_sites %>% filter(METHOD == "0.5 MDL - MEAN") %>%
mutate(CENS.VAL = ifelse(CENSOR == TRUE, signif(Value / 2, 3),
Value)) %>%
group_by(EMS_ID, Variable) %>%
  summarise(r_mean = signif(mean(CENS.VAL), 3),
r_med = signif(median(CENS.VAL, na.rm = TRUE), 3),
r_95 = signif(quantile(CENS.VAL, prob=0.95, na.rm = TRUE), 3),
r_90 = signif(quantile(CENS.VAL, prob=0.90, na.rm = TRUE), 3),
r_10 = signif(quantile(CENS.VAL, prob=0.10, na.rm = TRUE), 3),
r_25 = signif(quantile(CENS.VAL, prob=0.25, na.rm = TRUE), 3),
r_75 = signif(quantile(CENS.VAL, prob=0.75, na.rm = TRUE), 3),
r_min = signif(min(CENS.VAL, na.rm = TRUE), 3), #indicates this value is half lowest MDL
r_max = signif(max(CENS.VAL, na.rm = TRUE), 3),
r_n = length(CENS.VAL))%>%
ungroup() %>%
mutate(CENSOR.METHOD = "0.5 MDL - MEAN")
# GROUP 3 - STATIONS WHERE %ND is >0% and <100% AND D >= 3, calculate station
# means using ros in package NADA
g3_data <- RT_sites %>% filter(METHOD == "ROS")
# Create function for running ROS and returning the calculated mean in a data
# frame (do() requires that a data frame is returned)
calc_ros_mean <- function(result, censor) {
x_ros <- ros(result, censor)
r_mean <- signif(mean(x_ros, na.rm = TRUE), 3)
r_med <- signif(median(x_ros, na.rm = TRUE), 3)
r_95 <- signif(quantile(x_ros, prob=0.95, na.rm = TRUE), 3)
r_90 <- signif(quantile(x_ros, prob=0.90, na.rm = TRUE), 3)
r_10 <- signif(quantile(x_ros, prob=0.10, na.rm = TRUE), 3)
r_25 <- signif(quantile(x_ros, prob=0.25, na.rm = TRUE), 3)
r_75 <- signif(quantile(x_ros, prob=0.75, na.rm = TRUE), 3)
r_min <- signif(min(x_ros$modeled, na.rm = TRUE), 3)
r_max <- signif(max(x_ros$modeled, na.rm = TRUE), 3)
return(data.frame(r_mean, r_med, r_95, r_90, r_10, r_25, r_75, r_min, r_max))
}
#######################################################
# Run analysis
g3_data %<>%
group_by(EMS_ID, Variable) %>%
do(calc_ros_mean(.$Value, .$CENSOR)) %>%
ungroup() %>%
mutate(CENSOR.METHOD = "ROS")
#the n() was not working within the calc_ros_mean function, had to add it afterwards
g3_data %<>% left_join(select(method_selection, EMS_ID, Variable,
N))
names(g3_data)[names(g3_data)=="N"]<- "r_n" #renaming column to match g1,g2,g4_data
# GROUP 4 - Stations where ND = 0%
g4_data <- RT_sites %>% filter(METHOD == "MEAN") %>%
group_by(EMS_ID, Variable) %>%
  summarise(r_mean = signif(mean(Value), 3),
r_med = signif(median(Value, na.rm = TRUE), 3),
r_95 = signif(quantile(Value, prob=0.95, na.rm = TRUE), 3),
r_90 = signif(quantile(Value, prob=0.90, na.rm = TRUE), 3),
r_10 = signif(quantile(Value, prob=0.10, na.rm = TRUE), 3),
r_25 = signif(quantile(Value, prob=0.25, na.rm = TRUE), 3),
r_75 = signif(quantile(Value, prob=0.75, na.rm = TRUE), 3),
            r_min = signif(min(Value, na.rm = TRUE), 3),
r_max = signif(max(Value, na.rm = TRUE), 3),
r_n = length(Value)) %>%
ungroup() %>%
mutate(CENSOR.METHOD = "MEAN")
# COMBINE MEANS INTO TABLE & CALCULATE SUMMARY STATS
summary_stats <- bind_rows(g1_data, g2_data, g3_data, g4_data)
rm(g1_data, g2_data, g3_data, g4_data)
#Modify table if you want to add # of ND and units back in
summary_stats %<>% left_join(select(table.1, EMS_ID, Variable,
N.ND, unit)) %>%
  mutate(PROP.N = signif(r_n / N.ND, 3))
#write csv for summary stats
write.csv(summary_stats,'data/report/RT_plots/tables/SummaryStats.csv', row.names = FALSE)
####################################################################################################################
# make box plots from the summary stats
rm(list=ls(all=TRUE))
#set working drive to get summary stats spreadsheet
setwd("C:/R Projects/Similkameen-WQOs")
#load summary stats table
sumstats <- read.csv("data/report/RT_plots/tables/SummaryStats.csv",
stringsAsFactors = FALSE, header=TRUE)
## order sites from upstream to downstream
sumstats$EMS_ID <- factor(sumstats$EMS_ID,
levels=c( "E215956", "E215957", "E206638"))
## First set working directory to save plots to.
setwd('C:/R Projects/Similkameen-WQOs/data/report/RT_Plots/WQOs')
parameters <- as.character(unique(sumstats$Variable))
#run box plot loop
for (i in 1:length(parameters)){
  x <- parameters[i]
  p <- sumstats %>% filter(Variable == x)
  # build the plot explicitly so it can be passed to ggsave() below
  gg <- ggplot(p, aes(as.factor(EMS_ID))) +
    geom_boxplot(aes(
      lower = r_10,
      upper = r_90,
      middle = r_mean,
      ymin = r_min - r_min*0.5,
      ymax = r_max + r_max*0.5),
      stat = "identity") +
    #geom_hline(WQG-AL-ST, color="blue", size=1, linetype=2)+
    #geom_hline(WQG-AL-LT, color="darkblue", size=1, linetype=2)+
    #geom_hline(WQO-Old, color="black", size=1, linetype=2)+
    #geom_hline(WQG-2015, color="darkgrey", size=1, linetype="dotted")+
    xlab("EMS_ID") +
    ylab(paste0(x)) +
    theme_bw()
  ggsave(filename=paste0("C:/R Projects/Similkameen-WQOs/data/report/RT_Plots/",x,".tiff"), plot = gg, units="in", width=9, height=6, dpi=300, compression = 'lzw')
  print(paste0("Done figure ", x))
}
praise::praise()
|
/R/04eb_RT_CensoredStats.R
|
permissive
|
bcgov/Similkameen-WQOs
|
R
| false | false | 9,784 |
r
|
runKMeans <- function(measurescores, initial_centroids, max_iters=10) {
K <- nrow(initial_centroids);
numrows <- nrow(measurescores);
idx <- data.frame(matrix(nrow=numrows, ncol=1));
centroids = initial_centroids;
previous_centroids = centroids;
par(mfrow = c(2, 5));
par(cex = 0.6);
par(mar = c(0, 0, 0, 0), oma = c(4, 4, 0.5, 0.5));
par(mgp = c(2, 0.6, 0));
#par(xaxt='n', yaxt='n');
for(i in 1:max_iters){
print(paste("K-Means iteration", i, "/", max_iters, "..."));
cat("\n");
idx = findclosestcentroids(measurescores, centroids);
plotprogresskmeans(measurescores[, c(1,3)], centroids[,c(1,3)],
previous_centroids[,c(1,3)],idx,K, i);
previous_centroids = centroids;
centroids = computecentroids(measurescores, idx, K);
print("Centroids = ");
print(centroids);
cat("\n");
}
  clusterinfo <- list(idx=idx, centroids=centroids);
  return(clusterinfo);
}
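# Note: findclosestcentroids(), computecentroids() and plotprogresskmeans() are assumed to be
# defined elsewhere in this project (the standard K-means assignment, update and plotting steps).
# Illustrative call with 3 randomly chosen rows as initial centroids (hypothetical):
# runKMeans(measurescores, measurescores[sample(nrow(measurescores), 3), ], max_iters = 10)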
|
/group-practice-clustering/runKMeans.R
|
no_license
|
anjalibshah/health-data-science
|
R
| false | false | 948 |
r
|
library(shiny)
BMI <- function(weight,height) weight/(height^2)
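# e.g. BMI(70, 1.75) is roughly 22.9 (weight in kg, height in metres)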
y<<-0
shinyServer(
function(input, output) {
y <<- y + 1
#output$w <- renderPrint({input$weight})
#output$h <- renderPrint({input$height})
output$bmi <- renderPrint({BMI(input$weight,input$height)})
output$text <- renderText(y)
}
)
|
/server.R
|
no_license
|
frenchfries00/ShinyCourse
|
R
| false | false | 319 |
r
|
#' Tibble with the c6h6 (benzene) data for 1 station in the PATRENTO region
#'
#' @format A tibble with 8 columns and 731 observations
#'
#' @usage
#' c6h6
"c6h6"
|
/R/c6h6.R
|
permissive
|
progettopulvirus/patrento
|
R
| false | false | 152 |
r
|
samplesize = 300;
p =0.02;
defective=9;
compare = 10;
npo = samplesize*p;
sd = sqrt(npo*(1-p));
tol = 0.5;
pvalue = 1- pnorm(compare-tol, npo,sd );
cat("The pvalue is",pvalue)
|
/Introduction_To_Probability_And_Statistics_For_Engineers_And_Scientists_by_Sheldon_M._Ross/CH8/EX8.6.b/Ex8_6b.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false | false | 183 |
r
|
#helper function to figure out the name of the activated quest as it is saved in the steps
#list of the scenario quest
get_activated_quest_name = function(string = ""){
#The name of the quest is between square brackets - [quest name]
name = str_extract_all(string,"\\[(.*?)\\]")[[1]][1]
#removing the square brackets
name = substring(name, 2, nchar(name) - 1)
return(name)
}
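# Illustrative behaviour (the input format is assumed from the comment above):
# get_activated_quest_name("Quest [Deliver the letter] activated")   # -> "Deliver the letter"
# Note: str_extract_all() comes from the stringr package, which is assumed to be loaded elsewhere.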
|
/R/Preprocessing/Helpers/get_activated_quest_name.R
|
no_license
|
hejtmy/VR_City_Analysis
|
R
| false | false | 381 |
r
|
severities <- c(
"EMERG" = 0L,
"ALERT" = 1L,
"CRITICAL" = 2L,
"ERR" = 3L,
"WARNING" = 4L,
"NOTICE" = 5L,
"INFO" = 6L,
"DEBUG" = 7L
)
facilities <- c(
"KERN" = 0L,
"USER" = 1L,
"MAIL" = 2L,
"DAEMON" = 3L,
"AUTH" = 4L,
"SYSLOG" = 5L,
"LPR" = 6L,
"NEWS" = 7L,
"UUCP" = 8L,
"CRON" = 9L,
"AUTHPRIV" = 10L,
"FTP" = 11L,
"LOCAL0" = 16L,
"LOCAL1" = 17L,
"LOCAL2" = 18L,
"LOCAL3" = 19L,
"LOCAL4" = 20L,
"LOCAL5" = 21L,
"LOCAL6" = 22L,
"LOCAL7" = 23L
)
#' Send log message to syslog server
#'
#' Send log message to syslog server.
#'
#' @param message text message (string).
#' @param severity severity level (string).
#' @param facility log facility (string).
#' @param host machine that originally sends the message (string).
#' @param app_name application name that originally sends the message (string).
#' @param proc_id process id that originally sends the message (numeric).
#' @param server syslogd server hostname (string).
#' @param port syslogd server port (integer).
#'
#' @return Number of bytes written to socket.
#'
#' @examples
#' \dontrun{
#' syslog("log message", "INFO", app_name = 'program', server = 'logserver')
#' }
#' @export
syslog <- function(
message, severity = "NOTICE", facility = "USER",
host = Sys.info()[["nodename"]], app_name = Sys.info()[["user"]],
proc_id = Sys.getpid(),
server = "localhost", port = 601L)
{
sock <- utils::make.socket(server, port)
on.exit(utils::close.socket(sock))
str <- payload(message, severity, facility, host, app_name, proc_id)
nb <- utils::write.socket(sock, str)
return(nb)
}
payload <- function(message, severity, facility, host, app_name, proc_id)
{
fac <- facilities[[facility]]
sev <- severities[[severity]]
prival <- bitwOr(bitwShiftL(fac, 3L), sev)
priver <- paste0("<", prival, ">", "1")
tstamp <- "-"
msg_id <- "-"
struc_data <- "-"
paste(priver, tstamp, host, app_name, proc_id, msg_id, struc_data, message)
}
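# Worked example of the PRI/header construction above (values from the tables at the top of this file):
# facility USER = 1 and severity NOTICE = 5 give prival = bitwOr(bitwShiftL(1, 3), 5) = 13,
# so a message from host "h", app "a", pid 42 starts with: "<13>1 - h a 42 - - <message>"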
|
/R/syslog.R
|
no_license
|
cran/syslognet
|
R
| false | false | 2,045 |
r
|
# Make sure to install all dependencies (not needed if already done):
# install.packages("SqlRender")
# install.packages("DatabaseConnector")
# install.packages("ggplot2")
# install.packages("ParallelLogger")
# install.packages("readr")
# install.packages("tibble")
# install.packages("dplyr")
# install.packages("RJSONIO")
# install.packages("devtools")
# devtools::install_github("OHDSI/FeatureExtraction")
# devtools::install_github("OHDSI/ROhdsiWebApi")
# devtools::install_github("OHDSI/CohortDiagnostics")
# Load the package
library(Ohdsi2020StudyathonCohortDiagnosticsPCE)
package <- "Ohdsi2020StudyathonCohortDiagnosticsPCE"
path <- "s:/results"
# Optional: specify where the temporary files will be created:
options(andromedaTempFolder = file.path(path, "andromedaTemp"))
# Maximum number of cores to be used:
maxCores <- parallel::detectCores()
# Details for connecting to the server:
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = "pdw",
server = Sys.getenv("PDW_SERVER"),
user = NULL,
password = NULL,
port = Sys.getenv("PDW_PORT"))
# For Oracle: define a schema that can be used to emulate temp tables:
oracleTempSchema <- NULL
# Details specific to the database:
outputFolder <- "s:/results/ccae"
cdmDatabaseSchema <- "CDM_IBM_CCAE_V1247.dbo"
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "mschuemi_skeleton_ccae"
databaseId <- "CCAE"
databaseName <- "IBM MarketScan Commercial Claims and Encounters Database"
databaseDescription <- "IBM MarketScan® Commercial Claims and Encounters Database (CCAE) represent data from individuals enrolled in United States employer-sponsored insurance health plans. The data includes adjudicated health insurance claims (e.g. inpatient, outpatient, and outpatient pharmacy) as well as enrollment data from large employers and health plans who provide private healthcare coverage to employees, their spouses, and dependents. Additionally, it captures laboratory tests for a subset of the covered lives. This administrative claims database includes a variety of fee-for-service, preferred provider organizations, and capitated health plans."
# Use this to run the cohort diagnostics. The results will be stored in the diagnosticsExport
# subfolder of the outputFolder. This can be shared between sites.
Ohdsi2020StudyathonCohortDiagnosticsPCE::runCohortDiagnostics(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
oracleTempSchema = oracleTempSchema,
outputFolder = outputFolder,
databaseId = databaseId,
databaseName = databaseName,
databaseDescription = databaseDescription,
createCohorts = TRUE,
runInclusionStatistics = TRUE,
runIncludedSourceConcepts = TRUE,
runOrphanConcepts = TRUE,
runTimeDistributions = TRUE,
runBreakdownIndexEvents = TRUE,
runIncidenceRates = TRUE,
runCohortOverlap = TRUE,
runCohortCharacterization = TRUE,
runTemporalCohortCharacterization = TRUE,
packageName = package,
minCellCount = 5)
# To view the results: Optional: if there are results zip files from multiple sites in a folder, this
# merges them, which will speed up starting the viewer:
CohortDiagnostics::preMergeDiagnosticsFiles(file.path(outputFolder, "diagnosticsExport"))
# *******************************************************
# Viewing the results -------------------------------------------------------------------------------
# *******************************************************
# Use this to view the results. Multiple zip files can be in the same folder. If the files were
# pre-merged, this is automatically detected:
CohortDiagnostics::launchDiagnosticsExplorer(file.path(outputFolder, "diagnosticsExport"))
# *******************************************************
# Sharing the results -------------------------------------------------------------------------------
# *******************************************************
#
# Upload results to the OHDSI SFTP server:
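# (keyFileName and userName are placeholders: they are assumed to be set earlier in your session
#  to the path of your SFTP private key file and your OHDSI SFTP user name.)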
uploadResults(outputFolder, keyFileName, userName)
# Please send the study-coordinator an e-mail when done
|
/extras/CodeToRun.R
|
permissive
|
gowthamrao/Ohdsi2020StudyathonCohortDiagnosticsPCE
|
R
| false | false | 5,577 |
r
|
first_time <- function(path) {
generated <- dir(file.path(path, "man"), full.names = TRUE)
generated <- generated[!file.info(generated)$isdir]
namespace <- file.path(path, "NAMESPACE")
if (file.exists(namespace)) {
generated <- c(generated, namespace)
}
roxy <- vapply(generated, made_by_roxygen, logical(1))
all(!roxy)
}
made_by_roxygen <- function(path) {
if (!file.exists(path)) return(TRUE)
first <- read_lines(path, n = 1)
check_made_by(first)
}
add_made_by_roxygen <- function(path, comment) {
if (!file.exists(path)) stop("Can't find ", path, call. = FALSE)
lines <- read_lines(path)
if (check_made_by(lines[1])) return()
write_lines(c(made_by(comment), lines), path)
}
check_made_by <- function(first) {
if (length(first) == 0L) return(FALSE)
grepl("^. Generated by roxygen2", first)
}
made_by <- function(comment) {
paste0(comment, " Generated by roxygen2: do not edit by hand\n")
}
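# e.g. made_by("#") returns "# Generated by roxygen2: do not edit by hand\n",
# which is the header line that check_made_by() looks for above.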
update_roxygen_version <- function(base_path) {
desc_path <- file.path(base_path, "DESCRIPTION")
cur <- as.character(utils::packageVersion("roxygen2"))
prev <- desc::desc_get("RoxygenNote", file = desc_path)[[1]]
if (!is.na(cur) && !is.na(prev) && package_version(cur) < package_version(prev)) {
warning("Version of roxygen2 last used with this package is ", prev, ". ",
" You only have version ", cur, call. = FALSE)
} else if (!identical(cur, prev)) {
message("Updating roxygen version in ", desc_path)
desc::desc_set(RoxygenNote = cur, file = desc_path)
}
}
|
/R/safety.R
|
no_license
|
shabbybanks/roxygen
|
R
| false | false | 1,533 |
r
|
library(xml2)
### Name: xml_children
### Title: Navigate around the family tree.
### Aliases: xml_children xml_child xml_contents xml_parents xml_siblings
### xml_parent xml_length xml_root
### ** Examples
x <- read_xml("<foo> <bar><boo /></bar> <baz/> </foo>")
xml_children(x)
xml_children(xml_children(x))
xml_siblings(xml_children(x)[[1]])
# Note that each unique node only appears once in the output
xml_parent(xml_children(x))
# Mixed content
x <- read_xml("<foo> a <b/> c <d>e</d> f</foo>")
# Children gets the elements, contents gets all node types
xml_children(x)
xml_contents(x)
xml_length(x)
xml_length(x, only_elements = FALSE)
# xml_child makes it easier to select specific children
xml_child(x)
xml_child(x, 2)
xml_child(x, "baz")
|
/data/genthat_extracted_code/xml2/examples/xml_children.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 755 |
r
|